self.setglobal(__file__) self.runpy() class E36abmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f775w" self.form="abmag" self.setglobal(__file__) self.runpy() class E36stmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f775w" self.form="stmag" self.setglobal(__file__) self.runpy() class E36obmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f775w" self.form="obmag" self.setglobal(__file__) self.runpy() class E36counts(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f775w" self.form="counts" self.setglobal(__file__) self.runpy() class E37photlam(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f814w" self.form="photlam" self.setglobal(__file__) self.runpy() class E37flam(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f814w" self.form="flam" self.setglobal(__file__) self.runpy() class E37fnu(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f814w" self.form="fnu" self.setglobal(__file__) self.runpy() class E37vegamag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f814w" self.form="vegamag" self.setglobal(__file__) self.runpy() class E37abmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f814w" self.form="abmag" self.setglobal(__file__) self.runpy() class E37stmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f814w" self.form="stmag" self.setglobal(__file__) self.runpy() class E37obmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f814w" self.form="obmag" self.setglobal(__file__) self.runpy() class E37counts(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f814w" self.form="counts" self.setglobal(__file__) self.runpy() class E38photlam(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f850lp" self.form="photlam" self.setglobal(__file__) self.runpy() class E38flam(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f850lp" self.form="flam" self.setglobal(__file__) self.runpy() class E38fnu(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f850lp" self.form="fnu" self.setglobal(__file__) self.runpy() class E38vegamag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f850lp" self.form="vegamag" self.setglobal(__file__) self.runpy() class E38abmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f850lp" self.form="abmag" self.setglobal(__file__) self.runpy() class E38stmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f850lp" self.form="stmag" self.setglobal(__file__) self.runpy() class E38obmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f850lp" self.form="obmag" self.setglobal(__file__) self.runpy() class E38counts(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f850lp" self.form="counts" self.setglobal(__file__) self.runpy() class E39photlam(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f435w" self.form="photlam" 
self.setglobal(__file__) self.runpy() class E39flam(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f435w" self.form="flam" self.setglobal(__file__) self.runpy() class E39fnu(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f435w" self.form="fnu" self.setglobal(__file__) self.runpy() class E39vegamag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f435w" self.form="vegamag" self.setglobal(__file__) self.runpy() class E39abmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f435w" self.form="abmag" self.setglobal(__file__) self.runpy() class E39stmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f435w" self.form="stmag" self.setglobal(__file__) self.runpy() class E39obmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f435w" self.form="obmag" self.setglobal(__file__) self.runpy() class E39counts(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f435w" self.form="counts" self.setglobal(__file__) self.runpy() class E40photlam(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f475w" self.form="photlam" self.setglobal(__file__) self.runpy() class E40flam(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f475w" self.form="flam" self.setglobal(__file__) self.runpy() class E40fnu(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f475w" self.form="fnu" self.setglobal(__file__) self.runpy() class E40vegamag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f475w" self.form="vegamag" self.setglobal(__file__) self.runpy() class E40abmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f475w" self.form="abmag" self.setglobal(__file__) self.runpy() class E40stmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f475w" self.form="stmag" self.setglobal(__file__) self.runpy() class E40obmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f475w" self.form="obmag" self.setglobal(__file__) self.runpy() class E40counts(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f475w" self.form="counts" self.setglobal(__file__) self.runpy() class E41photlam(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f555w" self.form="photlam" self.setglobal(__file__) self.runpy() class E41flam(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f555w" self.form="flam" self.setglobal(__file__) self.runpy() class E41fnu(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f555w" self.form="fnu" self.setglobal(__file__) self.runpy() class E41vegamag(basecase.effstimCase): def setUp(self): 
self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f555w" self.form="vegamag" self.setglobal(__file__) self.runpy() class E41abmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f555w" self.form="abmag" self.setglobal(__file__) self.runpy() class E41stmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f555w" self.form="stmag" self.setglobal(__file__) self.runpy() class E41obmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f555w" self.form="obmag" self.setglobal(__file__) self.runpy() class E41counts(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f555w" self.form="counts" self.setglobal(__file__) self.runpy() class E42photlam(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f606w" self.form="photlam" self.setglobal(__file__) self.runpy() class E42flam(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f606w" self.form="flam" self.setglobal(__file__) self.runpy() class E42fnu(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f606w" self.form="fnu" self.setglobal(__file__) self.runpy() class E42vegamag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f606w" self.form="vegamag" self.setglobal(__file__) self.runpy() class E42abmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f606w" self.form="abmag" self.setglobal(__file__) self.runpy() class E42stmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f606w" self.form="stmag" self.setglobal(__file__) self.runpy() class E42obmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f606w" self.form="obmag" self.setglobal(__file__) self.runpy() class E42counts(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f606w" self.form="counts" self.setglobal(__file__) self.runpy() class E43photlam(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f775w" self.form="photlam" self.setglobal(__file__) self.runpy() class E43flam(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f775w" self.form="flam" self.setglobal(__file__) self.runpy() class E43fnu(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f775w" self.form="fnu" self.setglobal(__file__) self.runpy() class E43vegamag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f775w" self.form="vegamag" self.setglobal(__file__) self.runpy() class E43abmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f775w" self.form="abmag" self.setglobal(__file__) self.runpy() class E43stmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f775w" 
self.form="stmag" self.setglobal(__file__) self.runpy() class E43obmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f775w" self.form="obmag" self.setglobal(__file__) self.runpy() class E43counts(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f775w" self.form="counts" self.setglobal(__file__) self.runpy() class E44photlam(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f814w" self.form="photlam" self.setglobal(__file__) self.runpy() class E44flam(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f814w" self.form="flam" self.setglobal(__file__) self.runpy() class E44fnu(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f814w" self.form="fnu" self.setglobal(__file__) self.runpy() class E44vegamag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f814w" self.form="vegamag" self.setglobal(__file__) self.runpy() class E44abmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f814w" self.form="abmag" self.setglobal(__file__) self.runpy() class E44stmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f814w" self.form="stmag" self.setglobal(__file__) self.runpy() class E44obmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f814w" self.form="obmag" self.setglobal(__file__) self.runpy() class E44counts(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f814w" self.form="counts" self.setglobal(__file__) self.runpy() class E45photlam(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f850lp" self.form="photlam" self.setglobal(__file__) self.runpy() class E45flam(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f850lp" self.form="flam" self.setglobal(__file__) self.runpy() class E45fnu(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f850lp" self.form="fnu" self.setglobal(__file__) self.runpy() class E45vegamag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f850lp" self.form="vegamag" self.setglobal(__file__) self.runpy() class E45abmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f850lp" self.form="abmag" self.setglobal(__file__) self.runpy() class E45stmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f850lp" self.form="stmag" self.setglobal(__file__) self.runpy() class E45obmag(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f850lp" self.form="obmag" self.setglobal(__file__) self.runpy() class E45counts(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f850lp" self.form="counts" self.setglobal(__file__) self.runpy() class 
E46photlam(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f606w" self.form="photlam" self.setglobal(__file__) self.runpy() class E46flam(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f606w" self.form="flam" self.setglobal(__file__) self.runpy() class E46fnu(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f606w" self.form="fnu" self.setglobal(__file__) self.runpy() class E46vegamag(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f606w" self.form="vegamag" self.setglobal(__file__) self.runpy() class E46abmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f606w" self.form="abmag" self.setglobal(__file__) self.runpy() class E46stmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f606w" self.form="stmag" self.setglobal(__file__) self.runpy() class E46obmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f606w" self.form="obmag" self.setglobal(__file__) self.runpy() class E46counts(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f606w" self.form="counts" self.setglobal(__file__) self.runpy() class E47photlam(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f775w" self.form="photlam" self.setglobal(__file__) self.runpy() class E47flam(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f775w" self.form="flam" self.setglobal(__file__) self.runpy() class E47fnu(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f775w" self.form="fnu" self.setglobal(__file__) self.runpy() class E47vegamag(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f775w" self.form="vegamag" self.setglobal(__file__) self.runpy() class E47abmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f775w" self.form="abmag" self.setglobal(__file__) self.runpy() class E47stmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f775w" self.form="stmag" self.setglobal(__file__) self.runpy() class E47obmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f775w" self.form="obmag" self.setglobal(__file__) self.runpy() class E47counts(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f775w" self.form="counts" self.setglobal(__file__) self.runpy() class E48photlam(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f814w" self.form="photlam" self.setglobal(__file__) self.runpy() class E48flam(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f814w" self.form="flam" self.setglobal(__file__) self.runpy() class E48fnu(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f814w" self.form="fnu" self.setglobal(__file__) self.runpy() class E48vegamag(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f814w" self.form="vegamag" self.setglobal(__file__) self.runpy() class E48abmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f814w" self.form="abmag" self.setglobal(__file__) self.runpy() class E48stmag(basecase.effstimCase): def 
setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f814w" self.form="stmag" self.setglobal(__file__) self.runpy() class E48obmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f814w" self.form="obmag" self.setglobal(__file__) self.runpy() class E48counts(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f814w" self.form="counts" self.setglobal(__file__) self.runpy() class E49photlam(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f850lp" self.form="photlam" self.setglobal(__file__) self.runpy() class E49flam(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f850lp" self.form="flam" self.setglobal(__file__) self.runpy() class E49fnu(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f850lp" self.form="fnu" self.setglobal(__file__) self.runpy() class E49vegamag(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f850lp" self.form="vegamag" self.setglobal(__file__) self.runpy() class E49abmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f850lp" self.form="abmag" self.setglobal(__file__) self.runpy() class E49stmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f850lp" self.form="stmag" self.setglobal(__file__) self.runpy() class E49obmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f850lp" self.form="obmag" self.setglobal(__file__) self.runpy() class E49counts(basecase.effstimCase): def setUp(self): self.spectrum="bb(2000) " self.obsmode="acs,wfc1,f850lp" self.form="counts" self.setglobal(__file__) self.runpy() class E50photlam(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f606w" self.form="photlam" self.setglobal(__file__) self.runpy() class E50flam(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f606w" self.form="flam" self.setglobal(__file__) self.runpy() class E50fnu(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f606w" self.form="fnu" self.setglobal(__file__) self.runpy() class E50vegamag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f606w" self.form="vegamag" self.setglobal(__file__) self.runpy() class E50abmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f606w" self.form="abmag" self.setglobal(__file__) self.runpy() class E50stmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f606w" self.form="stmag" self.setglobal(__file__) self.runpy() class E50obmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f606w" self.form="obmag" self.setglobal(__file__) self.runpy() class E50counts(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f606w" self.form="counts" self.setglobal(__file__) self.runpy() class E51photlam(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f775w" self.form="photlam" self.setglobal(__file__) self.runpy() class E51flam(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f775w" self.form="flam" self.setglobal(__file__) self.runpy() class E51fnu(basecase.effstimCase): def setUp(self): 
self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f775w" self.form="fnu" self.setglobal(__file__) self.runpy() class E51vegamag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f775w" self.form="vegamag" self.setglobal(__file__) self.runpy() class E51abmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f775w" self.form="abmag" self.setglobal(__file__) self.runpy() class E51stmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f775w" self.form="stmag" self.setglobal(__file__) self.runpy() class E51obmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f775w" self.form="obmag" self.setglobal(__file__) self.runpy() class E51counts(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f775w" self.form="counts" self.setglobal(__file__) self.runpy() class E52photlam(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f814w" self.form="photlam" self.setglobal(__file__) self.runpy() class E52flam(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f814w" self.form="flam" self.setglobal(__file__) self.runpy() class E52fnu(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f814w" self.form="fnu" self.setglobal(__file__) self.runpy() class E52vegamag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f814w" self.form="vegamag" self.setglobal(__file__) self.runpy() class E52abmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f814w" self.form="abmag" self.setglobal(__file__) self.runpy() class E52stmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f814w" self.form="stmag" self.setglobal(__file__) self.runpy() class E52obmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f814w" self.form="obmag" self.setglobal(__file__) self.runpy() class E52counts(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f814w" self.form="counts" self.setglobal(__file__) self.runpy() class E53photlam(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f850lp" self.form="photlam" self.setglobal(__file__) self.runpy() class E53flam(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f850lp" self.form="flam" self.setglobal(__file__) self.runpy() class E53fnu(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f850lp" self.form="fnu" self.setglobal(__file__) self.runpy() class E53vegamag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f850lp" self.form="vegamag" self.setglobal(__file__) self.runpy() class E53abmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f850lp" self.form="abmag" self.setglobal(__file__) self.runpy() class E53stmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f850lp" self.form="stmag" self.setglobal(__file__) self.runpy() class E53obmag(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " self.obsmode="acs,wfc1,f850lp" self.form="obmag" self.setglobal(__file__) self.runpy() class E53counts(basecase.effstimCase): def setUp(self): self.spectrum="bb(3000) " 
self.obsmode="acs,wfc1,f850lp" self.form="counts" self.setglobal(__file__) self.runpy() class E54photlam(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f435w" self.form="photlam" self.setglobal(__file__) self.runpy() class E54flam(basecase.effstimCase): def setUp(self): self.spectrum="crcalspec$alpha_lyr_stis_003.fits " self.obsmode="acs,wfc1,f435w" self.form="flam" self.setglobal(__file__) self.runpy() class
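# The effstim test classes above all follow one pattern: pick a source
# spectrum (self.spectrum), an ACS/WFC1 observation mode (self.obsmode), and
# an output unit system (self.form), then let the effstimCase machinery
# compute and compare the effective stimulus. A minimal sketch of the
# calculation a case such as E36abmag exercises, assuming pysynphot and its
# CDBS reference data are available, might look like this:

import pysynphot as S

spectrum = S.BlackBody(3000)                    # "bb(3000)"
bandpass = S.ObsBandpass("acs,wfc1,f775w")      # self.obsmode
observation = S.Observation(spectrum, bandpass)
print(observation.effstim("abmag"))             # self.form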
import torch from torch.autograd import Variable import numpy as np import util import classifier from util import cal_macc from lib import generate_syn_feature from binary_classifier import BINARY_CLASSIFIER from knn_classifier import KNNClassifier import os from datetime import datetime import pickle import numpy as np class Evaluate(): def __init__(self, netE, netG, netDec, netF, data, opt, model_file, exp_name, clf_epoch=5, alpha=1.0, siamese=False, netS=None): netG.eval() netDec.eval() netF.eval() netE.eval() self.netE = netE.cuda() self.netG = netG.cuda() self.netDec = netDec.cuda() if opt.feedback_loop == 1: self.netF = None else: self.netF = netF.cuda() self.data = data self.opt = opt self.model_file = model_file self.exp_name = exp_name self.epoch = clf_epoch if opt.concat_hy: self.cls_netDec = self.netDec else: self.cls_netDec = None self.alpha = alpha self.siamese = siamese self.netS = netS def conditional_sample(self, x, y, deterministic=False): # x is feature vector # y is attribute vector with torch.no_grad(): if not self.opt.survae: means, log_var = self.netE(x, y) if deterministic: z = means else: z = torch.normal(means, torch.exp(0.5 * log_var)) else: z, _ = self.netE(x, y) zv = Variable(z) yv = Variable(y) x_gen = self.netG(zv, c=yv) if self.netF is not None: _ = self.netDec(x_gen) dec_hidden_feat = self.netDec.getLayersOutDet() # no detach layers feedback_out = self.netF(dec_hidden_feat) x_gen = self.netG(zv, a1=self.opt.a2, c=yv, feedback_layers=feedback_out) return x_gen def generate_syn_feature_cf(self, x, classes, deterministic=False): attribute = self.data.attribute nclass = classes.size(0) opt = self.opt num = opt.syn_num syn_feature = torch.zeros(nclass * num, opt.resSize).cuda() syn_label = torch.zeros(nclass*num).long().cuda() syn_att = torch.zeros(num, opt.attSize).float().cuda() syn_noise = torch.zeros(num, opt.nz).float().cuda() with torch.no_grad(): for i in range(nclass): iclass = classes[i] iclass_att = attribute[iclass] if not self.opt.survae: means, log_var = self.netE(x.unsqueeze(0), iclass_att.unsqueeze(0)) means = means.expand(num, -1) log_var = log_var.expand(num, -1) syn_att.copy_(iclass_att.repeat(num, 1)) if deterministic: syn_noise = means else: syn_noise = torch.normal(means, torch.exp(0.5 * log_var)) else: syn_noise, _ = self.netE(x.unsqueeze(0), iclass_att.unsqueeze(0)) syn_noise = syn_noise.expand(num, -1) syn_noisev = Variable(syn_noise) syn_attv = Variable(syn_att) fake = self.netG(syn_noisev, c=syn_attv) if self.netF is not None: dec_out = self.netDec(fake) # only to call the forward function of decoder dec_hidden_feat = self.netDec.getLayersOutDet() # no detach layers feedback_out = self.netF(dec_hidden_feat) fake = self.netG(syn_noisev, a1=opt.a2, c=syn_attv, feedback_layers=feedback_out) output = fake syn_feature.narrow(0, i*num, num).copy_(output.data) syn_label.narrow(0, i*num, num).fill_(iclass) return syn_feature, syn_label def zsl(self, softmax_clf, cf, deterministic=False): opt = self.opt data = self.data if not cf: with torch.no_grad(): gen_x, gen_l = generate_syn_feature(self.netG, self.data.unseenclasses, self.data.attribute, opt.syn_num, netF=self.netF, netDec=self.netDec, opt=opt) if softmax_clf: zsl_cls = classifier.CLASSIFIER(gen_x, util.map_label(gen_l, data.unseenclasses), \ data, data.unseenclasses.size(0), opt.cuda, opt.classifier_lr, 0.5, self.epoch, opt.syn_num, \ generalized=False, netDec=self.cls_netDec, dec_size=opt.attSize, dec_hidden_size=4096) acc = zsl_cls.acc else: zsl_cls = KNNClassifier(gen_x, gen_l, 
data.test_unseen_feature, self.cls_netDec, dec_size=opt.attSize, dec_hidden_size=4096, batch_size=100) preds = zsl_cls.fit() truths = data.test_unseen_label.cpu().numpy() acc = cal_macc(truth=truths, pred=preds) else: preds = [] truths = [] test_x = data.test_unseen_feature mapped_unseen_l = util.map_label(data.test_unseen_label, data.unseenclasses) unseen_label_np = data.test_unseen_label.cpu().numpy() # for i in range(501): for i in range(test_x.shape[0]): gen_x, gen_l = self.generate_syn_feature_cf(test_x[i], data.unseenclasses, deterministic=deterministic) gen_l = util.map_label(gen_l, data.unseenclasses) if softmax_clf: clf = classifier.CLASSIFIER(gen_x, gen_l, data, data.unseenclasses.size(0), opt.cuda, opt.classifier_lr, 0.5, self.epoch, opt.syn_num, generalized=False, netDec=self.cls_netDec, dec_size=opt.attSize, dec_hidden_size=4096, x=test_x[i]) pred = clf.pred truths.append(mapped_unseen_l[i]) preds.append(pred) else: clf = KNNClassifier(gen_x, gen_l, test_x[i].unsqueeze(0), self.cls_netDec, dec_size=opt.attSize, dec_hidden_size=4096, batch_size=100) pred = clf.fit()[0] preds.append(pred) truths.append(unseen_label_np[i]) if (i + 1) % 500 == 0: print("%dth acc: %.3f" % (i + 1, cal_macc(truth=truths, pred=preds))) if self.opt.sanity: break # Sanity check acc = cal_macc(truth=truths, pred=preds) return acc def two_stage(self, use_mask, use_tde, seen_mask=None, unseen_mask=None, save_clf=False): opt = self.opt data = self.data # Unseen: if unseen_mask is None: save_file = "out/%s-unseen.pickle" % use_mask with open(save_file, 'rb') as handle: clf_results = pickle.load(handle) preds = clf_results["preds"] mask = [pred in self.data.unseenclasses for pred in preds] mask = torch.from_numpy(np.array(mask).astype(int)) else: mask = unseen_mask with torch.no_grad(): gen_x, gen_l = generate_syn_feature(self.netG, self.data.unseenclasses, self.data.attribute, opt.u_num, netF=self.netF, netDec=self.netDec, opt=opt) if not save_clf or self.zsl_cls is None: zsl_cls = classifier.CLASSIFIER(gen_x, util.map_label(gen_l, data.unseenclasses), \ data, data.unseenclasses.size(0), opt.cuda, opt.u_lr, opt.u_beta, opt.u_epoch, opt.u_batch_size, \ generalized=False, netDec=self.cls_netDec, dec_size=opt.attSize, dec_hidden_size=4096, mask=mask, use_tde=False, alpha=self.alpha) u_acc = zsl_cls.acc else: zsl_cls = self.zsl_cls u_acc = zsl_cls.val(zsl_cls.test_unseen_feature, zsl_cls.test_unseen_label, zsl_cls.unseenclasses, mask) if save_clf: self.zsl_cls = zsl_cls # Seen: if seen_mask is None: save_file = "out/%s-seen.pickle" % use_mask with open(save_file, 'rb') as handle: clf_results = pickle.load(handle) preds = clf_results["preds"] mask = [pred in self.data.seenclasses for pred in preds] mask = torch.from_numpy(np.array(mask).astype(int)) else: mask = seen_mask if not opt.adjust_s: zsl_cls = classifier.CLASSIFIER(data.train_feature, util.map_label(data.train_label, data.seenclasses), \ data, data.seenclasses.size(0), opt.cuda, opt.classifier_lr, 0.5, 15, opt.syn_num, \ generalized=False, netDec=self.cls_netDec, dec_size=opt.attSize, dec_hidden_size=4096, mask=mask, zsl_on_seen=True, use_tde=use_tde, alpha=self.alpha) else: zsl_cls = classifier.CLASSIFIER(data.train_feature, util.map_label(data.train_label, data.seenclasses), \ data, data.seenclasses.size(0), opt.cuda, opt.s_lr, opt.s_beta, opt.s_epoch, opt.s_batch_size, \ generalized=False, netDec=self.cls_netDec, dec_size=opt.attSize, dec_hidden_size=4096, mask=mask, zsl_on_seen=True, use_tde=use_tde, alpha=self.alpha) s_acc = zsl_cls.acc h_acc = 2 * 
u_acc * s_acc / (u_acc + s_acc) if opt.log_two_stage: out_dir = "results/two_stage/%s/%s" % (opt.dataset, self.exp_name) if not os.path.exists(out_dir): os.makedirs(out_dir) mask_list = use_mask.split('/') mask_name = "%s_%s" % (mask_list[0], mask_list[1]) out_file = os.path.join(out_dir, "%s.txt" % mask_name) config_msg = "u_num:%d u_lr:%.4f u_beta:%.2f u_epoch:%d u_batch_size:%d" % (opt.u_num, opt.u_lr, opt.u_beta, opt.u_epoch, opt.u_batch_size) if self.opt.adjust_s: config_msg += " s_lr:%.4f s_beta:%.2f s_epoch:%d s_bs:%d" % (opt.s_lr, opt.s_beta, opt.s_epoch, opt.s_batch_size) log_msg = "%s---S:%.3f U:%.3f H:%.3f\n" % (config_msg, s_acc, u_acc, h_acc) with open(out_file, "a") as f: f.write(log_msg) return s_acc, u_acc, h_acc def gzsl(self, use_train, softmax_clf, cf, deterministic=False, additional_train=False, use_tde=False, binary=False): opt = self.opt data = self.data if self.siamese: clf = SiameseClassifier(data, opt, self.netE, self.netG, self.netF, self.cls_netDec, dec_size=opt.attSize, cf=cf, n_epochs=opt.clf_epoch, distance="l1") if self.netS is None: clf.train() else: clf.network = self.netS s_acc, u_acc = clf.validate(gzsl=True) if not cf: with torch.no_grad(): gen_x, gen_l = generate_syn_feature(self.netG, self.data.unseenclasses, self.data.attribute, opt.syn_num, netF=self.netF, netDec=self.netDec, opt=opt) d, l = generate_syn_feature(self.netG, self.data.unseenclasses, self.data.attribute, 500, netF=self.netF, netDec=self.netDec, opt=opt) np.save("./gcmcf_feat.npy", d.data.cpu().numpy()) np.save("./gcmcf_label.npy", l.data.cpu().numpy()) print(d.data.cpu().numpy().shape, l.data.cpu().numpy().shape) from scipy.io import savemat print(gen_x.cpu().detach().numpy().shape, gen_l.cpu().detach().numpy().shape, data.train_feature.cpu().detach().numpy().shape, data.train_label.cpu().detach().numpy().shape, data.test_unseen_feature.cpu().detach().numpy().shape, data.test_unseen_label.cpu().detach().numpy().shape, data.test_seen_feature.cpu().detach().numpy().shape, data.test_seen_label.cpu().detach().numpy().shape) mydata = {"train_unseen_data": gen_x.cpu().detach().numpy(), "train_unseen_label": gen_l.cpu().detach().numpy(), "train_seen_data": data.train_feature.cpu().detach().numpy(), "train_seen_label": data.train_label.cpu().detach().numpy(), "test_unseen_data": data.test_unseen_feature.cpu().detach().numpy(), "test_unseen_label": data.test_unseen_label.cpu().detach().numpy(), "test_seen_data": data.test_seen_feature.cpu().detach().numpy(), "test_seen_label": data.test_seen_label.cpu().detach().numpy()} savemat("gcmcf_data.mat", mydata) print("gcmcf_data.mat is saved!") if use_train: train_x = torch.cat((data.train_feature, gen_x), 0) train_y = torch.cat((data.train_label, gen_l), 0) else: with torch.no_grad(): gen_s_x, gen_s_l = generate_syn_feature(self.netG, self.data.seenclasses, self.data.attribute, opt.syn_num, netF=self.netF, netDec=self.netDec, opt=opt) train_x = torch.cat((gen_s_x, gen_x), 0) train_y = torch.cat((gen_s_l, gen_l), 0) if softmax_clf: if not binary: gzsl_cls = classifier.CLASSIFIER(train_x, train_y, \ data, data.allclasses.size(0), opt.cuda, opt.classifier_lr, 0.5, self.epoch, opt.syn_num, generalized=True, netDec=self.cls_netDec, dec_size=opt.attSize, dec_hidden_size=4096, use_tde=use_tde, alpha=self.alpha) self.test_logits = gzsl_cls.all_outputs else: gzsl_cls = BINARY_CLASSIFIER(train_x, train_y, data, 2, True, opt.classifier_lr, 0.5, self.epoch, opt.syn_num, netDec=self.cls_netDec, dec_size=opt.attSize, dec_hidden_size=4096, use_tde=use_tde, 
alpha=self.alpha) s_acc = gzsl_cls.acc_seen u_acc = gzsl_cls.acc_unseen h_acc = gzsl_cls.H self.s_bacc = gzsl_cls.s_bacc self.u_bacc = gzsl_cls.u_bacc if not binary: clf_results = { "preds": gzsl_cls.pred_s.cpu().numpy() } save_file = self.get_save_result_file("seen") if self.log_to_file and not binary: with open(save_file, 'wb') as handle: pickle.dump(clf_results, handle) if not binary: clf_results = { "preds": gzsl_cls.pred_u.cpu().numpy() } save_file = self.get_save_result_file("unseen") if self.log_to_file and not binary: with open(save_file, 'wb') as handle: pickle.dump(clf_results, handle) else: u_cls = KNNClassifier(train_x, train_y, data.test_unseen_feature, self.cls_netDec, dec_size=opt.attSize, dec_hidden_size=4096, batch_size=100) preds = u_cls.fit() truths = data.test_unseen_label.cpu().numpy() u_acc = cal_macc(truth=truths, pred=preds) s_cls = KNNClassifier(train_x, train_y, data.test_seen_feature, self.cls_netDec, dec_size=opt.attSize, dec_hidden_size=4096, batch_size=100) preds = s_cls.fit() truths = data.test_seen_label.cpu().numpy() s_acc = cal_macc(truth=truths, pred=preds) h_acc = 2 * u_acc * s_acc / (u_acc + s_acc) else: self.test_logits = None def cf_gzsl(test_x, test_l, split): preds = [] truths = [] test_l_np = test_l.cpu().numpy() test_l_binary = np.array([y in data.unseenclasses for y in test_l]) if additional_train: gen_sx, gen_sl = generate_syn_feature(self.netG, self.data.seenclasses, self.data.attribute, 100, netF=self.netF, netDec=self.netDec, opt=opt) #gen_sx = self.conditional_sample(data.train_feature, data.attribute[data.train_label], deterministic=False) #gen_sx2 = self.conditional_sample(data.train_feature, data.attribute[data.train_label], deterministic=False) #gen_sx3 = self.conditional_sample(data.train_feature, data.attribute[data.train_label], deterministic=False) #gen_sx = torch.cat((gen_sx, gen_sx2, gen_sx3), 0) #gen_sl = torch.cat((data.train_label.cuda(), data.train_label.cuda(), data.train_label.cuda()), 0) # for i in range(501): for i in range(test_x.shape[0]): gen_x, gen_l = self.generate_syn_feature_cf(test_x[i], data.unseenclasses, deterministic=deterministic) if use_train: #if additional_train: # train_x = torch.cat((gen_sx, gen_x), 0) # train_y = torch.cat((gen_sl, gen_l), 0) #else: train_x = torch.cat((data.train_feature, gen_x), 0) train_y = torch.cat((data.train_label.cuda(), gen_l), 0) else: gen_s_x, gen_s_l = self.generate_syn_feature_cf(test_x[i], data.seenclasses, deterministic=deterministic) train_x = torch.cat((gen_s_x, gen_x), 0) train_y = torch.cat((gen_s_l, gen_l), 0) if additional_train: train_x = torch.cat((train_x, gen_sx), 0) train_y = torch.cat((train_y, gen_sl.cuda()), 0) if softmax_clf: if not binary: clf = classifier.CLASSIFIER(train_x, train_y, data, self.opt.nclass_all, opt.cuda, opt.classifier_lr, opt.beta1,\ self.epoch, opt.syn_num, generalized=True, netDec=self.cls_netDec, dec_size=opt.attSize, dec_hidden_size=4096, x=test_x[i], use_tde=use_tde, alpha=self.alpha) if self.test_logits is None: self.test_logits = clf.logits else: self.test_logits = np.concatenate((self.test_logits, clf.logits), axis=0) else: clf = BINARY_CLASSIFIER(train_x, train_y, data, 2, True, opt.classifier_lr, 0.5, self.epoch, opt.syn_num, netDec=self.cls_netDec, dec_size=opt.attSize, dec_hidden_size=4096, use_tde=use_tde, alpha=self.alpha, x=test_x[i]) pred = clf.pred truths.append(test_l_np[i]) preds.append(pred.item()) else: clf = KNNClassifier(train_x,
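# conditional_sample() and generate_syn_feature_cf() above both draw the
# latent code from the encoder posterior with the reparameterization
# std = exp(0.5 * log_var), z ~ N(mean, std), falling back to z = mean in
# the deterministic case. A minimal, self-contained sketch of that sampling
# step (the batch and latent sizes below are illustrative assumptions, not
# values taken from opt):

import torch

def sample_latent(means, log_var, deterministic=False):
    """Draw z from N(means, exp(0.5 * log_var)), or return the mean."""
    if deterministic:
        return means
    std = torch.exp(0.5 * log_var)
    return torch.normal(means, std)

z = sample_latent(torch.zeros(4, 312), torch.zeros(4, 312))
print(z.shape)  # torch.Size([4, 312])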
#!/usr/bin/env python3

"""
lang.py

Type: module
Description: a parser for a file type that makes it easier to implement
             various languages in a game
Classes:
    - LangNode
    - LangEval
Functions:
    - load(path, encoding, as_dict)
    - loads(s, encoding, as_dict, file)

Lang syntax
===========

The file can contain attributes or sets. Attributes contain a string,
while sets can contain more attributes or other sets.

Sets
----

To declare a set, use a dollar sign '$' for every level of the set,
followed by the name, without spaces. The file itself is a set of level 0.

    $set_of_level_1
    $$set_of_level_2

Note that a higher-level set must be contained in one with a lower level.
So this:

    $set_of_level_1
    $$$set_of_level_3

causes an error, because a set of level 3 can be contained only in a set
of level 2.

To close a set, put a dollar sign '$' for every level of the set that you
want to close, followed by an exclamation mark '!'. If a new set with a
lower or equal level is encountered, the current set is closed
automatically.

    $set_of_level_1
    $$set_of_level_2_under_set_1
    $$another_set_of_level_2_under_set_1
    $!

Here all the sets are closed automatically (including the explicitly
closed one, if it hadn't already been closed), so explicitly closing a set
is rarely needed.

Attributes
----------

To declare an attribute, put an at sign '@' followed by the name of the
attribute.

    @attribute1

Any line that follows, unless it's an instruction, is added to the value
of the attribute.

    @attribute1
    Line 1

    attribute1 = 'Line 1\\n'

To give the attribute a value on the same line, use a colon ':'.

    @attribute1:Line 1

    attribute1 = 'Line 1'

Doing this, the value can't span more than one line and no newline is
appended to it.

References
----------

A reference gives a new attribute the value of another one. There are two
types of references: relative (local) references and absolute references.

Absolute references are declared with a tilde followed by an at sign '~@'
and are resolved relative to the file itself. The attribute name and the
referenced value are separated by a colon ':'. Inside the reference, use
dots '.' to access members of a set; both sets and their attributes can be
accessed.

    ~@attribute_name:set.other_set.attribute

Relative references are very similar; they are declared with '.~@' and are
resolved relative to the set where they are declared. This means you can
access any attribute or child set inside the current set.

    .~@attribute_name:other_set.attribute

Comments
--------

Comments can only be at the start of a line (excluding indentation) and
are marked with '::'.

    $set_1
    @attr_set1
    :: This is a comment and won't be added to the value of the attribute

Encoding
--------

You can specify the encoding of the file by preceding it with '%='. This
should be on the very first line of the file, because the file is reloaded
entirely if the current encoding is not correct.

Escapes
-------

If you don't want a newline character to be added at the end of a line,
add an ampersand '&' at its start. To escape instructions, use \\ at the
start of the line; this keeps every character after it, including newline
characters, white space, $, @, ~@, .~@, &, ::, %=, and itself (\\).
"""

import re as _re
from typing import Union as _Union

from .stack import Stack as _Stack
from .exceptions import (
    LangError as _LangError,
    LangEncodingError as _LangEncodingError
)

# With re.ASCII \w matches only [a-zA-Z0-9_]
name_expr = _re.compile(r"[a-zA-Z_]\w*", _re.ASCII)


class LangNode:
    """
    LangNode

    Type: class
    Description: a container for sets and attributes
    Methods:
        - empty()
        - get()

    All attributes and sets (sets are other LangNode objects) are set as
    attributes of the object itself.
    Ex:
        lang.s.attr  # this gets the attribute 'attr' from the set 's'
    """

    def empty(self) -> None:
        """Removes all the attributes and sets the LangNode contains"""
        self.__dict__.clear()

    def get(self, s: str) -> str:
        """
        get(self, s)

        Type: method
        Description: returns an attribute or set of the node
        Args:
            's' (str): the attribute to get
        Return type: str

        In case the attribute doesn't exist inside the LangNode, it
        returns 's'.
        Usage: in the string put the chain of attributes to access,
        separated by dots.
        Ex:
            lang.get('s.attr')  # this gets the attribute 'attr' from the
                                # set 's' without an error
        This can also be done by indexing the object:
            lang['s.attr']  # exactly like lang.get('s.attr')
        """
        try:
            return eval(f"self.{s}")
        except Exception:
            return s

    def __getitem__(self, idx):
        return self.get(idx)

    def __repr__(self):
        return f"LangNode({self.__dict__})"


class LangEval:
    """
    LangEval

    Type: class
    Description: a lang reference that has not been evaluated yet
    Methods:
        - get_value(lang_obj)
    """

    def __init__(self, branches, local, l_no, file):
        self.branches = branches
        self.local = local
        self.l_no = l_no
        self.file = file
        self.added_value = ""

    def get_value(self, lang_obj: LangNode) -> str:
        """
        get_value(self, lang_obj)

        Type: method
        Description: evaluates the reference
        Args:
            'lang_obj' (LangNode): the node where the attribute resides
        """
        c_obj = lang_obj
        for i, v in enumerate(self.branches):
            try:
                if isinstance(c_obj, dict):
                    c_obj = c_obj[v]
                else:
                    c_obj = getattr(c_obj, v)
            except (AttributeError, KeyError):
                raise _LangError(
                    self.l_no,
                    f"the value '{'.'.join(self.branches)}' is not valid",
                    self.file
                )
            if i == len(self.branches) - 1:
                return c_obj + self.added_value

    def __add__(self, other):
        self.added_value += str(other)
        return self

    def __repr__(self):
        return f"LangEval( at '{'.'.join(self.branches)}' )"


def _make_lang_obj(d, root_node=None) -> LangNode:
    # Recursively turns a parsed dictionary into a LangNode tree,
    # evaluating references along the way.
    if root_node is None:
        root_node = this_node = LangNode()
    else:
        this_node = LangNode()
    for i in d:
        v = d[i]
        if isinstance(v, dict):
            setattr(this_node, i, _make_lang_obj(v, root_node))
        elif isinstance(v, LangEval):
            if v.local:
                setattr(this_node, i, v.get_value(this_node))
            else:
                setattr(this_node, i, v.get_value(root_node))
        elif isinstance(v, str):
            setattr(this_node, i, v)
    return this_node


def _check_name(s, l_no, file) -> None:
    match = name_expr.match(s)
    if match is None or match[0] != s:
        raise _LangError(l_no, f"name '{s}' is not valid", file)


def _make_lang_dict(s, encoding, file) -> dict:
    if isinstance(s, bytes) or isinstance(s, bytearray):
        s = s.decode(encoding)
    lines = s.split("\n")
    root = {}
    dict_stack = _Stack(root)
    attr = ""
    for l_no, l in enumerate(lines):
        if l.strip() == "":
            continue
        l = l.lstrip()
        if not l:
            continue
        if l[:2] == "%=":
            # '%=' encoding directive
            name = l[2:]
            if name.lower() != encoding.lower():
                raise _LangEncodingError(f"encoding must be '{name.lower()}'")
            attr = ""
            continue
        elif l[:2] == "::":
            # '::' comment line
            continue
        elif l[:1] == "$":
            # '$' set declaration (or '$...!' set closing)
            dict_count = 0
            while l[:1] == "$":
                l = l[1:]
                dict_count += 1
            if dict_count > len(dict_stack):
                raise _LangError(
                    l_no, "accessing child set with no parent", file
                )
            while dict_count < len(dict_stack):
                dict_stack.pop()
            if l == "!":
                continue
            _check_name(l, l_no, file)
            dict_stack.peek()[l] = {}
            dict_stack.push(dict_stack.peek()[l])
            attr = ""
            continue
        elif l[:1] == "@":
            # '@' attribute declaration
            l = l[1:]
            colon_idx = l.find(":")
            if colon_idx != -1:
                name = l[:colon_idx]
                _check_name(name, l_no, file)
                dict_stack.peek()[name] = l[colon_idx + 1:]
                attr = ""
            else:
                _check_name(l, l_no, file)
                attr = l
            continue
        elif l[:2] == "~@" or l[:3] == ".~@":
            # '~@' absolute reference, '.~@' relative reference
            local = l[:3] == ".~@"
            if local:
                l = l[3:]
            else:
                l = l[2:]
            try:
                name, val, *others = l.split(":")
            except ValueError:
                raise _LangError(l_no, "expected ':'", file)
            if others:
                raise _LangError(l_no, "invalid syntax", file)
            _check_name(name, l_no, file)
            dict_stack.peek()[name] = LangEval(val.split("."), local, l_no, file)
            attr = ""
            continue
        # Plain text: appended to the value of the current attribute.
        if l[:1] == "&":
            l = l[1:]
        else:
            if l[:1] == "\\":
                l = l[1:]
            l += "\n"
        if attr:
            try:
                dict_stack.peek()[attr] += l
            except KeyError:
                dict_stack.peek()[attr] = l
        else:
            raise _LangError(l_no, "text with no attribute", file)
    return root


def load(path: str, encoding: str = "utf-8", as_dict: bool = False) -> _Union[LangNode, dict]:
    """
    load(path, encoding='utf-8', as_dict=False)

    Type: function
    Description: opens and parses a lang file
    Args:
        'path' (str): the path of the file
        'encoding' (str): the encoding to use when opening the file,
            defaults to utf-8
        'as_dict' (bool): if the function should return a dictionary
            instead of a LangNode object
    Return type: dict | LangNode
    """
    try:
        with open(path, encoding=encoding) as f:
            return loads(f.read(), encoding, as_dict, path)
    except _LangEncodingError as e:
        # The error message is "encoding must be '<name>'": extract <name>
        # and reload the whole file with the requested encoding.
        new_encoding = e.args[0].split("'")[1]
        with open(path, encoding=new_encoding) as f:
            return loads(f.read(), new_encoding, as_dict, path)


def loads(s: str, encoding: str = "utf-8", as_dict: bool = False, file: str = "<string>") -> _Union[LangNode, dict]:
    """
    loads(s, encoding='utf-8', as_dict=False, file='<string>')

    Type: function
    Description: parses a lang string
    Args:
        's' (str): the string to parse
        'encoding' (str): the encoding to use when opening the file,
            defaults to utf-8
        'as_dict' (bool): if the function should return a dictionary
            instead of a LangNode object
        'file' (str):
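# A short usage sketch for the format documented in the module docstring
# above. The sample text and the package name `lang` are assumptions for
# illustration; the syntax rules and the load()/loads() signatures come
# from this module.

import lang  # hypothetical import name for this module

SAMPLE = """\
$menu
@title:My Game
@greeting
Hello,
adventurer!
.~@title_again:title
"""

strings = lang.loads(SAMPLE)
print(strings.menu.title)        # My Game
print(strings["menu.greeting"])  # Hello,\nadventurer!\n (two lines)
print(strings.menu.title_again)  # My Game (resolved relative reference)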
) # # # def sigmoid(self, x): return 1 / (1+numpy.exp(-x)) # # def compute_hidden_states(self): # every time it is called, # it computes the new hidden states of the LSTM # it gets the last event in the sequence # which is generated at t_(rec(t)) # and compute its hidden states # Note : for this event, we get its type # and time elapsed since last event # that is to say, this func is different than # rnn_unit in models # THERE : event, time_since_this_event_to_next # so first update, and then decay # HERE : time_since_last_event, event # so first decay, and then update # Note : this should be called # after one event is generated and appended # so the state is updated accordingly #TODO: decay cell_t_after_decay = self.cell_target + ( self.cell_t - self.cell_target ) * numpy.exp( -self.cell_decay * self.one_seq[-1][ 'time_since_last_event' ] ) hidden_t_after_decay = self.gate_output * numpy.tanh( cell_t_after_decay ) #TODO: update emb_event_t = self.Emb_event[ self.one_seq[-1]['type_event'], : ] post_transform = numpy.dot( numpy.concatenate( (emb_event_t, hidden_t_after_decay), axis = 0 ), self.W_recur ) + self.b_recur # gate_input = self.sigmoid( post_transform[:self.dim_model] ) gate_forget = self.sigmoid( post_transform[self.dim_model:2*self.dim_model] ) gate_output = self.sigmoid( post_transform[2*self.dim_model:3*self.dim_model] ) gate_pre_c = numpy.tanh( post_transform[3*self.dim_model:4*self.dim_model] ) # 2 -- input_bar and forget_bar gates gate_input_target = self.sigmoid( post_transform[4*self.dim_model:5*self.dim_model] ) gate_forget_target = self.sigmoid( post_transform[5*self.dim_model:6*self.dim_model] ) # cell memory decay cell_decay = self.soft_relu( post_transform[6*self.dim_model:] ) # cell_t = gate_forget * cell_t_after_decay + gate_input * gate_pre_c cell_target = gate_forget_target * self.cell_target + gate_input_target * gate_pre_c # self.cell_t = numpy.copy(cell_t) self.cell_target = numpy.copy(cell_target) self.cell_decay = numpy.copy(cell_decay) self.gate_output = numpy.copy(gate_output) # # # # def compute_intensity_given_past(self, time_current): # compute the intensity of current time # given the past events time_recent = self.one_seq[-1]['time_since_start'] # cell_t_after_decay = self.cell_target + ( self.cell_t - self.cell_target ) * numpy.exp( -self.cell_decay * ( time_current - time_recent ) ) hidden_t_after_decay = self.gate_output * numpy.tanh( cell_t_after_decay ) # self.intensity_tilde = numpy.dot( hidden_t_after_decay, self.W_alpha ) self.intensity = self.soft_relu_scale( self.intensity_tilde ) # intensity computation is finished # # # def compute_intensity_upper_bound(self, time_current): # compute the upper bound of intensity # at the current time # Note : this is very tricky !!! # in decomposable process, finding upper bound is easy # see B.3 in NIPS paper # but in neural model # it is not a combo of POSITIVE decreasing funcs # So how to do this? 
# we find the functon is a sum of temrs # some terms are decreasing, we keep them # some terms are increasing, we get their upper-limit # # In detail, we compose it to 4 parts : # (dc = c-c_target) # w + dc - increasing # w + dc + decreasing # w - dc - decreasing # w - dc + increasing # time_recent = self.one_seq[-1]['time_since_start'] # cell_gap = self.cell_t - self.cell_target cell_gap_matrix = numpy.outer( cell_gap, numpy.ones( (self.dim_process, ), dtype=dtype ) ) # dim * dim_process index_increasing_0 = (cell_gap_matrix > 0.0) & (self.W_alpha < 0.0) index_increasing_1 = (cell_gap_matrix < 0.0) & (self.W_alpha > 0.0) # cell_gap_matrix[ index_increasing_0 ] = numpy.float32(0.0) cell_gap_matrix[ index_increasing_1 ] = numpy.float32(0.0) # cell_t_after_decay = numpy.outer( self.cell_target, numpy.ones( (self.dim_process, ), dtype=dtype ) ) + cell_gap_matrix * numpy.exp( -numpy.outer( self.cell_decay, numpy.ones( (self.dim_process, ), dtype=dtype ) ) * ( time_current - time_recent ) ) hidden_t_after_decay = numpy.outer( self.gate_output, numpy.ones( (self.dim_process, ), dtype=dtype ) ) * numpy.tanh(cell_t_after_decay) # self.intensity_tilde_ub = numpy.sum( hidden_t_after_decay * self.W_alpha, axis=0 ) self.intensity_ub = self.soft_relu_scale( self.intensity_tilde_ub ) # # intensity computation is finished # # def sample_time_given_type(self, type_event): # type_event is the type of event for which we want to sample the time # it is the little k in our model formulation in paper time_current = numpy.float32(0.0) if len(self.one_seq) > 0: time_current = self.one_seq[-1]['time_since_start'] # #self.compute_intensity(time_current) self.compute_intensity_upper_bound(time_current) intensity_hazard = numpy.copy( self.intensity_ub[type_event] ) # u = 1.5 while u >= 1.0: #print("type is : ", type_event) E = numpy.random.exponential( scale=1.0, size=None ) U = numpy.random.uniform( low=0.0, high=1.0, size=None ) #print("E U time_current : ") #print(E, U, time_current) #print("intensity hazard is : ") #print(intensity_hazard) time_current += (E / intensity_hazard) self.compute_intensity_given_past(time_current) u = U * intensity_hazard / self.intensity[type_event] #print("new time_current and u : ") #print(time_current, u) #print("intensity and upper bound is : ") #print(self.intensity) #print(self.intensity_ub) # use adaptive thinning algorithm # that is, decreasing the upper bound # to make the sampling quicker # use adaptive method by # toggling on the following block ''' self.compute_intensity_upper_bound( time_current ) intensity_hazard = numpy.copy( self.intensity_ub[type_event] ) ''' return time_current # # # def sample_time_for_all_type(self): # type_event is the type of event for which we want to sample the time # it is the little k in our model formulation in paper time_current = numpy.float32(0.0) if len(self.one_seq) > 0: time_current = self.one_seq[-1]['time_since_start'] # #self.compute_intensity(time_current) self.compute_intensity_upper_bound(time_current) intensity_hazard = numpy.sum(self.intensity_ub) # u = 1.5 while u >= 1.0: #print("type is : ", type_event) E = numpy.random.exponential( scale=1.0, size=None ) U = numpy.random.uniform( low=0.0, high=1.0, size=None ) #print("E U time_current : ") #print(E, U, time_current) #print("intensity hazard is : ") #print(intensity_hazard) time_current += (E / intensity_hazard) self.compute_intensity_given_past(time_current) u = U * intensity_hazard / numpy.sum(self.intensity) #print("new time_current and u : ") #print(time_current, u) 
#print("intensity and upper bound is : ") #print(self.intensity) #print(self.intensity_ub) # use adaptive thinning algorithm # that is, decreasing the upper bound # to make the sampling quicker # use adaptive method by # toggling on the following block ''' self.compute_intensity_upper_bound( time_current ) intensity_hazard = numpy.sum(self.intensity_ub) ''' return time_current # # # def sample_one_event_sep(self): time_of_happen = numpy.zeros( (self.dim_process,), dtype=dtype ) for type_event in range(self.dim_process): # sample one event using "thinning algorithm" time_of_happen[type_event] = numpy.copy( self.sample_time_given_type( type_event ) ) # time_since_start_new = numpy.min(time_of_happen) type_event_new = numpy.argmin(time_of_happen) return time_since_start_new, type_event_new # # def sample_one_event_tog(self): time_since_start_new = self.sample_time_for_all_type() self.compute_intensity_given_past( time_since_start_new ) prob = self.intensity / numpy.sum(self.intensity) type_event_new = numpy.random.choice( range(self.dim_process), p = prob ) return time_since_start_new, numpy.int32(type_event_new) # # def sample_one_event(self): if self.sum_for_time: return self.sample_one_event_tog() else: return self.sample_one_event_sep() # # def gen_one_seq(self, max_len): self.restart_sequence() ''' Liiniger (2009), p. 28, describes a "thinning algorithm": generate one event of each type, take the minimum, and discard the others. Details found in NIPS 17 Appendix max_len is a pre-sampled value to set the length of seq ''' # initialize the seq time_since_start = numpy.float32(0.0) time_since_start_each_event = numpy.zeros( (self.dim_process,), dtype=dtype ) # for idx_event in range(max_len): # # compute the hidden states # of the most recent event in sequence self.compute_hidden_states() # time_since_start_new, type_event_new = self.sample_one_event() self.cnt_total_event += 1 # # update sequence time_since_last_event = time_since_start_new - time_since_start time_since_start = time_since_start_new time_since_last_same_event = time_since_start - time_since_start_each_event[type_event_new] time_since_start_each_event[type_event_new] = time_since_start self.one_seq.append( { 'idx_event': self.cnt_total_event, 'type_event': type_event_new, 'time_since_start': time_since_start, 'time_since_last_event': time_since_last_event, 'time_since_last_same_event': time_since_last_same_event } ) # # throw away the BOS item # at the head of the sequence self.one_seq.pop(0) # # # def gen_seqs(self, settings): #print(settings) print("generating sequences ... ") num_seqs = settings['num_seqs'] # self.list_seqs = [] cnt_seqs = 0 #for idx_seq in range(num_seqs): while cnt_seqs < num_seqs: # max_len = numpy.int32( round( numpy.random.uniform( low=settings['min_len'], high=settings['max_len'] ) ) ) # self.gen_one_seq(max_len) self.list_seqs.append(self.one_seq) cnt_seqs += 1 if cnt_seqs % 10 == 9: print("idx seq of gen : ", (cnt_seqs, self.name)) print("total number of seqs : ", num_seqs) # # def print_some(self): print("printing some seqs ... 
") for idx_seq in range(10): print("the id of this seq is : ", idx_seq) seq = self.list_seqs[idx_seq] list_events = [] list_time = [] list_dtime = [] list_items = [] for event_item in seq: list_events.append(event_item['type_event']) list_time.append( round(event_item['time_since_start'], 4) ) list_dtime.append( round(event_item['time_since_last_event'], 4) ) list_items.append( ( event_item['type_event'], round( event_item['time_since_last_event'], 4 ) ) ) print("the events, time and diff time for : ", idx_seq) print(list_events) print(list_time) print(list_dtime) print("the list of items is : ") print(list_items) # # def save_seqs(self, file_save): with open(file_save, 'wb') as f: pickle.dump(self.list_seqs, f) # # # # # # # deprecated generators # TODO: modules below are deprecated # they are models that we tried over this project # most of them work, better than Hawkes baseline # but still
---------- resource_path : str Path to the method endpoint, relative to the base URL. method : str HTTP method verb to call. path_params : Union[Dict[str, Union[str, int]], List[Tuple]] Path parameters to pass in the URL. query_params : Union[Dict[str, Union[str, int]], List[Tuple]] Query parameters to pass in the URL. header_params : Union[Dict[str, Union[str, int]], List[Tuple]] Header parameters to place in the request header. body : DeserializedType Request body. post_params : List[Tuple] Request POST form parameters for ``application/x-www-form-urlencoded`` and ``multipart/form-data``. response_type : str, optional Expected response data type. files : Dict[str, str] Dictionary of the file name and path for ``multipart/form-data``. _return_http_data_only : bool, optional Whether to return response data without head status code and headers. The default is ``False``. collection_formats : Dict[str, str] Collection format name for path, query, header, and post parameters. This parameter maps the parameter name to the collection type. _preload_content : bool, optional Whether to return the underlying response without reading or decoding response data. The default is ``True``, in which case response data is read or decoded. If ``False``, response data is not read or decoded. _request_timeout : Union[float, Tuple[float]] Timeout setting for the request. If only one number is provided, it is used as a total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. This parameter overrides the session-level timeout setting. """ return self.__call_api( resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, _return_http_data_only, collection_formats, _preload_content, _request_timeout, ) def request( self, method: str, url: str, query_params: Optional[str] = None, headers: Optional[Dict] = None, post_params: Optional[List[Tuple]] = None, body: Optional[Any] = None, _preload_content: bool = True, _request_timeout: Union[float, Tuple[float], None] = None, ) -> requests.Response: """Make the HTTP request and return it directly. Parameters ---------- method : str HTTP method verb. url : str Absolute URL of the target endpoint, including any path and query parameters. query_params : str Query parameters to pass in the URL. headers : Dict Headers to attach to the request. post_params : List[Tuple] Request post form parameters for ``multipart/form-data``. body : SerializedType Request body. _preload_content : bool, optional Whether to return the underlying response without reading or decoding response data. The default is ``True``, in which case the response data is read or decoded. If ``False``, the response data is not read or decoded. _request_timeout : Union[float, Tuple[float]] Timeout setting for the request. If only one number is provided, it is used as a total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. This parameter overrides the session-level timeout setting. 
""" if method == "GET": return handle_response( self.rest_client.get( url, params=query_params, stream=_preload_content, timeout=_request_timeout, headers=headers, ) ) elif method == "HEAD": return handle_response( self.rest_client.head( url, params=query_params, stream=_preload_content, timeout=_request_timeout, headers=headers, ) ) elif method == "OPTIONS": return handle_response( self.rest_client.options( url, params=query_params, headers=headers, files=post_params, stream=_preload_content, timeout=_request_timeout, data=body, ) ) elif method == "POST": return handle_response( self.rest_client.post( url, params=query_params, headers=headers, files=post_params, stream=_preload_content, timeout=_request_timeout, data=body, ) ) elif method == "PUT": return handle_response( self.rest_client.put( url, params=query_params, headers=headers, files=post_params, stream=_preload_content, timeout=_request_timeout, data=body, ) ) elif method == "PATCH": return handle_response( self.rest_client.patch( url, params=query_params, headers=headers, files=post_params, stream=_preload_content, timeout=_request_timeout, data=body, ) ) elif method == "DELETE": return handle_response( self.rest_client.delete( url, params=query_params, headers=headers, stream=_preload_content, timeout=_request_timeout, data=body, ) ) else: raise ValueError( "http method must be `GET`, `HEAD`, `OPTIONS`," " `POST`, `PATCH`, `PUT`, or `DELETE`." ) @staticmethod def parameters_to_tuples( params: Union[Dict, List[Tuple]], collection_formats: Optional[Dict[str, str]] ) -> List[Tuple[Any, Any]]: """Get parameters as a list of tuples, formatting collections. Parameters ---------- params : Union[Dict, List[Tuple]] Parameters for the request, either a dictionary with a name and value or a list of tuples with names and values. collection_formats : Dict[str, str] Dictionary with a parameter name and collection type specifier. """ new_params: List[Tuple[Any, Any]] = [] if collection_formats is None: collection_formats = {} for k, v in params.items() if isinstance(params, dict) else params: if k in collection_formats: collection_format = collection_formats[k] if collection_format == "multi": new_params.extend((k, value) for value in v) else: if collection_format == "ssv": delimiter = " " elif collection_format == "tsv": delimiter = "\t" elif collection_format == "pipes": delimiter = "|" else: # csv is the default delimiter = "," new_params.append((k, delimiter.join(str(value) for value in v))) else: new_params.append((k, v)) return new_params @staticmethod def prepare_post_parameters( post_params: Optional[List[Tuple]] = None, files: Optional[Dict[str, Union[str, List[str]]]] = None, ) -> List[Tuple]: """Build form parameters. This method combines plain form parameters and file parameters into a structure suitable for transmission. Parameters ---------- post_params : List[Tuple] Plain form parameters. files : Dict[str, Union[str, List[str]]] File parameters. 
""" params = [] if post_params: params = post_params if files: for parameter, file_entry in files.items(): if not file_entry: continue file_names = ( file_entry if isinstance(file_entry, list) else [file_entry] ) for file_name in file_names: with open(file_name, "rb") as f: filename = os.path.basename(f.name) file_data = f.read() mimetype = ( mimetypes.guess_type(filename)[0] or "application/octet-stream" ) params.append((parameter, (filename, file_data, mimetype))) return params @staticmethod def select_header_accept(accepts: Optional[List[str]]) -> Optional[str]: """Return a correctly formatted ``Accept`` header value from the provided array of accepted content types. Parameters ---------- accepts : List[str], optional List of accepted content types. Examples -------- >>> ApiClient.select_header_accept(['Application/JSON', 'text/xml']) 'application/json, text/xml' """ if not accepts: return None accepts = [accept.lower() for accept in accepts] return ", ".join(accepts) @staticmethod def select_header_content_type(content_types: Optional[List[str]]) -> str: """Return the preferred ``Content-Type`` header value from the provided array of valid content types. Parameters ---------- content_types : List[str], optional List of content types. Notes ----- If more than one valid ``Content-Type`` is provided, the first one in the list is used. Examples -------- >>> ApiClient.select_header_content_type() 'application/json' >>> ApiClient.select_header_content_type(['text/xml', 'Application/JSON']) 'text/xml' >>> ApiClient.select_header_content_type(['*/*']) 'application/json' """ if not content_types: return "application/json" content_types = [content_type.lower() for content_type in content_types] if "application/json" in content_types or "*/*" in content_types: return "application/json" else: return content_types[0] def __deserialize_file(self, response: requests.Response) -> str: """Deserialize the body to a file. This method saves the response body in a file in a temporary folder, using the file name from the ``Content-Disposition`` header if provided. Parameters ---------- response : requests.Response The API response object to deserialize. """ fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path) os.close(fd) os.remove(path) if "Content-Disposition" in response.headers: filename_match = re.search( r'filename=[\'"]?([^\'"\s]+)[\'"]?', response.headers["Content-Disposition"], ) if filename_match is not None: filename = filename_match.group(1) path = os.path.join(os.path.dirname(path), filename) with open(path, "wb") as f: f.write(response.content) return path @staticmethod def __deserialize_primitive( data: PrimitiveType, klass: Callable[[PrimitiveType], PrimitiveType] ) -> PrimitiveType: """Deserialize to the primitive type. Parameters ---------- data : Union[str, int, float, bool, bytes] Data to deserialize into the primitive type. klass : Type Type of target object for deserialization. """ try: return klass(data) except UnicodeEncodeError: return str(data) except (ValueError, TypeError): return data @staticmethod def __deserialize_object(value: object) -> object: """Return an original value. Parameters ---------- value : object Generic object that does not match any specific deserialization strategy. """ return value @staticmethod def __deserialize_date(value: str) -> datetime.date: """Deserialize string to ``datetime.date``. Parameters ---------- value : str String representation of a date object in ISO 8601 format or otherwise. 
""" try: return parse(value).date() except ValueError: raise ApiException( status_code=0, reason_phrase=f"Failed to parse `{value}` as date object", ) @staticmethod def __deserialize_datetime(value: str) -> datetime.datetime: """Deserialize string to ``datetime.datetime``. Parameters ---------- value : str String representation of the ``datetime`` object in ISO 8601 format. """ try: return parse(value) except ValueError: raise ApiException( status_code=0, reason_phrase=f"Failed to parse `{value}` as datetime object", ) def __deserialize_model( self, data: Dict, klass: Type[ModelBase] ) -> Union[ModelBase, Dict]: """Deserialize ``dict`` to model. Given a model type and the serialized data, deserialize into an instance of the model class. Parameters ---------- data : Dict Serialized representation of the model object. klass : ModelType Type of the model to deserialize. """ kwargs = {} if klass.swagger_types is not None: for attr, attr_type in klass.swagger_types.items(): if ( data is not None and klass.attribute_map[attr] in data and isinstance(data, (list, dict)) ): value = data[klass.attribute_map[attr]] kwargs[attr] = self.__deserialize(value, attr_type) instance = klass(**kwargs) if (
self.testers = testers self.temp_dir = args.tempdir or tempfile.mkdtemp() self.debug = args.debug self.stop_on_error = args.stop_on_error self.gold_dirs = args.gold_dirs def run(self): failures = [] for producer, consumer in itertools.product( filter(lambda t: t.PRODUCER, self.testers), filter(lambda t: t.CONSUMER, self.testers)): for failure in self._compare_implementations( producer, consumer, self._produce_consume, self.json_files): failures.append(failure) if self.gold_dirs: for gold_dir, consumer in itertools.product( self.gold_dirs, filter(lambda t: t.CONSUMER, self.testers)): print('\n\n\n\n') print('******************************************************') print('Tests against golden files in {}'.format(gold_dir)) print('******************************************************') def run_gold(producer, consumer, test_case): self._run_gold(gold_dir, producer, consumer, test_case) for failure in self._compare_implementations( consumer, consumer, run_gold, self._gold_tests(gold_dir)): failures.append(failure) return failures def run_flight(self): failures = [] servers = filter(lambda t: t.FLIGHT_SERVER, self.testers) clients = filter(lambda t: (t.FLIGHT_CLIENT and t.CONSUMER), self.testers) for server, client in itertools.product(servers, clients): for failure in self._compare_flight_implementations(server, client): failures.append(failure) return failures def _gold_tests(self, gold_dir): prefix = os.path.basename(os.path.normpath(gold_dir)) SUFFIX = ".json.gz" golds = [jf for jf in os.listdir(gold_dir) if jf.endswith(SUFFIX)] for json_path in golds: name = json_path[json_path.index('_')+1: -len(SUFFIX)] base_name = prefix + "_" + name + ".gold.json" out_path = os.path.join(self.temp_dir, base_name) with gzip.open(os.path.join(gold_dir, json_path)) as i: with open(out_path, "wb") as out: out.write(i.read()) try: skip = next(f for f in self.json_files if f.name == name).skip except StopIteration: skip = set() yield JsonFile(name, None, None, skip=skip, path=out_path) def _compare_implementations( self, producer, consumer, run_binaries, test_cases): print('##########################################################') print( '{0} producing, {1} consuming'.format(producer.name, consumer.name) ) print('##########################################################') for test_case in test_cases: json_path = test_case.path print('==========================================================') print('Testing file {0}'.format(json_path)) print('==========================================================') if producer.name in test_case.skip: print('-- Skipping test because producer {0} does ' 'not support'.format(producer.name)) continue if consumer.name in test_case.skip: print('-- Skipping test because consumer {0} does ' 'not support'.format(consumer.name)) continue if SKIP_ARROW in test_case.skip: print('-- Skipping test') continue try: run_binaries(producer, consumer, test_case) except Exception: traceback.print_exc() yield (test_case, producer, consumer, sys.exc_info()) if self.stop_on_error: break else: continue def _produce_consume(self, producer, consumer, test_case): # Make the random access file json_path = test_case.path file_id = guid()[:8] name = os.path.splitext(os.path.basename(json_path))[0] producer_file_path = os.path.join(self.temp_dir, file_id + '_' + name + '.json_as_file') producer_stream_path = os.path.join(self.temp_dir, file_id + '_' + name + '.producer_file_as_stream') consumer_file_path = os.path.join(self.temp_dir, file_id + '_' + name + '.consumer_stream_as_file') print('-- 
Creating binary inputs') producer.json_to_file(json_path, producer_file_path) # Validate the file print('-- Validating file') consumer.validate(json_path, producer_file_path) print('-- Validating stream') producer.file_to_stream(producer_file_path, producer_stream_path) consumer.stream_to_file(producer_stream_path, consumer_file_path) consumer.validate(json_path, consumer_file_path) def _run_gold(self, gold_dir, producer, consumer, test_case): json_path = test_case.path # Validate the file print('-- Validating file') producer_file_path = os.path.join( gold_dir, "generated_" + test_case.name + ".arrow_file") consumer.validate(json_path, producer_file_path) print('-- Validating stream') consumer_stream_path = os.path.join( gold_dir, "generated_" + test_case.name + ".stream") file_id = guid()[:8] name = os.path.splitext(os.path.basename(json_path))[0] consumer_file_path = os.path.join(self.temp_dir, file_id + '_' + name + '.consumer_stream_as_file') consumer.stream_to_file(consumer_stream_path, consumer_file_path) consumer.validate(json_path, consumer_file_path) def _compare_flight_implementations(self, producer, consumer): print('##########################################################') print( '{0} serving, {1} requesting'.format(producer.name, consumer.name) ) print('##########################################################') for test_case in self.json_files: json_path = test_case.path print('=' * 58) print('Testing file {0}'.format(json_path)) print('=' * 58) if ('Java' in (producer.name, consumer.name) and "map" in test_case.name): print('TODO(ARROW-1279): Enable map tests ' + ' for Java and JS once Java supports them and JS\'' + ' are unbroken') continue if SKIP_FLIGHT in test_case.skip: print('-- Skipping test') continue try: with producer.flight_server(): # Have the client upload the file, then download and # compare consumer.flight_request(producer.FLIGHT_PORT, json_path) except Exception: traceback.print_exc() yield (test_case, producer, consumer, sys.exc_info()) continue class Tester(object): PRODUCER = False CONSUMER = False FLIGHT_SERVER = False FLIGHT_CLIENT = False FLIGHT_PORT = 31337 def __init__(self, args): self.args = args self.debug = args.debug def run_shell_command(self, cmd): cmd = ' '.join(cmd) if self.debug: print(cmd) subprocess.check_call(cmd, shell=True) def json_to_file(self, json_path, arrow_path): raise NotImplementedError def stream_to_file(self, stream_path, file_path): raise NotImplementedError def file_to_stream(self, file_path, stream_path): raise NotImplementedError def validate(self, json_path, arrow_path): raise NotImplementedError def flight_server(self): raise NotImplementedError def flight_request(self, port, json_path): raise NotImplementedError class JavaTester(Tester): PRODUCER = True CONSUMER = True FLIGHT_SERVER = True FLIGHT_CLIENT = True FLIGHT_PORT = 31338 JAVA_OPTS = ['-Dio.netty.tryReflectionSetAccessible=true'] _arrow_version = load_version_from_pom() ARROW_TOOLS_JAR = os.environ.get( 'ARROW_JAVA_INTEGRATION_JAR', os.path.join(ARROW_HOME, 'java/tools/target/arrow-tools-{}-' 'jar-with-dependencies.jar'.format(_arrow_version))) ARROW_FLIGHT_JAR = os.environ.get( 'ARROW_FLIGHT_JAVA_INTEGRATION_JAR', os.path.join(ARROW_HOME, 'java/flight/target/arrow-flight-{}-' 'jar-with-dependencies.jar'.format(_arrow_version))) ARROW_FLIGHT_SERVER = ('org.apache.arrow.flight.example.integration.' 'IntegrationTestServer') ARROW_FLIGHT_CLIENT = ('org.apache.arrow.flight.example.integration.' 
'IntegrationTestClient') name = 'Java' def _run(self, arrow_path=None, json_path=None, command='VALIDATE'): cmd = ['java'] + self.JAVA_OPTS + \ ['-cp', self.ARROW_TOOLS_JAR, 'org.apache.arrow.tools.Integration'] if arrow_path is not None: cmd.extend(['-a', arrow_path]) if json_path is not None: cmd.extend(['-j', json_path]) cmd.extend(['-c', command]) if self.debug: print(' '.join(cmd)) run_cmd(cmd) def validate(self, json_path, arrow_path): return self._run(arrow_path, json_path, 'VALIDATE') def json_to_file(self, json_path, arrow_path): return self._run(arrow_path, json_path, 'JSON_TO_ARROW') def stream_to_file(self, stream_path, file_path): cmd = ['java'] + self.JAVA_OPTS + \ ['-cp', self.ARROW_TOOLS_JAR, 'org.apache.arrow.tools.StreamToFile', stream_path, file_path] if self.debug: print(' '.join(cmd)) run_cmd(cmd) def file_to_stream(self, file_path, stream_path): cmd = ['java'] + self.JAVA_OPTS + \ ['-cp', self.ARROW_TOOLS_JAR, 'org.apache.arrow.tools.FileToStream', file_path, stream_path] if self.debug: print(' '.join(cmd)) run_cmd(cmd) def flight_request(self, port, json_path): cmd = ['java'] + self.JAVA_OPTS + \ ['-cp', self.ARROW_FLIGHT_JAR, self.ARROW_FLIGHT_CLIENT, '-port', str(port), '-j', json_path] if self.debug: print(' '.join(cmd)) run_cmd(cmd) @contextlib.contextmanager def flight_server(self): cmd = ['java'] + self.JAVA_OPTS + \ ['-cp', self.ARROW_FLIGHT_JAR, self.ARROW_FLIGHT_SERVER, '-port', str(self.FLIGHT_PORT)] if self.debug: print(' '.join(cmd)) server = subprocess.Popen(cmd, stdout=subprocess.PIPE) try: output = server.stdout.readline().decode() if not output.startswith("Server listening on localhost"): raise RuntimeError( "Flight-Java server did not start properly, output: " + output) yield finally: server.kill() server.wait(5) class CPPTester(Tester): PRODUCER = True CONSUMER = True FLIGHT_SERVER = True FLIGHT_CLIENT = True EXE_PATH = os.environ.get( 'ARROW_CPP_EXE_PATH', os.path.join(ARROW_HOME, 'cpp/build/debug')) CPP_INTEGRATION_EXE = os.path.join(EXE_PATH, 'arrow-json-integration-test') STREAM_TO_FILE = os.path.join(EXE_PATH, 'arrow-stream-to-file') FILE_TO_STREAM = os.path.join(EXE_PATH, 'arrow-file-to-stream') FLIGHT_PORT = 31337 FLIGHT_SERVER_CMD = [ os.path.join(EXE_PATH, 'flight-test-integration-server'), "-port", str(FLIGHT_PORT)] FLIGHT_CLIENT_CMD = [ os.path.join(EXE_PATH, 'flight-test-integration-client'), "-host", "localhost"] name = 'C++' def _run(self, arrow_path=None, json_path=None, command='VALIDATE'): cmd = [self.CPP_INTEGRATION_EXE, '--integration'] if arrow_path is not None: cmd.append('--arrow=' + arrow_path) if json_path is not None: cmd.append('--json=' + json_path) cmd.append('--mode=' + command) if self.debug: print(' '.join(cmd)) run_cmd(cmd) def validate(self, json_path, arrow_path): return self._run(arrow_path, json_path, 'VALIDATE') def json_to_file(self, json_path, arrow_path): return self._run(arrow_path, json_path, 'JSON_TO_ARROW') def stream_to_file(self, stream_path, file_path): cmd = ['cat', stream_path, '|', self.STREAM_TO_FILE, '>', file_path] self.run_shell_command(cmd) def file_to_stream(self, file_path, stream_path): cmd = [self.FILE_TO_STREAM, file_path, '>', stream_path] self.run_shell_command(cmd) @contextlib.contextmanager def flight_server(self): if self.debug: print(' '.join(self.FLIGHT_SERVER_CMD)) server = subprocess.Popen(self.FLIGHT_SERVER_CMD, stdout=subprocess.PIPE) try: output = server.stdout.readline().decode() if not output.startswith("Server listening on localhost"): raise RuntimeError( "Flight-C++ server 
did not start properly, output: " + output) yield finally: server.kill() server.wait(5) def flight_request(self, port, json_path): cmd = self.FLIGHT_CLIENT_CMD + [ '-port=' + str(port), '-path=' + json_path, ] if self.debug: print(' '.join(cmd)) run_cmd(cmd) class JSTester(Tester): PRODUCER = True CONSUMER = True EXE_PATH = os.path.join(ARROW_HOME, 'js/bin') VALIDATE = os.path.join(EXE_PATH, 'integration.js') JSON_TO_ARROW = os.path.join(EXE_PATH, 'json-to-arrow.js') STREAM_TO_FILE = os.path.join(EXE_PATH, 'stream-to-file.js') FILE_TO_STREAM = os.path.join(EXE_PATH, 'file-to-stream.js') name = 'JS' def _run(self, exe_cmd, arrow_path=None, json_path=None, command='VALIDATE'): cmd = [exe_cmd] if arrow_path is not None: cmd.extend(['-a', arrow_path]) if json_path is not None: cmd.extend(['-j', json_path]) cmd.extend(['--mode', command]) if self.debug: print(' '.join(cmd)) run_cmd(cmd) def validate(self, json_path, arrow_path): return self._run(self.VALIDATE, arrow_path, json_path, 'VALIDATE') def json_to_file(self, json_path, arrow_path): cmd = ['node', '--no-warnings', self.JSON_TO_ARROW, '-a', arrow_path, '-j', json_path] self.run_shell_command(cmd) def stream_to_file(self, stream_path, file_path): cmd = ['cat', stream_path, '|', 'node', '--no-warnings', self.STREAM_TO_FILE, '>', file_path] self.run_shell_command(cmd) def file_to_stream(self, file_path, stream_path): cmd = ['cat', file_path, '|', 'node', '--no-warnings', self.FILE_TO_STREAM, '>', stream_path] self.run_shell_command(cmd) class GoTester(Tester): PRODUCER = True CONSUMER = True # FIXME(sbinet): revisit for Go modules GOPATH = os.getenv('GOPATH', '~/go') GOBIN = os.environ.get('GOBIN', os.path.join(GOPATH, 'bin')) GO_INTEGRATION_EXE = os.path.join(GOBIN, 'arrow-json-integration-test') STREAM_TO_FILE = os.path.join(GOBIN, 'arrow-stream-to-file') FILE_TO_STREAM = os.path.join(GOBIN, 'arrow-file-to-stream') name = 'Go' def _run(self, arrow_path=None, json_path=None, command='VALIDATE'): cmd = [self.GO_INTEGRATION_EXE] if arrow_path is not None: cmd.extend(['-arrow', arrow_path]) if json_path is not None: cmd.extend(['-json', json_path]) cmd.extend(['-mode', command]) if self.debug: print(' '.join(cmd)) run_cmd(cmd) def validate(self, json_path, arrow_path): return self._run(arrow_path, json_path, 'VALIDATE') def json_to_file(self, json_path, arrow_path): return self._run(arrow_path, json_path, 'JSON_TO_ARROW') def stream_to_file(self, stream_path, file_path): cmd = ['cat', stream_path, '|', self.STREAM_TO_FILE, '>', file_path] self.run_shell_command(cmd) def file_to_stream(self, file_path, stream_path): cmd = [self.FILE_TO_STREAM, file_path, '>', stream_path] self.run_shell_command(cmd) def get_static_json_files(): glob_pattern = os.path.join(ARROW_HOME, 'integration', 'data', '*.json') return [JsonFile(name=os.path.basename(p), path=p, skip=set(), schema=None, batches=None) for p in glob.glob(glob_pattern)] def run_all_tests(args): testers = [] if args.enable_cpp: testers.append(CPPTester(args)) if args.enable_java: testers.append(JavaTester(args)) if args.enable_js: testers.append(JSTester(args)) if args.enable_go: testers.append(GoTester(args)) static_json_files = get_static_json_files() generated_json_files = get_generated_json_files(tempdir=args.tempdir, flight=args.run_flight) json_files = static_json_files + generated_json_files runner = IntegrationRunner(json_files, testers, args) failures = [] failures.extend(runner.run()) if args.run_flight: failures.extend(runner.run_flight()) fail_count = 0 if failures: 
print("################# FAILURES #################") for test_case, producer, consumer, exc_info in failures: fail_count += 1 print("FAILED TEST:", end=" ") print(test_case.name, producer.name, "producing, ", consumer.name, "consuming") if exc_info: traceback.print_exception(*exc_info) print() print(fail_count, "failures") if fail_count > 0:
#************************************************************** # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # #************************************************************** import pyuno import uno import unittest import exceptions import types def suite(ctx): suite = unittest.TestSuite() suite.addTest(TestCase("testErrors",ctx)) suite.addTest(TestCase("testBaseTypes",ctx)) suite.addTest(TestCase("testOutparam",ctx)) suite.addTest(TestCase("testStruct",ctx)) suite.addTest(TestCase("testType",ctx)) suite.addTest(TestCase("testEnum",ctx)) suite.addTest(TestCase("testBool",ctx)) suite.addTest(TestCase("testChar",ctx)) suite.addTest(TestCase("testUnicode",ctx)) suite.addTest(TestCase("testConstant",ctx)) suite.addTest(TestCase("testExceptions",ctx)) suite.addTest(TestCase("testInterface",ctx)) suite.addTest(TestCase("testByteSequence",ctx)) suite.addTest(TestCase("testInvoke",ctx)) return suite def equalsEps( a,b,eps ): if a - eps <= b and a+eps >= b: return 1 return 0 def assign( rData, bBool, cChar, nByte, nShort, nUShort, nLong, nULong, nHyper,\ nUHyper, fFloat, fDouble, eEnum, rStr, xTest, rAny ): rData.Bool = bBool; rData.Char = cChar; rData.Byte = nByte; rData.Short = nShort; rData.UShort = nUShort; rData.Long = nLong; rData.ULong = nULong; rData.Hyper = nHyper; rData.UHyper = nUHyper; rData.Float = fFloat; rData.Double = fDouble; rData.Enum = eEnum; rData.String = rStr; rData.Interface = xTest; rData.Any = rAny; class PythonTransporter: def __init__( self ): pass def transportAny( self, arg ): return arg class TestCase( unittest.TestCase): def __init__(self,method,ctx): unittest.TestCase.__init__(self,method) self.ctx = ctx def setUp(self): # the testcomponent from the testtools project self.tobj = self.ctx.ServiceManager.createInstanceWithContext( 'com.sun.star.test.bridge.CppTestObject' , self.ctx ) self.tobj.Bool = 1 self.tobj.Char = 'h' self.tobj.Byte = 43 self.tobj.Short = -42 self.tobj.UShort = 44 self.tobj.Long = 42 self.tobj.ULong = 41 self.tobj.Hyper = 46 self.tobj.UHyper = 47 self.tobj.Float = 4.3 self.tobj.Double = 4.2 self.tobj.Enum = 4 self.tobj.String = "yabadabadoo" self.tobj.Interface = self.ctx self.tobj.Any = self.tobj.String mystruct = uno.createUnoStruct( "test.testtools.bridgetest.TestData" ) assign( mystruct, 1, 'h', 43, -42,44,42,41,46,47,4.3,4.2,4,"yabadabadoo",self.ctx,"yabadabadoo") self.tobj.Struct = mystruct self.testElement = uno.createUnoStruct( "test.testtools.bridgetest.TestElement" ) self.testElement.String = "foo" self.testElement2 = uno.createUnoStruct( "test.testtools.bridgetest.TestElement" ) self.testElement2.String = "42" self.tobj.Sequence = (self.testElement,self.testElement2) def testBaseTypes(self): self.failUnless( 42 == self.tobj.Long , "Long attribute" ) self.failUnless( 41 == self.tobj.ULong , "ULong attribute" ) 
self.failUnless( 43 == self.tobj.Byte , "Byte attribute" ) self.failUnless( 44 == self.tobj.UShort , "UShort attribute" ) self.failUnless( -42 == self.tobj.Short , "Short attribute" ) self.failUnless( 46 == self.tobj.Hyper , "Hyper attribute" ) self.failUnless( 47 == self.tobj.UHyper , "UHyper attribute" ) self.failUnless( self.tobj.Bool , "Bool attribute2" ) self.failUnless( "yabadabadoo" == self.tobj.String , "String attribute" ) self.failUnless( self.tobj.Sequence[0] == self.testElement , "Sequence test") self.failUnless( self.tobj.Sequence[1] == self.testElement2 , "Sequence2 test") self.failUnless( equalsEps( 4.3,self.tobj.Float,0.0001) , "float test" ) self.failUnless( 4.2 == self.tobj.Double , "double test" ) self.failUnless( self.ctx == self.tobj.Interface , "object identity test with C++ object" ) self.failUnless( not self.ctx == self.tobj , "object not identical test " ) self.failUnless( 42 == self.tobj.transportAny( 42 ), "transportAny long" ) self.failUnless( "woo, this is python" == self.tobj.transportAny( "woo, this is python" ), \ "string roundtrip via any test" ) def testEnum( self ): e1 = uno.Enum( "com.sun.star.uno.TypeClass" , "LONG" ) e2 = uno.Enum( "com.sun.star.uno.TypeClass" , "LONG" ) e3 = uno.Enum( "com.sun.star.uno.TypeClass" , "UNSIGNED_LONG" ) e4 = uno.Enum( "test.testtools.bridgetest.TestEnum" , "TWO" ) self.failUnless( e1 == e2 , "equal enum test" ) self.failUnless( not (e1 == e3) , "different enums test" ) self.failUnless( self.tobj.transportAny( e3 ) == e3, "enum roundtrip test" ) self.tobj.Enum = e4 self.failUnless( e4 == self.tobj.Enum , "enum assignment failed" ) def testType(self ): t1 = uno.getTypeByName( "com.sun.star.lang.XComponent" ) t2 = uno.getTypeByName( "com.sun.star.lang.XComponent" ) t3 = uno.getTypeByName( "com.sun.star.lang.EventObject" ) self.failUnless( t1.typeClass == \ uno.Enum( "com.sun.star.uno.TypeClass", "INTERFACE" ), "typeclass of type test" ) self.failUnless( t3.typeClass == \ uno.Enum( "com.sun.star.uno.TypeClass", "STRUCT" ), "typeclass of type test") self.failUnless( t1 == t2 , "equal type test" ) self.failUnless( t1 == t2 , "equal type test" ) self.failUnless( t1 == self.tobj.transportAny( t1 ), "type rountrip test" ) def testBool( self ): self.failUnless( uno.Bool(1) , "uno.Bool true test" ) self.failUnless( not uno.Bool(0) , "uno.Bool false test" ) self.failUnless( uno.Bool( "true") , "uno.Bool true1 test" ) self.failUnless( not uno.Bool( "false") , "uno.Bool true1 test" ) self.tobj.Bool = uno.Bool(1) self.failUnless( self.tobj.Bool , "bool true attribute test" ) self.tobj.Bool = uno.Bool(0) self.failUnless( not self.tobj.Bool , "bool true attribute test" ) # new boolean semantic self.failUnless( id( self.tobj.transportAny( True ) ) == id(True) , "boolean preserve test") self.failUnless( id( self.tobj.transportAny( False ) ) == id(False) , "boolean preserve test" ) self.failUnless( id( self.tobj.transportAny(1) ) != id( True ), "boolean preserve test" ) self.failUnless( id( self.tobj.transportAny(0) ) != id( False ), "boolean preserve test" ) def testChar( self ): self.tobj.Char = uno.Char( u'h' ) self.failUnless( self.tobj.Char == uno.Char( u'h' ), "char type test" ) self.failUnless( isinstance( self.tobj.transportAny( uno.Char(u'h') ),uno.Char),"char preserve test" ) def testStruct( self ): mystruct = uno.createUnoStruct( "test.testtools.bridgetest.TestData" ) assign( mystruct, 1, 'h', 43, -42,44,42,41,46,47,4.3,4.2,4,"yabadabadoo",self.ctx,"yabadabadoo") self.tobj.Struct = mystruct aSecondStruct = self.tobj.Struct 
self.failUnless( self.tobj.Struct == mystruct, "struct roundtrip for equality test" ) self.failUnless( aSecondStruct == mystruct, "struct roundtrip for equality test2" ) aSecondStruct.Short = 720 self.failUnless( not aSecondStruct == mystruct , "different structs equality test" ) self.failUnless( not self.ctx == mystruct , "object is not equal to struct test" ) self.failUnless( mystruct == self.tobj.transportAny( mystruct ), "struct roundtrip with any test" ) my2ndstruct = uno.createUnoStruct( "test.testtools.bridgetest.TestData", \ 1, 'h', 43, -42,44,42,41,46,47,4.3,4.2,4,"yabadabadoo",self.ctx,"yabadabadoo",()) self.failUnless( my2ndstruct == mystruct, "struct non-default ctor test" ) def testUnicode( self ): uni = u'\0148' self.tobj.String = uni self.failUnless( uni == self.tobj.String ) self.tobj.String = u'dubidu' self.failUnless( u'dubidu' == self.tobj.String , "unicode comparison test") self.failUnless( 'dubidu' == self.tobj.String , "unicode vs. string comparison test" ) def testConstant( self ): self.failUnless( uno.getConstantByName( "com.sun.star.beans.PropertyConcept.ATTRIBUTES" ) == 4,\ "constant retrieval test" ) def testExceptions( self ): unoExc = uno.getClass( "com.sun.star.uno.Exception" ) ioExc = uno.getClass( "com.sun.star.io.IOException" ) dispExc = uno.getClass( "com.sun.star.lang.DisposedException" ) wasHere = 0 try: raise ioExc( "huhuh" , self.tobj ) except unoExc as instance: wasHere = 1 self.failUnless( wasHere , "exceptiont test 1" ) wasHere = 0 try: raise ioExc except ioExc: wasHere = 1 else: self.failUnless( wasHere, "exception test 2" ) wasHere = 0 try: raise dispExc except ioExc: pass except unoExc: wasHere = 1 self.failUnless(wasHere, "exception test 3") illegalArg = uno.getClass( "com.sun.star.lang.IllegalArgumentException" ) wasHere = 0 try: self.tobj.raiseException( 1 , "foo" , self.tobj ) self.failUnless( 0 , "exception test 5a" ) except ioExc: self.failUnless( 0 , "exception test 5b" ) except illegalArg as i: self.failUnless( 1 == i.ArgumentPosition , "exception member test" ) self.failUnless( "foo" == i.Message , "exception member test 2 " ) wasHere = 1 else: self.failUnless( 0, "except test 5c" ) self.failUnless( wasHere, "illegal argument exception test failed" ) def testInterface(self): clazz = uno.getClass( "com.sun.star.lang.XComponent" ) self.failUnless( "com.sun.star.lang.XComponent" == clazz.__pyunointerface__ ) self.failUnless( issubclass( clazz, uno.getClass( "com.sun.star.uno.XInterface" ) ) ) self.tobj.Interface = None def testOutparam( self): # outparameter struct, mybool,mychar,mybyte,myshort,myushort,mylong,myulong,myhyper,myuhyper,myfloat, \ mydouble,myenum,mystring,myinterface,myany,myseq,my2ndstruct = self.tobj.getValues( \ None,None,None,None,None,None,None,None,None,None, \ None,None,None,None,None,None,None) self.failUnless(struct == self.tobj.Struct, "outparam 1 test") self.failUnless(self.tobj.Bool, "outparam 2 test") self.failUnless(mychar == self.tobj.Char, "outparam 3 test") self.failUnless(mybyte == self.tobj.Byte, "outparam 4 test") self.failUnless(myshort == self.tobj.Short, "outparam 5 test") self.failUnless(myushort == self.tobj.UShort, "outparam 6 test") self.failUnless(mylong == self.tobj.Long, "outparam 7 test") self.failUnless(myulong == self.tobj.ULong, "outparam 8 test") self.failUnless(myhyper == self.tobj.Hyper, "outparam 9 test") self.failUnless(myuhyper == self.tobj.UHyper, "outparam 10 test") self.failUnless(myfloat == self.tobj.Float, "outparam 11 test") self.failUnless(mydouble == self.tobj.Double, "outparam 
12 test") self.failUnless(myenum == self.tobj.Enum, "outparam 13 test") self.failUnless(mystring == self.tobj.String, "outparam 14 test") self.failUnless(myinterface == self.tobj.Interface, "outparam 15 test") self.failUnless(myany == self.tobj.Any, "outparam 16 test") self.failUnless(myseq == self.tobj.Sequence, "outparam 17 test") self.failUnless(my2ndstruct == struct, "outparam 18 test") # should work, debug on windows, why not # struct, mybool,mychar,mybyte,myshort,myushort,mylong,myulong,myhyper,myuhyper,myfloat,\ # mydouble,myenum,mystring,myinterface,myany,myseq,my2ndstruct = self.tobj.setValues2( \ # mybool,mychar,mybyte,myshort,myushort,mylong,myulong,myhyper,myuhyper,myfloat,\ # mydouble,myenum,mystring,myinterface,myany,myseq,my2ndstruct) # self.failUnless(struct == self.tobj.Struct, "outparam 1 test") # self.failUnless( mybool and self.tobj.Bool, "outparam 2 test") # self.failUnless(mychar == self.tobj.Char, "outparam 3 test") # self.failUnless(mybyte == self.tobj.Byte, "outparam 4 test") # self.failUnless(myshort == self.tobj.Short, "outparam 5 test") # self.failUnless(myushort ==
is a command. cmd = msg.startswith(glob.config.command_prefix) \ and await commands.process_commands(p, t, msg) if cmd and 'resp' in cmd: # Command triggered and there is a response to send. p.enqueue(await packets.sendMessage(t.name, cmd['resp'], client, t.id)) else: # No command triggered. if match := regexes.now_playing.match(msg): # User is /np'ing a map. # Save it to their player instance # so we can use this elsewhere owo.. p.last_np = await Beatmap.from_bid(int(match['bid']), cache_pp=True) # Since this is a DM to the bot, we should # send back a list of general PP values. # TODO: !acc and !mods in commands to # modify these values :P msg = 'PP Values: ' + ' | '.join( f'{acc}%: {pp:.2f}pp' for acc, pp in zip( (90, 95, 98, 99, 100), p.last_np.pp_values )) if p.last_np else 'Could not find map.' p.enqueue(await packets.sendMessage(t.name, msg, client, t.id)) else: # Not Aika t.enqueue(await packets.sendMessage(client, msg, target, client_id)) await plog(f'{p} @ {t}: {msg}', Ansi.CYAN, fd = 'logs/chat.log') # PacketID: 29 @bancho_packet(Packet.c_partLobby) async def lobbyPart(p: Player, pr: PacketReader) -> None: p.in_lobby = False # PacketID: 30 @bancho_packet(Packet.c_joinLobby) async def lobbyJoin(p: Player, pr: PacketReader) -> None: p.in_lobby = True for m in filter(lambda m: m is not None, glob.matches): p.enqueue(await packets.newMatch(m)) # PacketID: 31 @bancho_packet(Packet.c_createMatch) async def matchCreate(p: Player, pr: PacketReader) -> None: m, = await pr.read(osuTypes.match) m.host = p await p.join_match(m, m.passwd) await plog(f'{p} created a new multiplayer match.') # PacketID: 32 @bancho_packet(Packet.c_joinMatch) async def matchJoin(p: Player, pr: PacketReader) -> None: m_id, passwd = await pr.read(osuTypes.i32, osuTypes.string) if m_id not in range(64): return if not (m := glob.matches.get_by_id(m_id)): await plog(f'{p} tried to join a non-existant mp lobby?') return await p.join_match(m, passwd) # PacketID: 33 @bancho_packet(Packet.c_partMatch) async def matchPart(p: Player, pr: PacketReader) -> None: await p.leave_match() # PacketID: 38 @bancho_packet(Packet.c_matchChangeSlot) async def matchChangeSlot(p: Player, pr: PacketReader) -> None: if not (m := p.match): await plog(f'{p} tried changing slot outside of a match?') return # Read new slot ID slot_id, = await pr.read(osuTypes.i32) if slot_id not in range(16): return if m.slots[slot_id].status & SlotStatus.has_player: await plog(f'{p} tried to switch to slot {slot_id} which has a player.') return # Swap with current slot. s = m.get_slot(p) m.slots[slot_id].copy(s) s.reset() m.enqueue(await packets.updateMatch(m)) # PacketID: 39 @bancho_packet(Packet.c_matchReady) async def matchReady(p: Player, pr: PacketReader) -> None: if not (m := p.match): await plog(f'{p} tried readying outside of a match? 
(1)') return m.get_slot(p).status = SlotStatus.ready m.enqueue(await packets.updateMatch(m)) # PacketID: 40 @bancho_packet(Packet.c_matchLock) async def matchLock(p: Player, pr: PacketReader) -> None: if not (m := p.match): await plog(f'{p} tried locking a slot outside of a match?') return # Read new slot ID slot_id, = await pr.read(osuTypes.i32) if slot_id not in range(16): return slot = m.slots[slot_id] if slot.status & SlotStatus.locked: slot.status = SlotStatus.open else: if slot.player: slot.reset() slot.status = SlotStatus.locked m.enqueue(await packets.updateMatch(m)) # PacketID: 41 @bancho_packet(Packet.c_matchChangeSettings) async def matchChangeSettings(p: Player, pr: PacketReader) -> None: if not (m := p.match): await plog(f'{p} tried changing multi settings outside of a match?') return # Read new match data new, = await pr.read(osuTypes.match) if new.freemods != m.freemods: # Freemods status has been changed. if new.freemods: # Switching to freemods. # Central mods -> all players mods. for s in m.slots: if s.status & SlotStatus.has_player: s.mods = m.mods & ~Mods.SPEED_CHANGING m.mods = m.mods & Mods.SPEED_CHANGING else: # Switching to centralized mods. # Host mods -> Central mods. for s in m.slots: if s.player and s.player.id == m.host.id: m.mods = s.mods | (m.mods & Mods.SPEED_CHANGING) break if not new.bmap: # Map being changed, unready players. for s in m.slots: if s.status & SlotStatus.ready: s.status = SlotStatus.not_ready elif not m.bmap: # New map has been chosen, send to match chat. await m.chat.send(glob.bot, f'Map selected: {new.bmap.embed}.') # Copy basic match info into our match. m.bmap = new.bmap m.freemods = new.freemods m.game_mode = new.game_mode m.team_type = new.team_type m.match_scoring = new.match_scoring m.name = new.name m.enqueue(await packets.updateMatch(m)) # PacketID: 44 @bancho_packet(Packet.c_matchStart) async def matchStart(p: Player, pr: PacketReader) -> None: if not (m := p.match): await plog(f'{p} tried starting match outside of a match?') return for s in m.slots: if s.status & SlotStatus.ready: s.status = SlotStatus.playing m.in_progress = True m.enqueue(await packets.matchStart(m)) # PacketID: 48 @bancho_packet(Packet.c_matchScoreUpdate) async def matchScoreUpdate(p: Player, pr: PacketReader) -> None: if not (m := p.match): await plog(f'{p} sent a scoreframe outside of a match?') return # Read 37 bytes if using scorev2, # otherwise only read 29 bytes. size = 37 if pr.data[28] else 29 data = pr.data[:size] data[4] = m.get_slot_id(p) m.enqueue(b'0\x00\x00' + size.to_bytes(4, 'little') + data, lobby = False) pr.ignore(size) # PacketID: 49 @bancho_packet(Packet.c_matchComplete) async def matchComplete(p: Player, pr: PacketReader) -> None: if not (m := p.match): await plog(f'{p} sent a scoreframe outside of a match?') return m.get_slot(p).status = SlotStatus.complete all_completed = True for s in m.slots: if s.status & SlotStatus.playing: all_completed = False break if all_completed: m.in_progress = False m.enqueue(await packets.matchComplete()) for s in m.slots: # Reset match statuses if s.status == SlotStatus.complete: s.status = SlotStatus.not_ready # PacketID: 51 @bancho_packet(Packet.c_matchChangeMods) async def matchChangeMods(p: Player, pr: PacketReader) -> None: if not (m := p.match): await plog(f'{p} tried changing multi mods outside of a match?') return mods, = await pr.read(osuTypes.i32) if m.freemods: if p.id == m.host.id: # Allow host to change speed-changing mods. 
m.mods = mods & Mods.SPEED_CHANGING # Set slot mods m.get_slot(p).mods = mods & ~Mods.SPEED_CHANGING else: # Not freemods, set match mods. m.mods = mods m.enqueue(await packets.updateMatch(m)) # PacketID: 52 @bancho_packet(Packet.c_matchLoadComplete) async def matchLoadComplete(p: Player, pr: PacketReader) -> None: if not (m := p.match): await plog(f'{p} sent a scoreframe outside of a match?') return # Ready up our player. m.get_slot(p).loaded = True # Check if all players are ready. if not any(s.status & SlotStatus.playing and not s.loaded for s in m.slots): m.enqueue(await packets.matchAllPlayerLoaded(), lobby = False) # PacketID: 54 @bancho_packet(Packet.c_matchNoBeatmap) async def matchNoBeatmap(p: Player, pr: PacketReader) -> None: if not (m := p.match): return m.get_slot(p).status = SlotStatus.no_map m.enqueue(await packets.updateMatch(m)) # PacketID: 55 @bancho_packet(Packet.c_matchNotReady) async def matchNotReady(p: Player, pr: PacketReader) -> None: if not (m := p.match): await plog(f'{p} tried unreadying outside of a match? (1)') return m.get_slot(p).status = SlotStatus.not_ready m.enqueue(await packets.updateMatch(m), lobby = False) # PacketID: 56 @bancho_packet(Packet.c_matchFailed) async def matchFailed(p: Player, pr: PacketReader) -> None: if not (m := p.match): return m.enqueue(await packets.matchPlayerFailed(m.get_slot_id(p))) # PacketID: 59 @bancho_packet(Packet.c_matchHasBeatmap) async def matchHasBeatmap(p: Player, pr: PacketReader) -> None: if not (m := p.match): return m.get_slot(p).status = SlotStatus.not_ready m.enqueue(await packets.updateMatch(m)) # PacketID: 60 @bancho_packet(Packet.c_matchSkipRequest) async def matchSkipRequest(p: Player, pr: PacketReader) -> None: if not (m := p.match): await plog(f'{p} tried unreadying outside of a match? (1)') return m.get_slot(p).skipped = True m.enqueue(await packets.matchPlayerSkipped(p.id)) for s in m.slots: if s.status & SlotStatus.playing and not s.skipped: return # All users have skipped, enqueue a skip. m.enqueue(await packets.matchSkip(), lobby = False) # PacketID: 63 @bancho_packet(Packet.c_channelJoin) async def channelJoin(p: Player, pr: PacketReader) -> None: chan_name, = await pr.read(osuTypes.string) c = glob.channels.get(chan_name) if not c or not await p.join_channel(c): await plog(f'{p} failed to join {chan_name}.', Ansi.YELLOW) return # Enqueue new channelinfo (playercount) to a ll players. #glob.players.enqueue(await packets.channelInfo(*c.basic_info)) # Enqueue channelJoin to our player. p.enqueue(await packets.channelJoin(c.name)) # PacketID: 68 @bancho_packet(Packet.c_beatmapInfoRequest) async def beatmapInfoRequest(p: Player, pr: PacketReader) -> None: req: BeatmapInfoRequest req, = await pr.read(osuTypes.mapInfoRequest) info_list = [] # Filenames for fname in req.filenames: # Attempt to regex pattern match the filename. # If there is no match, simply ignore this map. # XXX: Sometimes a map will be requested without a # diff name, not really sure how to handle this? lol if not (r := regexes.mapfile.match(fname)): continue res = await glob.db.fetch( 'SELECT id, set_id, status, md5 ' 'FROM maps WHERE artist = %s AND ' 'title = %s AND creator = %s AND ' 'version = %s', [ r['artist'], r['title'], r['creator'], r['version'] ] ) if not res: continue to_osuapi_status = lambda s: { 0: 0, 2: 1, 3: 2, 4: 3, 5: 4 }[s] info_list.append(BeatmapInfo( 0, res['id'], res['set_id'], 0, to_osuapi_status(res['status']), # TODO: best grade letter rank # the order of these doesn't follow # gamemode ids in osu! either. 
# (std, ctb, taiko, mania) Rank.N, Rank.N, Rank.N, Rank.N, res['md5'] )) # Ids for m in req.ids: breakpoint() p.enqueue(await packets.beatmapInfoReply(info_list)) # PacketID: 70 @bancho_packet(Packet.c_matchTransferHost) async def matchTransferHost(p: Player, pr: PacketReader) -> None: if not (m := p.match): await plog(f'{p} tried transferring host of a match? (1)') return # Read new slot ID slot_id, = await pr.read(osuTypes.i32) if slot_id
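# --- Example: freemod mod splitting ------------------------------------------
# matchChangeMods above splits a mod change under freemods using bit masks:
# speed-changing mods stay on the match (shared by everyone), everything else
# is stored on the changing player's slot.  A minimal self-contained sketch of
# that partition; the Mods values below are illustrative stand-ins for this
# example, not the server's actual enum definition.
from enum import IntFlag

class Mods(IntFlag):
    NOMOD = 0
    HIDDEN = 1 << 3
    HARDROCK = 1 << 4
    DOUBLETIME = 1 << 6
    HALFTIME = 1 << 8
    NIGHTCORE = 1 << 9
    SPEED_CHANGING = DOUBLETIME | HALFTIME | NIGHTCORE

def split_freemods(mods):
    match_mods = mods & Mods.SPEED_CHANGING    # shared: affects everyone's timing
    slot_mods = mods & ~Mods.SPEED_CHANGING    # personal: kept on the slot
    return match_mods, slot_mods

assert split_freemods(Mods.HIDDEN | Mods.DOUBLETIME) == (Mods.DOUBLETIME, Mods.HIDDEN)
# ------------------------------------------------------------------------------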
# -*- coding:UTF8 -*- #!/usr/bin/python #Shieber on 2018/8/7 #树,二叉堆的结构 ######################################################## #树的实现方法一:列表 def BinaryTree1(tree): '''二叉树的实现''' return [tree,[],[]] def getRootVal(root): return root[0] def setRootVal(root,newVal): root[0] = newVal def getLeftChild(root): return root[1] def getRightChild(root): return root[2] def insertLeft(root,newBranch): '''加入左树''' t = root.pop(1) if len(t) > 1: root.insert(1,[newBranch,t,[]]) else: root.insert(1,[newBranch,[],[]]) return root def insertRight(root,newBranch): '''加入右树''' t = root.pop(2) if len(t) > 1: root.insert(2,[newBranch,[],t]) else: root.insert(2,[newBranch,[],[]]) return root #树的实现方法二:节点和引用 class BinaryTree2: '''二叉树的实现''' def __init__(self,root): self.root = root self.leftChild = None self.rightChild = None def getRootVal(self): return self.root def setRootVal(self,Obj): self.root = Obj def getLeftChild(self): return self.leftChild def getRightChild(self): return self.rightChild def preorder(self): '''前序遍历,内部实现:中左右''' print(self.root) if self.leftChild: self.leftChild.preorder() if self.rightChild: self.rightChild.preorder() def midorder(self): '''中序遍历,内部实现:右中左''' if self.rightChild: self.rightChild.midorder() print(self.root) if self.leftChild: self.leftChild.midorder() def postorder(self): '''后序遍历,内部实现:右左中''' if self.rightChild: self.rightChild.postorder() if self.leftChild: self.leftChild.postorder() print(self.root) def insertLeft(self,newBranch): if None == self.leftChild: self.leftChild = BinaryTree(newBranch) else: t = BinaryTree(newBranch) t.leftChild = self.leftChild self.leftChild = t def insertRight(self,newBranch): if None == self.rightChild: self.rightChild = BinaryTree(newBranch) else: t = BinaryTree(newBranch) t.rightChild = self.rightChild self.rightChild = t def preorder(tree): '''前序遍历,外部实现''' if tree: print(tree.getRootVal()) preorder(tree.getLeftChild()) preorder(tree.getRightChild()) def midorder(tree): '''中序遍历,外部实现''' if tree: midorder(tree.getLeftChild()) print(tree.getRootVal()) midorder(tree.getRightChild()) def midorder1(tree): '''中序遍历,加上括号,外部实现''' sVal = '' if tree: sVal = '(' + midorder1(tree.getLeftChild()) sVal = sVal + str(tree.getRootVal()) sVal = sVal + midorder1(tree.getRightChild()) + ')' return sVal def postorder(tree): '''后序遍历,外部实现''' if tree: postorder(tree.getLeftChild()) postorder(tree.getRightChild()) print(tree.getRootVal()) #################################################################### #from pythonds.basic.stack import Stack def analiTree(string): '''计算式分析树,利用了栈''' strlist = string.split() stack = Stack() tree = BinaryTree('') stack.push(tree) currTree = tree for token in strlist: if '(' == token: currTree.insertLeft('') stack.push(currTree) currTree = currTree.getLeftChild() elif token not in ['+','-','*','/',')']: currTree.setRootVal(int(token)) currTree = stack.pop() elif token in ['+','-','*','/']: currTree.setRootVal(token) currTree.insertRight('') stack.push(currTree) currTree = currTree.getRightChild() elif ')' == token: currTree = stack.pop() else: raise ValueError return tree def evaluate(tree): '''计算分析树的评估函数''' opers = {'+':operator.add,'-':operator.sub,'*':operator.mul,'/':operator.truediv} left = tree.getLeftChild() right = tree.getRightChild() if left and right: func = opers[tree.getRootVal()] return func(evaluat(left),evaluate(right)) else: return tree.getRootVal() def postorderEval(tree): '''后序遍历的评估函数''' opers = {'+':operator.add,'-':operator.sub,'*':operator.mul,'/':operator.truediv} res1,res1 = None,None if tree: res1 = 
postorderEval(tree.getLeftChild()) res2 = postorderEval(tree.getRightChild()) if res1 and res2: return opers[tree.getRootVal()](res1,res2) else: return tree.getRootVal() ############################################################################ #二叉堆类 class BinHeap: '''二叉堆的实现,可用于优先级队列的创建''' def __init__(self): self.heapList = [0] #占位,不使用,数据项从1开始 self.currentSize = 0 def findMin(self): return self.heapList[1] def size(self): return self.currentSize def isEmpty(self): return 0 == self.currentSize #/************************1.插值函数相关函数开始*******************************/# def insert(self,pos): '''A:插值函数,添加到列表末尾''' self.heapList.append(pos) self.currentSize += 1 self.percUp(self.currentSize) def percUp(self,size): '''A的辅助函数:实现父子项交换''' while size >> 1 > 0: if self.heapList[size] < self.heapList[size >> 1]: tmp = self.heapList[size >> 1] self.heapList[size >> 1] = self.heapList[size] self.heapList[size] = tmp size >>= 1 #*获取上一个父节点的位置*# #/************************1.插值函数相关函数结束*******************************/# #/************************2.删除最小值函数相关函数开始**************************/# def delMin(self): '''B:删除最小项''' returnVal = self.heapList[1] #*注意不是0*# self.heapList[1] = self.heapList.pop() self.currentSize -= 1 self.percDown(1) #*从根向下交换父子项*# return returnVal def percDown(self,size): '''B的辅助函数1:实现父子项交换''' while size << 1 < self.currentSize: minc = self.minChild(size) #*返回子节点的最小位置*# if self.heapList[size] > self.heapList[minc]: tmp = self.heapList[minc] self.heapList[minc] = self.heapList[size] self.heapList[size] = tmp size = minc def minChild(self,size): '''B的辅助函数2:返回最小子项的位置''' if (size << 1) + 1 > self.currentSize: #此时表示没有右子 return size << 1 elif self.heapList[size << 1] < self.heapList[(size << 1)+1]: #因为大的值尽量放在右边,所以判断用<,而非<= return size << 1 else: return (size << 1) + 1 #/************************2.删除最小值函数相关函数结束**************************/# def buildHeap(self,Olist): '''利用列表构建新的干净的二叉堆 时间复杂度:O(n)''' self.heapList = [0] + Olist[:] self.currentSize = len(Olist) size = len(Olist) >> 1 while size > 0: self.percDown(size) size -= 1 ############################################################################ class TreeNode: '''树节点''' def __init__(self,key,val,left=None,right=None,parent=None): self.key = key self.payload = val self.leftChild = left self.rightChild = right self.parent = parent #self.balFactor = 0 def __iter__(self): '''迭代生产器yield''' if self: if self.hasLeftChild(): for elem in self.leftChild(): yield elem yield self.key if self.hasRightChild(): for elem in self.rightChild(): yield elem def hasChild(self): return self.leftChild or self.rightChild def has2Child(self): return self.leftChild and self.rightChild def hasLeftChild(self): return self.leftChild def hasRightChild(self): return self.rightChild def isLeftChild(self): return self.parent and self.parent.leftChild == self def isRightChild(self): return self.parent and self.parent.rightChild == self def isRoot(self): return not self.parent def isLeaf(self): return not (self.leftChild or self.rightchild) def replaceNodeData(self,key,val,leftC,rightC): self.key = key self.payload = val self.leftChild = leftC self.rightChild = rightC if self.hasLeftChild(): self.leftChild = self if self.hasRightChild(): self.rightChild = self class BinarySearchTree: '''二叉搜索树''' def __init__(self): self.root = None self.size = 0 def length(self): return self.size def __len__(self): return self.size def __iter__(self): return self.root.__iter__() #/*************1.创建二叉搜索树的相关函数开始****************/# def __setitem__(self,key,val): '''重新赋值[],实现 myTree['key'] = val操作''' 
self.put(key,val) def put(self,key,val): '''创建二叉搜索树''' if self.root: self._put(key,val,self.root) else: self.root = TreeNode(key,val) #引用树节点类 self.size += 1 def _put(self,key,val,currNode): '''创建二叉搜索树迭代判断函数''' #key == currNode.key 需要考虑吗? if key < currNode.key: if currNode.hasLeftChild(): self._put(key,val,currNode.leftChild) else: currNode.leftChild = TreeNode(key,val,parent=currNode) else: if currNode.hasRightChild(): self._put(key,val,currNode.rightChild) else: currNode.rightChild = TreeNode(key,val,parent=currNode) #/*************1.创建二叉搜索树的相关函数结束****************/# #/*************2.获取二叉搜索树数据的相关函数开始****************/# def __getitem__(self,key): '''重新赋值[],实现 val = myTree['key']操作''' return self.get(key) def get(self,key): '''获取二叉搜索树数据''' if self.root: data = self._get(key,self.root) if data: return data.payload else: return None else: return None def _get(self,key,currNode): '''获取二叉树数据迭代判断函数''' if not currNode.key: return None elif key == currNode.key: return currNode elif key < currNode.key: return self._get(key,currNode.leftChild) else: return self._get(key,currNode.rightChild) def __contains__(self,key): '''重载in函数''' if self._get(key,self.root): return True else: return False #/*************2.获取二叉搜索树数据的相关函数结束****************/# #/*************3.删除二叉搜索树节点的相关函数开始****************/# def __delitem__(self,key): self.delete(key) def delete(self,key): '''删除键''' if self.size > 1: node2Remove = self._get(key,self.root) if node2Remove: self.remove(node2Remove) self.size -= 1 else: raise KeyError('Error,key does not exist') elif 1 == self.size and key == self.root.key: self.root = None self.size -= 1 else: raise KeyError('Error,key does not exist') def remove(self,node): '''删除键,维持树的平恒''' if node.isLeaf(): #叶节点  if node == node.parent.leftChild: node.parent.leftChild = None else: node.parent.rightChild = None elif node.has2Child():#父节点,两个孩子 succ = node.findSuccessor() succ.spliceOut() node.key = succ.key node.payload = succ.payload else: #父节点,一个孩子 六种情况 if node.hasLeftChild(): if node.isLeftChild(): node.leftChild.parent = node.parent node.patent.leftChild = node.leftChild elif node.isRightChild(): node.leftChild.parent = node.parent node.patent.rightChild = node.leftChild else: node.replaceNodeData(node.leftChild.key, node.leftChild.payload, node.leftChild.leftChild, node.leftChild.rightChild) else: if node.isLeftChild(): node.rightChild.parent = node.parent node.patent.leftChild = node.rightChild elif node.isRightChild(): node.rightChild.parent = node.parent node.patent.rightChild = node.rightChild else: node.replaceNodeData(node.rightChild.key, node.rightChild.payload, node.rightChild.leftChild, node.rightChild.rightChild) def findSuccessor(self): '''找到后继节点,在右子树的最小左子树位置''' succ = None if self.hasRightChild(): succ = self.rightChild.findMin() else: if self.parent: if self.isLeftChild(): succ = self.parent else: self.parent.rightChild = None succ = self.parent.findSuccessor() self.parent.rightChild = self return succ def findMin(self): '''找到最小左子树''' current = self while current.hasLeftChild(): current = current.leftChild return current def spliceOut(self): '''删除后继节点''' if self.isLeaf(): if self.isLeftChild(): self.parent.leftChild = None else: self.parent.rightChild = None elif self.hasChild(): if self.hasLeftChild(): if self.isLeftChild(): self.parent.leftChild = self.leftChild else: self.parent.rightChild = self.leftChild self.leftChild.parent = self.parent else: if self.isLeftChild(): self.parent.leftChild = self.rightChild else: self.parent.rightChild = self.rightChild self.rightChild.parent = 
self.parent else: pass #/*************3.删除二叉搜索树节点的相关函数结束****************/# #mytree = BinarySearchTree() #mytree[3] = 'red' #mytree[4] = 'blue' #mytree[6] = 'yellow' #print(mytree[]) #print(mytree[3]) ############################################################################ #平衡二叉搜索树AVL #平衡因子:height(leftTree) - height(rightTree) #N=O^(h+2)/sqrt(5) -1 树节点 #h=1.44*logNh 树高 class BinarySearchTreeAVL: '''平衡二叉搜索树,只需要稍微 改变一下二叉搜索树的一些 函数,限制搜索为O(logn)''' def __init__(self): self.root = None self.size = 0 def length(self): return self.size def __len__(self): return self.size def __iter__(self): return self.root.__iter__() #/*************1.创建平衡二叉搜索树的相关函数开始****************/# def __setitem__(self,key,val): '''重新赋值[],实现 myTree['key'] = val操作''' self.put(key,val) def put(self,key,val): '''创建二叉搜索树''' if self.root: self._put(key,val,self.root) else: self.root = TreeNode(key,val) #引用树节点类 self.size += 1 def _put(self,key,val,currNode): '''创建二叉搜索树迭代判断函数''' #key == currNode.key 需要考虑吗? if key < currNode.key: if currNode.hasLeftChild(): self._put(key,val,currNode.leftChild) else: currNode.leftChild = TreeNode(key,val,parent=currNode) self.updateBalance(currNode.leftChild) else: if currNode.hasRightChild(): self._put(key,val,currNode.rightChild) else: currNode.rightChild = TreeNode(key,val,parent=currNode) self.updateBalance(currNode.rightChild) def updateBalance(self,node): '''每次插入数据项时,实现树的平衡''' if node.balFactor > 1 or node.balFactor < -1: self.rebalance(node) return True if node.parent: if node.isLeftChild(): node.parent.balFactor += 1 elif node.isRightChild(): node.parent.balFactor -= 1 else: pass if node.parent.balFactor != 0: self.updateBalance(node.parent) def rebalance(self,node): '''调整极端情况''' if node.balFactor < 0: if node.rightChild.balFactor > 0: self.rotateRight(node.rightChild) self.rotateLeft(node) else: self.rotateLeft(node) elif node.balFactor >0: if node.leftChild.balFactor < 0: self.rotateLeft(node.leftChild) self.rotateRight(node) else: self.rotateRight(node) else: pass def rotateLeft(self,rotRoot): '''子树左旋转''' newRoot = rotRoot.rightChild rotRoot.rightChild = newRoot.leftChild if None != newRoot.leftChild: newRoot.leftChild.parent = rotRoot newRoot.parent = rotRoot.parent if rotRoot.isRoot(): self.root = newRoot else: if rotRoot.isLeftChild(): rotRoot.parent.leftChild = newRoot else: rotRoot.parent.rightChild = newRoot newRoot.leftChild = rotRoot rotRoot.parent = newRoot rotRoot.balFactor = rotRoot.balFactor + 1 - min(newRoot.balFactor,0) newRoot.balFactor = newRoot.balFactor + 1 + max(rotRoot.balFactor,0) def rotateRight(self,rotnode): '''子树右旋转''' newRoot = rotRoot.leftChild rotRoot.leftChild = newRoot.rightChild if None != newRoot.rightChild: newRoot.rightChild.parent = rotRoot newRoot.parent = rotRoot.parent if rotRoot.isRoot(): self.root = newRoot else: if rotRoot.isRightChild(): rotRoot.parent.rightChild = newRoot else: rotRoot.parent.leftChild = newRoot newRoot.rightChild = rotRoot rotRoot.parent = newRoot rotRoot.balFactor = rotRoot.balFactor - 1 - max(newRoot.balFactor,0) newRoot.balFactor = newRoot.balFactor - 1 + min(rotRoot.balFactor,0) #/*************1.创建平衡二叉搜索树的相关函数结束****************/# #/*************2.获取二叉搜索树数据的相关函数开始****************/# def __getitem__(self,key): '''重新赋值[],实现 val = myTree['key']操作''' return self.get(key) def get(self,key): '''获取二叉搜索树数据''' if self.root: data = self._get(key,self.root) if data: return data.payload else: return None else: return None def _get(self,key,currNode): '''获取二叉树数据迭代判断函数''' if not currNode.key: return None elif key == currNode.key: return 
currNode elif key < currNode.key: return self._get(key,currNode.leftChild) else: return self._get(key,currNode.rightChild)
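The heap and search-tree classes above expose a small, dictionary-like API (insert/delMin/buildHeap on BinHeap; __setitem__, __getitem__, __contains__ and delete on BinarySearchTree). A minimal usage sketch, assuming the classes are restored to ordinary multi-line Python in an importable module; keys and values are illustrative only:

# Priority-queue use of BinHeap
heap = BinHeap()
heap.buildHeap([9, 5, 6, 2, 3])      # O(n) heapify of an existing list
heap.insert(1)                       # new item percolates up to the root
print(heap.findMin())                # -> 1
print(heap.delMin(), heap.delMin())  # -> 1 2, items come out in ascending order

# Dictionary-style use of BinarySearchTree
tree = BinarySearchTree()
tree[3] = 'red'                      # __setitem__ -> put()
tree[4] = 'blue'
tree[6] = 'yellow'
print(tree[4])                       # __getitem__ -> get() -> 'blue'
print(6 in tree)                     # __contains__ -> True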
""" mcpython - a minecraft clone written in python licenced under the MIT-licence (https://github.com/mcpython4-coding/core) Contributors: uuk, xkcdjerry (inactive) Based on the game of fogleman (https://github.com/fogleman/Minecraft), licenced under the MIT-licence Original game "minecraft" by Mojang Studios (www.minecraft.net), licenced under the EULA (https://account.mojang.com/documents/minecraft_eula) Mod loader inspired by "Minecraft Forge" (https://github.com/MinecraftForge/MinecraftForge) and similar This project is not official by mojang and does not relate to it. """ import asyncio import itertools import math import typing from abc import ABC import mcpython.client.gui.ContainerRenderer import mcpython.client.gui.Slot import mcpython.client.rendering.ui.Buttons import mcpython.client.rendering.ui.SearchBar import mcpython.common.event.TickHandler import mcpython.engine.event.EventBus import mcpython.engine.ResourceLoader import mcpython.util.texture as texture_util import PIL.Image import pyglet from mcpython import shared from mcpython.client.gui.util import getTabTexture, CreativeTabScrollbar from mcpython.common.container.ItemGroup import FilteredItemGroup, ItemGroup from mcpython.common.container.ResourceStack import ItemStack, LazyClassLoadItemstack from mcpython.engine import logger from pyglet.window import key, mouse class ICreativeView(mcpython.client.gui.ContainerRenderer.ContainerRenderer, ABC): """ Base class for a creative tab Comes with some helper code """ def __init__(self): super().__init__() self.tab_icon = None self.tab_icon_selected = None self.is_selected = False self.tab_slot = mcpython.client.gui.Slot.Slot() self.icon_position = 0, 0 def update_rendering(self): pass def get_icon_stack(self) -> ItemStack: raise NotImplementedError def get_view_size(self) -> typing.Tuple[int, int]: raise NotImplementedError def draw_at(self, position: typing.Tuple[int, int], hovering_slot=None): pass def draw(self, hovering_slot=None): self.bg_anchor = "MM" self.window_anchor = "MM" self.bg_image_size = self.get_view_size() x, y = self.get_position() self.draw_at((x, y), hovering_slot=hovering_slot) CT_MANAGER.draw_tabs((x, y), self.bg_image_size) for slot in self.get_draw_slots(): slot.draw(x, y, hovering=slot == hovering_slot) for slot in self.get_draw_slots(): slot.draw_label() if self.custom_name is not None: if self.custom_name_label.text != self.custom_name: self.custom_name_label.text = self.custom_name self.custom_name_label.x = x + 15 self.custom_name_label.y = y + self.bg_image_size[1] - 10 self.custom_name_label.draw() async def on_activate(self): await super().on_activate() await CT_MANAGER.activate() async def on_deactivate(self): await super().on_deactivate() shared.state_handler.active_state.parts[0].activate_mouse = True await CT_MANAGER.deactivate() class CreativeItemTab(ICreativeView): bg_texture: pyglet.image.AbstractImage = None @classmethod async def reload(cls): cls.bg_texture = texture_util.to_pyglet_image( mcpython.util.texture.to_pillow_image( (await mcpython.engine.ResourceLoader.read_pyglet_image( "minecraft:gui/container/creative_inventory/tab_items" )).get_region(0, 120, 194, 255 - 120) ).resize((2 * 195, 2 * 136), PIL.Image.NEAREST) ) def __init__( self, name: str, icon: ItemStack, group: ItemGroup = None, linked_tag=None ): super().__init__() self.icon = icon self.group = group if group is not None else ItemGroup() self.scroll_offset = 0 self.old_scroll_offset = 0 self.linked_tag = linked_tag self.custom_name = self.name = name if linked_tag is 
not None: # If there is a tag linked to this tab, subscribe to the reload event import mcpython.common.data.ResourcePipe mcpython.common.data.ResourcePipe.handler.register_data_processor( self.load_from_tag ) self.scroll_bar = CreativeTabScrollbar(self.set_scrolling) def set_scrolling(self, progress: int): self.scroll_offset = round(progress - 1) self.update_rendering() def load_from_tag(self): """ Helper method for reloading the content from the underlying tag Use only when self.linked_tag is set, otherwise, this will crash """ if self.linked_tag is None: raise RuntimeError("tag must be set for reloading") tag = shared.tag_handler.get_entries_for(self.linked_tag, "items") self.group.entries.clear() self.group.entries += filter( lambda stack: not stack.is_empty(), (ItemStack(e, warn_if_unarrival=False) for e in tag), ) self.scroll_bar.set_max_value( max(1, (math.ceil(len(self.group.entries) / 9) - 4)) ) self.update_rendering(force=True) def update_rendering(self, force=False): """ Updates the slot content of the rendering system :param force: force update, also when nothing changed """ self.group.load_lazy() if self.old_scroll_offset == self.scroll_offset and not force: return self.old_scroll_offset = self.scroll_offset entries = list(self.group.view()) # todo: cache value! self.scroll_bar.set_max_value(max(math.ceil(len(entries) / 9) - 4, 1)) # print("cycling at", self.name, "entries:", entries) entries = iter(entries) if self.scroll_offset != 0: for _ in range(9 * self.scroll_offset): next(entries) for i, slot in enumerate(self.slots[9:]): try: entry = next(entries) except StopIteration: # todo: can we simply clean the itemstack? slot.set_itemstack_force(ItemStack.create_empty()) else: # print("writing at", i, "stack", entry) # todo: can we change the item in the stack? slot.set_itemstack_force(entry) for slot in self.slots[:9]: slot.invalidate() async def create_slot_renderers(self): """ Creates the slots """ def work(i): return ( lambda: shared.world.get_active_player().inventory_main.slots[i] if shared.world.world_loaded else None ) slots = [ [ mcpython.client.gui.Slot.SlotInfiniteStack( ItemStack.create_empty(), position=(18 + x * 36, 61 + y * 36) ) for x in range(9) ] for y in range(4, -1, -1) ] # some black magic... 
return [ mcpython.client.gui.Slot.SlotCopyWithDynamicTarget( work(j), position=( 20 + j * 36, 16, ), ) for j in range(9) ] + sum(slots, []) def add_item(self, item: typing.Union[ItemStack, LazyClassLoadItemstack, str]): """ Adds an item to the underlying item group :param item: the item stack or the item name """ if isinstance(item, str): item = LazyClassLoadItemstack(item) self.group.add(item) return self def get_icon_stack(self) -> ItemStack: return self.icon def get_view_size(self) -> typing.Tuple[int, int]: return 2 * 195, 2 * 136 def draw_at(self, position: typing.Tuple[int, int], hovering_slot=None): self.bg_texture.blit(*position) self.scroll_bar.draw_at( (position[0] + 176 * 2, position[1] + 8 * 2), self.get_view_size()[1] - 50 ) def clear(self): pass async def on_deactivate(self): await super().on_deactivate() self.scroll_bar.deactivate() async def on_activate(self): await super().on_activate() self.scroll_bar.activate() self.update_rendering(True) def on_mouse_button_press( self, relative_x: int, relative_y: int, button: int, modifiers: int, item_stack, slot, ) -> bool: if ( 2 * 16 <= relative_x <= 2 * 170 and 24 * 2 <= relative_y <= 119 * 2 and not item_stack.is_empty() and ( slot is None or not slot.get_itemstack().contains_same_resource(item_stack) ) ): item_stack.clean() return True return False def __repr__(self): return f"CreateItemTab({self.name}, entry_count={len(self.group.entries)})" def update_shift_container(self): shared.inventory_handler.shift_container_handler.container_A = self.slots[:9] shared.inventory_handler.shift_container_handler.container_B = self.slots[9:] if not shared.IS_TEST_ENV: shared.tick_handler.schedule_once(CreativeItemTab.reload()) class CreativeTabSearchBar(CreativeItemTab): @classmethod async def reload(cls): cls.bg_texture = texture_util.to_pyglet_image( mcpython.util.texture.to_pillow_image( (await mcpython.engine.ResourceLoader.read_pyglet_image( "minecraft:gui/container/creative_inventory/tab_item_search" )).get_region(0, 120, 194, 255 - 120) ).resize((2 * 195, 2 * 136), PIL.Image.NEAREST) ) def __init__( self, name: str, icon: ItemStack, group: ItemGroup = None, linked_tag=None ): super().__init__(name, icon, group, linked_tag) self.group: FilteredItemGroup = self.group.filtered() self.search_bar = mcpython.client.rendering.ui.SearchBar.SearchBar( change_callback=lambda text: self.group.apply_raw_filter(f"(.*){text}(.*)"), enter_callback=lambda: self.search_bar.disable(), exit_callback=lambda: self.search_bar.disable(), enable_mouse_to_enter=True, ) self.tab_icon = CreativeTabManager.UPPER_TAB self.tab_icon_selected = CreativeTabManager.UPPER_TAB_SELECTED self.need_reload = True def setNeedReload(): self.need_reload = True import mcpython.common.data.ResourcePipe as ResourcePipe ResourcePipe.handler.register_data_processor(setNeedReload) async def on_deactivate(self): await super().on_deactivate() self.search_bar.disable() async def on_activate(self): await super().on_activate() self.group.apply_raw_filter("(.*)") if self.need_reload: self.need_reload = False self.group.entries.clear() for page in CT_MANAGER.pages: for tab in page: if isinstance(tab, CreativeItemTab): self.group.entries += tab.group.entries self.group.sort_after_item_name() self.update_rendering(True) class CreativePlayerInventory(ICreativeView): TEXTURE_SIZE = 195 * 2, 136 * 2 TEXTURE = None @classmethod async def reload(cls): cls.TEXTURE = texture_util.resize_image_pyglet( (await mcpython.engine.ResourceLoader.read_pyglet_image( 
"minecraft:gui/container/creative_inventory/tab_inventory" )).get_region(0, 120, 195, 136), cls.TEXTURE_SIZE, ) def __init__(self): super().__init__() self.stack = ItemStack("minecraft:chest") self.tab_icon = CreativeTabManager.LOWER_TAB self.tab_icon_selected = CreativeTabManager.LOWER_TAB_SELECTED async def on_activate(self): await super().on_activate() shared.tick_handler.schedule_once(self.reload_config()) def get_icon_stack(self) -> ItemStack: return self.stack def get_view_size(self) -> typing.Tuple[int, int]: return self.TEXTURE_SIZE def draw_at(self, position: typing.Tuple[int, int], hovering_slot=None): self.TEXTURE.blit(*position) async def create_slot_renderers(self): """ Creates the slots """ def work(i): return lambda: shared.world.get_active_player().inventory_main.slots[i] # some black magic... return [ mcpython.client.gui.Slot.SlotCopyWithDynamicTarget( work(j), ) for j in range(40) ] + [ mcpython.client.gui.Slot.SlotCopyWithDynamicTarget( work(45), ), mcpython.client.gui.Slot.SlotTrashCan(), ] @staticmethod def get_config_file() -> str or None: return "assets/config/inventory/player_inventory_main_creative.json" if not shared.IS_TEST_ENV: shared.tick_handler.schedule_once(CreativePlayerInventory.reload()) class CreativeTabManager: TAB_SIZE = 28 * 2, 30 * 2 # todo: make this reload-able! UPPER_TAB = None UPPER_TAB_SELECTED = None LOWER_TAB = None LOWER_TAB_SELECTED = None @classmethod async def reload(cls): cls.UPPER_TAB = texture_util.resize_image_pyglet( getTabTexture().get_region(0, 224, 28, 30), cls.TAB_SIZE ) cls.UPPER_TAB_SELECTED = texture_util.resize_image_pyglet( getTabTexture().get_region(0, 224 - 30, 28, 30), cls.TAB_SIZE ) cls.LOWER_TAB = texture_util.resize_image_pyglet( getTabTexture().get_region(0, 164, 28, 30), cls.TAB_SIZE ) cls.LOWER_TAB_SELECTED = texture_util.resize_image_pyglet( getTabTexture().get_region(0, 128, 28, 30), cls.TAB_SIZE ) def __init__(self): self.pages: typing.List[typing.List[ICreativeView]] = [[]] self.inventory_instance = None self.search_instance = None self.saved_hotbars = None self.current_page = 0 self.underlying_event_bus: mcpython.engine.event.EventBus.EventBus = ( shared.event_handler.create_bus(active=False) ) self.underlying_event_bus.subscribe("user:mouse:press", self.on_mouse_press) self.underlying_event_bus.subscribe("user:mouse:drag", self.on_mouse_move) self.underlying_event_bus.subscribe("user:mouse:motion", self.on_mouse_move) self.underlying_event_bus.subscribe("user:keyboard:press", self.on_key_press) self.hovering_tab = None self.page_left = ( mcpython.client.rendering.ui.Buttons.arrow_button_left( (0, 0), lambda: self.increase_page(-1) ) if not shared.IS_TEST_ENV else None ) self.page_right = ( mcpython.client.rendering.ui.Buttons.arrow_button_right( (0, 0), lambda: self.increase_page(1) ) if not shared.IS_TEST_ENV else None ) self.page_label = pyglet.text.Label(anchor_x="center", anchor_y="center") self.lower_left_position = 0, 0 self.container_size = 1, 1 self.current_tab: typing.Optional[ICreativeView] = None def is_multi_page(self): return len(self.pages) > 1 async def on_key_press(self, button, mod): if shared.state_handler.global_key_bind_toggle: return if button == key.E: await shared.inventory_handler.hide(self.current_tab) elif button == key.N and self.is_multi_page(): self.current_page = max(self.current_page - 1, 0) elif button == key.M and self.is_multi_page(): self.current_page = min(self.current_page + 1, len(self.pages) - 1) def on_mouse_move(self, x, y, dx, dy, *_): tab = self.get_tab_at(x, y) 
self.hovering_tab = tab def init_tabs_if_needed(self): if self.inventory_instance is None: self.inventory_instance = CreativePlayerInventory() if self.search_instance is None: self.search_instance = CreativeTabSearchBar( "Search", ItemStack("minecraft:paper") ) async def activate(self): mcpython.common.event.TickHandler.handler.bind( self.underlying_event_bus.activate, 1 ) if self.is_multi_page(): await self.page_left.activate() await self.page_right.activate() async def deactivate(self): self.underlying_event_bus.deactivate() await self.page_left.deactivate() await self.page_right.deactivate() async def on_mouse_press(self, mx, my, button, modifiers): if not button & mouse.LEFT: return tab = self.get_tab_at(mx, my) if tab is not None: await self.switch_to_tab(tab) def get_tab_at(self, mx, my) -> typing.Optional[ICreativeView]: tx, ty = self.TAB_SIZE tabs = self.pages[self.current_page] x, y = self.lower_left_position for tab in tabs[4:]: # y is here not a mistake as tabs are going down, instead of up
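CreativeItemTab above is the reusable building block for creative tabs: it takes a display name, an icon ItemStack, and optionally an ItemGroup or a linked item tag, while add_item() accepts an ItemStack, a LazyClassLoadItemstack, or a plain item name (wrapped lazily) and returns self so calls can be chained. A hypothetical mod-side sketch using only the API visible here; the item names are illustrative, and how a finished tab is registered with CT_MANAGER is not shown in this excerpt:

# Hypothetical sketch: constructing a creative tab from the classes above
example_tab = CreativeItemTab(
    "Example Blocks",                  # title drawn at the top of the tab
    ItemStack("minecraft:stone"),      # icon stack rendered on the tab handle
)
# add_item() returns self, so population can be chained; plain strings are
# wrapped in LazyClassLoadItemstack and resolved once the item registry is ready
example_tab.add_item("minecraft:dirt") \
    .add_item("minecraft:sand") \
    .add_item(ItemStack("minecraft:gravel"))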
batch_size, self.n_hidden) return hidden def forward(self, x): batch_size = x.size(0) x = x.permute(1,0,2) # Initializing hidden state for first input using method defined below hidden = self.init_hidden(batch_size) pdb.set_trace() # Passing in the input and hidden state into the model and obtaining outputs out, hidden = self.rnn(x, hidden) pdb.set_trace() # # Reshaping the outputs such that it can be fit into the fully connected layer # out = out.contiguous().view(-1, self.hidden_dim) # out = self.fc(out) return out, hidden class Classifier_transformer(nn.Module): def __init__(self, output_feats=64, kernel_size=None, stride=None, p=0, groups=1): super().__init__() self.desc_enc = DescEncoder_OneHot_RNN() self.state_enc = StateEncoder() self.fc1 = nn.Linear(656, 312) self.fc2 = nn.Linear(312, 1) self.leakyrelu = nn.LeakyReLU(0.1) # self.fc3 = nn.Linear(128, 64) # self.fc4 = nn.Linear(64, 32) # self.fc5 = nn.Linear(32, 1) def forward(self, state, desc): state_encoding = self.state_enc(state) state_encoding = state_encoding.flatten(start_dim=1) desc_encoding = self.desc_enc(desc) desc_encoding = desc_encoding.flatten(start_dim=1) joint_encoding = torch.cat((state_encoding, desc_encoding), 1) joint_encoding = torch.flatten(joint_encoding, start_dim = 1) out = self.leakyrelu(self.fc1(joint_encoding)) out = self.leakyrelu(self.fc2(out)) # out = torch.relu(self.fc3(out)) # out = torch.relu(self.fc4(out)) # out = self.fc5(out) out = torch.sigmoid(out) return out class Classifier(nn.Module): def __init__(self, output_feats=64, kernel_size=None, stride=None, p=0, groups=1): super().__init__() self.desc_enc = DescEncoder_BOW() self.state_enc = StateEncoder() self.fc1 = nn.Linear(276, 256) self.bn1 = nn.BatchNorm1d(num_features=256) self.fc2 = nn.Linear(256, 128) self.bn2 = nn.BatchNorm1d(num_features=128) self.fc3 = nn.Linear(128, 64) self.bn3 = nn.BatchNorm1d(num_features=64) self.fc4 = nn.Linear(64, 32) self.bn4 = nn.BatchNorm1d(num_features=32) self.fc5 = nn.Linear(32, 2) self.bn5 = nn.BatchNorm1d(num_features=1) self.leakyrelu = nn.LeakyReLU(0.1) self.relu = nn.ReLU() def forward(self, state, desc): state_encoding = self.state_enc(state) state_encoding = state_encoding.flatten(start_dim=1) desc_encoding = self.desc_enc(desc) desc_encoding = desc_encoding.flatten(start_dim=1) joint_encoding = torch.cat((state_encoding, desc_encoding), 1) joint_encoding = torch.flatten(joint_encoding, start_dim = 1) out = self.bn1(self.relu(self.fc1(joint_encoding))) out = self.bn2(self.relu(self.fc2(out))) out = self.bn3(self.relu(self.fc3(out))) out = self.bn4(self.relu(self.fc4(out))) out = torch.softmax(self.fc5(out), dim =1) return out class ClassifierBB(nn.Module): def __init__(self, output_feats=64, kernel_size=None, stride=None, p=0, groups=1): super().__init__() self.desc_enc = DescEncoder_BOW() self.state_enc = DQNImgEncoder(in_channels = 4) self.fc1 = nn.Linear(522, 128) self.bn1 = nn.BatchNorm1d(num_features=128) self.fc2 = nn.Linear(128, 64) self.bn2 = nn.BatchNorm1d(num_features=64) self.fc3 = nn.Linear(64, 32) self.bn3 = nn.BatchNorm1d(num_features=32) self.fc4 = nn.Linear(32, 2) self.bn4 = nn.BatchNorm1d(num_features=1) self.leakyrelu = nn.LeakyReLU(0.1) self.relu = nn.ReLU() def forward(self, state, desc): state = state.permute(0,3,1,2)/256 state_encoding = self.state_enc(state) state_encoding = state_encoding.flatten(start_dim=1) desc_encoding = desc.squeeze() joint_encoding = torch.cat((state_encoding, desc_encoding), 1) joint_encoding = torch.flatten(joint_encoding, start_dim = 1) out = 
self.bn1(self.relu(self.fc1(joint_encoding))) out = self.bn2(self.relu(self.fc2(out))) out = self.bn3(self.relu(self.fc3(out))) out = torch.softmax(self.fc4(out), dim =1) return out class ClassifierMNIST(nn.Module): def __init__(self, output_feats=64, kernel_size=None, stride=None, p=0, groups=1): super().__init__() self.desc_enc = DescEncoder_BOW() self.state_enc = StateEncoder() self.fc1 = nn.Linear(276, 256) self.bn1 = nn.BatchNorm1d(num_features=256) self.fc2 = nn.Linear(256, 128) self.bn2 = nn.BatchNorm1d(num_features=128) self.fc3 = nn.Linear(128, 64) self.bn3 = nn.BatchNorm1d(num_features=64) self.fc4 = nn.Linear(64, 32) self.bn4 = nn.BatchNorm1d(num_features=32) self.fc5 = nn.Linear(32, 1) self.bn5 = nn.BatchNorm1d(num_features=1) self.leakyrelu = nn.LeakyReLU(0.1) self.relu = nn.ReLU() def forward(self, state, desc): state_encoding = self.state_enc(state) state_encoding = state_encoding.flatten(start_dim=1) desc_encoding = self.desc_enc(desc) desc_encoding = desc_encoding.flatten(start_dim=1) joint_encoding = torch.cat((state_encoding, desc_encoding), 1) joint_encoding = torch.flatten(joint_encoding, start_dim = 1) out = self.bn1(self.relu(self.fc1(joint_encoding))) out = self.bn2(self.relu(self.fc2(out))) out = self.bn3(self.relu(self.fc3(out))) out = self.bn4(self.relu(self.fc4(out))) out = self.relu(self.fc5(out)) return out class SimpleClassifier(nn.Module): def __init__(self, input_state_feats = 12288, input_desc_feats = 50): super().__init__() self.fc1 = nn.Linear(input_state_feats, 4096) self.fc2 = nn.Linear(4096, 1024) self.fc3 = nn.Linear(1024, 256) self.fc4 = nn.Linear(256, 2) def forward(self, state,desc): x = state.reshape(-1, 3*64*64) x = torch.relu(self.fc1(x)) x = torch.relu(self.fc2(x)) x = torch.relu(self.fc3(x)) #x = torch.cat((x, desc), 1) x = self.fc4(x) x = torch.softmax(x, dim = 1) return x class SimpleCNNClassifier(nn.Module): def __init__(self, input_state_feats = 12288, input_desc_feats = 50): super().__init__() self.cnn_layers = Sequential( # Defining a 2D convolution layer Conv2d(3, 4, kernel_size=3, stride=1, padding=1), BatchNorm2d(4), ReLU(inplace=True), MaxPool2d(kernel_size=2, stride=2), # Defining another 2D convolution layer Conv2d(4, 4, kernel_size=3, stride=1, padding=1), BatchNorm2d(4), ReLU(inplace=True), MaxPool2d(kernel_size=2, stride=2), Conv2d(4, 4, kernel_size=3, stride=1, padding=1), BatchNorm2d(4), ReLU(inplace=True), MaxPool2d(kernel_size=2, stride=2), ) self.fc1 = nn.Linear(256+ 50, 128) self.fc2 = nn.Linear(128, 2) def forward(self, state,desc): x = self.cnn_layers(state) x = x.flatten(1) x = torch.cat((x, desc), 1) x = torch.relu(self.fc1(x)) x = torch.relu(self.fc2(x)) x = torch.softmax(x, dim = 1) return x class ClassifierStateBB(nn.Module): def __init__(self, output_feats=64, kernel_size=None, stride=None, p=0, groups=1): super().__init__() self.fc1 = nn.Linear(14, 8) self.bn1 = nn.BatchNorm1d(num_features=8) self.fc2 = nn.Linear(8, 2) self.bn2 = nn.BatchNorm1d(num_features=2) self.leakyrelu = nn.LeakyReLU(0.1) self.relu = nn.ReLU() def forward(self, state, desc): state_encoding = state.flatten(start_dim=1) desc_encoding = desc joint_encoding = torch.cat((state_encoding, desc_encoding), 1) joint_encoding = torch.flatten(joint_encoding, start_dim = 1) out = self.bn1(self.relu(self.fc1(joint_encoding))) out = self.bn2(self.relu(self.fc2(out))) out = torch.softmax(out, dim =1) return out class ClassifierBBActionResnet(nn.Module): def __init__(self, output_feats=64, kernel_size=None, stride=None, p=0, groups=1): super().__init__() 
self.desc_enc = DescEncoder_BOW() self.state_enc = DQNImgEncoder(in_channels = 4) self.fc0 = nn.Linear(1034, 512) self.bn0 = nn.BatchNorm1d(num_features=512) self.fc1 = nn.Linear(522, 128) self.bn1 = nn.BatchNorm1d(num_features=128) self.fc2 = nn.Linear(128, 64) self.bn2 = nn.BatchNorm1d(num_features=64) self.fc3 = nn.Linear(64, 32) self.bn3 = nn.BatchNorm1d(num_features=32) self.fc4 = nn.Linear(32, 2) self.bn4 = nn.BatchNorm1d(num_features=2) self.leakyrelu = nn.LeakyReLU(0.1) self.relu = nn.ReLU() #FILM Networks self.gamma1 = nn.Linear(4, 128) self.gamma2 = nn.Linear(128, 256) self.gamma3 = nn.Linear(256, 512) self.beta1 = nn.Linear(4, 128) self.beta2 = nn.Linear(128, 256) self.beta3 = nn.Linear(256, 512) def FiLM(self, x, gamma, beta): x = gamma * x #+ beta return x def forward(self, state, desc, action): state = state.permute(0,3,1,2)/256 state_encoding = self.state_enc(state) state_encoding = state_encoding.flatten(start_dim=1) action_encoding = action.flatten(start_dim=1).float() desc_encoding = desc.squeeze() #FiLM conditioning gamma_action_encoding = self.gamma3(self.gamma2(self.gamma1(action_encoding))) beta_action_encoding = self.beta3(self.beta2(self.beta1(action_encoding))) state_act = self.FiLM(state_encoding, gamma_action_encoding, beta_action_encoding) #joint_encoding = torch.cat((state_encoding, desc_encoding, gamma_action_encoding), 1) out = torch.cat((state_act, desc_encoding), 1).float() #out = self.bn0(self.relu(self.fc0(joint_encoding.float()))) out = self.bn1(self.relu(self.fc1(out))) out = self.bn2(self.relu(self.fc2(out))) out = self.bn3(self.relu(self.fc3(out))) out = torch.softmax(self.fc4(out), dim =1) return out class DQNImgEncoder(nn.Module): def __init__(self, in_channels): """ Initialize Deep Q Network Args: in_channels (int): number of input channels """ super().__init__() self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4) self.bn1 = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2) self.bn2 = nn.BatchNorm2d(64) self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1) self.bn3 = nn.BatchNorm2d(64) self.fc4 = nn.Linear(7 * 7 * 64, 512) def forward(self, x): x = x.float() x = F.relu(self.bn1(self.conv1(x))) x = F.relu(self.bn2(self.conv2(x))) x = F.relu(self.bn3(self.conv3(x))) x = F.relu(self.fc4(x.flatten(start_dim = 1))) return x class ClassifierBBActionResnetNoMult(nn.Module): def __init__(self, output_feats=64, kernel_size=None, stride=None, p=0, groups=1): super().__init__() self.state_enc = DQNImgEncoder(in_channels = 4) self.fc0 = nn.Linear(1034, 512) self.bn0 = nn.BatchNorm1d(num_features=512) self.fc1 = nn.Linear(512, 128) self.bn1 = nn.BatchNorm1d(num_features=128) self.fc2 = nn.Linear(128, 64) self.bn2 = nn.BatchNorm1d(num_features=64) self.fc3 = nn.Linear(64, 32) self.bn3 = nn.BatchNorm1d(num_features=32) self.fc4 = nn.Linear(32, 2) self.bn4 = nn.BatchNorm1d(num_features=2) self.leakyrelu = nn.LeakyReLU(0.1) self.relu = nn.ReLU() #FILM Networks self.gamma1 = nn.Linear(4, 128) self.gamma2 = nn.Linear(128, 256) self.gamma3 = nn.Linear(256, 512) def forward(self, state, desc, action): state = state.permute(0,3,1,2)/256 state_encoding = self.state_enc(state) state_encoding = state_encoding.flatten(start_dim=1) action_encoding = action.flatten(start_dim=1).float() desc_encoding = desc.squeeze() #FiLM conditioning gamma_action_encoding = self.gamma3(self.gamma2(self.gamma1(action_encoding))) print("HI") pdb.set_trace() joint_encoding = torch.cat((state_encoding, gamma_action_encoding, desc_encoding), 1) #out = 
torch.cat((state_act, desc_encoding), 1).float() pdb.set_trace() out = self.bn0(self.relu(self.fc0(joint_encoding.float()))) out = self.bn1(self.relu(self.fc1(out))) out = self.bn2(self.relu(self.fc2(out))) out = self.bn3(self.relu(self.fc3(out))) out = torch.softmax(self.fc4(out), dim =1) return out class ClassifierCR(nn.Module): def __init__(self, output_feats=64, kernel_size=None, stride=None, p=0, groups=1): super().__init__() self.desc_enc = DescEncoder_BOW() self.state_enc = DQNImgEncoderClever(in_channels = 3) self.fc1 = nn.Linear(562, 128) self.bn1 = nn.BatchNorm1d(num_features=128) self.fc2 = nn.Linear(128, 64) self.bn2 = nn.BatchNorm1d(num_features=64) self.fc3 = nn.Linear(64, 32) self.bn3 = nn.BatchNorm1d(num_features=32) self.fc4 = nn.Linear(32, 2) self.bn4 = nn.BatchNorm1d(num_features=1) self.leakyrelu = nn.LeakyReLU(0.1) self.relu = nn.ReLU() def forward(self, state, desc): state = state.permute(0,3,1,2)/256 state_encoding = self.state_enc(state) state_encoding = state_encoding.flatten(start_dim=1) desc_encoding = desc.squeeze() joint_encoding = torch.cat((state_encoding, desc_encoding), 1) joint_encoding = torch.flatten(joint_encoding, start_dim = 1) out = self.bn1(self.relu(self.fc1(joint_encoding))) out = self.bn2(self.relu(self.fc2(out))) out = self.bn3(self.relu(self.fc3(out))) out = torch.softmax(self.fc4(out), dim =1) return out class ClassifierCRActionResnetNoMult(nn.Module): def __init__(self, output_feats=64, kernel_size=None, stride=None, p=0, groups=1): super().__init__() self.state_enc = DQNImgEncoderClever(in_channels = 3) self.fc0 = nn.Linear(1074, 512) self.bn0 = nn.BatchNorm1d(num_features=512) self.fc1 = nn.Linear(512, 128) self.bn1 = nn.BatchNorm1d(num_features=128) self.fc2 = nn.Linear(128, 64) self.bn2 = nn.BatchNorm1d(num_features=64) self.fc3 = nn.Linear(64, 32) self.bn3 = nn.BatchNorm1d(num_features=32) self.fc4 = nn.Linear(32, 2) self.bn4 = nn.BatchNorm1d(num_features=2) self.leakyrelu = nn.LeakyReLU(0.1) self.relu = nn.ReLU() #FILM Networks self.gamma1 = nn.Linear(4, 128) self.gamma2 = nn.Linear(128, 256) self.gamma3 = nn.Linear(256, 512) def forward(self, state, desc, action): state = state.permute(0,3,1,2)/256 state_encoding = self.state_enc(state) state_encoding = state_encoding.flatten(start_dim=1) action_encoding = action.flatten(start_dim=1).float() desc_encoding = desc.squeeze() #FiLM conditioning gamma_action_encoding = self.gamma3(self.gamma2(self.gamma1(action_encoding))) joint_encoding = torch.cat((state_encoding, gamma_action_encoding, desc_encoding), 1) #out = torch.cat((state_act, desc_encoding), 1).float() out = self.bn0(self.relu(self.fc0(joint_encoding.float()))) out = self.bn1(self.relu(self.fc1(out))) out = self.bn2(self.relu(self.fc2(out))) out = self.bn3(self.relu(self.fc3(out))) out = torch.softmax(self.fc4(out), dim =1) return out class ClassifierCRActionResnet(nn.Module): def __init__(self, output_feats=64, kernel_size=None, stride=None, p=0, groups=1):
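ClassifierBBActionResnet above conditions the encoded image on the action via FiLM-style feature-wise modulation: a small MLP maps the 4-d action vector to a per-feature gamma applied to the 512-d state encoding (the beta shift is commented out), while the "NoMult" variants concatenate the predicted vector instead of multiplying. A self-contained sketch of the general FiLM pattern with illustrative dimensions; it is not the exact module above:

import torch
import torch.nn as nn

class FiLMBlock(nn.Module):
    """Feature-wise linear modulation: scale and shift features with
    parameters predicted from a conditioning vector."""
    def __init__(self, cond_dim, feat_dim):
        super().__init__()
        self.gamma = nn.Linear(cond_dim, feat_dim)  # per-feature scale
        self.beta = nn.Linear(cond_dim, feat_dim)   # per-feature shift

    def forward(self, features, cond):
        return self.gamma(cond) * features + self.beta(cond)

# Toy shapes: a batch of 8 state encodings (512-d) conditioned on 4-d actions
film = FiLMBlock(cond_dim=4, feat_dim=512)
state_feats = torch.randn(8, 512)
action = torch.randn(8, 4)
modulated = film(state_feats, action)   # shape (8, 512)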
################### # 0. General Setup: ################### import sqlite3 import numpy as np MAX_IMAGE_ID = 2**31 - 1 # Strings of SQL Commands: ########################## CREATE_CAMERAS_TABLE = """CREATE TABLE IF NOT EXISTS cameras ( camera_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, model INTEGER NOT NULL, width INTEGER NOT NULL, height INTEGER NOT NULL, params BLOB, prior_focal_length INTEGER NOT NULL)""" CREATE_DESCRIPTORS_TABLE = """DROP TABLE IF EXISTS descriptors; CREATE TABLE IF NOT EXISTS descriptors ( image_id INTEGER PRIMARY KEY NOT NULL, rows INTEGER NOT NULL, cols INTEGER NOT NULL, data BLOB, FOREIGN KEY(image_id) REFERENCES images(image_id) ON DELETE CASCADE)""" CREATE_IMAGES_TABLE = """DROP TABLE IF EXISTS images; CREATE TABLE IF NOT EXISTS images ( image_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, name TEXT NOT NULL UNIQUE, camera_id INTEGER NOT NULL, prior_qw REAL, prior_qx REAL, prior_qy REAL, prior_qz REAL, prior_tx REAL, prior_ty REAL, prior_tz REAL, CONSTRAINT image_id_check CHECK(image_id >= 0 and image_id < {}), FOREIGN KEY(camera_id) REFERENCES cameras(camera_id)) """.format(MAX_IMAGE_ID) CREATE_TWO_VIEW_GEOMETRIES_TABLE = """ DROP TABLE IF EXISTS two_view_geometries; CREATE TABLE IF NOT EXISTS two_view_geometries ( pair_id INTEGER PRIMARY KEY NOT NULL, rows INTEGER NOT NULL, cols INTEGER NOT NULL, data BLOB, config INTEGER NOT NULL, F BLOB, E BLOB, H BLOB) """ CREATE_KEYPOINTS_TABLE = """DROP TABLE IF EXISTS keypoints; CREATE TABLE IF NOT EXISTS keypoints ( image_id INTEGER PRIMARY KEY NOT NULL, rows INTEGER NOT NULL, cols INTEGER NOT NULL, data BLOB, FOREIGN KEY(image_id) REFERENCES images(image_id) ON DELETE CASCADE) """ CREATE_MATCHES_TABLE = """DROP TABLE IF EXISTS matches; CREATE TABLE IF NOT EXISTS matches ( pair_id INTEGER PRIMARY KEY NOT NULL, rows INTEGER NOT NULL, cols INTEGER NOT NULL, data BLOB)""" CREATE_NAME_INDEX = "CREATE UNIQUE INDEX IF NOT EXISTS index_name ON images(name)" CREATE_ALL = '; '.join([CREATE_CAMERAS_TABLE, CREATE_IMAGES_TABLE, CREATE_KEYPOINTS_TABLE, CREATE_DESCRIPTORS_TABLE, CREATE_MATCHES_TABLE, CREATE_TWO_VIEW_GEOMETRIES_TABLE, CREATE_NAME_INDEX]) # Custom Functions: ################### def image_ids_to_pair_id(image_id1, image_id2): """ Converts two image ids into a single unique pair id. :param image_id1: image id 1 - int :param image_id2: image id 2 - int :return pair_id: a unique pair id - int """ if image_id1 > image_id2: image_id1, image_id2 = image_id2, image_id1 return image_id1 * MAX_IMAGE_ID + image_id2 def pair_id_to_image_ids(pair_id): """ Converts a single pair id into two image ids. :param pair_id: image pair id - int :return image_id1: image id 1 - int :return image_id2: image id 2 - int """ image_id2 = pair_id % MAX_IMAGE_ID image_id1 = (pair_id - image_id2) / MAX_IMAGE_ID return image_id1, image_id2 def array_to_blob(array): """ Converts an array of numbers into raw data bytes. :param array: an array of numbers - np.array :return blob: a bytes representation of the array - bytes """ return array.tostring() def blob_to_array(blob, dtype, shape=(-1,)): """ Converts a raw data bytes representation into an array. :param blob: row data bytes representation - bytes :param dtype: data type for the array - str :param shape: data shape for the array - tuple :return array: an array of numbers - np.array """ return np.fromstring(blob, dtype=dtype).reshape(*shape) ######################################################################################################################## ########################### # 1. 
COLMAP Database Class: ########################### class COLMAPDatabase(sqlite3.Connection): @staticmethod def connect(database_path): return sqlite3.connect(database_path, factory=COLMAPDatabase) def __init__(self, *args, **kwargs): """ Initializes an SQL file of a COLMAP database format at input path. All database tables are created upfront (all are empty). """ super(COLMAPDatabase, self).__init__(*args, **kwargs) self.create_tables = lambda: self.executescript(CREATE_ALL) self.create_cameras_table = \ lambda: self.executescript(CREATE_CAMERAS_TABLE) self.create_descriptors_table = \ lambda: self.executescript(CREATE_DESCRIPTORS_TABLE) self.create_images_table = \ lambda: self.executescript(CREATE_IMAGES_TABLE) self.create_two_view_geometries_table = \ lambda: self.executescript(CREATE_TWO_VIEW_GEOMETRIES_TABLE) self.create_keypoints_table = \ lambda: self.executescript(CREATE_KEYPOINTS_TABLE) self.create_matches_table = \ lambda: self.executescript(CREATE_MATCHES_TABLE) self.create_name_index = lambda: self.executescript(CREATE_NAME_INDEX) def add_camera(self, model, width, height, params, prior_focal_length=False, camera_id=None): """ Add camera model to database. :param model: camera model to use out of [0-SIMPLE_PINHOLE, 1-PINHOLE, 2-SIMPLE_RADIAL...] - int :param width: frame width for current camera - int :param height: frame height for current camera - int :param params: camera parameters (amount depends on model) - tuple :param prior_focal_length: known camera focal length - float :param camera_id: unique camera id, if not specified, a new id is created - int :return: camera_id: the unique camera id - int """ params = np.asarray(params, np.float64) cursor = self.execute( "INSERT INTO cameras VALUES (?, ?, ?, ?, ?, ?)", (camera_id, model, width, height, array_to_blob(params), prior_focal_length)) return cursor.lastrowid def add_image(self, name, camera_id, prior_q=np.zeros(4), prior_t=np.zeros(3), image_id=None): """ Add camera model to database. :param name: path to image in image folder - str :param camera_id: the unique camera id - int :param prior_q: image's camera coordinate system quaternion (Qw, Qx, Qy, Qz) - np.array :param prior_t: image's camera coordinate system translation vector (Tx, Ty, Tz) - np.array :param image_id: unique image id, if not specified, a new id is created - int :return: image_id: the unique image id - int """ cursor = self.execute( "INSERT INTO images VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (image_id, name, camera_id, prior_q[0], prior_q[1], prior_q[2], prior_q[3], prior_t[0], prior_t[1], prior_t[2])) return cursor.lastrowid def add_keypoints(self, image_id, keypoints): """ Add keypoints to an image. :param image_id: the unique image id - int :param keypoints: a list of keypoints, each is [x,y,scale,angle] - list of lists """ assert(len(keypoints.shape) == 2) assert(keypoints.shape[1] in [2, 4, 6]) keypoints = np.asarray(keypoints, np.float32) self.execute( "INSERT INTO keypoints VALUES (?, ?, ?, ?)", (image_id,) + keypoints.shape + (array_to_blob(keypoints),)) def add_descriptors(self, image_id, descriptors): """ Add keypoint descriptors to an image. 
:param image_id: the unique image id - int :param descriptors: a list of descriptors, each of length 128 - list of np.arrays """ descriptors = np.ascontiguousarray(descriptors, np.uint8) self.execute( "INSERT INTO descriptors VALUES (?, ?, ?, ?)", (image_id,) + descriptors.shape + (array_to_blob(descriptors),)) def add_matches(self, image_id1, image_id2, matches): """ Add keypoints matches between a pair of images. :param image_id1: image id 1 - int :param image_id2: image id 2 - int :param matches: an array of feature matches of size (n,2) - np.array """ assert(len(matches.shape) == 2) assert(matches.shape[1] == 2) if image_id1 > image_id2: matches = matches[:,::-1] pair_id = image_ids_to_pair_id(image_id1, image_id2) matches = np.asarray(matches, np.uint32) self.execute( "INSERT INTO matches VALUES (?, ?, ?, ?)", (pair_id,) + matches.shape + (array_to_blob(matches),)) def add_two_view_geometry(self, image_id1, image_id2, matches, F=np.eye(3), E=np.eye(3), H=np.eye(3), config=2): """ Add known geometrical transformation between a pair of images. :param image_id1: image id 1 - int :param image_id2: image id 2 - int :param matches: an array of feature matches of size (n,2) - np.array :param F: Fundamental matrix of size (3,3) - np.array :param E: Essential matrix of size (3,3) - np.array :param H: Homography matrix of size (3,3) - np.array :param config: configuration of two-view geometry [1-DEGENERATE, 2-CALIBRATED, 3-UNCALIBRATED...] - int """ assert(len(matches.shape) == 2) assert(matches.shape[1] == 2) if image_id1 > image_id2: matches = matches[:,::-1] pair_id = image_ids_to_pair_id(image_id1, image_id2) matches = np.asarray(matches, np.uint32) F = np.asarray(F, dtype=np.float64) E = np.asarray(E, dtype=np.float64) H = np.asarray(H, dtype=np.float64) self.execute( "INSERT INTO two_view_geometries VALUES (?, ?, ?, ?, ?, ?, ?, ?)", (pair_id,) + matches.shape + (array_to_blob(matches), config, array_to_blob(F), array_to_blob(E), array_to_blob(H))) ######################################################################################################################## ################### # 2. Example Usage: ################### def example_usage(): import os import argparse parser = argparse.ArgumentParser() parser.add_argument("--database_path", default="database.db") args = parser.parse_args() if os.path.exists(args.database_path): print("ERROR: database path already exists -- will not modify it.") return # Open the database: db = COLMAPDatabase.connect(args.database_path) # For convenience, create all tables upfront: db.create_tables() # Create dummy cameras: model1, width1, height1, params1 = 0, 1024, 768, np.array((1024., 512., 384.)) model2, width2, height2, params2 = 2, 1024, 768, np.array((1024., 512., 384., 0.1)) camera_id1 = db.add_camera(model1, width1, height1, params1) camera_id2 = db.add_camera(model2, width2, height2, params2) # Create dummy images: image_id1 = db.add_image("image1.png", camera_id1) image_id2 = db.add_image("image2.png", camera_id1) image_id3 = db.add_image("image3.png", camera_id2) image_id4 = db.add_image("image4.png", camera_id2) # Create dummy keypoints. 
# Note that COLMAP supports: # - 2D keypoints: (x, y) # - 4D keypoints: (x, y, scale, angle) # - 6D affine keypoints: (x, y, a_11, a_12, a_21, a_22) num_keypoints = 1000 keypoints1 = np.random.rand(num_keypoints, 2) * (width1, height1) keypoints2 = np.random.rand(num_keypoints, 2) * (width1, height1) keypoints3 = np.random.rand(num_keypoints, 2) * (width2, height2) keypoints4 = np.random.rand(num_keypoints, 2) * (width2, height2) db.add_keypoints(image_id1, keypoints1) db.add_keypoints(image_id2, keypoints2) db.add_keypoints(image_id3, keypoints3) db.add_keypoints(image_id4, keypoints4) # Create dummy matches: M = 50 matches12 = np.random.randint(num_keypoints, size=(M, 2)) matches23 = np.random.randint(num_keypoints, size=(M, 2)) matches34 = np.random.randint(num_keypoints, size=(M, 2)) db.add_matches(image_id1, image_id2, matches12) db.add_matches(image_id2, image_id3, matches23) db.add_matches(image_id3, image_id4,
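The pair-id helpers above define the convention the whole database relies on: a pair id always encodes (min_id, max_id), which is why add_matches() and add_two_view_geometry() flip the match columns when image_id1 > image_id2. A small round-trip sketch (the ids are arbitrary examples):

id_a, id_b = 17, 5
pair_id = image_ids_to_pair_id(id_a, id_b)    # ids swapped internally so 5 comes first
back_1, back_2 = pair_id_to_image_ids(pair_id)
assert (back_1, back_2) == (5, 17)            # exact round trip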
v in self.params.items() if v is not None) ) # Verify that the name is unquoted correctly in the # secrets.on_get function prior to searching the repo. self.secret_repo.get_by_create_date \ .assert_called_once_with(self.keystone_id, offset_arg=u'{0}'.format(self.offset), limit_arg=u'{0}'.format(self.limit), suppress_exception=True, name=self.name, alg=None, mode=None, bits=0) self.assertIn('secrets', resp.namespace) secrets = resp.namespace['secrets'] # The result should be the unquoted name self.assertEqual(secrets[0]['name'], self.name) def test_should_get_list_secrets(self): resp = self.app.get( '/%s/secrets/' % self.keystone_id, dict((k, v) for k, v in self.params.items() if v is not None) ) self.secret_repo.get_by_create_date \ .assert_called_once_with(self.keystone_id, offset_arg=u'{0}'.format(self.offset), limit_arg=u'{0}'.format(self.limit), suppress_exception=True, name='', alg=None, mode=None, bits=0) self.assertTrue('previous' in resp.namespace) self.assertTrue('next' in resp.namespace) url_nav_next = self._create_url(self.keystone_id, self.offset + self.limit, self.limit) self.assertTrue(resp.body.count(url_nav_next) == 1) url_nav_prev = self._create_url(self.keystone_id, 0, self.limit) self.assertTrue(resp.body.count(url_nav_prev) == 1) url_hrefs = self._create_url(self.keystone_id) self.assertTrue(resp.body.count(url_hrefs) == (self.num_secrets + 2)) def test_response_should_include_total(self): resp = self.app.get( '/%s/secrets/' % self.keystone_id, dict((k, v) for k, v in self.params.items() if v is not None) ) self.assertIn('total', resp.namespace) self.assertEqual(resp.namespace['total'], self.total) def test_should_handle_no_secrets(self): del self.secrets[:] resp = self.app.get( '/%s/secrets/' % self.keystone_id, dict((k, v) for k, v in self.params.items() if v is not None) ) self.secret_repo.get_by_create_date \ .assert_called_once_with(self.keystone_id, offset_arg=u'{0}'.format(self.offset), limit_arg=u'{0}'.format(self.limit), suppress_exception=True, name='', alg=None, mode=None, bits=0) self.assertFalse('previous' in resp.namespace) self.assertFalse('next' in resp.namespace) def _create_url(self, keystone_id, offset_arg=None, limit_arg=None): if limit_arg: offset = int(offset_arg) limit = int(limit_arg) return '/{0}/secrets?limit={1}&offset={2}'.format(keystone_id, limit, offset) else: return '/{0}/secrets'.format(keystone_id) class WhenGettingPuttingOrDeletingSecretUsingSecretResource(FunctionalTest): def setUp(self): super( WhenGettingPuttingOrDeletingSecretUsingSecretResource, self ).setUp() self.app = webtest.TestApp(app.PecanAPI(self.root)) @property def root(self): self._init() class RootController(object): secrets = controllers.secrets.SecretsController( self.crypto_mgr, self.tenant_repo, self.secret_repo, self.tenant_secret_repo, self.datum_repo, self.kek_repo ) return RootController() def _init(self): self.tenant_id = 'tenantid1234' self.keystone_id = 'keystone1234' self.name = 'name1234' secret_id = "idsecret1" datum_id = "iddatum1" kek_id = "idkek1" self.secret_algorithm = "AES" self.secret_bit_length = 256 self.secret_mode = "CBC" self.kek_tenant = models.KEKDatum() self.kek_tenant.id = kek_id self.kek_tenant.active = True self.kek_tenant.bind_completed = False self.kek_tenant.kek_label = "kek_label" self.kek_tenant.plugin_name = utils.generate_fullname_for( ctp.TestCryptoPlugin()) self.datum = models.EncryptedDatum() self.datum.id = datum_id self.datum.secret_id = secret_id self.datum.kek_id = kek_id self.datum.kek_meta_tenant = self.kek_tenant 
self.datum.content_type = "text/plain" self.datum.cypher_text = "aaaa" # base64 value. self.secret = create_secret(id_ref=secret_id, name=self.name, algorithm=self.secret_algorithm, bit_length=self.secret_bit_length, mode=self.secret_mode, encrypted_datum=self.datum) self.tenant = models.Tenant() self.tenant.id = self.tenant_id self.keystone_id = self.keystone_id self.tenant_repo = mock.MagicMock() self.tenant_repo.get.return_value = self.tenant self.secret_repo = mock.MagicMock() self.secret_repo.get.return_value = self.secret self.secret_repo.delete_entity_by_id.return_value = None self.tenant_secret_repo = mock.MagicMock() self.datum_repo = mock.MagicMock() self.datum_repo.create_from.return_value = None self.kek_repo = mock.MagicMock() self.conf = mock.MagicMock() self.conf.crypto.namespace = 'barbican.test.crypto.plugin' self.conf.crypto.enabled_crypto_plugins = ['test_crypto'] self.crypto_mgr = em.CryptoExtensionManager(conf=self.conf) def test_should_get_secret_as_json(self): resp = self.app.get( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id), headers={'Accept': 'application/json', 'Accept-Encoding': 'gzip'} ) self.secret_repo \ .get.assert_called_once_with(entity_id=self.secret.id, keystone_id=self.keystone_id, suppress_exception=True) self.assertEqual(resp.status_int, 200) self.assertNotIn('content_encodings', resp.namespace) self.assertIn('content_types', resp.namespace) self.assertIn(self.datum.content_type, resp.namespace['content_types'].itervalues()) self.assertNotIn('mime_type', resp.namespace) def test_should_get_secret_as_plain(self): resp = self.app.get( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id), headers={'Accept': 'text/plain'} ) self.secret_repo \ .get.assert_called_once_with(entity_id=self.secret.id, keystone_id=self.keystone_id, suppress_exception=True) self.assertEqual(resp.status_int, 200) self.assertIsNotNone(resp.body) def test_should_get_secret_meta_for_binary(self): self.datum.content_type = "application/octet-stream" self.datum.cypher_text = 'aaaa' resp = self.app.get( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id), headers={'Accept': 'application/json', 'Accept-Encoding': 'gzip'} ) self.secret_repo \ .get.assert_called_once_with(entity_id=self.secret.id, keystone_id=self.keystone_id, suppress_exception=True) self.assertEqual(resp.status_int, 200) self.assertIsNotNone(resp.namespace) self.assertIn('content_types', resp.namespace) self.assertIn(self.datum.content_type, resp.namespace['content_types'].itervalues()) def test_should_get_secret_as_binary(self): self.datum.content_type = "application/octet-stream" self.datum.cypher_text = 'aaaa' resp = self.app.get( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id), headers={ 'Accept': 'application/octet-stream', 'Accept-Encoding': 'gzip' } ) self.assertEqual(resp.body, 'unencrypted_data') def test_should_throw_exception_for_get_when_secret_not_found(self): self.secret_repo.get.return_value = None resp = self.app.get( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id), headers={'Accept': 'application/json', 'Accept-Encoding': 'gzip'}, expect_errors=True ) self.assertEqual(resp.status_int, 404) def test_should_throw_exception_for_get_when_accept_not_supported(self): resp = self.app.get( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id), headers={'Accept': 'bogusaccept', 'Accept-Encoding': 'gzip'}, expect_errors=True ) self.assertEqual(resp.status_int, 406) def test_should_throw_exception_for_get_when_datum_not_available(self): self.secret.encrypted_data = [] resp = 
self.app.get( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id), headers={'Accept': 'text/plain'}, expect_errors=True ) self.assertEqual(resp.status_int, 404) def test_should_put_secret_as_plain(self): self.secret.encrypted_data = [] resp = self.app.put( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id), 'plain text', headers={'Accept': 'text/plain', 'Content-Type': 'text/plain'}, ) self.assertEqual(resp.status_int, 200) args, kwargs = self.datum_repo.create_from.call_args datum = args[0] self.assertIsInstance(datum, models.EncryptedDatum) self.assertEqual(base64.b64encode('cypher_text'), datum.cypher_text) validate_datum(self, datum) def test_should_put_secret_as_binary(self): self.secret.encrypted_data = [] resp = self.app.put( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id), 'plain text', headers={ 'Accept': 'text/plain', 'Content-Type': 'application/octet-stream' }, ) self.assertEqual(resp.status_int, 200) args, kwargs = self.datum_repo.create_from.call_args datum = args[0] self.assertIsInstance(datum, models.EncryptedDatum) def test_should_put_encoded_secret_as_binary(self): self.secret.encrypted_data = [] resp = self.app.put( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id), base64.b64encode('plain text'), headers={ 'Accept': 'text/plain', 'Content-Type': 'application/octet-stream', 'Content-Encoding': 'base64' }, ) self.assertEqual(resp.status_int, 200) def test_should_fail_to_put_secret_with_unsupported_encoding(self): self.secret.encrypted_data = [] resp = self.app.put( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id), 'plain text', headers={ 'Accept': 'text/plain', 'Content-Type': 'application/octet-stream', 'Content-Encoding': 'bogusencoding' }, expect_errors=True ) self.assertEqual(resp.status_int, 400) def test_should_fail_put_secret_as_json(self): self.secret.encrypted_data = [] resp = self.app.put( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id), 'plain text', headers={ 'Accept': 'text/plain', 'Content-Type': 'application/json' }, expect_errors=True ) self.assertEqual(resp.status_int, 415) def test_should_fail_put_secret_not_found(self): # Force error, due to secret not found. 
self.secret_repo.get.return_value = None self.secret.encrypted_data = [] resp = self.app.put( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id), 'plain text', headers={'Accept': 'text/plain', 'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(resp.status_int, 404) def test_should_fail_put_secret_no_payload(self): self.secret.encrypted_data = [] resp = self.app.put( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id), # response.body = None headers={'Accept': 'text/plain', 'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(resp.status_int, 400) def test_should_fail_put_secret_with_existing_datum(self): # Force error due to secret already having data self.secret.encrypted_data = [self.datum] resp = self.app.put( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id), 'plain text', headers={'Accept': 'text/plain', 'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(resp.status_int, 409) def test_should_fail_due_to_empty_payload(self): self.secret.encrypted_data = [] resp = self.app.put( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id), '', headers={'Accept': 'text/plain', 'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(resp.status_int, 400) def test_should_fail_due_to_plain_text_too_large(self): big_text = ''.join(['A' for x in xrange( 2 * validators.DEFAULT_MAX_SECRET_BYTES)]) self.secret.encrypted_data = [] resp = self.app.put( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id), big_text, headers={'Accept': 'text/plain', 'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(resp.status_int, 413) def test_should_delete_secret(self): self.app.delete( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id) ) self.secret_repo.delete_entity_by_id \ .assert_called_once_with(entity_id=self.secret.id, keystone_id=self.keystone_id) def test_should_throw_exception_for_delete_when_secret_not_found(self): self.secret_repo.delete_entity_by_id.side_effect = excep.NotFound( "Test not found exception") resp = self.app.delete( '/%s/secrets/%s/' % (self.keystone_id, self.secret.id), expect_errors=True ) self.assertEqual(resp.status_int, 404) class WhenCreatingOrdersUsingOrdersResource(FunctionalTest): def setUp(self): super( WhenCreatingOrdersUsingOrdersResource, self ).setUp() self.app = webtest.TestApp(app.PecanAPI(self.root)) @property def root(self): self._init() class RootController(object): orders = controllers.orders.OrdersController(self.tenant_repo, self.order_repo, self.queue_resource) return RootController() def _init(self): self.secret_name = 'name' self.secret_payload_content_type = 'application/octet-stream' self.secret_algorithm = "aes" self.secret_bit_length = 128 self.secret_mode = "cbc" self.tenant_internal_id = 'tenantid1234' self.tenant_keystone_id = 'keystoneid1234' self.tenant = models.Tenant() self.tenant.id = self.tenant_internal_id self.tenant.keystone_id = self.tenant_keystone_id self.tenant_repo = mock.MagicMock() self.tenant_repo.get.return_value = self.tenant self.order_repo = mock.MagicMock() self.order_repo.create_from.return_value = None self.queue_resource = mock.MagicMock() self.queue_resource.process_order.return_value = None self.order_req = { 'secret': { 'name': self.secret_name, 'payload_content_type': self.secret_payload_content_type, 'algorithm': self.secret_algorithm, 'bit_length': self.secret_bit_length, 'mode': self.secret_mode } } def test_should_add_new_order(self): resp = self.app.post_json( '/%s/orders/' % self.tenant_keystone_id, self.order_req ) 
self.assertEqual(resp.status_int, 202) self.queue_resource.process_order \ .assert_called_once_with(order_id=None, keystone_id=self.tenant_keystone_id) args, kwargs = self.order_repo.create_from.call_args order = args[0] self.assertIsInstance(order, models.Order) def test_should_fail_add_new_order_no_secret(self): resp = self.app.post_json( '/%s/orders/' % self.tenant_keystone_id, {}, expect_errors=True ) self.assertEqual(resp.status_int, 400) def test_should_fail_add_new_order_bad_json(self): resp = self.app.post( '/%s/orders/' % self.tenant_keystone_id, '', expect_errors=True ) self.assertEqual(resp.status_int, 400) class WhenGettingOrdersListUsingOrdersResource(FunctionalTest): def setUp(self): super( WhenGettingOrdersListUsingOrdersResource, self ).setUp() self.app = webtest.TestApp(app.PecanAPI(self.root)) @property def root(self): self._init() class RootController(object): orders = controllers.orders.OrdersController(self.tenant_repo, self.order_repo, self.queue_resource) return RootController() def _init(self): self.tenant_id = 'tenant1234' self.keystone_id = 'keystoneid1234' self.name = 'name1234' self.mime_type = 'text/plain' self.secret_algorithm = "algo" self.secret_bit_length = 512 self.secret_mode = "cytype" self.params = {'offset': 2, 'limit': 2} self.num_orders = 10 self.offset = 2 self.limit = 2 order_params = {'name': self.name, 'algorithm': self.secret_algorithm, 'bit_length': self.secret_bit_length, 'mode': self.secret_mode} self.orders = [create_order(id_ref='id' + str(id), **order_params) for id in xrange(self.num_orders)] self.total = len(self.orders) self.order_repo = mock.MagicMock() self.order_repo.get_by_create_date.return_value = (self.orders, self.offset, self.limit, self.total) self.tenant_repo = mock.MagicMock() self.queue_resource = mock.MagicMock() self.queue_resource.process_order.return_value = None self.params = { 'offset': self.offset, 'limit': self.limit } def test_should_get_list_orders(self): resp = self.app.get('/%s/orders/' % self.keystone_id, self.params) self.order_repo.get_by_create_date \ .assert_called_once_with(self.keystone_id, offset_arg=u'{0}'.format(self.offset), limit_arg=u'{0}'.format(self.limit), suppress_exception=True) self.assertTrue('previous' in resp.namespace) self.assertTrue('next' in resp.namespace) url_nav_next = self._create_url(self.keystone_id, self.offset + self.limit, self.limit) self.assertTrue(resp.body.count(url_nav_next) == 1) url_nav_prev = self._create_url(self.keystone_id, 0, self.limit) self.assertTrue(resp.body.count(url_nav_prev) == 1) url_hrefs = self._create_url(self.keystone_id) self.assertTrue(resp.body.count(url_hrefs) == (self.num_orders + 2)) def test_response_should_include_total(self): resp = self.app.get('/%s/orders/' % self.keystone_id, self.params) self.assertIn('total', resp.namespace) self.assertEqual(resp.namespace['total'], self.total) def test_should_handle_no_orders(self): del self.orders[:] resp = self.app.get('/%s/orders/' % self.keystone_id, self.params) self.order_repo.get_by_create_date \ .assert_called_once_with(self.keystone_id, offset_arg=u'{0}'.format(self.offset), limit_arg=u'{0}'.format(self.limit), suppress_exception=True) self.assertFalse('previous' in resp.namespace) self.assertFalse('next' in resp.namespace) def _create_url(self, keystone_id, offset_arg=None, limit_arg=None): if limit_arg: offset = int(offset_arg) limit = int(limit_arg) return '/{0}/orders?limit={1}&offset={2}'.format(keystone_id, limit, offset) else: return '/{0}/orders'.format(self.keystone_id) class 
WhenGettingOrDeletingOrderUsingOrderResource(FunctionalTest): def setUp(self): super( WhenGettingOrDeletingOrderUsingOrderResource, self ).setUp() self.app = webtest.TestApp(app.PecanAPI(self.root)) @property def root(self): self._init() class RootController(object): orders = controllers.orders.OrdersController(self.tenant_repo, self.order_repo, self.queue_resource) return RootController() def _init(self): self.tenant_keystone_id = 'keystoneid1234' self.requestor = 'requestor1234' self.order = create_order(id_ref="id1", name="name") self.order_repo = mock.MagicMock() self.order_repo.get.return_value = self.order self.order_repo.delete_entity_by_id.return_value = None self.tenant_repo = mock.MagicMock() self.queue_resource = mock.MagicMock() def test_should_get_order(self): self.app.get('/%s/orders/%s/' % (self.tenant_keystone_id, self.order.id)) self.order_repo.get \ .assert_called_once_with(entity_id=self.order.id, keystone_id=self.tenant_keystone_id, suppress_exception=True) def test_should_delete_order(self): self.app.delete('/%s/orders/%s/' % (self.tenant_keystone_id, self.order.id)) self.order_repo.delete_entity_by_id \ .assert_called_once_with(entity_id=self.order.id, keystone_id=self.tenant_keystone_id) def test_should_throw_exception_for_get_when_order_not_found(self): self.order_repo.get.return_value = None resp = self.app.get( '/%s/orders/%s/' % (self.tenant_keystone_id, self.order.id), expect_errors=True ) self.assertEqual(resp.status_int, 404) def test_should_throw_exception_for_delete_when_order_not_found(self): self.order_repo.delete_entity_by_id.side_effect = excep.NotFound( "Test not found exception") resp = self.app.delete( '/%s/orders/%s/' %
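# --- Hedged aside (illustrative sketch, not part of the test suite above) ---
# The secret/order tests above share one pattern: mount a Pecan controller
# whose repositories are mock.MagicMock() objects, drive it with
# webtest.TestApp, then assert on resp.status_int and on how the mocks were
# called. The standalone snippet below shows only the mock half of that
# pattern with made-up ids, so it runs without the rest of the application:
import mock

order_repo = mock.MagicMock()
order_repo.get.return_value = None  # behave as if the order does not exist
order_repo.get(entity_id='id1', keystone_id='keystoneid1234',
               suppress_exception=True)  # what the controller would do
order_repo.get.assert_called_once_with(entity_id='id1',
                                       keystone_id='keystoneid1234',
                                       suppress_exception=True)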
from __future__ import print_function, absolute_import, division from contextlib import contextmanager import numpy as np import scipy.sparse as ss from numba import cuda from .binding import (cuSparse, CUSPARSE_INDEX_BASE_ZERO, CUSPARSE_INDEX_BASE_ONE) dtype_to_char = { np.dtype(np.float32): 'S', np.dtype(np.float64): 'D', np.dtype(np.complex64): 'C', np.dtype(np.complex128): 'Z', } def _sentry_ndim(ndim, **kws): for k, a in kws.items(): if a.ndim != ndim: raise ValueError("%s.ndim must be %dD" % (k, ndim)) def _sentry_dtype(dtype, **kws): for k, a in kws.items(): if a.dtype != dtype: raise TypeError("%s.dtype is not %s" % (k, dtype)) @contextmanager def _readonly(*arys): ds = [] for a in arys: dmem, _ = cuda._auto_device(a) ds.append(dmem) yield ds @contextmanager def _readwrite(*arys): ds = [] ws = [] for a in arys: dmem, conv = cuda._auto_device(a) ds.append(dmem) if conv: ws.append((a, dmem)) yield ds for a, d in ws: d.copy_to_host(a) class Sparse(object): """All cuSPARSE functions are available under the Sparse object. :param idxbase: The base for indexing, either 0 or 1. Optional, defaults to 0. """ @cuda.require_context def __init__(self, idxbase=0): """ Args ---- - idxbase int Index base. Must be 0 or 1 """ if idxbase not in (0, 1): raise ValueError("Invalid index base") self.api = cuSparse() self.idxbase = (CUSPARSE_INDEX_BASE_ZERO, CUSPARSE_INDEX_BASE_ONE)[idxbase] @property def stream(self): return self.api.stream @stream.setter def stream(self, stream): self.api.stream = stream def _get_api(self, fname, dtype): ch = dtype_to_char[np.dtype(dtype)] fn = "%s%s" % (ch, fname) return getattr(self.api, fn) def matdescr(self, indexbase=None, diagtype='N', fillmode='L', matrixtype='G'): descr = self.api.matdescr() descr.indexbase = self.idxbase if indexbase is None else indexbase descr.diagtype = diagtype descr.fillmode = fillmode descr.matrixtype = matrixtype return descr # ------------------------------------------------------------------------ # Level 1 API def axpyi(self, alpha, xVal, xInd, y): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-axpyi """ _sentry_ndim(1, xVal=xVal, xInd=xVal, y=y) _sentry_dtype(np.int32, xInd=xInd) _sentry_dtype(xVal.dtype, y=y) fn = self._get_api("axpyi", xVal.dtype) nnz = xVal.size with _readonly(xVal, xInd) as [dxval, dxind]: with _readwrite(y) as [dy]: fn(nnz=nnz, alpha=alpha, xVal=dxval, xInd=dxind, y=dy, idxBase=self.idxbase) return y def doti(self, xVal, xInd, y): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-doti """ _sentry_ndim(1, xVal=xVal, xInd=xInd, y=y) _sentry_dtype(np.int32, xInd=xInd) _sentry_dtype(xVal.dtype, y=y) fn = self._get_api("doti", xVal.dtype) nnz = xVal.size with _readonly(xVal, xInd) as [dxval, dxind]: with _readwrite(y) as [dy]: result = fn(nnz=nnz, xVal=dxval, xInd=dxind, y=dy, idxBase=self.idxbase) return result def dotci(self, xVal, xInd, y): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-dotci """ _sentry_ndim(1, xVal=xVal, xInd=xInd, y=y) _sentry_dtype(np.int32, xInd=xInd) _sentry_dtype(xVal.dtype, y=y) fn = self._get_api("dotci", xVal.dtype) nnz = xVal.size with _readonly(xVal, xInd) as [dxval, dxind]: with _readwrite(y) as [dy]: result = fn(nnz=nnz, xVal=dxval, xInd=dxind, y=dy, idxBase=self.idxbase) return result def gthr(self, y, xVal, xInd): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-gthr """ _sentry_ndim(1, xVal=xVal, xInd=xInd, y=y) _sentry_dtype(np.int32, xInd=xInd) _sentry_dtype(xVal.dtype, y=y) fn = self._get_api("gthr", xVal.dtype) nnz = xVal.size with 
_readonly(y, xInd) as [dy, dxind]: with _readwrite(xVal) as [dxval]: fn(nnz=nnz, xVal=dxval, xInd=dxind, y=dy, idxBase=self.idxbase) def gthrz(self, y, xVal, xInd): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-gthrz """ _sentry_ndim(1, xVal=xVal, xInd=xInd, y=y) _sentry_dtype(np.int32, xInd=xInd) _sentry_dtype(xVal.dtype, y=y) fn = self._get_api("gthrz", xVal.dtype) nnz = xVal.size with _readonly(xInd) as [dxind]: with _readwrite(y, xVal) as [dy, dxval]: fn(nnz=nnz, xVal=dxval, xInd=dxind, y=dy, idxBase=self.idxbase) def roti(self, xVal, xInd, y, c, s): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-roti """ _sentry_ndim(1, xVal=xVal, xInd=xInd, y=y) _sentry_dtype(np.int32, xInd=xInd) _sentry_dtype(xVal.dtype, y=y) fn = self._get_api("roti", xVal.dtype) nnz = xVal.size with _readonly(xInd) as [dxind]: with _readwrite(y, xVal) as [dy, dxval]: fn(nnz=nnz, xVal=dxval, xInd=dxind, y=dy, c=c, s=s, idxBase=self.idxbase) def sctr(self, xVal, xInd, y): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-sctr """ _sentry_ndim(1, xVal=xVal, xInd=xInd, y=y) _sentry_dtype(np.int32, xInd=xInd) _sentry_dtype(xVal.dtype, y=y) fn = self._get_api("sctr", xVal.dtype) nnz = xVal.size with _readonly(xVal, xInd) as [dxval, dxind]: with _readwrite(y) as [dy]: fn(nnz=nnz, xVal=dxval, xInd=dxind, y=dy, idxBase=self.idxbase) # ------------------------------------------------------------------------ # Level 2 API def bsrmv_matrix(self, dir, trans, alpha, descr, bsrmat, x, beta, y): bsrVal = bsrmat.data bsrRowPtr = bsrmat.indptr bsrColInd = bsrmat.indices nnzb = bsrColInd.size m, n = bsrmat.shape blockDim, blockDim1 = bsrmat.blocksize assert blockDim == blockDim1 mb = (m + blockDim - 1) // blockDim nb = (n + blockDim - 1) // blockDim self.bsrmv(dir, trans, mb, nb, nnzb, alpha, descr, bsrVal, bsrRowPtr, bsrColInd, blockDim, x, beta, y) def bsrmv(self, dir, trans, mb, nb, nnzb, alpha, descr, bsrVal, bsrRowPtr, bsrColInd, blockDim, x, beta, y): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-bsrmv """ _sentry_ndim(1, x=x, y=y) _sentry_dtype(bsrVal.dtype, x=x, y=y) fn = self._get_api("bsrmv", bsrVal.dtype) with _readonly(bsrVal, bsrRowPtr, bsrColInd, x) \ as [dbsrVal, dbsrRowPtr, dbsrColInd, dx]: with _readwrite(y) as [dy]: fn(dirA=dir, transA=trans, mb=mb, nb=nb, nnzb=nnzb, alpha=alpha, descrA=descr, bsrValA=dbsrVal, bsrRowPtrA=dbsrRowPtr, bsrColIndA=dbsrColInd, blockDim=blockDim, x=dx, beta=beta, y=dy) def bsrxmv(self, dir, trans, sizeOfMask, mb, nb, nnzb, alpha, descr, bsrVal, bsrMaskPtr, bsrRowPtr, bsrEndPtr, bsrColInd, blockDim, x, beta, y): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-bsrxmv """ _sentry_ndim(1, x=x, y=y) _sentry_dtype(bsrVal.dtype, x=x, y=y) fn = self._get_api("bsrxmv", bsrVal.dtype) with _readonly(bsrVal, bsrRowPtr, bsrColInd, bsrMaskPtr, bsrEndPtr, x) \ as [dbsrVal, dbsrRowPtr, dbsrColInd, dbsrMaskPtr, dbsrEndPtr, dx]: with _readwrite(y) as [dy]: fn(dirA=dir, transA=trans, sizeOfMask=sizeOfMask, mb=mb, nb=nb, nnzb=nnzb, alpha=alpha, descrA=descr, bsrValA=dbsrVal, bsrRowPtrA=dbsrRowPtr, bsrColIndA=dbsrColInd, bsrMaskPtrA=dbsrMaskPtr, bsrEndPtrA=dbsrEndPtr, blockDim=blockDim, x=dx, beta=beta, y=dy) def csrmv(self, trans, m, n, nnz, alpha, descr, csrVal, csrRowPtr, csrColInd, x, beta, y): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-csrmv """ _sentry_ndim(1, x=x, y=y) _sentry_dtype(csrVal.dtype, x=x, y=y) fn = self._get_api("csrmv", csrVal.dtype) with _readonly(csrVal, csrRowPtr, csrColInd, x) \ as [dcsrVal, dcsrRowPtr, dcsrColInd, 
dx]: with _readwrite(y) as [dy]: fn(transA=trans, m=m, n=n, nnz=nnz, alpha=alpha, descrA=descr, csrValA=dcsrVal, csrRowPtrA=dcsrRowPtr, csrColIndA=dcsrColInd, x=dx, beta=beta, y=dy) def csrsv_analysis(self, trans, m, nnz, descr, csrVal, csrRowPtr, csrColInd): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-csrsvanalysis Returns ------- SolveAnalysisInfo """ fn = self._get_api("csrsv_analysis", csrVal.dtype) info = self.api.solve_analysis_info() with _readonly(csrVal, csrRowPtr, csrColInd) \ as [dcsrVal, dcsrRowPtr, dcsrColInd]: fn(transA=trans, m=m, nnz=nnz, descrA=descr, csrValA=dcsrVal, csrRowPtrA=dcsrRowPtr, csrColIndA=dcsrColInd, info=info) return info def csrsv_solve(self, trans, m, alpha, descr, csrVal, csrRowPtr, csrColInd, info, x, y): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-csrsvsolve """ _sentry_ndim(1, x=x, y=y) _sentry_dtype(csrVal.dtype, x=x, y=y) fn = self._get_api("csrsv_solve", csrVal.dtype) with _readonly(csrVal, csrRowPtr, csrColInd, x) \ as [dcsrVal, dcsrRowPtr, dcsrColInd, dx]: with _readwrite(y) as [dy]: fn(transA=trans, m=m, alpha=alpha, descrA=descr, csrValA=dcsrVal, csrRowPtrA=dcsrRowPtr, csrColIndA=dcsrColInd, info=info, x=dx, y=dy) hybmv = NotImplemented hybmv_analysis = NotImplemented hybmv_solve = NotImplemented # ------------------------------------------------------------------------ # Level 3 API def csrmm(self, transA, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-csrmm """ _sentry_dtype(csrValA.dtype, B=B, C=C) fn = self._get_api("csrmm", csrValA.dtype) with _readonly(csrValA, csrRowPtrA, csrColIndA, B) \ as [dcsrValA, dcsrRowPtrA, dcsrColIndA, dB]: with _readwrite(C) as [dC]: fn(transA=transA, m=m, n=n, k=k, nnz=nnz, alpha=alpha, descrA=descrA, csrValA=dcsrValA, csrRowPtrA=dcsrRowPtrA, csrColIndA=dcsrColIndA, B=dB, ldb=ldb, beta=beta, C=dC, ldc=ldc) def csrmm2(self, transA, transB, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-csrmm2 """ _sentry_dtype(csrValA.dtype, B=B, C=C) fn = self._get_api("csrmm2", csrValA.dtype) with _readonly(csrValA, csrRowPtrA, csrColIndA, B) \ as [dcsrValA, dcsrRowPtrA, dcsrColIndA, dB]: with _readwrite(C) as [dC]: fn(transa=transA, transb=transB, m=m, n=n, k=k, nnz=nnz, alpha=alpha, descrA=descrA, csrValA=dcsrValA, csrRowPtrA=dcsrRowPtrA, csrColIndA=dcsrColIndA, B=dB, ldb=ldb, beta=beta, C=dC, ldc=ldc) def csrsm_analysis(self, transA, m, nnz, descrA, csrValA, csrRowPtrA, csrColIndA): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-csrsmanalysis """ fn = self._get_api("csrsm_analysis", csrValA.dtype) info = self.api.solve_analysis_info() with _readonly(csrValA, csrRowPtrA, csrColIndA) \ as [dcsrValA, dcsrRowPtrA, dcsrColIndA]: fn(transA=transA, m=m, nnz=nnz, descrA=descrA, csrValA=dcsrValA, csrRowPtrA=dcsrRowPtrA, csrColIndA=dcsrColIndA, info=info) return info def csrsm_solve(self, transA, m, n, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, info, X, ldx, Y, ldy): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-csrsmsolve """ fn = self._get_api("csrsm_solve", csrValA.dtype) with _readonly(csrValA, csrRowPtrA, csrColIndA, X) \ as [dcsrValA, dcsrRowPtrA, dcsrColIndA, dX]: with _readwrite(Y) as [dY]: fn(transA=transA, m=m, n=n, alpha=alpha, descrA=descrA, csrValA=dcsrValA, csrRowPtrA=dcsrRowPtrA, csrColIndA=dcsrColIndA, info=info, x=dX, ldx=ldx, y=dY, ldy=ldy) # 
------------------------------------------------------------------------ # Extra API def XcsrgeamNnz(self, m, n, descrA, nnzA, csrRowPtrA, csrColIndA, descrB, nnzB, csrRowPtrB, csrColIndB, descrC, csrRowPtrC): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-csrgeam Returns ------- int nnzC """ fn = self.api.XcsrgeamNnz with _readonly(csrRowPtrA, csrColIndA, csrRowPtrB, csrColIndB) \ as (dcsrRowPtrA, dcsrColIndA, dcsrRowPtrB, dcsrColIndB): with _readwrite(csrRowPtrC) as [dcsrRowPtrC]: nnzC = fn(m=m, n=n, descrA=descrA, nnzA=nnzA, csrRowPtrA=dcsrRowPtrA, csrColIndA=dcsrColIndA, descrB=descrB, nnzB=nnzB, csrRowPtrB=dcsrRowPtrB, csrColIndB=dcsrColIndB, descrC=descrC, csrRowPtrC=dcsrRowPtrC, nnzTotalDevHostPtr=0) return nnzC def csrgeam(self, m, n, alpha, descrA, nnzA, csrValA, csrRowPtrA, csrColIndA, beta, descrB, nnzB, csrValB, csrRowPtrB, csrColIndB, descrC, csrValC, csrRowPtrC, csrColIndC): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-csrgeam """ fn = self._get_api("csrgeam", csrValA.dtype) with _readonly(csrValA, csrRowPtrA, csrColIndA, csrValB, csrRowPtrB, csrColIndB, csrRowPtrC) \ as [dcsrValA, dcsrRowPtrA, dcsrColIndA, dcsrValB, dcsrRowPtrB, dcsrColIndB, dcsrRowPtrC]: with _readwrite(csrValC, csrColIndC) as [dcsrValC, dcsrColIndC]: fn(m=m, n=n, alpha=alpha, descrA=descrA, nnzA=nnzA, csrValA=dcsrValA, csrRowPtrA=dcsrRowPtrA, csrColIndA=dcsrColIndA, csrValB=dcsrValB, descrB=descrB, nnzB=nnzB, beta=beta, csrRowPtrB=dcsrRowPtrB, csrColIndB=dcsrColIndB, descrC=descrC, csrValC=dcsrValC, csrRowPtrC=dcsrRowPtrC, csrColIndC=dcsrColIndC) def XcsrgemmNnz(self, transA, transB, m, n, k, descrA, nnzA, csrRowPtrA, csrColIndA, descrB, nnzB, csrRowPtrB, csrColIndB, descrC, csrRowPtrC): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-csrgemm Returns ------- int nnzC """ fn = self.api.XcsrgemmNnz with _readonly(csrRowPtrA, csrColIndA, csrRowPtrB, csrColIndB) \ as (dcsrRowPtrA, dcsrColIndA, dcsrRowPtrB, dcsrColIndB): with _readwrite(csrRowPtrC) as [dcsrRowPtrC]: nnzC = fn(transA=transA, transB=transB, k=k, m=m, n=n, descrA=descrA, nnzA=nnzA, csrRowPtrA=dcsrRowPtrA, csrColIndA=dcsrColIndA, descrB=descrB, nnzB=nnzB, csrRowPtrB=dcsrRowPtrB, csrColIndB=dcsrColIndB, descrC=descrC, csrRowPtrC=dcsrRowPtrC, nnzTotalDevHostPtr=0) return nnzC def csrgemm(self, transA, transB, m, n, k, descrA, nnzA, csrValA, csrRowPtrA, csrColIndA, descrB, nnzB, csrValB, csrRowPtrB, csrColIndB, descrC, csrValC, csrRowPtrC, csrColIndC): """http://docs.nvidia.com/cuda/cusparse/#cusparse-lt-t-gt-csrgemm """ fn = self._get_api("csrgemm", csrValA.dtype)
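# --- Hedged aside (illustrative only, not part of the wrapper above) ---
# Every cuSPARSE routine comes in S/D/C/Z flavours; Sparse._get_api simply
# prefixes the base name with the character matching the array dtype and looks
# the result up on the low-level binding. A tiny standalone recreation of that
# dispatch (no GPU required; resolve_name is a made-up helper name):
import numpy as np

dtype_to_char = {
    np.dtype(np.float32): 'S',
    np.dtype(np.float64): 'D',
    np.dtype(np.complex64): 'C',
    np.dtype(np.complex128): 'Z',
}

def resolve_name(fname, dtype):
    # e.g. resolve_name("axpyi", np.float64) -> "Daxpyi"
    return dtype_to_char[np.dtype(dtype)] + fname

assert resolve_name("csrmv", np.float32) == "Scsrmv"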
id for each row in the dense tensor represented by sp_ids (i.e. there are no rows with empty features), and that all the indices of sp_ids are in canonical row-major order. It also assumes that all id values lie in the range [0, p0), where p0 is the sum of the size of params along dimension 0. !!! note in tensorflow's implementation, sparse gradients do not propagate through gather. Args: sp_tensor: params: A single tensor representing the complete embedding tensor, or a list of P tensors all of same shape except for the first dimension, representing sharded embedding tensors. Alternatively, a `PartitionedVariable`, created by partitioning along dimension 0. Each element must be appropriately sized for the given `partition_strategy`. sp_tensor (`SparseTensor`): N x M `SparseTensor` with the ids and weights where N is typically batch size and M is arbitrary. combiner: A string specifying the reduction op. Currently "mean", "sqrtn" and "sum" are supported. "sum" computes the weighted sum of the embedding results for each row. "mean" is the weighted sum divided by the total weight. "sqrtn" is the weighted sum divided by the square root of the sum of the squares of the weights. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value, before combining. name (`str`): op name Returns: tensor (`Tensor`): dense tensor representing the combined embeddings for the sparse ids. For each row in the dense tensor represented by `sp_ids`, the op looks up the embeddings for all ids in that row, multiplies them by the corresponding weight, and combines these embeddings as specified. Raises: TypeError: If `sp_ids` is not a `SparseTensor`, or if `sp_weights` is neither `None` nor `SparseTensor`. ValueError: If `combiner` is not one of {"mean", "sqrtn", "sum"}. """ if combiner is None: logging.warn("The default value of combiner will change from \"mean\" " "to \"sqrtn\" after 2016/11/01.") combiner = "mean" if combiner not in ("mean", "sqrtn", "sum"): raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'") if isinstance(params, PartitionedVariable): params = list(params) # Iterate to get the underlying Variables. if not isinstance(params, list): params = [params] if not isinstance(sp_tensor, tf.SparseTensor): raise TypeError("sp_ids must be SparseTensor") with tf.name_scope(name) as name: segment_ids = sp_tensor.indices[:, 0] if segment_ids.dtype != tf.int32: segment_ids = tf.cast(segment_ids, tf.int32) ids = sp_tensor.indices[:, -1] # ids, idx = tf.unique(ids) embeddings = tf.nn.embedding_lookup( params=params, ids=ids, max_norm=max_norm) # *** # this second lookup causes problems because sparse gradients don't propagate though gather # embeddings = embedding_lookup(embeddings, idx) # embeddings, _ = gather_dynamic(embeddings, idx) # *** weights = sp_tensor.values if weights.dtype != embeddings.dtype: weights = tf.cast(weights, embeddings.dtype) # Reshape weights to allow broadcast ones = tf.fill( tf.expand_dims(tf.rank(embeddings) - 1, 0), 1) bcast_weights_shape = tf.concat( [tf.shape(weights), ones], 0) orig_weights_shape = weights.get_shape() weights = tf.reshape(weights, bcast_weights_shape) # Set the weight shape, since after reshaping to bcast_weights_shape, # the shape becomes None. 
if embeddings.get_shape().ndims is not None: weights.set_shape(orig_weights_shape.concatenate( [1 for _ in range(embeddings.get_shape().ndims - 1)])) embeddings *= weights if combiner == "sum": embeddings = tf.math.segment_sum(embeddings, segment_ids, name=name) elif combiner == "mean": embeddings = tf.math.segment_sum(embeddings, segment_ids) weight_sum = tf.math.segment_sum(weights, segment_ids) embeddings = tf.math.divide_no_nan(embeddings, weight_sum, name=name) elif combiner == "sqrtn": embeddings = tf.math.segment_sum(embeddings, segment_ids) weights_squared = tf.math.pow(weights, 2) weight_sum = tf.math.segment_sum(weights_squared, segment_ids) weight_sum_sqrt = tf.math.sqrt(weight_sum) embeddings = tf.math.divide_no_nan(embeddings, weight_sum_sqrt, name=name) else: assert False, "Unrecognized combiner" return embeddings def sparse_overlap(sp_tensor1, sp_tensor2, name="sparse_overlap"): """sparse overlap Returns a `SparseTensor` containing the indices that appear in both sparse tensors, with the values taken from the first one. Args: sp_tensor1 (`SparseTensor`): a sparse tensor sp_tensor2 (`SparseTensor`): another sparse tensor name (`str`): name for sparse_overlap op Returns: sp_tensor (`SparseTensor`): sparse tensor with the overlapping indices and the values of `sp_tensor1` """ with tf.name_scope(name): ones1 = mx.sparse_ones(sp_tensor1.indices, sp_tensor1.dense_shape) ones2 = mx.sparse_ones(sp_tensor2.indices, sp_tensor2.dense_shape) index_union = tf.sparse.add(ones1, ones2) index_filter = tf.equal(index_union.values, 2.) zeros1 = sparse_zeros(index_union.indices, index_union.dense_shape, sp_tensor1.values.dtype) expand1 = tf.sparse.add(zeros1, sp_tensor1) filtered = tf.sparse.retain(expand1, index_filter) return filtered def sort_by_first(tensor1, tensor2, ascending=True, name="sort_by_first"): """sort_by_first Sorts two tensors: the first is sorted by value, and the second is reordered with the same permutation. Args: tensor1 (`Tensor`): tensor that determines the order by which the second is sorted tensor2 (`Tensor`): tensor to be sorted according to the sorting of the first ascending (`Bool`): if True sorts by ascending order of value name (`str`): name of the op Returns: tensor1, tensor2 (`Tensor`,`Tensor`): the sorted first tensor, and the second tensor reordered according to the sort indices of the first """ with tf.name_scope(name=name): tensor1 = as_tensor(tensor1) tensor2 = as_tensor(tensor2) sorted_tensor1, sorted_tensor1_indices = tf.nn.top_k(tensor1, k=tf.shape(tensor1)[-1]) if ascending: sorted_tensor1 = tf.reverse(sorted_tensor1, axis=[-1]) sorted_tensor1_indices = tf.reverse(sorted_tensor1_indices, axis=[-1]) # TODO not sure what the performance implication of this check is when converted to graph if len(tensor1.shape.as_list()) == 1: sorted_tensor1_indices = tf.expand_dims(sorted_tensor1_indices, 1) else: sorted_tensor1_indices = matrix_indices(sorted_tensor1_indices, sort_indices=False) sorted_values = tf.gather_nd(tensor2, sorted_tensor1_indices) sorted_values = tf.reshape(sorted_values, tf.shape(tensor2)) return sorted_tensor1, sorted_values def ranges(range_sizes, name="ranges"): """ ranges similar to concatenating multiple `tf.range` calls applied to each element of a given 1D tensor with range sizes.
Example: ```python ranges([1,2,4]) [0,0,1,0,1,2,3] ``` the enums are `[0]`, `[0,1]`, `[0,1,2,3]` Args: range_sizes (`Tensor`): 1D tensor with range sizes name (`str`): ranges op name Returns: ranges (`Tensor`): a 1D `Tensor` with `tf.reduce_sum(range_sizes)` dimensions """ with tf.name_scope(name): range_sizes = tf.convert_to_tensor(range_sizes) tf.ensure_shape(range_sizes, tf.TensorShape([None])) tf.debugging.assert_greater(tf.shape(range_sizes)[0], 0, message="range_sizes cannot be empty") num_ranges = tf.shape(range_sizes)[0] # get maximum repeat length in x max_len = tf.math.reduce_max(range_sizes) x = tf.range(max_len) # tile it to the maximum repeat length [maxlen x maxlen] now x_repeat = tf.stack([num_ranges, 1], axis=0) x_tiled = tf.tile(tf.expand_dims(x, 0), x_repeat) # create a sequence mask using x # this will create a boolean matrix of shape [xlen, max_len] # where result[i,j] is true if j < x[i]. mask = tf.sequence_mask(range_sizes, max_len) # mask the elements based on the sequence mask return tf.boolean_mask(x_tiled, mask) def gather_sparse(sp_tensor, ids, name="gather_sparse"): """ gather_sparse gather rows from a sparse tensor by the given ids and returns a sparse tensor !!! warning gathering from a `SparseTensor` is inefficient Example: ```python gather_sparse(sp_tensor,[1,1,4]) ``` returns a `[3,sp_tensor.dense_shape[-1]]` `SparseTensor` Args: sp_tensor (`SparseTensor`): sparse tensor ids (`Tensor`): an int tensor with the ids of the rows to be returned name (`str`): on name Returns: sp_gathered (`SparseTensor`): a sparse tensor with the gathered rows. """ with tf.name_scope(name=name): ids = tf.cast(ids, tf.int64) ids = tf.reshape(ids, [-1]) # count columns and compute row coordinates sp_column_ones = sparse_ones(sp_tensor.indices, sp_tensor.dense_shape, dtype=tf.int64) col_count = tf.sparse.reduce_sum(sp_column_ones, axis=-1) # sparse_reduce_sum sets shape to unknown col_count.set_shape([sp_tensor.get_shape().as_list()[0]]) col_count_cs = tf.math.cumsum(col_count) row_start_coor = col_count_cs - col_count g_col_count = tf.gather(col_count, ids) g_row_start_coor = tf.gather(row_start_coor, ids) row_start_coor = tf.repeat(g_row_start_coor, g_col_count) # col_counts = repeat_each(g_col_count, g_col_count) offset = ranges(g_col_count) # use modular arithmetic to make sure we get incremental coordinates # gather_ids = row_start_coor + offset % col_counts gather_ids = row_start_coor + offset num_ids = tf.cast(tf.shape(ids)[0], tf.int64) new_rows = tf.repeat(tf.range(num_ids), g_col_count) sp_cols = sp_tensor.indices[:, -1] new_cols = tf.gather(sp_cols, gather_ids) new_indices = tf.stack([new_rows, new_cols], axis=-1) new_values = tf.gather(sp_tensor.values, gather_ids) new_shape = tf.concat([tf.expand_dims(tf.cast(num_ids, tf.int64), -1), sp_tensor.dense_shape[1:]], axis=-1) sp = tf.SparseTensor(new_indices, new_values, new_shape) return sp def grid_2d(shape, name="grid_2d"): """ creates a tensor with a grid 2d coordinates Args: shape (`Tensor`): an Tensor of tf.int32 with a 2D shape for the grid name (`str`): grid_2d op name Returns: grid_coordinates (`Tensor`): 2D tensor with grid coordinates """ shape = as_tensor(shape, tf.int32) with tf.name_scope(name): x = tf.range(shape[0]) y = tf.range(shape[1]) x = x[tf.newaxis, :, tf.newaxis] y = y[:, tf.newaxis, tf.newaxis] return tf.reshape(tf.concat([x + tf.zeros_like(y), tf.zeros_like(x) + y], axis=2), [-1, 2]) def sparse_tile(sp_tensor, num, name="sparse_tile"): """ Constructs a `SparseTensor` by replicating the input sparse 
tensor `num` times Args: sp_tensor (`SparseTensor`): a sparse input tensor to be tiled num (`int`): number of repetitions name (`str`): name for the op Returns: sp_tile (`SparseTensor`): result sparse tensor """ with tf.name_scope(name): sp_tensor = as_tensor(sp_tensor) values = tf.tile(sp_tensor.values, [num]) num = as_tensor(num, tf.int64) indices = tf.tile(sp_tensor.indices, [num, 1]) row_indices, col_indices = tf.unstack(indices, num=2, axis=-1) # fix row indices num_values = tf.shape(sp_tensor.values, out_type=tf.int64)[0] batch_size = tf.shape(sp_tensor, out_type=tf.int64)[0] # this is preferable to using dense
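# --- Hedged aside: what the ranges() helper defined above computes ---
# ranges([1, 2, 4]) concatenates range(1), range(2) and range(4) into a single
# 1-D tensor, i.e. [0, 0, 1, 0, 1, 2, 3] (this matches the docstring example).
# A dependency-free NumPy sketch of the same idea, for reference only:
import numpy as np

def ranges_np(range_sizes):
    range_sizes = np.asarray(range_sizes)
    return np.concatenate([np.arange(n) for n in range_sizes])

assert ranges_np([1, 2, 4]).tolist() == [0, 0, 1, 0, 1, 2, 3]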
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ This script saves bid and ask data for specified ETFs to files for each day during market open hours. It assumes the computer is at US East Coast Time. @author: mark """ import os import pandas as pd import numpy as np from itertools import product import streamlit as st from bokeh.plotting import figure from bokeh.models.tools import HoverTool from bokeh.models import NumeralTickFormatter, DatetimeTickFormatter, Rect, ColumnDataSource, VBar, LabelSet from streamlit_metrics import metric_row def display_method_to_choose_etfs(selected_method_choose_dates, all_etfs, etf_data, sl_obj): """ Generates various streamlit options for selecting which ETFs to display. Parameters ---------- selected_method_choose_dates : list of str Strings of the various methods of selecting ETFs. all_etfs : list of str List of all ETF tickers. etf_data : pd.DataFrame Dataframe containing bulk data about ETFs. sl_obj : streamlit Stremlit object to place the elements. Returns ------- selected_etfs : list of str List of str tickers chosen by users. """ selected_etfs = all_etfs if 'By volume traded' in selected_method_choose_dates: selection_data = etf_data['volume (shares/day)'] log_min = float(np.floor(np.log10(selection_data.min()))) log_max = float(np.ceil(np.log10(selection_data.max()))) min_vol, max_vol = sl_obj.slider('Average Volume (shares/day)', min_value=float(log_min), max_value=float(log_max), value=(float(log_min), float(log_max)), step=float(log_min - log_max) / 100, format='10^%.1f' ) selected = (selection_data >= 10**min_vol) & (selection_data <= 10**max_vol) selected_etfs = list(set(selected_etfs) & set(selection_data[selected].index)) if 'By market cap' in selected_method_choose_dates: selection_data = etf_data['net assets (million USD)'] log_min = float(np.floor(np.log10(selection_data.min()))) log_max = float(np.ceil(np.log10(selection_data.max()))) min_vol, max_vol = sl_obj.slider('Market Cap as of 2021-02-21 (million USD)', min_value=float(log_min), max_value=float(log_max), value=(float(log_min), float(log_max)), step=float(log_min - log_max) / 100, format='10^%.1f' ) selected = (selection_data >= 10**min_vol) & (selection_data <= 10**max_vol) selected_etfs = list(set(selected_etfs) & set(selection_data[selected].index)) if 'Only ESG ETFs' in selected_method_choose_dates: esg_etfs = etf_data[etf_data['esg'] == True].index selected_etfs = list(set(selected_etfs) & set(esg_etfs)) if 'choose specific ETFs' in selected_method_choose_dates: selected_etfs = sl_obj.multiselect('Which ETFs do you want to look at', list(selected_etfs), ['ESGV','VTI','BND', 'VCEB', 'VSGX']) return selected_etfs def get_averages(data, selected_dates, selected_etfs): """ Obtain average values of various ETFs across the trading day. Parameters ---------- data : pd.DataFrame data of various days and ETFs. selected_dates : list of str list of dates in format YYYY-MM-DD. selected_etfs : list of str list of ETF tickers. Returns ------- pd.Series Data frame of average values in ETFs at various times during tradiing day. """ potential_columns = product(selected_dates, selected_etfs) actual_columns = [x for x in potential_columns if x in data.columns] return data[actual_columns].T.groupby(level=['etf']).mean().T def add_trade_windows(p, t_new, t_old, ymax): """ Add trade windows to plot Parameters ---------- p : Bokeh figure Figure to add trading windows to. t_new : tuple of timestamps Starting and ending timestamp of the old trading window. 
t_old : tuple of timestamps Starting and ending timestamp of the new trading window. ymax : float Maxs value to extend trading windows. Returns ------- None. """ source = ColumnDataSource(dict(x=[t_old[0]+0.5*(t_old[1]-t_old[0]),t_new[0]+0.5*(t_new[1]-t_new[0])], y=[ymax-0.0002, ymax-0.0002 ], w=[t_old[1]-t_old[0], t_new[1]-t_new[0]], h =[2,2], desc=['Old', 'New'])) if ymax > 2: patch = {'h' : [ (0, ymax), (1, ymax) ],} source.patch(patch) boxes = Rect(x='x',y='y',width='w', height='h', fill_color='grey', fill_alpha=0.1, line_width=0) boxes_select = Rect(x='x',y='y',width='w', height='h', fill_color='grey', fill_alpha=.2, line_width=0) box_rend = p.add_glyph(source, boxes) box_rend.hover_glyph = boxes_select tooltips = [('trade window','@desc')] p.add_tools(HoverTool(tooltips=tooltips, renderers=[box_rend])) def format_plots(p, ymax=None): """ Format bokeh plots for quoted spreads across market times Parameters ---------- p : Bokeh figure plot Bokeh plot object to format ymax : TYPE, optional Max yaxis value. The default is None. Returns ------- None """ if ymax is None: num_formatter='0.00%' else: num_zeros = int(np.log10(1/ymax)-.4) num_formatter = '0.'+''.join(['0' for x in range(num_zeros)])+'%' p.yaxis.formatter = NumeralTickFormatter(format=num_formatter) p.xaxis.formatter = DatetimeTickFormatter(hours='%H:%M') p.xaxis.axis_label = 'Market Time' p.xgrid.grid_line_color = None p.ygrid.grid_line_color = None p.toolbar.autohide = True def make_multi_etf_plot(selected_etfs, selected_dates, t_new, t_old, quoted_spread): """ Make plot with multiple ETF averages Parameters ---------- selected_etfs : list of str List of ETF tickers selected_dates : list of str List of dates to obtain averages of. In format YYYY-MM-DD. t_new : tuple of timestamps Starting and ending timestamp of the old trading window. t_old : tuple of timestamps Starting and ending timestamp of the new trading window. quoted_spread : pd.DataFrame Quoted spread data for various times, days, and ETFs. Returns ------- p : Bokeh figure Plot of multiple ETF averages. """ t_all = t_new + t_old average_data = get_averages(quoted_spread, selected_dates, selected_etfs) p = figure(plot_width=400, plot_height=400, x_axis_type="datetime", toolbar_location='below', title='quoted Bid-Ask Spread for various ETFs', x_range=(pd.Timestamp('2021-01-01 9:30'), max(t_all)+pd.Timedelta(hours=1.5)), y_range=(0, average_data.max().max()+0.0001)) #trading windows add_trade_windows(p, t_new, t_old, average_data.max().max()) # etf lines renders = [] for etf in selected_etfs: renders.append(p.line(average_data.index, average_data[etf],# set visual properties for selected glyphs hover_color="firebrick", hover_alpha=1, # set visual properties for non-selected glyphs color="grey", alpha=0.5, name=etf)) tooltips = [('etf','$name'), ('time','$x{%H:%M}'), ('Bid-Ask spread', '$y{"0.00%"}')] formatters = { "$x": "datetime",} p.add_tools(HoverTool(tooltips=tooltips, renderers=renders, formatters=formatters)) format_plots(p, ymax=average_data.max().max()+0.0001) return p def make_single_etf_plot(selected_etf, selected_dates, t_new, t_old, quoted_spread, supress_hover_after= 10000): """ Plots data for a single ETF for multiple days. Parameters ---------- selected_etfs : list of str List of ETF tickers selected_dates : list of str List of dates to plot. In format YYYY-MM-DD. t_new : tuple of timestamps Starting and ending timestamp of the old trading window. t_old : tuple of timestamps Starting and ending timestamp of the new trading window. 
quoted_spread : pd.DataFrame Quoted spread data for various times, days, and ETFs. supress_hover_after : int, optional Do not show hover functionality if there are more than this number of days. The default is 10000. Returns ------- p : Bokeh figure Plot of single ETF over various days. """ t_all = t_new + t_old average_data = get_averages(quoted_spread, selected_dates, [selected_etf]) p = figure(plot_width=400, plot_height=400, x_axis_type="datetime", toolbar_location='below', title='Quoted spread for {}'.format(selected_etf), x_range=(pd.Timestamp('2021-01-01 9:30'), max(t_all)+pd.Timedelta(hours=1.5)), y_range=(0, average_data.max().max()+0.0001)) add_trade_windows(p, t_new, t_old, average_data.max().max()) # etf lines renders = [] if len(selected_dates) > 1: for date in selected_dates: try: render = p.line(quoted_spread.index, quoted_spread.loc[:,(date,selected_etf)],# set visual properties for selected glyphs hover_color="firebrick", hover_alpha=0.33, color="grey", alpha=0.25, name=date) except KeyError: continue if len(selected_dates) < supress_hover_after: renders.append(render) average_name = 'average' else: average_name = selected_dates[0] renders.append(p.line(average_data.index, average_data[selected_etf],# set visual properties for selected glyphs hover_color="firebrick", hover_alpha=0.75, color="black", alpha=0.5, name=average_name)) tooltips = [('date','$name'), ('time','$x{%H:%M}'), ('Bid-Ask spread', '$y{"0.00%"}')] formatters = { "$x": "datetime",} p.add_tools(HoverTool(tooltips=tooltips, renderers=renders, formatters=formatters)) format_plots(p) return p def make_bid_ask_plot(selected_etf, selected_date, t_new, t_old, directory): """ Plots bid and ask prices over one trading day for one ETF. Parameters ---------- selected_etf : str ETF ticker of data to show. selected_date : str Date of data to show. In format YYYY-MM-DD. t_new : tuple of timestamps Starting and ending timestamp of the old trading window. t_old : tuple of timestamps Starting and ending timestamp of the new trading window. directory : str Folder containing ETF bid and ask price data. File must be in format date_etf.csv. Returns ------- p : Bokeh figure Plot of bid and ask prices. 
""" data = pd.read_csv(os.path.join(directory, '{}_{}.csv'.format(selected_date, selected_etf)), index_col=0) basetime = pd.to_datetime('2021-01-01') + pd.Timedelta(hours=9, minutes=30) timedeltas = pd.TimedeltaIndex([pd.Timedelta(seconds=x) for x in data.index]) data.index = timedeltas + basetime t_all = t_new + t_old bid = data.bid ask = data.ask p = figure(plot_width=400, plot_height=400, x_axis_type="datetime", toolbar_location='below', title='Bid & ask prices for {} on {}'.format(selected_etf, selected_date), x_range=(pd.Timestamp('2021-01-01 9:30'), max(t_all)+pd.Timedelta(hours=1.5)), y_range=(min(bid.min(),ask.min())-0.2, max(bid.max(),ask.max())+0.2)) add_trade_windows(p, t_new, t_old, max(bid.max(),ask.max())) renders = [] renders.append(p.line(bid.index, bid.values,# set visual properties for selected glyphs hover_color="blue", hover_alpha=1, color="blue", alpha=.5, name='bid')) renders.append(p.line(ask.index, ask.values,# set visual properties for selected glyphs hover_color="firebrick", hover_alpha=1, color="firebrick", alpha=0.5, name='ask')) tooltips = [('type','$name'), ('time','$x{%H:%M}'), ('price', '$y{"$0.00"}')] formatters = { "$x": "datetime",} p.add_tools(HoverTool(tooltips=tooltips, renderers=renders, formatters=formatters)) format_plots(p) p.yaxis.formatter = NumeralTickFormatter(format="$0.00") return p def make_relative_fee_amount(selected_ratios, t_new_text = ''): """ Generate a bar plot for the ratio of quoted spread to expense ratio. Parameters ---------- selected_ratios : pd.Series Data of ratio of quoted spread to expense ratio. t_new_text : str Time range to place in title of plot. Returns ------- p : Bokeh figure Produced plot. """ p = figure(plot_width=400, plot_height=400, x_axis_label="ETFs", x_minor_ticks=len(selected_ratios), toolbar_location='below', title='Ratio of quoted spread to expense ratio {}'.format(t_new_text)) source = ColumnDataSource(dict(x=range(len(selected_ratios)), top=selected_ratios.values, desc=selected_ratios.index,)) glyph = VBar(x='x', top='top', bottom=0, width=0.5, fill_color='grey', line_width=0, fill_alpha=0.5) glyph_hover = VBar(x='x', top='top', bottom=0, width=0.5, fill_color='firebrick', line_width=0, fill_alpha=1) rend = p.add_glyph(source, glyph) rend.hover_glyph = glyph_hover labels = LabelSet(x='x', level='glyph', source=source, render_mode='canvas') tooltips = [('etf','@desc'), ('ratio','@top')] p.add_tools(HoverTool(tooltips=tooltips, renderers=[rend])) num_zeros = int(np.log10(1/selected_ratios.max())-.4) num_formatter = '0.'+''.join(['0' for x in range(num_zeros)])+'%' p.yaxis.formatter = NumeralTickFormatter(format=num_formatter) p.xgrid.grid_line_color = None p.ygrid.grid_line_color =
in conjunction with `access_control_translation` override configuration. """ return pulumi.get(self, "account") @property @pulumi.getter(name="encryptionConfiguration") def encryption_configuration(self) -> Optional['outputs.BucketReplicationConfigRuleDestinationEncryptionConfiguration']: """ A configuration block that provides information about encryption documented below. If `source_selection_criteria` is specified, you must specify this element. """ return pulumi.get(self, "encryption_configuration") @property @pulumi.getter def metrics(self) -> Optional['outputs.BucketReplicationConfigRuleDestinationMetrics']: """ A configuration block that specifies replication metrics-related settings enabling replication metrics and events documented below. """ return pulumi.get(self, "metrics") @property @pulumi.getter(name="replicationTime") def replication_time(self) -> Optional['outputs.BucketReplicationConfigRuleDestinationReplicationTime']: """ A configuration block that specifies S3 Replication Time Control (S3 RTC), including whether S3 RTC is enabled and the time when all objects and operations on objects must be replicated documented below. Replication Time Control must be used in conjunction with `metrics`. """ return pulumi.get(self, "replication_time") @property @pulumi.getter(name="storageClass") def storage_class(self) -> Optional[str]: """ The [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Destination.html#AmazonS3-Type-Destination-StorageClass) used to store the object. By default, Amazon S3 uses the storage class of the source object to create the object replica. """ return pulumi.get(self, "storage_class") @pulumi.output_type class BucketReplicationConfigRuleDestinationAccessControlTranslation(dict): def __init__(__self__, *, owner: str): """ :param str owner: Specifies the replica ownership. For default and valid values, see [PUT bucket replication](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) in the Amazon S3 API Reference. Valid values: `Destination`. """ pulumi.set(__self__, "owner", owner) @property @pulumi.getter def owner(self) -> str: """ Specifies the replica ownership. For default and valid values, see [PUT bucket replication](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) in the Amazon S3 API Reference. Valid values: `Destination`. """ return pulumi.get(self, "owner") @pulumi.output_type class BucketReplicationConfigRuleDestinationEncryptionConfiguration(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "replicaKmsKeyId": suggest = "replica_kms_key_id" if suggest: pulumi.log.warn(f"Key '{key}' not found in BucketReplicationConfigRuleDestinationEncryptionConfiguration. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: BucketReplicationConfigRuleDestinationEncryptionConfiguration.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: BucketReplicationConfigRuleDestinationEncryptionConfiguration.__key_warning(key) return super().get(key, default) def __init__(__self__, *, replica_kms_key_id: str): """ :param str replica_kms_key_id: The ID (Key ARN or Alias ARN) of the customer managed AWS KMS key stored in AWS Key Management Service (KMS) for the destination bucket. 
""" pulumi.set(__self__, "replica_kms_key_id", replica_kms_key_id) @property @pulumi.getter(name="replicaKmsKeyId") def replica_kms_key_id(self) -> str: """ The ID (Key ARN or Alias ARN) of the customer managed AWS KMS key stored in AWS Key Management Service (KMS) for the destination bucket. """ return pulumi.get(self, "replica_kms_key_id") @pulumi.output_type class BucketReplicationConfigRuleDestinationMetrics(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "eventThreshold": suggest = "event_threshold" if suggest: pulumi.log.warn(f"Key '{key}' not found in BucketReplicationConfigRuleDestinationMetrics. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: BucketReplicationConfigRuleDestinationMetrics.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: BucketReplicationConfigRuleDestinationMetrics.__key_warning(key) return super().get(key, default) def __init__(__self__, *, status: str, event_threshold: Optional['outputs.BucketReplicationConfigRuleDestinationMetricsEventThreshold'] = None): """ :param str status: The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. :param 'BucketReplicationConfigRuleDestinationMetricsEventThresholdArgs' event_threshold: A configuration block that specifies the time threshold for emitting the `s3:Replication:OperationMissedThreshold` event documented below. """ pulumi.set(__self__, "status", status) if event_threshold is not None: pulumi.set(__self__, "event_threshold", event_threshold) @property @pulumi.getter def status(self) -> str: """ The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. """ return pulumi.get(self, "status") @property @pulumi.getter(name="eventThreshold") def event_threshold(self) -> Optional['outputs.BucketReplicationConfigRuleDestinationMetricsEventThreshold']: """ A configuration block that specifies the time threshold for emitting the `s3:Replication:OperationMissedThreshold` event documented below. """ return pulumi.get(self, "event_threshold") @pulumi.output_type class BucketReplicationConfigRuleDestinationMetricsEventThreshold(dict): def __init__(__self__, *, minutes: int): """ :param int minutes: Time in minutes. Valid values: `15`. """ pulumi.set(__self__, "minutes", minutes) @property @pulumi.getter def minutes(self) -> int: """ Time in minutes. Valid values: `15`. """ return pulumi.get(self, "minutes") @pulumi.output_type class BucketReplicationConfigRuleDestinationReplicationTime(dict): def __init__(__self__, *, status: str, time: 'outputs.BucketReplicationConfigRuleDestinationReplicationTimeTime'): """ :param str status: The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. :param 'BucketReplicationConfigRuleDestinationReplicationTimeTimeArgs' time: A configuration block specifying the time by which replication should be complete for all objects and operations on objects documented below. """ pulumi.set(__self__, "status", status) pulumi.set(__self__, "time", time) @property @pulumi.getter def status(self) -> str: """ The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. """ return pulumi.get(self, "status") @property @pulumi.getter def time(self) -> 'outputs.BucketReplicationConfigRuleDestinationReplicationTimeTime': """ A configuration block specifying the time by which replication should be complete for all objects and operations on objects documented below. 
""" return pulumi.get(self, "time") @pulumi.output_type class BucketReplicationConfigRuleDestinationReplicationTimeTime(dict): def __init__(__self__, *, minutes: int): """ :param int minutes: Time in minutes. Valid values: `15`. """ pulumi.set(__self__, "minutes", minutes) @property @pulumi.getter def minutes(self) -> int: """ Time in minutes. Valid values: `15`. """ return pulumi.get(self, "minutes") @pulumi.output_type class BucketReplicationConfigRuleExistingObjectReplication(dict): def __init__(__self__, *, status: str): """ :param str status: Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. """ pulumi.set(__self__, "status", status) @property @pulumi.getter def status(self) -> str: """ Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. """ return pulumi.get(self, "status") @pulumi.output_type class BucketReplicationConfigRuleFilter(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "and": suggest = "and_" if suggest: pulumi.log.warn(f"Key '{key}' not found in BucketReplicationConfigRuleFilter. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: BucketReplicationConfigRuleFilter.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: BucketReplicationConfigRuleFilter.__key_warning(key) return super().get(key, default) def __init__(__self__, *, and_: Optional['outputs.BucketReplicationConfigRuleFilterAnd'] = None, prefix: Optional[str] = None, tag: Optional['outputs.BucketReplicationConfigRuleFilterTag'] = None): """ :param 'BucketReplicationConfigRuleFilterAndArgs' and_: A configuration block for specifying rule filters. This element is required only if you specify more than one filter. See and below for more details. :param str prefix: An object key name prefix that identifies subset of objects to which the rule applies. Must be less than or equal to 1024 characters in length. :param 'BucketReplicationConfigRuleFilterTagArgs' tag: A configuration block for specifying a tag key and value documented below. """ if and_ is not None: pulumi.set(__self__, "and_", and_) if prefix is not None: pulumi.set(__self__, "prefix", prefix) if tag is not None: pulumi.set(__self__, "tag", tag) @property @pulumi.getter(name="and") def and_(self) -> Optional['outputs.BucketReplicationConfigRuleFilterAnd']: """ A configuration block for specifying rule filters. This element is required only if you specify more than one filter. See and below for more details. """ return pulumi.get(self, "and_") @property @pulumi.getter def prefix(self) -> Optional[str]: """ An object key name prefix that identifies subset of objects to which the rule applies. Must be less than or equal to 1024 characters in length. """ return pulumi.get(self, "prefix") @property @pulumi.getter def tag(self) -> Optional['outputs.BucketReplicationConfigRuleFilterTag']: """ A configuration block for specifying a tag key and value documented below. """ return pulumi.get(self, "tag") @pulumi.output_type class BucketReplicationConfigRuleFilterAnd(dict): def __init__(__self__, *, prefix: Optional[str] = None, tags: Optional[Mapping[str, str]] = None): """ :param str prefix: An object key name prefix that identifies subset of objects to which the rule applies. Must be less than or equal to 1024 characters in length. :param Mapping[str, str] tags: A map of tags (key and value pairs) that identifies a subset of objects to which the rule applies. 
The rule applies only to objects having all the tags in its tagset. """ if prefix is not None: pulumi.set(__self__, "prefix", prefix) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter def prefix(self) -> Optional[str]: """ An object key name prefix that identifies subset of objects to which the rule applies. Must be less than or equal to 1024 characters in length. """ return pulumi.get(self, "prefix") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: """ A map of tags (key and value pairs) that identifies a subset of objects to which the rule applies. The rule applies only to objects having all the tags in its tagset. """ return pulumi.get(self, "tags") @pulumi.output_type class BucketReplicationConfigRuleFilterTag(dict): def __init__(__self__, *, key: str, value: str): """ :param str key: Name of the object key. :param str value: Value of the tag. """ pulumi.set(__self__, "key", key) pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> str: """ Name of the object key. """ return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> str: """ Value of the tag. """ return pulumi.get(self, "value") @pulumi.output_type class BucketReplicationConfigRuleSourceSelectionCriteria(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "replicaModifications": suggest = "replica_modifications" elif key == "sseKmsEncryptedObjects": suggest = "sse_kms_encrypted_objects" if suggest: pulumi.log.warn(f"Key '{key}' not found in BucketReplicationConfigRuleSourceSelectionCriteria. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str)
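# --- Hedged aside (illustrative only; not the Pulumi SDK implementation) ---
# The output types above keep provider-style camelCase keys usable on the dict
# while steering callers toward the snake_case property getters: __key_warning
# maps e.g. 'replicaKmsKeyId' to 'replica_kms_key_id' and logs a warning before
# delegating to normal dict access. A minimal standalone dict subclass showing
# the same idea with one hypothetical key:
class WarnOnCamelCase(dict):
    _suggestions = {'replicaKmsKeyId': 'replica_kms_key_id'}

    def __getitem__(self, key):
        suggest = self._suggestions.get(key)
        if suggest:
            print(f"Key '{key}' not found in this output type. "
                  f"Access the value via the '{suggest}' property getter instead.")
        return super().__getitem__(key)

d = WarnOnCamelCase({'replica_kms_key_id': 'example-key-arn'})
print(d['replica_kms_key_id'])   # snake_case access, no warning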
type of activation function to use in MLP. \ If ``None``, then default set activation to ``nn.ReLU()``. Default ``None``. - norm_type (:obj:`str`): The type of normalization to use. See ``ding.torch_utils.network.fc_block`` \ for more details. Default ``None``. - noise (:obj:`bool`): Whether use ``NoiseLinearLayer`` as ``layer_fn`` in Q networks' MLP. \ Default ``False``. """ super(QRDQNHead, self).__init__() layer = NoiseLinearLayer if noise else nn.Linear block = noise_block if noise else fc_block self.Q = nn.Sequential( MLP( hidden_size, hidden_size, hidden_size, layer_num, layer_fn=layer, activation=activation, norm_type=norm_type ), block(hidden_size, output_size * num_quantiles) ) self.num_quantiles = num_quantiles self.output_size = output_size def forward(self, x: torch.Tensor) -> Dict: """ Overview: Use encoded embedding tensor to run MLP with ``QRDQNHead`` and return the prediction dictionary. Arguments: - x (:obj:`torch.Tensor`): Tensor containing input embedding. Returns: - outputs (:obj:`Dict`): Dict containing keywords ``logit`` (:obj:`torch.Tensor`), \ ``q`` (:obj:`torch.Tensor`), and ``tau`` (:obj:`torch.Tensor`). Shapes: - x: :math:`(B, N)`, where ``B = batch_size`` and ``N = hidden_size``. - logit: :math:`(B, M)`, where ``M = output_size``. - q: :math:`(B, M, num_quantiles)`. - tau: :math:`(B, M, 1)`. Examples: >>> head = QRDQNHead(64, 64) >>> inputs = torch.randn(4, 64) >>> outputs = head(inputs) >>> assert isinstance(outputs, dict) >>> assert outputs['logit'].shape == torch.Size([4, 64]) >>> # default num_quantiles is 32 >>> assert outputs['q'].shape == torch.Size([4, 64, 32]) >>> assert outputs['tau'].shape == torch.Size([4, 32, 1]) """ q = self.Q(x) q = q.view(*q.shape[:-1], self.output_size, self.num_quantiles) logit = q.mean(-1) tau = torch.linspace(0, 1, self.num_quantiles + 1) tau = ((tau[:-1] + tau[1:]) / 2).view(1, -1, 1).repeat(q.shape[0], 1, 1).to(q) return {'logit': logit, 'q': q, 'tau': tau} class QuantileHead(nn.Module): """ Overview: The ``QuantileHead`` used to output action quantiles. \ Input is a (:obj:`torch.Tensor`) of shape ``(B, N)`` and returns a (:obj:`Dict`) containing \ output ``logit``, ``q``, and ``quantiles``. Interfaces: ``__init__``, ``forward``, ``quantile_net``. """ def __init__( self, hidden_size: int, output_size: int, layer_num: int = 1, num_quantiles: int = 32, quantile_embedding_size: int = 128, beta_function_type: Optional[str] = 'uniform', activation: Optional[nn.Module] = nn.ReLU(), norm_type: Optional[str] = None, noise: Optional[bool] = False, ) -> None: """ Overview: Init the ``QuantileHead`` layers according to the provided arguments. Arguments: - hidden_size (:obj:`int`): The ``hidden_size`` of the MLP connected to ``QuantileHead``. - output_size (:obj:`int`): The number of outputs. - layer_num (:obj:`int`): The number of layers used in the network to compute Q value output. - num_quantiles (:obj:`int`): The number of quantiles. - quantile_embedding_size (:obj:`int`): The embedding size of a quantile. - beta_function_type (:obj:`str`): Type of beta function. See ``ding.rl_utils.beta_function.py`` \ for more details. Default is ``uniform``. - activation (:obj:`nn.Module`): The type of activation function to use in MLP. \ If ``None``, then default set activation to ``nn.ReLU()``. Default ``None``. - norm_type (:obj:`str`): The type of normalization to use. See ``ding.torch_utils.network.fc_block`` \ for more details. Default ``None``. - noise (:obj:`bool`): Whether use ``NoiseLinearLayer`` as ``layer_fn`` in Q networks' MLP. 
\ Default ``False``. """ super(QuantileHead, self).__init__() layer = NoiseLinearLayer if noise else nn.Linear block = noise_block if noise else fc_block self.Q = nn.Sequential( MLP( hidden_size, hidden_size, hidden_size, layer_num, layer_fn=layer, activation=activation, norm_type=norm_type ), block(hidden_size, output_size) ) self.num_quantiles = num_quantiles self.quantile_embedding_size = quantile_embedding_size self.output_size = output_size self.iqn_fc = nn.Linear(self.quantile_embedding_size, hidden_size) self.beta_function = beta_function_map[beta_function_type] def quantile_net(self, quantiles: torch.Tensor) -> torch.Tensor: """ Overview: Deterministic parametric function trained to reparameterize samples from a base distribution. \ By repeated Bellman update iterations of Q-learning, the optimal action-value function is estimated. Arguments: - x (:obj:`torch.Tensor`): The encoded embedding tensor of parametric sample. Returns: - quantile_net (:obj:`torch.Tensor`): Quantile network output tensor after reparameterization. Shapes: - quantile_net :math:`(quantile_embedding_size, M)`, where ``M = output_size``. Examples: >>> head = QuantileHead(64, 64) >>> quantiles = torch.randn(128,1) >>> qn_output = head.quantile_net(quantiles) >>> assert isinstance(qn_output, torch.Tensor) >>> # default quantile_embedding_size: int = 128, >>> assert qn_output.shape == torch.Size([128, 64]) """ quantile_net = quantiles.repeat([1, self.quantile_embedding_size]) quantile_net = torch.cos( torch.arange(1, self.quantile_embedding_size + 1, 1).to(quantiles) * math.pi * quantile_net ) quantile_net = self.iqn_fc(quantile_net) quantile_net = F.relu(quantile_net) return quantile_net def forward(self, x: torch.Tensor, num_quantiles: Optional[int] = None) -> Dict: """ Overview: Use encoded embedding tensor to run MLP with ``QuantileHead`` and return the prediction dictionary. Arguments: - x (:obj:`torch.Tensor`): Tensor containing input embedding. Returns: - outputs (:obj:`Dict`): Dict containing keywords ``logit`` (:obj:`torch.Tensor`), \ ``q`` (:obj:`torch.Tensor`), and ``quantiles`` (:obj:`torch.Tensor`). Shapes: - x: :math:`(B, N)`, where ``B = batch_size`` and ``N = hidden_size``. - logit: :math:`(B, M)`, where ``M = output_size``. - q: :math:`(num_quantiles, B, M)`. - quantiles: :math:`(quantile_embedding_size, 1)`. Examples: >>> head = QuantileHead(64, 64) >>> inputs = torch.randn(4, 64) >>> outputs = head(inputs) >>> assert isinstance(outputs, dict) >>> assert outputs['logit'].shape == torch.Size([4, 64]) >>> # default num_quantiles is 32 >>> assert outputs['q'].shape == torch.Size([32, 4, 64]) >>> assert outputs['quantiles'].shape == torch.Size([128, 1]) """ if num_quantiles is None: num_quantiles = self.num_quantiles batch_size = x.shape[0] q_quantiles = torch.FloatTensor(num_quantiles * batch_size, 1).uniform_(0, 1).to(x) logit_quantiles = torch.FloatTensor(num_quantiles * batch_size, 1).uniform_(0, 1).to(x) logit_quantiles = self.beta_function(logit_quantiles) q_quantile_net = self.quantile_net(q_quantiles) logit_quantile_net = self.quantile_net(logit_quantiles) x = x.repeat(num_quantiles, 1) q_x = x * q_quantile_net # 4*32,64 logit_x = x * logit_quantile_net q = self.Q(q_x).reshape(num_quantiles, batch_size, -1) logit = self.Q(logit_x).reshape(num_quantiles, batch_size, -1).mean(0) return {'logit': logit, 'q': q, 'quantiles': q_quantiles} class FQFHead(nn.Module): """ Overview: The ``FQFHead`` used to output action quantiles. 
\ Input is a (:obj:`torch.Tensor`) of shape ``(B, N)`` and returns a (:obj:`Dict`) containing \ output ``logit``, ``q``, ``quantiles``, ``quantiles_hats``, ``q_tau_i`` and ``entropies``. Interfaces: ``__init__``, ``forward``, ``quantile_net``. """ def __init__( self, hidden_size: int, output_size: int, layer_num: int = 1, num_quantiles: int = 32, quantile_embedding_size: int = 128, activation: Optional[nn.Module] = nn.ReLU(), norm_type: Optional[str] = None, noise: Optional[bool] = False, ) -> None: """ Overview: Init the ``FQFHead`` layers according to the provided arguments. Arguments: - hidden_size (:obj:`int`): The ``hidden_size`` of the MLP connected to ``FQFHead``. - output_size (:obj:`int`): The number of outputs. - layer_num (:obj:`int`): The number of layers used in the network to compute Q value output. - num_quantiles (:obj:`int`): The number of quantiles. - quantile_embedding_size (:obj:`int`): The embedding size of a quantile. - activation (:obj:`nn.Module`): The type of activation function to use in MLP. \ If ``None``, then default set activation to ``nn.ReLU()``. Default ``None``. - norm_type (:obj:`str`): The type of normalization to use. See ``ding.torch_utils.network.fc_block`` \ for more details. Default ``None``. - noise (:obj:`bool`): Whether use ``NoiseLinearLayer`` as ``layer_fn`` in Q networks' MLP. \ Default ``False``. """ super(FQFHead, self).__init__() layer = NoiseLinearLayer if noise else nn.Linear block = noise_block if noise else fc_block self.Q = nn.Sequential( MLP( hidden_size, hidden_size, hidden_size, layer_num, layer_fn=layer, activation=activation, norm_type=norm_type ), block(hidden_size, output_size) ) self.num_quantiles = num_quantiles self.quantile_embedding_size = quantile_embedding_size self.output_size = output_size self.fqf_fc = nn.Sequential(nn.Linear(self.quantile_embedding_size, hidden_size), nn.ReLU()) self.register_buffer( 'sigma_pi', torch.arange(1, self.quantile_embedding_size + 1, 1).view(1, 1, self.quantile_embedding_size) * math.pi ) # initialize weights_xavier of quantiles_proposal network quantiles_proposal_fc = nn.Linear(hidden_size, num_quantiles) torch.nn.init.xavier_uniform_(quantiles_proposal_fc.weight, gain=0.01) torch.nn.init.constant_(quantiles_proposal_fc.bias, 0) self.quantiles_proposal = nn.Sequential(quantiles_proposal_fc, nn.LogSoftmax(dim=1)) def quantile_net(self, quantiles: torch.Tensor) -> torch.Tensor: """ Overview: Deterministic parametric function trained to reparameterize samples from the quantiles_proposal network. \ By repeated Bellman update iterations of Q-learning, the optimal action-value function is estimated. Arguments: - x (:obj:`torch.Tensor`): The encoded embedding tensor of parametric sample. Returns: - quantile_net (:obj:`torch.Tensor`): Quantile network output tensor after reparameterization. Examples: >>> head = FQFHead(64, 64) >>> quantiles = torch.randn(4,32) >>> qn_output = head.quantile_net(quantiles) >>> assert isinstance(qn_output, torch.Tensor) >>> # default quantile_embedding_size: int = 128, >>> assert qn_output.shape == torch.Size([4, 32, 64]) """ batch_size, num_quantiles = quantiles.shape[:2] quantile_net = torch.cos(self.sigma_pi.to(quantiles) * quantiles.view(batch_size, num_quantiles, 1)) quantile_net = self.fqf_fc(quantile_net) # (batch_size, num_quantiles, hidden_size) return quantile_net def forward(self, x: torch.Tensor, num_quantiles: Optional[int] = None) -> Dict: """ Overview: Use encoded embedding tensor to run MLP with ``FQFHead`` and return the prediction dictionary. 
Arguments: - x (:obj:`torch.Tensor`): Tensor containing input embedding. Returns: - outputs (:obj:`Dict`): Dict containing keywords ``logit`` (:obj:`torch.Tensor`), \ ``q`` (:obj:`torch.Tensor`), ``quantiles`` (:obj:`torch.Tensor`), \ ``quantiles_hats`` (:obj:`torch.Tensor`), \ ``q_tau_i`` (:obj:`torch.Tensor`), ``entropies`` (:obj:`torch.Tensor`). Shapes: - x: :math:`(B, N)`, where ``B = batch_size`` and ``N = hidden_size``. - logit: :math:`(B, M)`, where ``M = output_size``. - q: :math:`(B, num_quantiles, M)`. - quantiles: :math:`(B, num_quantiles + 1)`. - quantiles_hats: :math:`(B, num_quantiles)`. - q_tau_i: :math:`(B,
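# A minimal, standalone sketch of the cosine quantile embedding that
# QuantileHead.quantile_net above implements (IQN-style reparameterization of
# sampled taus). The helper name `cosine_quantile_embedding` and the local
# `embed_dim`/`fc` names are illustrative, not part of the original module.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F

def cosine_quantile_embedding(quantiles: torch.Tensor,
                              fc: nn.Linear,
                              embed_dim: int = 128) -> torch.Tensor:
    # quantiles: (num_samples, 1) values in [0, 1]
    # Broadcast each tau against the index vector [1, ..., embed_dim].
    i = torch.arange(1, embed_dim + 1,
                     device=quantiles.device, dtype=quantiles.dtype)
    phi = torch.cos(i * math.pi * quantiles)   # (num_samples, embed_dim)
    return F.relu(fc(phi))                     # (num_samples, hidden_size)

# Usage sketch: 128 sampled taus embedded into a 64-dim feature, matching the
# shapes in the QuantileHead doctest above.
fc = nn.Linear(128, 64)
taus = torch.rand(128, 1)
emb = cosine_quantile_embedding(taus, fc)
assert emb.shape == (128, 64)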
res3 = res3[1:] aNew.append(a) res_final.append(aNew) return render_template('hn_viewwn.html', username=username, result=res_final, messages=msg, userRole=userRole) else: print("NULL") msg = "none" return render_template('hn_viewwn.html', username=username, messages=msg, userRole=userRole) @app.route('/hn_viewb', methods=['GET', 'POST']) def hn_viewb(): msg = '' db = pymysql.connect("localhost", "root", password="<PASSWORD>", db="Hospital", charset='utf8') cursor = db.cursor() try: cursor.execute("use Hospital") except: print("Error: unable to use database!") # 查询 sql = "SELECT * FROM location LEFT OUTER JOIN patient ON location.p_id = patient.p_id WHERE hn_id = '%s'" % username cursor.execute(sql) db.commit() res = cursor.fetchall() if len(res) != 0: msg = "done" print(msg) print(len(res)) res_final = [] a = '' for i in res: aNew = [] if i[6] is None: a = "房间" + str(i[1]) + "床位" + str(i[2]) +"(空闲) " aNew.append(a) aNew.append('无'+'') aNew.append('无'+'') else: a = "房间" + str(i[1]) + "床位" + str(i[2]) aNew.append(a) aNew.append(i[6]) aNew.append(i[8]) res_final.append(aNew) return render_template('hn_viewb.html', username=username, result=res_final, messages=msg, userRole=userRole) else: print("NULL") msg = "none" return render_template('hn_viewb.html', username=username, messages=msg, userRole=userRole) # 病房护士!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # 修改个人信息页面 @app.route('/ModifyPersonalInfo_wn', methods=['GET', 'POST']) def ModifyPersonalInfo_wn(): msg = "" if request.method == 'GET': return render_template('ModifyPersonalInfo_wn.html', username=username) if request.method == 'POST': # username = request.form['username'] phonenum = request.form['phonenum'] # 连接数据库,默认数据库用户名root,密码空 db = pymysql.connect("localhost", "root", password="<PASSWORD>", db="Hospital", charset='utf8') cursor = db.cursor() try: cursor.execute("use Hospital") except: print("Error: unable to use database!") if userRole == 'doctor': sql = "Update {} SET contact_info = '{}' where d_id = '{}'".format(userRole, phonenum, username) elif userRole == 'head_nurse': sql = "Update {} SET contact_info = '{}' where hn_id = '{}'".format(userRole, phonenum, username) elif userRole == 'ward_nurse': sql = "Update {} SET contact_info = '{}' where wn_id = '{}'".format(userRole, phonenum, username) elif userRole == 'emergency_nurse': sql = "Update {} SET contact_info = '{}' where en_id = '{}'".format(userRole, phonenum, username) try: cursor.execute(sql) db.commit() # print("修改个人信息成功") msg = "done" except ValueError as e: print("--->", e) print("修改个人信息失败") msg = "fail" return render_template('ModifyPersonalInfo_wn.html', messages=msg, username=username, userRole=userRole) # 修改密码页面 @app.route('/ModifyPassword_wn', methods=['GET', 'POST']) def ModifyPassword_wn(): msg = "" if request.method == 'GET': return render_template('ModifyPassword_wn.html', username=username, userRole=userRole) if request.method == 'POST': # username = request.form['username'] psw1 = request.form['psw1'] psw2 = request.form['psw2'] # 两次输入密码是否相同 if psw1 == psw2: db = pymysql.connect("localhost", "root", password="<PASSWORD>", db="Hospital", charset='utf8') cursor = db.cursor() try: cursor.execute("use Hospital") except: print("Error: unable to use database!") if userRole == 'doctor': sql = "Update {} SET password = '{}' where d_id = '{}'".format(userRole, psw1, username) elif userRole == 'head_nurse': sql = "Update {} SET password = '{}' where hn_id = '{}'".format(userRole, psw1, username) elif userRole == 'ward_nurse': sql = "Update {} SET password = 
'{}' where wn_id = '{}'".format(userRole, psw1, username) elif userRole == 'emergency_nurse': sql = "Update {} SET password = '{}' where en_id = '{}'".format(userRole, psw1, username) try: cursor.execute(sql) db.commit() # print("修改密码成功") msg = "done" except ValueError as e: print("--->", e) print("修改密码失败") msg = "fail" return render_template('ModifyPassword_wn.html', messages=msg, username=username, userRole=userRole) else: msg = "not equal" return render_template('ModifyPassword_wn.html', messages=msg, username=username, userRole=userRole) # 急诊护士!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # 修改个人信息页面 @app.route('/ModifyPersonalInfo_en', methods=['GET', 'POST']) def ModifyPersonalInfo_en(): msg = "" if request.method == 'GET': return render_template('ModifyPersonalInfo_en.html', username=username) if request.method == 'POST': # username = request.form['username'] phonenum = request.form['phonenum'] # 连接数据库,默认数据库用户名root,密码空 db = pymysql.connect("localhost", "root", password="<PASSWORD>", db="Hospital", charset='utf8') cursor = db.cursor() try: cursor.execute("use Hospital") except: print("Error: unable to use database!") if userRole == 'doctor': sql = "Update {} SET contact_info = '{}' where d_id = '{}'".format(userRole, phonenum, username) elif userRole == 'head_nurse': sql = "Update {} SET contact_info = '{}' where hn_id = '{}'".format(userRole, phonenum, username) elif userRole == 'ward_nurse': sql = "Update {} SET contact_info = '{}' where wn_id = '{}'".format(userRole, phonenum, username) elif userRole == 'emergency_nurse': sql = "Update {} SET contact_info = '{}' where en_id = '{}'".format(userRole, phonenum, username) try: cursor.execute(sql) db.commit() # print("修改个人信息成功") msg = "done" except ValueError as e: print("--->", e) print("修改个人信息失败") msg = "fail" return render_template('ModifyPersonalInfo_en.html', messages=msg, username=username, userRole=userRole) # 修改密码页面 @app.route('/ModifyPassword_en', methods=['GET', 'POST']) def ModifyPassword_en(): msg = "" if request.method == 'GET': return render_template('ModifyPassword_en.html', username=username, userRole=userRole) if request.method == 'POST': # username = request.form['username'] psw1 = request.form['psw1'] psw2 = request.form['psw2'] # 两次输入密码是否相同 if psw1 == psw2: db = pymysql.connect("localhost", "root", password="<PASSWORD>", db="Hospital", charset='utf8') cursor = db.cursor() try: cursor.execute("use Hospital") except: print("Error: unable to use database!") if userRole == 'doctor': sql = "Update {} SET password = '{}' where d_id = '{}'".format(userRole, psw1, username) elif userRole == 'head_nurse': sql = "Update {} SET password = '{}' where hn_id = '{}'".format(userRole, psw1, username) elif userRole == 'ward_nurse': sql = "Update {} SET password = '{}' where wn_id = '{}'".format(userRole, psw1, username) elif userRole == 'emergency_nurse': sql = "Update {} SET password = '{}' where en_id = '{}'".format(userRole, psw1, username) try: cursor.execute(sql) db.commit() # print("修改密码成功") msg = "done" except ValueError as e: print("--->", e) print("修改密码失败") msg = "fail" return render_template('ModifyPassword_en.html', messages=msg, username=username, userRole=userRole) else: msg = "not equal" return render_template('ModifyPassword_hn.html', messages=msg, username=username, userRole=userRole) def get_dailyid(): j = 5 id = ''.join(str(i) for i in random.sample(range(0, 9), j)) # sample(seq, n) 从序列seq中选择n个随机且独立的元素; return ("0"+id) @app.route('/wn_daily', methods=['GET', 'POST']) def wn_daily(): msg = "" if 
request.method == 'GET': return render_template('wn_daily.html', username=username, userRole=userRole) if request.form["action"] == "确认": db = pymysql.connect("localhost", "root", password="20000106", db="Hospital", charset='utf8') cursor = db.cursor() try: cursor.execute("use Hospital") except: print("Error: unable to use database!") p_id = request.form['p_id'] temperature = request.form['temperature'] symptom = request.form['symptom'] date = request.form.get('date') result = request.form.get('result') dfid = get_dailyid() life = request.form.get('result3') if p_id is None or temperature is None or symptom is None or date is None or result is None or life is None: msg = 'fail' return render_template('en_newp.html', messages=msg, username=username) print(p_id) print(temperature) print(symptom) print(date) print(result) print(dfid) print(life) sql1 ="SELECT * FROM location natural join patient WHERE patient.p_id = '%s' and location.wn_id = '%s'" %(p_id, username) cursor.execute(sql1) db.commit() res = cursor.fetchall() if len(res) == 0: msg = 'fail1' return render_template('wn_daily.html', messages=msg, username=username) if temperature > '41' or temperature < '36': msg = 'fail2' return render_template('wn_daily.html', messages=msg, username=username) sql = "INSERT into daily_info values ('{}', '{}','{}', '{}',{},'{}', '{}','{}')".format(dfid, date, temperature, symptom, result, life, p_id, username) print(sql) cursor.execute(sql) db.commit() print("添加成功") msg = "done" return render_template('wn_daily.html', messages=msg, username=username) return render_template('wn_daily.html', username=username, userRole=userRole) @app.route('/wn_viewp', methods=['GET', 'POST']) def wn_viewp(): msg = "" if request.method == 'GET': db = pymysql.connect("localhost", "root", password="<PASSWORD>", db="Hospital", charset='utf8') cursor = db.cursor() try: cursor.execute("use Hospital") except: print("Error: unable to use database!") # 查询 sql = "SELECT * FROM patient as p WHERE p_id = some(SELECT p_id FROM location WHERE Location.wn_id = '%s')" % username cursor.execute(sql) res = cursor.fetchall() db.commit() if len(res) != 0: msg = "done" print(msg) print(len(res)) return render_template('wn_viewp.html', username=username, result=res, messages=msg, userRole=userRole) else: print("NULL") msg = "none" return render_template('wn_viewp.html', username=username, messages=msg, userRole=userRole) def get_pid(): j = 5 id = ''.join(str(i) for i in random.sample(range(0, 9), j)) # sample(seq, n) 从序列seq中选择n个随机且独立的元素; return ("7"+id) @app.route('/en_newp', methods=['GET', 'POST']) def en_newp(): msg = "" if request.method == 'GET': return render_template('en_newp.html', username=username, userRole=userRole) if request.form["action"] == "确认": db = pymysql.connect("localhost", "root", password="<PASSWORD>", db="Hospital", charset='utf8') cursor = db.cursor() try: cursor.execute("use Hospital") except: print("Error: unable to use database!") p_id = get_pid() name = request.form['name'] age = request.form['age'] contact = request.form.get('contact_info') severity = request.form.get('severity') life = '住院治疗' sql = "INSERT into patient values ('{}', '{}',{}, '{}','{}','{}','{}')".format(p_id, name, age, contact, severity, life,username) print(sql) cursor.execute(sql) db.commit() print("添加成功") msg = "done" if severity == '轻症': areaa = 1 elif severity == '重症': areaa = 2 else: areaa =3 # 查找适合床位 sql3 = "SELECT * FROM location WHERE location.area = '%s' and location.p_id is null" % areaa cursor.execute(sql3) db.commit() res = 
cursor.fetchall() # 无空床位,取消送人操作 if len(res) == 0: msg = 'done' sql = "SELECT room_no, bed_no FROM location WHERE area = 0 and p_id is null" cursor.execute(sql) db.commit() tey = cursor.fetchall() a = tey[0][0] b = tey[0][1] sql5 = "UPDATE location as l SET l.p_id = '%s' WHERE l.area = 0 and l.room_no = '%s' and l.bed_no = '%s'" % ( p_id, a, b) print(sql5) cursor.execute(sql5) db.commit() return render_template('en_newp.html', messages=msg, username=username) # 有空床位则送入 else: if areaa == 1: sql9 = "(SELECT wn_id FROM ward_nurse WHERE wn_id NOT IN ((SELECT wn_id FROM location l WHERE l.area = 1 and wn_id is not null) UNION (SELECT wn_id FROM location l WHERE l.area = 2 and wn_id is not null) UNION (SELECT wn_id FROM location l WHERE
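# The Flask routes above build UPDATE statements by interpolating request data
# with str.format, which is open to SQL injection. A minimal sketch of the same
# contact-info update using pymysql parameter binding; the connection settings,
# the helper name and the ROLE_ID_COLUMN whitelist are illustrative assumptions,
# and the "<PASSWORD>" placeholder is kept from the original.
import pymysql

ROLE_ID_COLUMN = {
    'doctor': 'd_id',
    'head_nurse': 'hn_id',
    'ward_nurse': 'wn_id',
    'emergency_nurse': 'en_id',
}

def update_contact_info(role: str, user_id: str, phonenum: str) -> None:
    id_column = ROLE_ID_COLUMN[role]  # table/column names come from a whitelist
    db = pymysql.connect(host="localhost", user="root",
                         password="<PASSWORD>", db="Hospital", charset="utf8")
    try:
        with db.cursor() as cursor:
            # Values are bound as parameters, never formatted into the string.
            sql = "UPDATE {} SET contact_info = %s WHERE {} = %s".format(role, id_column)
            cursor.execute(sql, (phonenum, user_id))
        db.commit()
    finally:
        db.close()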
if index is out of range. If errors=='ignore', return None if index is out of range. default is 'ignore'. Usage: >>> from ds.cdll import CDLList >>> cdll = CDLList([1, 2, 3]) >>> cdll CDLList(head=Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=3) >>> cdll.peek(0) 1 >>> cdll.peek(1, node=True) Node(value=2, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>) >>> cdll.peek(3) >>> cdll.peek(3, node=True) >>> cdll.peek(3, node=True, errors='raise') Traceback (most recent call last): ... IndexError: Index 3 out of range >>> cdll.peek(3, node=True, errors='ignore') is None True """ if index < 0: # Handle negative index index += self.size if index < 0 or index >= self.size: if errors == 'ignore': return None elif errors == 'raise': raise IndexError(f"Index {index} out of range") else: raise ValueError( f"Unknown errors value: {errors}. " "Must be 'raise' or 'ignore'" ) # determine shortest direction to index if (reverse := (index > self.size//2)): index = self.size - index - 1 # adjust index for i, i_node in enumerate(self.iter_nodes(reverse=reverse)): if i == index: return i_node if node else i_node.value return None @assert_types(index=int) def pop(self, index: int = -1) -> T: """Remove and return item at given index. If index is None, remove and return last item. If index is out of range, raise IndexError. Usage: >>> from ds.cdll import CDLList >>> c = CDLList([1]) >>> c.pop() 1 >>> cdll = CDLList([1, 2, 3]) >>> cdll CDLList(head=Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=3) >>> cdll.pop(0) 1 >>> cdll CDLList(head=Node(value=2, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=2) >>> cdll.pop(1) 3 >>> cdll CDLList(head=Node(value=2, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=1) >>> cdll.pop(1) Traceback (most recent call last): ... IndexError: Index 1 out of range >>> cdll.pop(0) 2 >>> cdll.pop() Traceback (most recent call last): ... ds.errors.EmptyInstanceHeadAccess: Cannot pop from empty CDLList Hint: Try inserting an item first using append()/appendleft() """ if self.size == 0: raise EmptyInstanceHeadAccess( "Cannot pop from empty CDLList", hint = "Try inserting an item first using append()/appendleft()" ) if index < 0: index += self.size if index < 0 or index >= self.size: raise IndexError(f"Index {index} out of range") if self.size == 1: val = self.head.value self.size = 0 return val if index == 0: return self.popleft() ith_node = self.peek(index, node=True, errors='raise') ith_node.left.right = ith_node.right ith_node.right.left = ith_node.left self.size -= 1 return ith_node.value def popleft(self) -> T: """Remove and return first item, in doubly linked list. Raise EmptyInstanceHeadAccess if empty. Usage: >>> from ds.cdll import CDLList >>> cdll = CDLList([1]) >>> cdll CDLList(head=Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=1) >>> cdll.popleft() 1 >>> cdll CDLList(empty, size=0) >>> cdll.popleft() Traceback (most recent call last): ... 
ds.errors.EmptyInstanceHeadAccess: Cannot pop from empty CDLList Hint: Try inserting an item first using append()/appendleft() """ if self.size == 0: raise EmptyInstanceHeadAccess( "Cannot pop from empty CDLList", hint = "Try inserting an item first using append()/appendleft()" ) val = self.head.value if self.size == 1: self.size = 0 return val self.head.right.left = self.head.left self.head = self.head.right self.size -= 1 return val @assert_types(index=int) def insert(self, index: int, value: T): """Insert value at given index, in doubly linked list. If index is greater than size, raise IndexError. Usage: >>> from ds.cdll import CDLList >>> cdll = CDLList([1, 2, 3]) >>> cdll CDLList(head=Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=3) >>> cdll.insert(0, 0) >>> list(cdll) [0, 1, 2, 3] >>> cdll.insert(2, 0) >>> list(cdll) [0, 1, 0, 2, 3] >>> cdll CDLList(head=Node(value=0, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=5) >>> cdll.insert(9, 0) Traceback (most recent call last): ... IndexError: Index 9 out of range >>> cdll.insert(5, 0) >>> list(cdll) [0, 1, 0, 2, 3, 0] >>> cdll.insert(-1, 10) >>> list(cdll) [0, 1, 0, 2, 3, 10, 0] """ if index < 0: index += self.size if index < 0 or index > self.size: raise IndexError(f"Index {index} out of range") if index == 0: self.appendleft(value) return if index == self.size: self.append(value) return ith_node = self.peek(index, node=True, errors='raise') ith_node.left.right = node = Node( value, left=ith_node.left, right=ith_node ) ith_node.left = node self.size += 1 def appendleft(self, value: T): """Insert value at the front of the list. Moves head to the newly inserted node. Usage: >>> from ds.cdll import CDLList >>> cdll = CDLList([]) >>> cdll CDLList(empty, size=0) >>> cdll.appendleft(1) >>> cdll CDLList(head=Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=1) >>> cdll.appendleft(2) >>> cdll CDLList(head=Node(value=2, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=2) >>> list(cdll) [2, 1] """ if self.size == 0: self.head = Node(value) else: node = Node( value, left=self.head.left, right=self.head ) self.head.left.right = node self.head.left = node self.head = node self.size += 1 def append(self, value: T): """Insert value at the end of the list. Usage: >>> from ds.cdll import CDLList >>> cdll = CDLList([]) >>> cdll CDLList(empty, size=0) >>> cdll.append(1) >>> cdll CDLList(head=Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=1) >>> cdll.append(2) >>> cdll CDLList(head=Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=2) >>> list(cdll) [1, 2] """ if self.size == 0: self.head = Node(value) else: node = Node( value, left=self.tail, right=self.head ) self.tail.right = node self.head.left = node self.size += 1 @assert_types(values=Iterable) def extend(self, values: Iterable[T]) -> None: """Insert values at the end of the list. 
Usage: >>> from ds.cdll import CDLList >>> cdll = CDLList([]) >>> cdll.extend([1, 2, 3]) >>> cdll CDLList(head=Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=3) >>> list(cdll) [1, 2, 3] >>> cdll.extend([4, 5, 6]) >>> cdll CDLList(head=Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=6) >>> list(cdll) [1, 2, 3, 4, 5, 6] """ new = self.__class__.from_iterable(values) if new.size == 0: return if self.size == 0: self.head = new.head self.size = new.size return new.tail.right = self.head self.tail.right = new.head self.head.left = new.tail new.head.left = self.tail self.size += new.size del new # free memory def iter_nodes( self, cycle: bool = False, reverse: bool = False ) -> Iterator[Node[T]]: r"""Iterate over nodes in the list. If cycle is True, iterate over nodes in a cycle. (default: False) If reverse is True, iterate over nodes in reverse order. (default: False) Usage: >>> from ds.cdll import CDLList >>> cdll = CDLList([1, 2, 3]) >>> print(*cdll.iter_nodes(), sep='\n') Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>) Node(value=2, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>) Node(value=3, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>) >>> print(*cdll.iter_nodes(reverse=True), sep='\n') Node(value=3, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>) Node(value=2, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>) Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>) """ if self.size == 0: return 'Empty CDLList' current = self.tail if reverse else self.head while True: yield current current = current.left if reverse else current.right # determine direction if not cycle and current is self.head: # break if not cyclic break if reverse: yield self.head def __iter__(self) -> Iterator[T]: r"""Iterate over values in the list. Usage: >>> from ds.cdll import CDLList >>> cdll = CDLList([1, 2, 3]) >>> print(*cdll, sep='\n') 1 2 3 """ for node in self.iter_nodes(): yield node.value def __reversed__(self) -> Iterator[T]: r"""Iterate over values in the list in reverse order. Usage: >>> from ds.cdll import CDLList >>> cdll = CDLList([1, 2, 3]) >>> print(*reversed(cdll), sep='\n') 3 2 1 """ for node in self.iter_nodes(reverse=True): yield node.value @overload def __getitem__(self, index: int) -> T: """If index is int, return item at given index. If index is out of range, raise IndexError. """ @overload def __getitem__(self, index: slice) -> 'CDLList[T]': """If index is slice, return a new CDLList with items in given range. If slice is not a valid integer slice, raise InvalidIntegerSliceError. """ @assert_types(index = int | slice) def __getitem__(self, index): """ Return item(s) at given index(es). If index is int, return item at given index. If index is out of range, raise IndexError. If index is slice, return a new CDLList with items in given range. If slice is not a valid integer slice, raise InvalidIntegerSliceError. Usage: >>> from ds.cdll import CDLList >>> cdll = CDLList([1, 2, 3]) >>> cdll[0] 1 >>> cdll[1] 2 >>> cdll[2] 3 >>> cdll[-1] 3 >>> cdll[3] Traceback (most recent call last): ... IndexError: index=3 out of range, for CDLList of len(self)=3 items >>> cdll[1:2] CDLList(head=Node(value=2, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>),
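# A compact, self-contained sketch of the pointer bookkeeping that
# CDLList.append/appendleft above rely on: in a circular doubly linked list the
# tail is always head.left, so an append only rewires three links.
# `SimpleNode`/`simple_append` are illustrative names, not the module's API.
from dataclasses import dataclass, field
from typing import Any, Optional

@dataclass
class SimpleNode:
    value: Any
    left: Optional["SimpleNode"] = field(default=None, repr=False)
    right: Optional["SimpleNode"] = field(default=None, repr=False)

def make_singleton(value) -> SimpleNode:
    node = SimpleNode(value)
    node.left = node.right = node   # a one-element ring points to itself
    return node

def simple_append(head: SimpleNode, value) -> SimpleNode:
    tail = head.left                # circular invariant: tail == head.left
    node = SimpleNode(value, left=tail, right=head)
    tail.right = node
    head.left = node
    return head                     # head is unchanged by append

head = make_singleton(1)
simple_append(head, 2)
simple_append(head, 3)
values, cur = [], head
while True:
    values.append(cur.value)
    cur = cur.right
    if cur is head:
        break
assert values == [1, 2, 3]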
parsed_ca.pop('creator_id', None) if creator_id is not None: self.creator_id = creator_id project_id = parsed_ca.pop('project_id', None) if project_id is not None: self.project_id = project_id for key in parsed_ca: meta = CertificateAuthorityMetadatum(key, parsed_ca[key]) self.ca_meta[key] = meta self.status = States.ACTIVE def _do_delete_children(self, session): """Sub-class hook: delete children relationships.""" for k, v in self.ca_meta.items(): v.delete(session) def _do_extra_dict_fields(self): """Sub-class hook method: return dict of fields.""" if self.expiration: expiration = self.expiration.isoformat() else: expiration = None return { 'ca_id': self.id, 'plugin_name': self.plugin_name, 'plugin_ca_id': self.plugin_ca_id, 'expiration': expiration, 'meta': [ { meta['key']: meta['value'] } for key, meta in self.ca_meta.items() ] } class CertificateAuthorityMetadatum(BASE, ModelBase): """Represents CA metadatum for a single key-value pair.""" __tablename__ = "certificate_authority_metadata" key = sa.Column(sa.String(255), index=True, nullable=False) value = sa.Column(sa.Text, nullable=False) ca_id = sa.Column( sa.String(36), sa.ForeignKey('certificate_authorities.id'), index=True, nullable=False) __table_args__ = (sa.UniqueConstraint( 'ca_id', 'key', name='_certificate_authority_metadatum_uc'),) def __init__(self, key=None, value=None, check_exc=True): super(CertificateAuthorityMetadatum, self).__init__() msg = u._("Must supply non-None {0} argument " "for CertificateAuthorityMetadatum entry.") if key is None and check_exc: raise exception.MissingArgumentError(msg.format("key")) self.key = key if value is None and check_exc: raise exception.MissingArgumentError(msg.format("value")) self.value = value def _do_extra_dict_fields(self): """Sub-class hook method: return dict of fields.""" return { 'key': self.key, 'value': self.value } class ProjectCertificateAuthority(BASE, ModelBase): """Stores CAs available for a project. Admins can define a set of CAs that are available for use in a particular project. There can be multiple entries for any given project. """ __tablename__ = 'project_certificate_authorities' project_id = sa.Column(sa.String(36), sa.ForeignKey('projects.id'), index=True, nullable=False) ca_id = sa.Column(sa.String(36), sa.ForeignKey('certificate_authorities.id'), index=True, nullable=False) ca = orm.relationship("CertificateAuthority", backref="project_cas") __table_args__ = (sa.UniqueConstraint( 'project_id', 'ca_id', name='_project_certificate_authority_uc'),) def __init__(self, project_id=None, ca_id=None, check_exc=True): """Registers a Consumer to a Container.""" super(ProjectCertificateAuthority, self).__init__() msg = u._("Must supply non-None {0} argument " "for ProjectCertificateAuthority entry.") if project_id is None and check_exc: raise exception.MissingArgumentError(msg.format("project_id")) self.project_id = project_id if ca_id is None and check_exc: raise exception.MissingArgumentError(msg.format("ca_id")) self.ca_id = ca_id self.status = States.ACTIVE def _do_extra_dict_fields(self): """Sub-class hook method: return dict of fields.""" return {'project_id': self.project_id, 'ca_id': self.ca_id} class PreferredCertificateAuthority(BASE, ModelBase): """Stores preferred CAs for any project. Admins can define a set of CAs available for issuance requests for any project in the ProjectCertificateAuthority table.. 
""" __tablename__ = 'preferred_certificate_authorities' project_id = sa.Column(sa.String(36), sa.ForeignKey('projects.id'), index=True, unique=True, nullable=False) ca_id = sa.Column(sa.String(36), sa.ForeignKey( 'certificate_authorities.id', name='preferred_certificate_authorities_fk'), index=True, nullable=False) project = orm.relationship('Project', backref=orm.backref('preferred_ca'), uselist=False) ca = orm.relationship('CertificateAuthority', backref=orm.backref('preferred_ca')) def __init__(self, project_id=None, ca_id=None, check_exc=True): """Registers a Consumer to a Container.""" super(PreferredCertificateAuthority, self).__init__() msg = u._("Must supply non-None {0} argument " "for PreferredCertificateAuthority entry.") if project_id is None and check_exc: raise exception.MissingArgumentError(msg.format("project_id")) self.project_id = project_id if ca_id is None and check_exc: raise exception.MissingArgumentError(msg.format("ca_id")) self.ca_id = ca_id self.status = States.ACTIVE def _do_extra_dict_fields(self): """Sub-class hook method: return dict of fields.""" return {'project_id': self.project_id, 'ca_id': self.ca_id} class SecretACL(BASE, ModelBase): """Stores Access Control List (ACL) for a secret. Class to define whitelist of user ids who are allowed specific operation on a secret. List of user ids is defined via SecretACLUser via acl_users association. Creator_only flag helps in making a secret private for non-admin project users who may have access otherwise. SecretACL deletes are not soft-deletes. """ __tablename__ = 'secret_acls' secret_id = sa.Column(sa.String(36), sa.ForeignKey('secrets.id'), index=True, nullable=False) operation = sa.Column(sa.String(255), nullable=False) project_access = sa.Column(sa.Boolean, nullable=False, default=True) secret = orm.relationship( 'Secret', backref=orm.backref('secret_acls', lazy=False)) acl_users = orm.relationship( 'SecretACLUser', backref=orm.backref('secret_acl', lazy=False), cascade="all, delete-orphan") __table_args__ = (sa.UniqueConstraint( 'secret_id', 'operation', name='_secret_acl_operation_uc'),) def __init__(self, secret_id=None, operation=None, project_access=None, user_ids=None, check_exc=True): """Creates secret ACL entity.""" super(SecretACL, self).__init__() msg = u._("Must supply non-None {0} argument for SecretACL entry.") if secret_id is None and check_exc: raise exception.MissingArgumentError(msg.format("secret_id")) self.secret_id = secret_id if operation is None and check_exc: raise exception.MissingArgumentError(msg.format("operation")) self.operation = operation if project_access is not None: self.project_access = project_access self.status = States.ACTIVE if user_ids is not None and isinstance(user_ids, list): userids = set(user_ids) # remove duplicate if any for user_id in userids: acl_user = SecretACLUser(self.id, user_id) self.acl_users.append(acl_user) def _do_delete_children(self, session): """Sub-class hook: delete children relationships.""" for acl_user in self.acl_users: acl_user.delete(session) def _do_extra_dict_fields(self): """Sub-class hook method: return dict of fields. Adds non-deleted acl related users from relationship if there. """ users = [acl_user.user_id for acl_user in self.acl_users if not acl_user.deleted] fields = {'acl_id': self.id, 'secret_id': self.secret_id, 'operation': self.operation, 'project_access': self.project_access} if users: fields['users'] = users return fields class ContainerACL(BASE, ModelBase): """Stores Access Control List (ACL) for a container. 
Class to define whitelist of user ids who are allowed specific operation on a container. List of user ids is defined in ContainerACLUser via acl_users association. Creator_only flag helps in making a container private for non-admin project users who may have access otherwise. ContainerACL deletes are not soft-deletes. """ __tablename__ = 'container_acls' container_id = sa.Column(sa.String(36), sa.ForeignKey('containers.id'), index=True, nullable=False) operation = sa.Column(sa.String(255), nullable=False) project_access = sa.Column(sa.Boolean, nullable=False, default=True) container = orm.relationship( 'Container', backref=orm.backref('container_acls', lazy=False)) acl_users = orm.relationship( 'ContainerACLUser', backref=orm.backref('container_acl', lazy=False), cascade="all, delete-orphan") __table_args__ = (sa.UniqueConstraint( 'container_id', 'operation', name='_container_acl_operation_uc'),) def __init__(self, container_id=None, operation=None, project_access=None, user_ids=None, check_exc=True): """Creates container ACL entity.""" super(ContainerACL, self).__init__() msg = u._("Must supply non-None {0} argument for ContainerACL entry.") if container_id is None and check_exc: raise exception.MissingArgumentError(msg.format("container_id")) self.container_id = container_id if operation is None and check_exc: raise exception.MissingArgumentError(msg.format("operation")) self.operation = operation if project_access is not None: self.project_access = project_access self.status = States.ACTIVE if user_ids is not None and isinstance(user_ids, list): userids = set(user_ids) # remove duplicate if any for user_id in userids: acl_user = ContainerACLUser(self.id, user_id) self.acl_users.append(acl_user) def _do_delete_children(self, session): """Sub-class hook: delete children relationships.""" for acl_user in self.acl_users: acl_user.delete(session) def _do_extra_dict_fields(self): """Sub-class hook method: return dict of fields. Adds non-deleted acl related users from relationship if there. """ users = [acl_user.user_id for acl_user in self.acl_users if not acl_user.deleted] fields = {'acl_id': self.id, 'container_id': self.container_id, 'operation': self.operation, 'project_access': self.project_access} if users: fields['users'] = users return fields class SecretACLUser(BASE, ModelBase): """Stores user id for a secret ACL. This class provides way to store list of users associated with a specific ACL operation. SecretACLUser deletes are not soft-deletes. """ __tablename__ = 'secret_acl_users' acl_id = sa.Column(sa.String(36), sa.ForeignKey('secret_acls.id'), index=True, nullable=False) user_id = sa.Column(sa.String(255), nullable=False) __table_args__ = (sa.UniqueConstraint( 'acl_id', 'user_id', name='_secret_acl_user_uc'),) def __init__(self, acl_id=None, user_id=None, check_exc=True): """Creates secret ACL user entity.""" super(SecretACLUser, self).__init__() msg = u._("Must supply non-None {0} argument for SecretACLUser entry.") self.acl_id = acl_id if user_id is None and check_exc: raise exception.MissingArgumentError(msg.format("user_id")) self.user_id = user_id self.status = States.ACTIVE def _do_extra_dict_fields(self): """Sub-class hook method: return dict of fields.""" return {'acl_id': self.acl_id, 'user_id': self.user_id} class ContainerACLUser(BASE, ModelBase): """Stores user id for a container ACL. This class provides way to store list of users associated with a specific ACL operation. ContainerACLUser deletes are not soft-deletes. 
""" __tablename__ = 'container_acl_users' acl_id = sa.Column(sa.String(36), sa.ForeignKey('container_acls.id'), index=True, nullable=False) user_id = sa.Column(sa.String(255), nullable=False) __table_args__ = (sa.UniqueConstraint( 'acl_id', 'user_id', name='_container_acl_user_uc'),) def __init__(self, acl_id=None, user_id=None, check_exc=True): """Creates container ACL user entity.""" super(ContainerACLUser, self).__init__() msg = u._("Must supply non-None {0} argument for ContainerACLUser " "entry.") self.acl_id = acl_id if user_id is None and check_exc: raise exception.MissingArgumentError(msg.format("user_id")) self.user_id = user_id self.status = States.ACTIVE def _do_extra_dict_fields(self): """Sub-class hook method: return dict of fields.""" return {'acl_id': self.acl_id, 'user_id': self.user_id} class ProjectQuotas(BASE, ModelBase): """Stores Project Quotas. Class to define project specific resource quotas. Project quota deletes are not soft-deletes. """ __tablename__ = 'project_quotas' project_id = sa.Column( sa.String(36), sa.ForeignKey('projects.id', name='project_quotas_fk'), index=True, nullable=False) secrets = sa.Column(sa.Integer, nullable=True) orders = sa.Column(sa.Integer, nullable=True) containers = sa.Column(sa.Integer, nullable=True) consumers = sa.Column(sa.Integer, nullable=True) cas = sa.Column(sa.Integer, nullable=True) def __init__(self, project_id=None, parsed_project_quotas=None, check_exc=True): """Creates Project Quotas entity from a project and a dict. :param project_id: the internal id of the project with quotas :param parsed_project_quotas: a dict with the keys matching the resources for which quotas are to be set, and the values containing the quota value to be set for this project and that resource. :return: None """ super(ProjectQuotas, self).__init__() msg = u._("Must supply non-None {0} argument for ProjectQuotas entry.") if project_id is None and check_exc: raise exception.MissingArgumentError(msg.format("project_id")) self.project_id = project_id if parsed_project_quotas is None: self.secrets = None self.orders = None self.containers = None self.consumers = None self.cas = None else: self.secrets = parsed_project_quotas.get('secrets') self.orders = parsed_project_quotas.get('orders') self.containers = parsed_project_quotas.get('containers') self.consumers = parsed_project_quotas.get('consumers') self.cas = parsed_project_quotas.get('cas') def _do_extra_dict_fields(self): """Sub-class hook method: return dict of fields.""" ret =
id :param imageid: AWS OS AMI image id or Azure image references offer and sku: e.g. 'UbuntuServer#16.04.0-LTS'. :param instancetype: AWS instance resource type e.g 'd2.4xlarge' or Azure hardware profile vm size e.g. 'Standard_DS14_v2'. :param user: remote ssh user for the instance :param localpath: localpath where the logs should be downloaded, and the default path for other necessary tools :param region: EC2 region to connect to :param zone: EC2 zone where other resources should be available :param sriov: Enable or disable SR-IOV :param kernel: custom kernel name provided in localpath """ disk_size = 0 if provider == constants.AWS: disk_size = 100 elif provider == constants.AZURE: disk_size = 513 elif provider == constants.GCE: disk_size = 167 test_env = SetupTestEnv(provider=provider, vm_count=2, test_type=constants.DB_DISK, disk_size=disk_size, raid=False, keyid=keyid, secret=secret, token=token, subscriptionid=subscription, tenantid=tenant, projectid=projectid, imageid=imageid, instancetype=instancetype, user=user, localpath=localpath, region=region, zone=zone, sriov=sriov, kernel=kernel) test_cmd = '/tmp/run_mongodb.sh {} {} {}'.format(test_env.vm_ips[2], user, test_env.device) results_path = os.path.join(localpath, 'mongodb{}_{}_{}.zip'.format( str(time.time()), instancetype, sriov)) test_env.run_test(ssh_vm_conf=1, testname='mongodb', test_cmd=test_cmd, results_path=results_path) upload_results(localpath=localpath, table_name='Perf_{}_MongoDB'.format(provider), results_path=results_path, parser=MongodbLogsReader, other_table=('.deb' in kernel), test_case_name='{}_MongoDB_perf_tuned'.format(provider), data_path=shortcut.data_path(sriov), host_type=shortcut.host_type(provider), instance_size=instancetype, disk_setup='1 x SSD {}GB'.format(disk_size)) def test_mongodb_raid(provider, keyid, secret, token, imageid, subscription, tenant, projectid, instancetype, user, localpath, region, zone, sriov, kernel): """ Run MongoDB YCBS benchmark test on 2 instances using 12 x SSD devices in RAID 0. :param provider Service provider to be used e.g. azure, aws, gce. :param keyid: user key for executing remote connection :param secret: user secret for executing remote connection :param token: GCE refresh token obtained with gcloud sdk :param subscription: Azure specific subscription id :param tenant: Azure specific tenant id :param projectid: GCE specific project id :param imageid: AWS OS AMI image id or Azure image references offer and sku: e.g. 'UbuntuServer#16.04.0-LTS'. :param instancetype: AWS instance resource type e.g 'd2.4xlarge' or Azure hardware profile vm size e.g. 'Standard_DS14_v2'. 
:param user: remote ssh user for the instance :param localpath: localpath where the logs should be downloaded, and the default path for other necessary tools :param region: EC2 region to connect to :param zone: EC2 zone where other resources should be available :param sriov: Enable or disable SR-IOV :param kernel: custom kernel name provided in localpath """ disk_size = 0 raid = 10 if provider == constants.AWS: disk_size = 100 elif provider == constants.AZURE: disk_size = 513 elif provider == constants.GCE: disk_size = 167 test_env = SetupTestEnv(provider=provider, vm_count=2, test_type=constants.DB_DISK, disk_size=disk_size, raid=raid, keyid=keyid, secret=secret, token=token, subscriptionid=subscription, tenantid=tenant, projectid=projectid, imageid=imageid, instancetype=instancetype, user=user, localpath=localpath, region=region, zone=zone, sriov=sriov, kernel=kernel) test_cmd = '/tmp/run_mongodb.sh {} {} {}'.format(test_env.vm_ips[2], user, constants.RAID_DEV) results_path = os.path.join(localpath, 'mongodb_raid{}_{}.zip'.format( str(time.time()), instancetype, sriov)) test_env.run_test(ssh_vm_conf=1, testname='mongodb', test_cmd=test_cmd, raid=raid, ssh_raid=2, results_path=results_path) upload_results(localpath=localpath, table_name='Perf_{}_MongoDB'.format(provider), results_path=results_path, parser=MongodbLogsReader, other_table=('.deb' in kernel), test_case_name='{}_MongoDB_perf_tuned'.format(provider), data_path=shortcut.data_path(sriov), host_type=shortcut.host_type(provider), instance_size=instancetype, disk_setup='{} x SSD {}GB RAID0'.format(raid, disk_size)) def test_postgresql(provider, keyid, secret, token, imageid, subscription, tenant, projectid, instancetype, user, localpath, region, zone, sriov, kernel): """ Run Pgbench benchmark on PostgreSQL server with a dedicated client. :param provider Service provider to be used e.g. azure, aws, gce. :param keyid: user key for executing remote connection :param secret: user secret for executing remote connection :param token: GCE refresh token obtained with gcloud sdk :param subscription: Azure specific subscription id :param tenant: Azure specific tenant id :param projectid: GCE specific project id :param imageid: AWS OS AMI image id or Azure image references offer and sku: e.g. 'UbuntuServer#16.04.0-LTS'. :param instancetype: AWS instance resource type e.g 'd2.4xlarge' or Azure hardware profile vm size e.g. 'Standard_DS14_v2'. 
:param user: remote ssh user for the instance :param localpath: localpath where the logs should be downloaded, and the default path for other necessary tools :param region: EC2 region to connect to :param zone: EC2 zone where other resources should be available :param sriov: Enable or disable SR-IOV :param kernel: custom kernel name provided in localpath """ disk_size = 0 if provider == constants.AWS: disk_size = 300 elif provider == constants.AZURE: disk_size = 513 elif provider == constants.GCE: disk_size = 167 test_env = SetupTestEnv(provider=provider, vm_count=2, test_type=constants.DB_DISK, disk_size=disk_size, raid=False, keyid=keyid, secret=secret, token=token, subscriptionid=subscription, tenantid=tenant, projectid=projectid, imageid=imageid, instancetype=instancetype, user=user, localpath=localpath, region=region, zone=zone, sriov=sriov, kernel=kernel) test_cmd = '/tmp/run_postgresql.sh {} {} {}'.format(test_env.vm_ips[2], user, test_env.device) results_path = os.path.join(localpath, 'postgresql{}_{}_{}.zip'.format( str(time.time()), instancetype, sriov)) test_env.run_test(ssh_vm_conf=1, testname='postgresql', test_cmd=test_cmd, results_path=results_path, timeout=constants.TIMEOUT * 3) upload_results(localpath=localpath, table_name='Perf_{}_PostgreSQL'.format(provider), results_path=results_path, parser=PostgreSQLLogsReader, other_table=('.deb' in kernel), test_case_name='{}_PostgreSQL_perf_tuned'.format(provider), data_path=shortcut.data_path(sriov), host_type=shortcut.host_type(provider), instance_size=instancetype, disk_setup='1 x SSD {}GB'.format(disk_size)) def test_zookeeper(provider, keyid, secret, token, imageid, subscription, tenant, projectid, instancetype, user, localpath, region, zone, sriov, kernel): """ Run ZooKeeper benchmark on a tree of 5 servers and 1 generating client. :param provider Service provider to be used e.g. azure, aws, gce. :param keyid: user key for executing remote connection :param secret: user secret for executing remote connection :param token: GCE refresh token obtained with gcloud sdk :param subscription: Azure specific subscription id :param tenant: Azure specific tenant id :param projectid: GCE specific project id :param imageid: AWS OS AMI image id or Azure image references offer and sku: e.g. 'UbuntuServer#16.04.0-LTS'. :param instancetype: AWS instance resource type e.g 'd2.4xlarge' or Azure hardware profile vm size e.g. 'Standard_DS14_v2'. 
:param user: remote ssh user for the instance :param localpath: localpath where the logs should be downloaded, and the default path for other necessary tools :param region: EC2 region to connect to :param zone: EC2 zone where other resources should be available :param sriov: Enable or disable SR-IOV :param kernel: custom kernel name provided in localpath """ vm_count = 6 test_env = SetupTestEnv(provider=provider, vm_count=vm_count, test_type=None, disk_size=None, raid=False, keyid=keyid, secret=secret, token=token, subscriptionid=subscription, tenantid=tenant, projectid=projectid, imageid=imageid, instancetype=instancetype, user=user, localpath=localpath, region=region, zone=zone, sriov=sriov, kernel=kernel) zk_servers = ' '.join([test_env.vm_ips[i] for i in range(2, 7)]) test_cmd = '/tmp/run_zookeeper.sh {} {}'.format(user, zk_servers) results_path = os.path.join(localpath, 'zookeeper{}_{}_{}.zip'.format( str(time.time()), instancetype, sriov)) test_env.run_test(ssh_vm_conf=6, testname='zookeeper', test_cmd=test_cmd, results_path=results_path, timeout=constants.TIMEOUT) upload_results(localpath=localpath, table_name='Perf_{}_Zookeeper'.format(provider), results_path=results_path, parser=ZookeeperLogsReader, other_table=('.deb' in kernel), test_case_name='{}_Zookeeper_perf_tuned'.format(provider), data_path=shortcut.data_path(sriov), host_type=shortcut.host_type(provider), instance_size=instancetype, cluster_setup='{} x servers'.format(vm_count - 1)) def test_terasort(provider, keyid, secret, token, imageid, subscription, tenant, projectid, instancetype, user, localpath, region, zone, sriov, kernel): """ Run Hadoop terasort benchmark on a tree of servers using 1 master and 5 slaves instances in VPC to elevate AWS Enhanced Networking. :param provider Service provider to be used e.g. azure, aws, gce. :param keyid: user key for executing remote connection :param secret: user secret for executing remote connection :param token: GCE refresh token obtained with gcloud sdk :param subscription: Azure specific subscription id :param tenant: Azure specific tenant id :param projectid: GCE specific project id :param imageid: AWS OS AMI image id or Azure image references offer and sku: e.g. 'UbuntuServer#16.04.0-LTS'. :param instancetype: AWS instance resource type e.g 'd2.4xlarge' or Azure hardware profile vm size e.g. 'Standard_DS14_v2'. 
:param user: remote ssh user for the instance :param localpath: localpath where the logs should be downloaded, and the default path for other necessary tools :param region: EC2 region to connect to :param zone: EC2 zone where other resources should be available :param sriov: Enable or disable SR-IOV :param kernel: custom kernel name provided in localpath """ vm_count = 6 test_env = SetupTestEnv(provider=provider, vm_count=vm_count, test_type=constants.CLUSTER_DISK, disk_size=100, raid=False, keyid=keyid, secret=secret, token=token, subscriptionid=subscription, tenantid=tenant, projectid=projectid, imageid=imageid, instancetype=instancetype, user=user, localpath=localpath, region=region, zone=zone, sriov=sriov, kernel=kernel) slaves = ' '.join([test_env.vm_ips[i] for i in range(2, 7)]) test_cmd = '/tmp/run_terasort.sh {} {} {}'.format(user, test_env.device, slaves) results_path = os.path.join(localpath, 'terasort{}_{}_{}.zip'.format( str(time.time()), instancetype, sriov)) test_env.run_test(ssh_vm_conf=6, testname='terasort', test_cmd=test_cmd, results_path=results_path, timeout=constants.TIMEOUT) upload_results(localpath=localpath, table_name='Perf_{}_Terasort'.format(provider), results_path=results_path, parser=TerasortLogsReader, other_table=('.deb' in kernel), test_case_name='{}_Terasort_perf_tuned'.format(provider), data_path=shortcut.data_path(sriov), host_type=shortcut.host_type(provider), instance_size=instancetype, cluster_setup='1 master + {} slaves'.format(vm_count - 1)) def test_sql_server_inmemdb(provider, keyid, secret, token, imageid, subscription, tenant, projectid, instancetype, user, localpath, region, zone, sriov, kernel): """ Run SQLServer Benchcraft profiling. The test assumes the existence of a *.vm config file and an Azure windows image prepared with all benchcraft prerequisites (sql scripts, ps scripts and db flat files). The setup creates a windows and a linux VM for testing specific InMemDB performance. :param provider Service provider to be used e.g. azure, aws, gce. :param keyid: user key for executing remote connection :param secret: user secret for executing remote connection :param token: GCE refresh token obtained with gcloud sdk :param subscription: Azure specific subscription id :param tenant: Azure
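# Each benchmark above repeats the same provider -> disk size branching before
# calling SetupTestEnv. A small illustrative helper (not part of the original
# module) that captures that mapping; constants.AWS/AZURE/GCE are the same
# names the tests already use.
DISK_SIZE_BY_PROVIDER = {
    constants.AWS: 100,
    constants.AZURE: 513,
    constants.GCE: 167,
}

def default_disk_size(provider, overrides=None):
    """Return the per-provider SSD size in GB, with optional per-test overrides."""
    sizes = dict(DISK_SIZE_BY_PROVIDER)
    if overrides:
        sizes.update(overrides)
    return sizes.get(provider, 0)

# e.g. test_postgresql uses 300 GB on AWS instead of the default 100 GB:
# disk_size = default_disk_size(provider, overrides={constants.AWS: 300})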
from start to end # http://gavwood.com/paper.pdf data = self.try_simplify_to_constant(self.read_buffer(start, size)) if issymbolic(data): known_sha3 = {} # Broadcast the signal self._publish( "on_symbolic_sha3", data, known_sha3 ) # This updates the local copy of sha3 with the pairs we need to explore value = 0 # never used known_hashes_cond = False for key, hsh in known_sha3.items(): assert not issymbolic(key), "Saved sha3 data,hash pairs should be concrete" cond = key == data known_hashes_cond = Operators.OR(cond, known_hashes_cond) value = Operators.ITEBV(256, cond, hsh, value) return value value = sha3.keccak_256(data).hexdigest() value = int(value, 16) self._publish("on_concrete_sha3", data, value) logger.info("Found a concrete SHA3 example %r -> %x", data, value) return value ############################################################################ # Environmental Information def ADDRESS(self): """Get address of currently executing account""" return self.address def BALANCE_gas(self, account): return 380 # BALANCE_SUPPLEMENTAL_GAS def BALANCE(self, account): """Get balance of the given account""" return self.world.get_balance(account) def ORIGIN(self): """Get execution origination address""" return Operators.ZEXTEND(self.world.tx_origin(), 256) def CALLER(self): """Get caller address""" return Operators.ZEXTEND(self.caller, 256) def CALLVALUE(self): """Get deposited value by the instruction/transaction responsible for this execution""" return self.value def CALLDATALOAD(self, offset): """Get input data of current environment""" if issymbolic(offset): if Z3Solver().can_be_true(self._constraints, offset == self._used_calldata_size): self.constraints.add(offset == self._used_calldata_size) offset = self._used_calldata_size else: raise ConcretizeArgument(1, policy="SAMPLED") self._use_calldata(offset, 32) data_length = len(self.data) bytes = [] for i in range(32): try: c = Operators.ITEBV(8, offset + i < data_length, self.data[offset + i], 0) except IndexError: # offset + i is concrete and outside data c = 0 bytes.append(c) return Operators.CONCAT(256, *bytes) def _use_calldata(self, n, size): assert not issymbolic(n) max_size = len(self.data) min_size = self._used_calldata_size self._used_calldata_size = Operators.ITEBV( 256, size != 0, Operators.ITEBV(256, min_size + n > max_size, max_size, min_size + n), self._used_calldata_size, ) def CALLDATASIZE(self): """Get size of input data in current environment""" return self._calldata_size def CALLDATACOPY_gas(self, mem_offset, data_offset, size): GCOPY = 3 # cost to copy one 32 byte word copyfee = self.safe_mul(GCOPY, self.safe_add(size, 31) // 32) memfee = self._get_memfee(mem_offset, size) return copyfee + memfee def CALLDATACOPY(self, mem_offset, data_offset, size): """Copy input data in current environment to memory""" if issymbolic(size): if not Z3Solver().can_be_true( self._constraints, Operators.ULE(size, len(self.data) + data_offset + 32) ): print("omg it can not be small") import pdb pdb.set_trace() raise ConcretizeArgument(3, policy="MIN") self.constraints.add(Operators.ULE(size, len(self.data) + data_offset + 32)) raise ConcretizeArgument(3, policy="SAMPLED") if issymbolic(data_offset): if Z3Solver().can_be_true(self._constraints, data_offset == self._used_calldata_size): self.constraints.add(data_offset == self._used_calldata_size) data_offset = self._used_calldata_size print("symbolic data_offset", data_offset, "choosing", self._used_calldata_size) else: logger.debug("symbolic data_offset MIN") raise ConcretizeArgument(2, 
policy="MIN") # account for calldata usage self._use_calldata(data_offset, size) self._allocate(mem_offset, size) if size > consts.calldata_max_offset: logger.info("CALLDATACOPY absurd size %d. OOG policy used: %r", size, consts.oog) raise TerminateState( "CALLDATACOPY absurd size %d. OOG policy used: %r" % (size, consts.oog), testcase=True, ) for i in range(size): try: c = Operators.ITEBV( 8, data_offset + i < len(self.data), Operators.ORD(self.data[data_offset + i]), 0, ) except IndexError: # data_offset + i is concrete and outside data c = 0 self._store(mem_offset + i, c) def CODESIZE(self): """Get size of code running in current environment""" return len(self.bytecode) def CODECOPY_gas(self, mem_offset, code_offset, size): return self._get_memfee(mem_offset, size) @concretized_args(code_offset="SAMPLED", size="SAMPLED") def CODECOPY(self, mem_offset, code_offset, size): """Copy code running in current environment to memory""" self._allocate(mem_offset, size) GCOPY = 3 # cost to copy one 32 byte word copyfee = self.safe_mul(GCOPY, Operators.UDIV(self.safe_add(size, 31), 32)) self._consume(copyfee) if issymbolic(size): max_size = Z3Solver().max(self.constraints, size) else: max_size = size for i in range(max_size): if issymbolic(i < size): default = Operators.ITEBV( 8, i < size, 0, self._load(mem_offset + i, 1) ) # Fixme. unnecessary memory read else: if i < size: default = 0 else: default = self._load(mem_offset + i, 1) if issymbolic(code_offset): value = Operators.ITEBV( 8, code_offset + i >= len(self.bytecode), default, self.bytecode[code_offset + i], ) else: if code_offset + i >= len(self.bytecode): value = default else: value = self.bytecode[code_offset + i] self._store(mem_offset + i, value) self._publish("did_evm_read_code", code_offset, size) def GASPRICE(self): """Get price of gas in current environment""" return self.world.tx_gasprice() @concretized_args(account="ACCOUNTS") def EXTCODESIZE(self, account): """Get size of an account's code""" return len(self.world.get_code(account)) def EXTCODECOPY_gas(self, account, address, offset, size): GCOPY = 3 # cost to copy one 32 byte word extbytecode = self.world.get_code(account) memfee = self._get_memfee(address, size) return GCOPY * (ceil32(len(extbytecode)) // 32) + memfee @concretized_args(account="ACCOUNTS") def EXTCODECOPY(self, account, address, offset, size): """Copy an account's code to memory""" extbytecode = self.world.get_code(account) self._allocate(address + size) for i in range(size): if offset + i < len(extbytecode): self._store(address + i, extbytecode[offset + i]) else: self._store(address + i, 0) def RETURNDATACOPY_gas(self, mem_offset, return_offset, size): return self._get_memfee(mem_offset, size) def RETURNDATACOPY(self, mem_offset, return_offset, size): return_data = self.world.last_transaction.return_data self._allocate(mem_offset, size) for i in range(size): if return_offset + i < len(return_data): self._store(mem_offset + i, return_data[return_offset + i]) else: self._store(mem_offset + i, 0) def RETURNDATASIZE(self): return len(self.world.last_transaction.return_data) ############################################################################ # Block Information def BLOCKHASH(self, a): """Get the hash of one of the 256 most recent complete blocks""" return self.world.block_hash(a) def COINBASE(self): """Get the block's beneficiary address""" return self.world.block_coinbase() def TIMESTAMP(self): """Get the block's timestamp""" return self.world.block_timestamp() def NUMBER(self): """Get the block's 
number""" return self.world.block_number() def DIFFICULTY(self): """Get the block's difficulty""" return self.world.block_difficulty() def GASLIMIT(self): """Get the block's gas limit""" return self.world.block_gaslimit() ############################################################################ # Stack, Memory, Storage and Flow Operations def POP(self, a): """Remove item from stack""" # Items are automatically removed from stack # by the instruction dispatcher pass def MLOAD_gas(self, address): return self._get_memfee(address, 32) def MLOAD(self, address): """Load word from memory""" self._allocate(address, 32) value = self._load(address, 32) return value def MSTORE_gas(self, address, value): return self._get_memfee(address, 32) def MSTORE(self, address, value): """Save word to memory""" if istainted(self.pc): value = taint_with(value, *get_taints(self.pc)) self._allocate(address, 32) self._store(address, value, 32) def MSTORE8_gas(self, address, value): return self._get_memfee(address, 1) def MSTORE8(self, address, value): """Save byte to memory""" if istainted(self.pc): for taint in get_taints(self.pc): value = taint_with(value, taint) self._allocate(address, 1) self._store(address, Operators.EXTRACT(value, 0, 8), 1) def SLOAD(self, offset): """Load word from storage""" storage_address = self.address self._publish("will_evm_read_storage", storage_address, offset) value = self.world.get_storage_data(storage_address, offset) self._publish("did_evm_read_storage", storage_address, offset, value) return value def SSTORE_gas(self, offset, value): storage_address = self.address GSTORAGEREFUND = 15000 GSTORAGEKILL = 5000 GSTORAGEMOD = 5000 GSTORAGEADD = 20000 previous_value = self.world.get_storage_data(storage_address, offset) gascost = Operators.ITEBV( 512, previous_value != 0, Operators.ITEBV(512, value != 0, GSTORAGEMOD, GSTORAGEKILL), Operators.ITEBV(512, value != 0, GSTORAGEADD, GSTORAGEMOD), ) return gascost def SSTORE(self, offset, value): """Save word to storage""" storage_address = self.address self._publish("will_evm_write_storage", storage_address, offset, value) # refund = Operators.ITEBV(256, # previous_value != 0, # Operators.ITEBV(256, value != 0, 0, GSTORAGEREFUND), # 0) if istainted(self.pc): for taint in get_taints(self.pc): value = taint_with(value, taint) self.world.set_storage_data(storage_address, offset, value) self._publish("did_evm_write_storage", storage_address, offset, value) def JUMP(self, dest): """Alter the program counter""" self.pc = dest # This set ups a check for JMPDEST in the next instruction self._set_check_jmpdest() def JUMPI(self, dest, cond): """Conditionally alter the program counter""" self.pc = Operators.ITEBV(256, cond != 0, dest, self.pc + self.instruction.size) # This set ups a check for JMPDEST in the next instruction if cond != 0 self._set_check_jmpdest(cond != 0) def GETPC(self): """Get the value of the program counter prior to the increment""" return self.pc def MSIZE(self): """Get the size of active memory in bytes""" return self._allocated def GAS(self): """Get the amount of available gas, including the corresponding reduction the amount of available gas""" # fixme calculate gas consumption return Operators.EXTRACT(self._gas, 0, 256) def JUMPDEST(self): """Mark a valid destination for jumps""" ############################################################################ # Push Operations def PUSH(self, value): """Place 1 to 32 bytes item on stack""" return value ############################################################################ # 
Duplication Operations def DUP(self, *operands): """Duplicate stack item""" return (operands[-1],) + operands ############################################################################ # Exchange Operations def SWAP(self, *operands): """Exchange 1st and 2nd stack items""" a = operands[0] b = operands[-1] return (b,) + operands[1:-1] + (a,) ############################################################################ # Logging Operations def LOG_gas(self, address, size, *topics): return self._get_memfee(address, size) @concretized_args(size="ONE") def LOG(self, address, size, *topics): GLOGBYTE = 8 self._consume(size * GLOGBYTE) memlog = self.read_buffer(address, size) self.world.log(self.address, topics, memlog) ############################################################################ # System operations def CREATE_gas(self, value, offset, size): return self._get_memfee(offset, size) @transact def CREATE(self, value, offset, size): """Create a new account with associated code""" address = self.world.create_account( address=EVMWorld.calculate_new_address( sender=self.address, nonce=self.world.get_nonce(self.address) ) ) self.world.start_transaction( "CREATE", address, data=self.read_buffer(offset, size), caller=self.address, value=value, gas=self.gas, ) raise StartTx() @CREATE.pos
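# A minimal, concrete-value sketch of the SSTORE gas rule that SSTORE_gas above
# expresses with symbolic ITEBV terms (pre-EIP-2200 schedule: 20000 to set a
# zero slot, 5000 otherwise; refunds are tracked separately). The constant
# names mirror the ones used in SSTORE_gas; this helper is illustrative only
# and is not part of the VM implementation.
GSTORAGEADD = 20000   # writing a non-zero value into a zero slot
GSTORAGEMOD = 5000    # modifying a slot that stays in the same zero/non-zero class
GSTORAGEKILL = 5000   # clearing a non-zero slot (the refund is handled elsewhere)

def sstore_gas_concrete(previous_value: int, value: int) -> int:
    """Concrete counterpart of SSTORE_gas for non-symbolic inputs."""
    if previous_value != 0:
        return GSTORAGEMOD if value != 0 else GSTORAGEKILL
    return GSTORAGEADD if value != 0 else GSTORAGEMOD

# Example: the first write to an empty slot costs 20000, overwriting costs 5000.
assert sstore_gas_concrete(0, 42) == GSTORAGEADD
assert sstore_gas_concrete(42, 7) == GSTORAGEMOD
assert sstore_gas_concrete(42, 0) == GSTORAGEKILL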
= Constraint(expr= m.b55 + m.b79 <= 1) m.c295 = Constraint(expr= m.b56 + m.b80 <= 1) m.c296 = Constraint(expr= m.b57 + m.b81 <= 1) m.c297 = Constraint(expr= m.b58 + m.b82 <= 1) m.c298 = Constraint(expr= m.b59 + m.b83 <= 1) m.c299 = Constraint(expr= m.b60 + m.b84 <= 1) m.c300 = Constraint(expr= m.x127 == 0) m.c301 = Constraint(expr= m.x155 == 0) m.c302 = Constraint(expr= m.x190 == 0) m.c303 = Constraint(expr= m.x251 + m.x252 + m.x253 + m.x254 + m.x255 + m.x256 + m.x293 + m.x294 + m.x295 + m.x296 + m.x297 + m.x298 + m.x353 + m.x354 + m.x355 + m.x356 + m.x357 + m.x358 == 1000) m.c304 = Constraint(expr= m.x257 + m.x258 + m.x259 + m.x260 + m.x261 + m.x262 + m.x263 + m.x264 + m.x265 + m.x266 + m.x267 + m.x268 + m.x299 + m.x300 + m.x301 + m.x302 + m.x303 + m.x304 + m.x305 + m.x306 + m.x307 + m.x308 + m.x309 + m.x310 + m.x335 + m.x336 + m.x337 + m.x338 + m.x339 + m.x340 + m.x341 + m.x342 + m.x343 + m.x344 + m.x345 + m.x346 + m.x359 + m.x360 + m.x361 + m.x362 + m.x363 + m.x364 + m.x365 + m.x366 + m.x367 + m.x368 + m.x369 + m.x370 == 1000) m.c305 = Constraint(expr= m.x311 + m.x312 + m.x313 + m.x314 + m.x315 + m.x316 + m.x347 + m.x348 + m.x349 + m.x350 + m.x351 + m.x352 + m.x371 + m.x372 + m.x373 + m.x374 + m.x375 + m.x376 == 1000) m.c306 = Constraint(expr= m.b85 + m.b86 + m.b88 + m.b91 == 1) m.c307 = Constraint(expr= m.b87 + m.b89 + m.b92 + m.b94 == 1) m.c308 = Constraint(expr= m.b90 + m.b93 + m.b95 + m.b96 == 1) m.c309 = Constraint(expr= m.b97 + m.b98 + m.b100 + m.b103 == 1) m.c310 = Constraint(expr= m.b99 + m.b101 + m.b104 + m.b106 == 1) m.c311 = Constraint(expr= m.b102 + m.b105 + m.b107 + m.b108 == 1) m.c312 = Constraint(expr= m.b1 + m.b7 + m.b13 <= 1) m.c313 = Constraint(expr= m.b2 + m.b8 + m.b14 <= 1) m.c314 = Constraint(expr= m.b3 + m.b9 + m.b15 <= 1) m.c315 = Constraint(expr= m.b4 + m.b10 + m.b16 <= 1) m.c316 = Constraint(expr= m.b5 + m.b11 + m.b17 <= 1) m.c317 = Constraint(expr= m.b6 + m.b12 + m.b18 <= 1) m.c318 = Constraint(expr= m.b61 + m.b67 == 1) m.c319 = Constraint(expr= m.b62 + m.b68 == 1) m.c320 = Constraint(expr= m.b63 + m.b69 == 1) m.c321 = Constraint(expr= m.b64 + m.b70 == 1) m.c322 = Constraint(expr= m.b65 + m.b71 == 1) m.c323 = Constraint(expr= m.b66 + m.b72 == 1) m.c324 = Constraint(expr= m.b73 + m.b79 == 1) m.c325 = Constraint(expr= m.b74 + m.b80 == 1) m.c326 = Constraint(expr= m.b75 + m.b81 == 1) m.c327 = Constraint(expr= m.b76 + m.b82 == 1) m.c328 = Constraint(expr= m.b77 + m.b83 == 1) m.c329 = Constraint(expr= m.b78 + m.b84 == 1) m.c330 = Constraint(expr= m.b1 - m.b85 <= 0) m.c331 = Constraint(expr= m.b2 - m.b85 - m.b86 <= 0) m.c332 = Constraint(expr= m.b3 - m.b85 - m.b86 - m.b88 <= 0) m.c333 = Constraint(expr= m.b4 - m.b85 - m.b86 - m.b88 - m.b91 <= 0) m.c334 = Constraint(expr= m.b5 - m.b85 - m.b86 - m.b88 - m.b91 <= 0) m.c335 = Constraint(expr= m.b6 - m.b85 - m.b86 - m.b88 - m.b91 <= 0) m.c336 = Constraint(expr= m.b7 <= 0) m.c337 = Constraint(expr= m.b8 - m.b87 <= 0) m.c338 = Constraint(expr= m.b9 - m.b87 - m.b89 <= 0) m.c339 = Constraint(expr= m.b10 - m.b87 - m.b89 - m.b92 <= 0) m.c340 = Constraint(expr= m.b11 - m.b87 - m.b89 - m.b92 - m.b94 <= 0) m.c341 = Constraint(expr= m.b12 - m.b87 - m.b89 - m.b92 - m.b94 <= 0) m.c342 = Constraint(expr= m.b13 <= 0) m.c343 = Constraint(expr= m.b14 <= 0) m.c344 = Constraint(expr= m.b15 - m.b90 <= 0) m.c345 = Constraint(expr= m.b16 - m.b90 - m.b93 <= 0) m.c346 = Constraint(expr= m.b17 - m.b90 - m.b93 - m.b95 <= 0) m.c347 = Constraint(expr= m.b18 - m.b90 - m.b93 - m.b95 - m.b96 <= 0) m.c348 = Constraint(expr= m.b1 - m.b97 - m.b98 - 
m.b100 - m.b103 <= 0) m.c349 = Constraint(expr= m.b2 - m.b98 - m.b100 - m.b103 <= 0) m.c350 = Constraint(expr= m.b3 - m.b100 - m.b103 <= 0) m.c351 = Constraint(expr= m.b4 - m.b103 <= 0) m.c352 = Constraint(expr= m.b5 <= 0) m.c353 = Constraint(expr= m.b6 <= 0) m.c354 = Constraint(expr= m.b7 - m.b99 - m.b101 - m.b104 - m.b106 <= 0) m.c355 = Constraint(expr= m.b8 - m.b99 - m.b101 - m.b104 - m.b106 <= 0) m.c356 = Constraint(expr= m.b9 - m.b101 - m.b104 - m.b106 <= 0) m.c357 = Constraint(expr= m.b10 - m.b104 - m.b106 <= 0) m.c358 = Constraint(expr= m.b11 - m.b106 <= 0) m.c359 = Constraint(expr= m.b12 <= 0) m.c360 = Constraint(expr= m.b13 - m.b102 - m.b105 - m.b107 - m.b108 <= 0) m.c361 = Constraint(expr= m.b14 - m.b102 - m.b105 - m.b107 - m.b108 <= 0) m.c362 = Constraint(expr= m.b15 - m.b102 - m.b105 - m.b107 - m.b108 <= 0) m.c363 = Constraint(expr= m.b16 - m.b105 - m.b107 - m.b108 <= 0) m.c364 = Constraint(expr= m.b17 - m.b107 - m.b108 <= 0) m.c365 = Constraint(expr= m.b18 - m.b108 <= 0) m.c366 = Constraint(expr= - m.b61 - m.b68 + m.x109 >= -1) m.c367 = Constraint(expr= - m.b62 - m.b69 + m.x110 >= -1) m.c368 = Constraint(expr= - m.b63 - m.b70 + m.x111 >= -1) m.c369 = Constraint(expr= - m.b64 - m.b71 + m.x112 >= -1) m.c370 = Constraint(expr= - m.b65 - m.b72 + m.x113 >= -1) m.c371 = Constraint(expr= - m.b73 - m.b80 + m.x114 >= -1) m.c372 = Constraint(expr= - m.b74 - m.b81 + m.x115 >= -1) m.c373 = Constraint(expr= - m.b75 - m.b82 + m.x116 >= -1) m.c374 = Constraint(expr= - m.b76 - m.b83 + m.x117 >= -1) m.c375 = Constraint(expr= - m.b77 - m.b84 + m.x118 >= -1) m.c376 = Constraint(expr= - m.b62 - m.b67 + m.x109 >= -1) m.c377 = Constraint(expr= - m.b63 - m.b68 + m.x110 >= -1) m.c378 = Constraint(expr= - m.b64 - m.b69 + m.x111 >= -1) m.c379 = Constraint(expr= - m.b65 - m.b70 + m.x112 >= -1) m.c380 = Constraint(expr= - m.b66 - m.b71 + m.x113 >= -1) m.c381 = Constraint(expr= - m.b74 - m.b79 + m.x114 >= -1) m.c382 = Constraint(expr= - m.b75 - m.b80 + m.x115 >= -1) m.c383 = Constraint(expr= - m.b76 - m.b81 + m.x116 >= -1) m.c384 = Constraint(expr= - m.b77 - m.b82 + m.x117 >= -1) m.c385 = Constraint(expr= - m.b78 - m.b83 + m.x118 >= -1) m.c386 = Constraint(expr= m.x109 + m.x110 + m.x111 + m.x112 + m.x113 - m.x119 == -1) m.c387 = Constraint(expr= m.x114 + m.x115 + m.x116 + m.x117 + m.x118 - m.x120 == -1) m.c388 = Constraint(expr= - 1000*m.b1 + m.x233 <= 0) m.c389 = Constraint(expr= - 1000*m.b2 + m.x234 <= 0) m.c390 = Constraint(expr= - 1000*m.b3 + m.x235 <= 0) m.c391 = Constraint(expr= - 1000*m.b4 + m.x236 <= 0) m.c392 = Constraint(expr= - 1000*m.b5 + m.x237 <= 0) m.c393 = Constraint(expr= - 1000*m.b6 + m.x238 <= 0) m.c394 = Constraint(expr= - 1000*m.b7 + m.x269 <= 0) m.c395 = Constraint(expr= - 1000*m.b8 + m.x270 <= 0) m.c396 = Constraint(expr= - 1000*m.b9 + m.x271 <= 0) m.c397 = Constraint(expr= - 1000*m.b10 + m.x272 <= 0) m.c398 = Constraint(expr= - 1000*m.b11 + m.x273 <= 0) m.c399 = Constraint(expr= - 1000*m.b12 + m.x274 <= 0) m.c400 = Constraint(expr= - 1000*m.b13 + m.x317 <= 0) m.c401 = Constraint(expr= - 1000*m.b14 + m.x318 <= 0) m.c402 = Constraint(expr= - 1000*m.b15 + m.x319 <= 0) m.c403 = Constraint(expr= - 1000*m.b16 + m.x320 <= 0) m.c404 = Constraint(expr= - 1000*m.b17 + m.x321 <= 0) m.c405 = Constraint(expr= - 1000*m.b18 + m.x322 <= 0) m.c406 = Constraint(expr= - 1000*m.b19 + m.x239 <= 0) m.c407 = Constraint(expr= - 1000*m.b20 + m.x240 <= 0) m.c408 = Constraint(expr= - 1000*m.b21 + m.x241 <= 0) m.c409 = Constraint(expr= - 1000*m.b22 + m.x242 <= 0) m.c410 = Constraint(expr= - 1000*m.b23 + 
m.x243 <= 0) m.c411 = Constraint(expr= - 1000*m.b24 + m.x244 <= 0) m.c412 = Constraint(expr= - 1000*m.b25 + m.x245 <= 0) m.c413 = Constraint(expr= - 1000*m.b26 + m.x246 <= 0) m.c414 = Constraint(expr= - 1000*m.b27 + m.x247 <= 0) m.c415 = Constraint(expr= - 1000*m.b28 + m.x248 <= 0) m.c416 = Constraint(expr= - 1000*m.b29 + m.x249 <= 0) m.c417 = Constraint(expr= - 1000*m.b30 + m.x250 <= 0) m.c418 = Constraint(expr= - 1000*m.b31 + m.x275 <= 0) m.c419 = Constraint(expr= - 1000*m.b32 + m.x276 <= 0) m.c420 = Constraint(expr= - 1000*m.b33 + m.x277 <= 0) m.c421
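# The c388-c421 block above follows a standard big-M linking pattern: a
# continuous variable x can only be non-zero when its binary selector b is 1
# (x <= 1000*b, with M = 1000 matching the totals fixed in c303-c305).
# A minimal, hypothetical Pyomo sketch of that pattern; the variable names
# below are illustrative and not taken from the generated model.
from pyomo.environ import ConcreteModel, Var, Constraint, Binary, NonNegativeReals

M = 1000
demo = ConcreteModel()
demo.b = Var(domain=Binary)               # selector
demo.x = Var(domain=NonNegativeReals)     # flow, bounded by M when selected
demo.link = Constraint(expr=demo.x - M * demo.b <= 0)   # i.e. x <= M*b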
<gh_stars>0 ################################################################################ ## ## This library is free software; you can redistribute it and/or ## modify it under the terms of the GNU Lesser General Public ## License as published by the Free Software Foundation; either ## version 2.1 of the License, or (at your option) any later version. ## ## This library is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## Lesser General Public License for more details. ## ## You should have received a copy of the GNU Lesser General Public ## License along with this library; if not, write to the Free Software ## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ## ## (C) Copyrights Dr. <NAME> and TSRI 2016 ## ################################################################################ ############################################################################# # # Author: <NAME> # # Copyright: <NAME> TSRI 2016 # ############################################################################# # # $Header: /mnt/raid/services/cvs/PmvApp/cartoonCmds.py,v 1.1.4.2 2017/09/25 21:16:20 annao Exp $ # # $Id: cartoonCmds.py,v 1.1.4.2 2017/09/25 21:16:20 annao Exp $ # import numpy from math import sqrt from DejaVu2.Geom import Geom from DejaVu2.Cylinders import Cylinders from DejaVu2.Spheres import Spheres from DejaVu2.IndexedPolygons import IndexedPolygons from DejaVu2.Shapes import Rectangle2D, Circle2D, Ellipse2D from MolKit2.selection import Selection from AppFramework.App import DeleteObjectEvent from PmvApp.displayCmds import DisplayCommand from PmvApp.Pmv import MVCommand #MVAtomICOM from PmvApp.splineFrames import SplineFrames from PmvApp.extrude import Extruder from PmvApp.cubicInterpolate import ResampleControlPoints, GetSmoothNormals, GetFrames from MolKit2.selection import SelectionSet class ComputeCartoon(MVCommand): """The ComputeCartoon command computes a graphical representation of molecule's backbones. This represention relies on a 2D path representing a plane following a spline through Calpha positions of amino acids and P atoms for nucleic acids. Different shapes can be extruded along this path for varous secondary structure elements. The quality controls the number of points used for each amino or nucleic acid to discretize the spline.The calculation is carried out separately for each chain in the molecule. Synopsis: None <- ComputeCartoon(atoms, helixShape=None, sheetShape=None, coilShape=None, shapeNABB=None, quality=3) Required Arguments : a set of atoms --- the command will compute the cartoon for all molecules with an atoms in the set. Optional Arguments: quality --- the number of points used to represent a single aminio or nucleic acid backbone helixShape --- DejaVu2.Shape 2D object extruded for helical segments. A value of 'None' defaults to Ellipse2D(0.6, 0.1, quality=quality) sheetShape --- DejaVu2.Shape 2D object extruded for beta-sheets segments. A value of 'None' defaults to shapeSheet = Rectangle2D(width=1.2, height=0.2) coilShape --- DejaVu2.Shape 2D object extruded for segements that are neither helical nor beta-sheets. 
A value of 'None' defaults to Circle2D(radius=0.1, quality=quality) shapeNABB --- DejaVu2.Shape 2D object extruded for nucleic acids backbone A value of 'None' defaults to Circle2D(radius=0.3, quality=quality) Package : PmvApp Module : cartoonCmds Class : ComputeCartoonPath Command name : computeCartoonPath """ _argNames = ['helixShape', 'sheetShape', 'coilShape', 'shapeNABB', 'quality', 'scale'] def checkArguments(self, nodes, **kw): for name in kw.keys(): if name not in self._argNames: raise RuntimeError("%s: unrecognized keyword argument '%s', valid names are %s"%( self.name, name, str(self._argNames))) return (nodes,), kw def onAddCmdToApp(self): self._extruder = Extruder() app = self.app() app.eventHandler.registerListener(DeleteObjectEvent, self.handleDeleteObject) self._splineFrames = SplineFrames() def handleDeleteObject(self, event): mol = event.object initialized = self.initializedFor.get(mol, False) if initialized: del mol._cartoonGeoms def isInitialized(self, mol): return self.initializedFor.get(mol, False) def initialize(self,mol): # call self.initializedFor is needed isInitialized = self.initializedFor.get(mol, False) if not isInitialized: self.initializeMolForCmd(mol) def initializeMolForCmd(self, mol): """ """ self.initializedFor[mol] = True geomC = mol.geomContainer mol._cartoonGeoms = {} chids = numpy.unique(mol._ag.getChids()) #if mol._multi=="conformations": if mol._colors.get('cartoon', None) is None: # per atom properties used for lines # initialize with line colors mol._colors['cartoon'] = mol._colors['lines'].copy() if mol._ag.getData('colorsIndices_cartoon') is None: mol._ag.setData('colorsIndices_cartoon', mol._ag.getData('colorsIndices_lines').copy()) mol._cartoonData = [None]*mol._ag._coords.shape[0] if not geomC.geoms.has_key('cartoon'): master = Geom('cartoon', inheritLineWidth=False) geomC.addGeom( master, parent=geomC.masterGeom, redo=0, makeAtomSet=False, displayCmd=self.app().displayCartoon, undisplayCmd=self.app().undisplayCartoon) for chid in chids: geom = IndexedPolygons("chain_%s"%chid, inheritMaterial=False, inheritLineWidth=False, ) geomC.addGeom( geom, parent=master, redo=0, displayCmd=self.app().displayCartoon, undisplayCmd=self.app().undisplayCartoon) mol._cartoonGeoms[chid] = geom def pickedVerticesToAtoms(self, geom, vertInd): mol = geom.mol() geomC = mol.geomContainer confNum = mol._ag.getACSIndex() cartoonDataDict = mol._cartoonData[confNum] for chnum, chid in enumerate(numpy.unique(mol._ag.getChids())): g = cartoonDataDict[chid][0] if g == geom: v2At = cartoonDataDict[chid][4] atIndices = numpy.unique(v2At[list(vertInd)]) res = numpy.unique(Selection(mol._ag, atIndices, "").getResindices()) resStr = str(res.tolist()).replace(',',' ')[1:-1] atoms = mol._ag.select('resindex %s'%resStr) atmInds = atoms.getIndices() return atmInds def gaps(self, coords, cutOff): v = coords[1:]-coords[:-1] d = numpy.sum(v*v, axis=1) mask = d >= cutOff gapIndices = (numpy.where(mask==True)[0]) gapDistances = d[gapIndices] return (gapIndices+1).tolist(), gapDistances def doit(self, molSel, helixShape=None, sheetShape=None, coilShape=None, shapeNABB=None, quality=3, scale=1.): """ compute the spline path for the entire molecules and extrude 2D shapes """ mol = molSel.getAtomGroup().getMolecule() molSel = molSel.select("not deleted") isInitialized = self.initializedFor.get(mol, False) if not isInitialized: self.initializeMolForCmd(mol) # assign secondary structure if needed if mol._ag.getSecstrs() is None: mol.assignSecondaryStructureWithPross() if helixShape is None: 
helixShape = Ellipse2D(0.6*scale, 0.1, quality=quality) if sheetShape is None: sheetShape = Rectangle2D(width=1.2*scale, height=0.2*scale, vertDup=1) if coilShape is None: coilShape = Circle2D(radius=0.1*scale, quality=quality) if shapeNABB is None: shapeNABB = Circle2D(radius=0.3*scale, quality=quality) if mol._renderingProp.get('cartoon', None) is None: mol._renderingProp['cartoon'] = { } mol._renderingProp['cartoon']['helixShape'] = helixShape mol._renderingProp['cartoon']['sheetShape'] = sheetShape mol._renderingProp['cartoon']['coilShape'] = coilShape mol._renderingProp['cartoon']['shapeNABB'] = shapeNABB mol._renderingProp['cartoon']['quality'] = quality mol._renderingProp['cartoon']['scale'] = scale geomC = mol.geomContainer sf = self._splineFrames cartoonDataDict = {} confNum = mol._ag.getACSIndex() #for chid in numpy.unique(mol._ag.getChids()): for chain in mol._ag.getHierView(): chid = chain.getChid() if len(chain) < 4: cartoonDataDict[chid] = None continue # calculate 2D sheet allcaAtoms = chain.select("ca") if allcaAtoms is None: allpAtoms = chain.select("nucleotide and element P and not deleted") if allpAtoms is None: cartoonDataDict[chid] = None else: # build cartoon for Nucleic Acids # look for gaps gapIndices, gd = self.gaps(allpAtoms.getCoords(), 64.) # 8.0**2 P-P gapIndices.insert(0, 0) gapIndices.append(len(allpAtoms._indices)) for n, gindex in enumerate(gapIndices[1:-1]): ind = allpAtoms._indices[gindex] data = mol._ag._data print 'GAP in chain %s after residue %s%d (distance %.2f)'%( chid, data['resname'][ind], data['resnum'][ind], sqrt(gd[n])) verts = [] faces = [] normals = [] vert2At = [] faces4At= {} vc = 0 fc = 0 cylCoords =[] cylcc = 0 facesDict = {} allCtrlAtoms = mol.emptySelection() #allNAResnums = chain.select('nucleotide and not deleted').getResnums() #withP = numpy.unique(chain.select("nucleotide and element P and not deleted").getResnums()) #allNA = numpy.unique(chain.select("nucleotide").getResnums()) #noP = allNAResnums = numpy.unique(chain.select('nucleotide and not deleted').getResnums()).tolist() for gindex, start in enumerate(gapIndices[:-1]): end = gapIndices[gindex+1] patoms = Selection(mol._ag, allpAtoms._indices[start:end], '') ## Assumption: we only cartoon consecutive segments of NA containing a P atom ## but check if there is 1 NA before and after without a P and use O5' ## at the begining and O3' at the end in these cases ctrl, ctrlAtoms = mol.getChainCtrlPointsForNA(patoms, chain, allNAResnums) # ctrl has 2 more points than ctrlAtoms for spline interpolation #print chid, ctrlAtoms.getNames() # add tot he set of control atoms allCtrlAtoms = allCtrlAtoms | ctrlAtoms nbres = len(numpy.unique(ctrlAtoms.getResindices())) resIndices = ctrlAtoms.getResindices() atIndices = ctrlAtoms.getIndices() resNums = ctrlAtoms.getResnums() smoothPoints, ids = ResampleControlPoints(ctrl, quality, atIndices) pp1, norm, binormals = GetFrames(smoothPoints) sstype = ['C']*len(smoothPoints) v, f, n, v2A, f4A = self._extruder( smoothPoints, norm, binormals, quality, ids, pp1[0], sstype, coilShape, coilShape, shapeNABB, fc) verts.extend(v) faces.extend((numpy.array(f)+vc).tolist()) normals.extend(n) vert2At.extend(v2A) faces4At.update(f4A) vc += len(v) fc += len(f) ## ## add cylinder and sphere geoms for ladder steps # compute middle of P-P as base for cylinder mcoords = (0.5*(ctrl[1:-2]+ctrl[2:-1])).tolist() #import pdb; pdb.set_trace() for ii, resIndex in enumerate(numpy.unique(resIndices)): res = chain[resNums[ii]] resname = res.getResname() 
cylCoords.append(mcoords[ii]) if resname=='A' or resname=='G' or resname=='DA' or resname=='DG': cylCoords.append(res.select("nucleotide and name N1").getCoords()[0]) else: cylCoords.append(res.select("nucleotide and name N3").getCoords()[0]) facesDict[resIndex] = ( 2*ii+cylcc, 2*ii+cylcc+1) cylcc = len(cylCoords) geom = mol._cartoonGeoms[chid] geom._nuclotide = True # used by display to decide if ladder bar have # to be displayed geom._allAtoms = Selection(mol._ag, allCtrlAtoms._indices, "p") # used by display geom.Set(vertices=verts, vnormals=normals, faces=[], visible=0) cartoonDataDict[chid] = (geom, numpy.array(verts), numpy.array(faces), numpy.array(normals), numpy.array(vert2At), faces4At) geomC.geomPickToAtoms["chain_%s"%chid] = self.pickedVerticesToAtoms geomC.geomPickToBonds["chain_%s"%chid] = None cyl = Cylinders('steps_%s'%chid, vertices=cylCoords, faces=[], radii=0.2) cyl._facesDict = facesDict self.app().gui().viewer.AddObject(cyl, parent=geom) sph = Spheres('stepsCap_%s'%chid, vertices=cylCoords, faces=[], radii=(0.2,)) self.app().gui().viewer.AddObject(sph, parent=geom) else: # protein cartoon for this chain allcaoAtoms = chain.select("protein and name CA O and not deleted") # look for gaps #gapIndices = self.gaps(allcaAtoms.getCoords(), 16.81) # 4.1**2 CA-CA gapIndices, gd = self.gaps(allcaAtoms.getCoords(), 18.5) # 4.1**2 CA-CA gapIndices.insert(0, 0) gapIndices.append(len(allcaAtoms._indices)) for n, gindex in enumerate(gapIndices[1:-1]): ind = allcaAtoms._indices[gindex] data = mol._ag._data print 'GAP in chain %s after residue %s%d (distance %.2f)'%( chid, data['resname'][ind], data['resnum'][ind], sqrt(gd[n])) # handle each segment
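# Standalone sketch of the gap-detection idea used by ComputeCartoon.gaps():
# consecutive backbone atoms whose squared distance exceeds a cutoff mark a
# chain break, and the returned indices split the spline into segments
# (64.0 ~ 8 A squared for P-P, 18.5 ~ 4.3 A squared for CA-CA above).
# The coordinates below are made up for illustration.
import numpy as np

def find_gaps(coords, cutoff_sq):
    """Return indices where a new segment starts and the squared gap distances."""
    v = coords[1:] - coords[:-1]
    d2 = np.sum(v * v, axis=1)
    gap_idx = np.where(d2 >= cutoff_sq)[0]
    return (gap_idx + 1).tolist(), d2[gap_idx]

ca = np.array([[0.0, 0.0, 0.0],
               [3.8, 0.0, 0.0],
               [7.6, 0.0, 0.0],
               [20.0, 0.0, 0.0],   # simulated chain break
               [23.8, 0.0, 0.0]])
starts, gaps_sq = find_gaps(ca, 18.5)
# starts == [3]: the atom at index 3 begins a new segment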
import numpy as np import scipy.special as special import scipy.spatial.distance as distfuncs def cart2sph(x, y, z): """Conversion from Cartesian to spherical coordinates Parameters ------ x, y, z : Position in Cartesian coordinates Returns ------ phi, theta, r: Azimuth angle, zenith angle, distance """ r_xy = np.sqrt(x**2 + y**2) phi = np.arctan2(y, x) theta = np.arctan2(r_xy, z) r = np.sqrt(x**2 + y**2 + z**2) return phi, theta, r def sph2cart(phi, theta, r): """Conversion from spherical to Cartesian coordinates Parameters ------ phi, theta, r: Azimuth angle, zenith angle, distance Returns ------ x, y, z : Position in Cartesian coordinates """ x = r * np.sin(theta) * np.cos(phi) y = r * np.sin(theta) * np.sin(phi) z = r * np.cos(theta) return x, y, z def sph_harm(m, n, phi, theta): """Spherical harmonic function m, n: degrees and orders phi, theta: azimuth and zenith angles """ return special.sph_harm(m, n, phi, theta) def sph_harm_nmvec(order, rep=None): """Vectors of spherical harmonic orders and degrees Returns (order+1)**2 size vectors of n and m n = [0, 1, 1, 1, ..., order, ..., order]^T m = [0, -1, 0, 1, ..., -order, ..., order]^T Parameters ------ order: Maximum order rep: Same vectors are copied as [n, .., n] and [m, ..., m] Returns ------ n, m: Vectors of orders and degrees """ n = np.array([0]) m = np.array([0]) for nn in np.arange(1, order+1): nn_vec = np.tile([nn], 2*nn+1) n = np.append(n, nn_vec) mm = np.arange(-nn, nn+1) m = np.append(m, mm) if rep is not None: n = np.tile(n[:, None], (1, rep)) m = np.tile(m[:, None], (1, rep)) return n, m def spherical_hn(n, k, z): """nth-order sphericah Henkel function of kth kind Returns h_n^(k)(z) """ if k == 1: return special.spherical_jn(n, z) + 1j * special.spherical_yn(n, z) elif k == 2: return special.spherical_jn(n, z) - 1j * special.spherical_yn(n, z) else: raise ValueError() def sf_int_basis3d(n, m, x, y, z, k): """Spherical wavefunction for interior sound field in 3D Parameters ------ n, m: orders and degrees x, y, z: Position in Cartesian coordinates k: Wavenumber Returns ------ sqrt(4pi) j_n(kr) Y_n^m(phi,theta) (Normalized so that 0th order coefficient corresponds to pressure) """ phi, theta, r = cart2sph(x, y, z) J = special.spherical_jn(n, k * r) Y = sph_harm(m, n, phi, theta) f = np.sqrt(4*np.pi) * J * Y return f def gauntcoef(l1, m1, l2, m2, l3): """Gaunt coefficients """ m3 = -m1 - m2 l = int((l1 + l2 + l3) / 2) t1 = l2 - m1 - l3 t2 = l1 + m2 - l3 t3 = l1 + l2 - l3 t4 = l1 - m1 t5 = l2 + m2 tmin = max([0, max([t1, t2])]) tmax = min([t3, min([t4, t5])]) t = np.arange(tmin, tmax+1) gl_tbl = np.array(special.gammaln(np.arange(1, l1+l2+l3+3))) G = np.sum( (-1.)**t * np.exp( -np.sum( gl_tbl[np.array([t, t-t1, t-t2, t3-t, t4-t, t5-t])] ) \ +np.sum( gl_tbl[np.array([l1+l2-l3, l1-l2+l3, -l1+l2+l3, l])] ) \ -np.sum( gl_tbl[np.array([l1+l2+l3+1, l-l1, l-l2, l-l3])] ) \ +np.sum( gl_tbl[np.array([l1+m1, l1-m1, l2+m2, l2-m2, l3+m3, l3-m3])] ) * 0.5 ) ) \ * (-1.)**( l + l1 - l2 - m3) * np.sqrt( (2*l1+1) * (2*l2+1) * (2*l3+1) / (4*np.pi) ) return G def trjmat3d(order1, order2, x, y, z, k): """Translation operator in 3D """ if np.all([x, y, z] == 0): T = np.eye((order1+1)**2, (order2+1)**2) return T else: order = order1 + order2 n, m = sph_harm_nmvec(order) P = sf_int_basis3d(n, m, x, y, z, k) T = np.zeros(((order1+1)**2, (order2+1)**2), dtype=complex) icol = 0 for n in np.arange(0, order2+1): for m in np.arange(-n, n+1): irow = 0 for nu in np.arange(0, order1+1): for mu in np.arange(-nu, nu+1): l = np.arange((n+nu), max( 
[np.abs(n-nu), np.abs(m-mu)] )-1, -2) G = np.zeros(l.shape) for ll in np.arange(0, l.shape[0]): G[ll] = gauntcoef(n, m, nu, -mu, l[ll]) T[irow, icol] = np.sqrt(4.*np.pi) * 1j**(nu-n) * (-1.)**m * np.sum( 1j**(l) * P[l**2 + l - (mu-m)] * G ) irow = irow + 1 icol = icol+1 return T def planewave(amp, phi, theta, x, y, z, k): """Planewave """ kx, ky, kz = sph2cart(phi, theta, k) p = amp * np.exp(-1j * (kx * x + ky * y + kz * z)) return p def planewave_mode(order, amp, phi, theta, x, y, z, k): """Expansion coefficients of planewave by spherical wavefunctions """ kx, ky, kz = sph2cart(phi, theta, k) A = amp * np.exp(-1j * (kx*x + ky*y + kz*z)) n, m = sph_harm_nmvec(order, 1) coef = A * np.sqrt(4 * np.pi) * (-1j)**n * sph_harm(m, n, phi, theta).conj() return coef def sphericalwave(amp, x_s, y_s, z_s, x, y, z, k): """Point source (3D free-field Green's function) """ r = np.sqrt((x-x_s)**2 + (y-y_s)**2 + (z-z_s)**2) p = amp * np.exp(- 1j * k * r) / (4 * np.pi * r) return p def sphericalwave_mode(order, amp, x_s, y_s, z_s, x, y, z, k): """Expansion coefficients of point source by spherical wavefunctions """ phi_s, theta_s, r_s = cart2sph(x_s-x, y_s-y, z_s-z) n, m = sph_harm_nmvec(order, 1) if np.isscalar(phi_s) is False: numPos = phi_s.shape[0] numOrd = n.shape[0] n = np.tile(n, (1,numPos)) m = np.tile(m, (1,numPos)) amp = np.tile(amp.T, (numOrd, 1)) phi_s = np.tile(phi_s, (numOrd, 1)) theta_s = np.tile(theta_s, (numOrd, 1)) r_s = np.tile(r_s, (numOrd, 1)) coef = - amp * 1j * k / np.sqrt(4 * np.pi) * spherical_hn(n, 2, k*r_s) * sph_harm(m, n, phi_s, theta_s).conj() return coef def coefEstOprGen(posEst, orderEst, posMic, orderMic, coefMic, k): """Generate operator to estimate expansion coefficients of spherical wavefunctions from measurement vectors - <NAME>, <NAME>, and <NAME>, “Sound Field Recording Using Distributed Microphones Based on Harmonic Analysis of Infinite Order,” IEEE SPL, DOI: 10.1109/LSP.2017.2775242, 2018. Parameters ------ posEst: Position of expansion center for estimation orderEst: Maximum order for estimation poMic: Microphone positions orderMic: Maximum order of microphone directivity coefMic: Expansion coefficients of microphone directivity Returns ------ Operator for estimation (Expansion coefficeints are estimated by multiplying with measurement vectors) """ reg = 1e-3 numMic = posMic.shape[0] if np.isscalar(k): numFreq = 1 k = np.array([k]) else: numFreq = k.shape[0] Xi = np.zeros((numFreq, (orderEst+1)**2, numMic), dtype=complex) Psi = np.zeros((numFreq, numMic, numMic), dtype=complex) for ff in np.arange(numFreq): print('Frequency: %d/%d' % (ff, numFreq)) for j in np.arange(numMic): T = trjmat3d(orderEst, orderMic, posEst[0, 0]-posMic[j, 0], posEst[0, 1]-posMic[j, 1], posEst[0, 2]-posMic[j, 2], k[ff]) Xi[ff, :, j] = T @ coefMic[:, j] Psi[ff, j, j] = coefMic[:, j].conj().T @ coefMic[:, j] for i in np.arange(j, numMic): T = trjmat3d(orderMic, orderMic, posMic[i, 0]-posMic[j, 0], posMic[i, 1]-posMic[j, 1], posMic[i, 2]-posMic[j, 2], k[ff]) Psi[ff, i, j] = coefMic[:, i].conj().T @ T @ coefMic[:, j] Psi[ff, j, i] = Psi[ff, i, j].conj() Psi_inv = np.linalg.inv(Psi + reg * np.eye(numMic, numMic)[None, :, :]) coefEstOpr = Xi @ Psi_inv return coefEstOpr def kiFilterGen(k, posMic, posEst, filterLen=None, smplShift=None): """Kernel interpolation filter for estimating pressure distribution from measurements - <NAME>, <NAME>, and <NAME>uwatari, “Kernel Ridge Regression With Constraint of Helmholtz Equation for Sound Field Interpolation,” Proc. 
IWAENC, DOI: 10.1109/IWAENC.2018.8521334, 2018. - <NAME>, <NAME>, and <NAME>, “Sound Field Recording Using Distributed Microphones Based on Harmonic Analysis of Infinite Order,” IEEE SPL, DOI: 10.1109/LSP.2017.2775242, 2018. """ numMic = posMic.shape[0] numEst = posEst.shape[0] numFreq = k.shape[0] fftlen = numFreq*2 reg = 1e-1 if filterLen is None: filterLen = numFreq+1 if smplShift is None: smplShift = numFreq/2 k = k[:, None, None] distMat = distfuncs.cdist(posMic, posMic)[None, :, :] K = special.spherical_jn(0, k * distMat) Kinv = np.linalg.inv(K + reg * np.eye(numMic)[None, :, :]) distVec = np.transpose(distfuncs.cdist(posEst, posMic), (1, 0))[None, :, :] kappa = special.spherical_jn(0, k * distVec) kiTF = np.transpose(kappa, (0, 2, 1)) @ Kinv kiTF = np.concatenate((np.zeros((1, numEst, numMic)), kiTF, kiTF[int(fftlen/2)-2::-1, :, :].conj())) kiFilter = np.fft.ifft(kiTF, n=fftlen, axis=0).real kiFilter = np.concatenate((kiFilter[fftlen-smplShift:fftlen, :, :], kiFilter[:filterLen-smplShift, :, :])) return kiFilter def kiFilterGenDir(k, posMic, posEst, angSrc, betaSrc, filterLen=None, smplShift=None): """Kernel interpolation filter with directional weighting for estimating pressure distribution from measurements - <NAME>, <NAME>, and <NAME>, “Directionally Weighted Wave Field Estimation Exploiting Prior Information on Source Direction,” IEEE Trans. SP, DOI: 10.1109/TSP.2021.3070228, 2021. """ numMic = posMic.shape[0] numEst = posEst.shape[0] numFreq = k.shape[0] fftlen = numFreq*2 reg
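# Quick self-contained check (not part of the library above) that the truncated
# expansion returned by planewave_mode() reproduces the plane wave of
# planewave() near the expansion centre:
#   p(r) ~ sum_{n,m} coef_n^m * sqrt(4*pi) * j_n(k r) * Y_n^m(phi, theta)
# Frequency, direction and evaluation point below are arbitrary test values.
import numpy as np
from scipy import special

k = 2.0 * np.pi * 500.0 / 343.0          # wavenumber at 500 Hz, c = 343 m/s
phi_s, theta_s = 0.3, 1.2                # assumed plane-wave direction
x, y, z = 0.05, -0.02, 0.03              # evaluation point near the origin
order = 10

# direct evaluation, same convention as planewave(): amp * exp(-1j * k . r)
kx = k * np.sin(theta_s) * np.cos(phi_s)
ky = k * np.sin(theta_s) * np.sin(phi_s)
kz = k * np.cos(theta_s)
p_direct = np.exp(-1j * (kx * x + ky * y + kz * z))

# expansion around the origin, same convention as planewave_mode()/sf_int_basis3d()
r = np.sqrt(x**2 + y**2 + z**2)
phi = np.arctan2(y, x)
theta = np.arctan2(np.sqrt(x**2 + y**2), z)
p_series = 0.0 + 0.0j
for n in range(order + 1):
    for m in range(-n, n + 1):
        coef = np.sqrt(4 * np.pi) * (-1j)**n * np.conj(special.sph_harm(m, n, phi_s, theta_s))
        p_series += coef * np.sqrt(4 * np.pi) * special.spherical_jn(n, k * r) \
                    * special.sph_harm(m, n, phi, theta)

assert np.abs(p_direct - p_series) < 1e-6   # truncation error is tiny for k*r << order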
<reponame>BenniSchmiedel/ECO import numpy as np import xarray as xr import xgcm class Grid_ops: """ An object that includes operations for variables defined on a xgcm compatible grid. Those operations are defined to simplify dealing with mathematical operations that shift grid point positions when applied. Note: The operations include approximations of variables onto neighbouring grid positions by interpolation. For low grid resolutions the approximation through interpolation might introduce large deviations from the original data, especially where strong value shifts are present. """ def __init__(self, grid, discretization='standard', boundary={'boundary': 'fill', 'fill_value': 0}): """ Creates the standard configuration for the operations used. The configuration of an operation can be customized by will if required. """ self.grid = grid self.discretization = discretization self.boundary = boundary # Position types, hard coded self.points = {'T': (('z_c', 'y_c', 'x_c'), ('y_c', 'x_c'), ('z_c',)), 'U': (('z_c', 'y_c', 'x_f'), ('y_c', 'x_f'), ('x_f',)), 'V': (('z_c', 'y_f', 'x_c'), ('y_f', 'x_c'), ('y_f',)), 'F': (('z_c', 'y_f', 'x_f'), ('y_f', 'x_f')), 'W': (('z_f', 'y_c', 'x_c'), ('z_f',)), 'UW': (('z_f', 'y_c', 'x_f'),), 'VW': (('z_f', 'y_f', 'x_c'),), 'FW': (('z_f', 'y_f', 'x_f'),) } self.shift_mask = {} def _data_skip(self, da, skip): """ Returns a cut version of data vector da by skipping physical dimensions """ if type(skip) is int: da = da[:skip] + da[skip + 1:] elif type(skip) is list: k = 0 for ind in skip: da = da[:ind - k] + da[ind + 1 - k:] k += 1 else: raise Exception("""Input for "skip" not understood. Give integer or list.""") return da def _get_dims(self, da): """ Returns the spatial position of the data variable of data vector. Note: Output does not include 't'-dimension :param da: data variable or vector as xarray.Dataarray or List(xarray.Dataarray, ...) :return: position of data variable or vector """ # If da is a list (vector), return a list with the dimension tuple for every direction. if type(da) == list: pos = [] for i in range(len(da)): dims = da[i].dims # Remove temporal dimension if present if 't' in dims: ind = dims.index('t') dims = dims[:ind] + dims[ind + 1:] pos.append(dims) else: dims = da.dims if 't' in dims: ind = dims.index('t') dims = dims[:ind] + dims[ind + 1:] pos = dims return pos def _combine_metric(self, da, axes, skip=None): """ Returns the product of metrics corresponding to the respective datavariable. :param da: data variable or vector as xarray.Dataarray or List(xarray.Dataarray, ...) :return: position of data varaible or vector """ if type(skip) is list: pass elif skip is None: skip = list('_') else: skip = list(skip) metric = 1 for i in range(len(axes)): if any(axes[i] == np.array(skip)): pass else: metric = metric * self.grid.get_metric(da, axes[i]) return metric def _get_metric_by_pos(self, axes, pos): """ Returns the metrics for given axes corresponding to a grid point position. :param da: data variable or vector as xarray.Dataarray or List(xarray.Dataarray, ...) 
:return: position of data varaible or vector """ if type(axes) is list: metric_out = [] for ax in axes: check = False for i in self.points[pos]: metric_dims = self._get_dims(self.grid._metrics[frozenset(ax)]) if any([i == a for a in metric_dims]): check = True ind = np.where([i == a for a in metric_dims])[0] #print(ind) if len(ind) == 1: metric_out.append(self.grid._metrics[frozenset(ax)][ind[0]]) else: raise Exception( "Found multiple matches for the metric, ensure that metrics are not doubled.") if not check: raise Exception("No matching metric was found on axis %s for position %s" % (ax, pos)) else: ax = axes #metric_out = [] check = False for i in self.points[pos]: metric_dims = self._get_dims(self.grid._metrics[frozenset(ax)]) if any([i == a for a in metric_dims]): check = True ind = np.where([i == a for a in metric_dims])[0] if len(ind) == 1: #metric_out.append(self.grid._metrics[frozenset(ax)][ind[0]]) metric_out = self.grid._metrics[frozenset(ax)][ind[0]] else: raise Exception( "Found multiple matches for the metric, ensure that metrics are not doubled.") if not check: raise Exception("No matching metric was found on axis %s for position %s" % (ax, pos)) return metric_out def _get_shift_mask(self, da, fill_value=None, scaling=1): """ :param da: dataset from which the mask is generated from Return the 3D boundary mask which follows the terrain needed when a shift is performed. - scaling*boundary_value for the boundary value at the shifted position - 0 inside the boundary - nan outside the boundary If a specific fill_value is used it replaces scaling*boundary_value Boundary values of da have to be nan! Applicable for positions 'U', 'V', 'W' and 'F' with a shift to 'T'. When first calculated it is added to self.shift_mask. :return: da_mask """ t = False if da.dims[0]=='t': t=True mask = np.zeros(da.shape[1:]) da_ref = da[0] else: mask = np.zeros(da.shape) da_ref = da x_len=da_ref.shape[2] y_len=da_ref.shape[1] z_len=da_ref.shape[0] pos = self._get_position(da) ### Distinguish mask by position if pos=='U': for z in range(z_len): for y in range(y_len): for x in range(x_len-1): v = da_ref[z,y,x] v1 = da_ref[z,y,x+1] if x==0: skip=False elif x==x_len-2: if np.isnan(da_ref[z,y,x+1]): mask[z,y,x+1] = np.nan if skip: skip = False continue else: if not np.isnan(v) and not np.isnan(v1): continue if np.isnan(v) and np.isnan(v1): mask[z,y,x] = np.nan elif np.isnan(v) and not np.isnan(v1): mask[z,y,x] = np.nan if fill_value is None: mask[z, y, x + 1] = scaling*v1 else: mask[z, y, x + 1] = fill_value skip = True elif not np.isnan(v) and np.isnan(v1): #mask[z, y, x] = 1 if fill_value is None: mask[z, y, x + 1] = scaling * v else: mask[z, y, x + 1] = fill_value skip = True if t: return da[0].copy(data=mask).rename('shift_mask' + pos) else: return da.copy(data=mask).rename('shift_mask' + pos) if pos == 'V': for z in range(z_len): for x in range(x_len): for y in range(y_len-1): v = da_ref[z, y, x] v1 = da_ref[z, y+1, x] if y == 0: skip = False elif y == y_len - 2: if np.isnan(da_ref[z, y+1, x]): mask[z, y+1, x] = np.nan if skip: skip = False continue else: if not np.isnan(v) and not np.isnan(v1): continue if np.isnan(v) and np.isnan(v1): mask[z, y, x] = np.nan elif np.isnan(v) and not np.isnan(v1): mask[z, y, x] = np.nan if fill_value is None: mask[z, y+1, x] = scaling*v1 else: mask[z, y+1, x] = fill_value skip = True elif not np.isnan(v) and np.isnan(v1): if fill_value is None: mask[z, y + 1, x] = scaling * v else: mask[z, y + 1, x] = fill_value skip = True if t: return 
da[0].copy(data=mask).rename('shift_mask' + pos) else: return da.copy(data=mask).rename('shift_mask' + pos) if pos == 'W': for x in range(x_len): for y in range(y_len): for z in range(z_len - 1): v = da_ref[z, y, x] v1 = da_ref[z+1, y, x] if z == 0: skip = False elif z == z_len - 2: if np.isnan(da_ref[z+1, y, x]): mask[z+1, y, x] = np.nan if skip: skip = False continue else: if not np.isnan(v) and not np.isnan(v1): continue if np.isnan(v) and np.isnan(v1): mask[z, y, x] = np.nan elif np.isnan(v) and not np.isnan(v1): mask[z, y, x] = np.nan if fill_value is None: mask[z+1, y, x] = scaling*v1 else: mask[z+1, y, x] = fill_value skip = True elif not np.isnan(v) and np.isnan(v1): if fill_value is None: mask[z+1, y, x] = scaling * v else: mask[z+1, y, x] = fill_value skip = True if t: return da[0].copy(data=mask).rename('shift_mask' + pos) else: return da.copy(data=mask).rename('shift_mask' + pos) """if pos == 'F': if 'F' in self.shift_mask: mask = self.shift_mask[pos] elif 'U' not in self.shift_mask: raise Exception('Please run shift mask for U first') elif 'V' not in self.shift_mask: raise Exception('Please run shift mask for V first') else: if t: self.shift_mask[pos] = da[0].copy(data=self.shift_mask['U'].values*self.shift_mask['V'].values).rename('shift_mask'+pos) else: self.shift_mask[pos] = da.copy(data=self.shift_mask['U'].values*self.shift_mask['V'].values).rename('shift_mask'+pos) if t: return da[0].copy(data=mask).rename('shift_mask'+pos) else: return da.copy(data=mask).rename('shift_mask'+pos)""" def _get_position(self, da, skip=None): """ Returns the spatial position of the data variable of data vector. Note: Output does not include 't'-dimension :param da: data variable or vector as xarray.DataArray or List(xarray.DataArray, ...) :return: position of data variable or vector """ # If da is a list (vector), return a list with the dimension tuple for every direction. if skip != None: da = self._data_skip(da, skip) dims = self._get_dims(da) if type(dims) is list: positions = [] for dim in dims: for
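# Minimal sketch of the dimension/position bookkeeping that Grid_ops relies on:
# a variable's grid position ('T', 'U', 'V', ...) is inferred purely from its
# spatial dimension names after dropping the time dimension. The names below
# follow the hard-coded table in Grid_ops.points; the data itself is made up.
import numpy as np
import xarray as xr

points = {'T': ('z_c', 'y_c', 'x_c'),
          'U': ('z_c', 'y_c', 'x_f'),
          'V': ('z_c', 'y_f', 'x_c'),
          'W': ('z_f', 'y_c', 'x_c')}

def position_of(da):
    """Return the grid-point label matching the DataArray's spatial dims."""
    dims = tuple(d for d in da.dims if d != 't')   # drop the time dimension
    for pos, spatial in points.items():
        if dims == spatial:
            return pos
    raise ValueError("unknown grid position for dims %s" % (dims,))

u = xr.DataArray(np.zeros((2, 3, 4, 5)), dims=('t', 'z_c', 'y_c', 'x_f'))
assert position_of(u) == 'U'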
def __init__(self, *args, **options): """ Initialize a :class:`RemoteCommand` object. :param args: Refer to the initializers of the :class:`RemoteAccount` and :class:`.ExternalCommand` classes. :param options: Keyword arguments can be used to conveniently override the values of :attr:`batch_mode`, :attr:`connect_timeout`, :attr:`identity_file`, :attr:`ignore_known_hosts`, :attr:`log_level`, :attr:`port`, :attr:`strict_host_key_checking`, :attr:`known_hosts_file`, :attr:`ssh_command` and the writable properties of the base classes :class:`RemoteAccount` and :class:`.ExternalCommand`. Any other keyword argument will raise :exc:`TypeError` as usual. The remote command is not started until you call :func:`~executor.ExternalCommand.start()` or :func:`~executor.ExternalCommand.wait()`. """ # Inject our logger as a default. options.setdefault('logger', logger) # Set the default remote working directory. self.remote_directory = DEFAULT_WORKING_DIRECTORY # Initialize the super class. super(RemoteCommand, self).__init__(*args, **options) @mutable_property def batch_mode(self): """ Control the SSH client option ``BatchMode`` (a boolean, defaults to :data:`True`). The following description is quoted from `man ssh_config`_: If set to "yes", passphrase/password querying will be disabled. In addition, the ``ServerAliveInterval`` option will be set to 300 seconds by default. This option is useful in scripts and other batch jobs where no user is present to supply the password, and where it is desirable to detect a broken network swiftly. The argument must be "yes" or "no". The default is "no". This property defaults to :data:`True` because it can get really awkward when a batch of SSH clients query for a passphrase/password on standard input at the same time. .. _man ssh_config: http://www.openbsd.org/cgi-bin/man.cgi/OpenBSD-current/man5/ssh_config.5 """ return True @mutable_property def command(self): """ A list of strings with the command to execute (optional). The value of :attr:`command` is optional for :class:`RemoteCommand` objects (as opposed to :class:`.ExternalCommand` objects) because the use of SSH implies a remote (interactive) shell that usually also accepts (interactive) commands as input. This means it is valid to create a remote command object without an actual remote command to execute, but with input that provides commands to execute instead. This "feature" can be useful to control non-UNIX systems that do accept SSH connections but don't support a conventional UNIX shell. For example, I added support for this "feature" so that I was able to send commands to Juniper routers and switches over SSH with the purpose of automating the failover of a connection between two datacenters (the resulting Python program works great and it's much faster than I am, making all of the required changes in a couple of seconds :-). """ return [] @property def command_line(self): """ The complete SSH client command including the remote command. This is a list of strings with the SSH client command to connect to the remote host and execute :attr:`~.ExternalCommand.command`. 
""" ssh_command = list(self.ssh_command) if self.identity_file: ssh_command.extend(('-i', self.identity_file)) if self.ssh_user: ssh_command.extend(('-l', self.ssh_user)) if self.port: ssh_command.extend(('-p', '%i' % self.port)) ssh_command.extend(('-o', 'BatchMode=%s' % ('yes' if self.batch_mode else 'no'))) ssh_command.extend(('-o', 'ConnectTimeout=%i' % self.connect_timeout)) ssh_command.extend(('-o', 'LogLevel=%s' % self.log_level)) if self.strict_host_key_checking in ('yes', 'no', 'ask'): ssh_command.extend(('-o', 'StrictHostKeyChecking=%s' % self.strict_host_key_checking)) else: ssh_command.extend(('-o', 'StrictHostKeyChecking=%s' % ('yes' if self.strict_host_key_checking else 'no'))) ssh_command.extend(('-o', 'UserKnownHostsFile=%s' % self.known_hosts_file)) if self.compression: ssh_command.append('-C') if self.tty: ssh_command.append('-t') ssh_command.append(self.ssh_alias) remote_command = quote(super(RemoteCommand, self).command_line) if remote_command: if self.remote_directory != DEFAULT_WORKING_DIRECTORY: cd_command = 'cd %s' % quote(self.remote_directory) remote_command = quote(self.prefix_shell_command(cd_command, remote_command)) ssh_command.append(remote_command) return ssh_command @mutable_property def compression(self): """Whether compression is enabled (a boolean, defaults to :data:`False`).""" return False @mutable_property def connect_timeout(self): """ Control the SSH client option ``ConnectTimeout`` (an integer). The following description is quoted from `man ssh_config`_: Specifies the timeout (in seconds) used when connecting to the SSH server, instead of using the default system TCP timeout. This value is used only when the target is down or really unreachable, not when it refuses the connection. Defaults to :data:`DEFAULT_CONNECT_TIMEOUT` so that non-interactive SSH connections created by :class:`RemoteCommand` don't hang indefinitely when the remote system doesn't respond properly. """ return DEFAULT_CONNECT_TIMEOUT @property def directory(self): """ Set the remote working directory. When you set this property you change the remote working directory, however reading back the property you'll just get :data:`.DEFAULT_WORKING_DIRECTORY`. This is because the superclass :class:`.ExternalCommand` uses :attr:`directory` as the local working directory for the ``ssh`` command, and a remote working directory isn't guaranteed to also exist on the local system. """ return DEFAULT_WORKING_DIRECTORY @directory.setter def directory(self, value): """Redirect assignment from `directory` to `remote_directory`.""" self.remote_directory = value @mutable_property def error_message(self): """A user friendly explanation of how the remote command failed (a string or :data:`None`).""" messages = { RemoteCommandFailed: "External command on {a} failed with exit code {n}!", RemoteCommandNotFound: "External command on {a} isn't available!", RemoteConnectFailed: "SSH connection to {a} failed!", } if self.error_type in messages: return self.format_error_message("\n\n".join([ messages[self.error_type], "SSH command:\n{c}", ]), a=self.ssh_alias, n=self.returncode, c=quote(self.command_line)) @mutable_property def error_type(self): """ An exception class applicable to the kind of failure detected or :data:`None`. :class:`RemoteConnectFailed` when :attr:`~.ExternalCommand.returncode` is set and matches :data:`SSH_ERROR_STATUS`, :class:`RemoteCommandFailed` when :attr:`~.ExternalCommand.returncode` is set and not zero, :data:`None` otherwise. 
""" if self.returncode == SSH_ERROR_STATUS: return RemoteConnectFailed elif self.returncode == COMMAND_NOT_FOUND_STATUS: return RemoteCommandNotFound elif self.returncode not in (None, 0): return RemoteCommandFailed @property def have_superuser_privileges(self): """ :data:`True` if :attr:`.ssh_user` is set to 'root', :data:`False` otherwise. There's no easy way for :class:`RemoteCommand` to determine whether any given SSH alias logs into a remote system with `superuser privileges`_ so unless :attr:`.ssh_user` is set to 'root' this is always :data:`False`. .. _superuser privileges: http://en.wikipedia.org/wiki/Superuser#Unix_and_Unix-like """ return self.ssh_user == 'root' @mutable_property def identity_file(self): """The pathname of the identity file used to connect to the remote host (a string or :data:`None`).""" @property def ignore_known_hosts(self): """ Whether host key checking is disabled. This is :data:`True` if host key checking is completely disabled: - :attr:`known_hosts_file` is set to :data:`os.devnull` - :attr:`strict_host_key_checking` is set to :data:`False` If you set this to :data:`True` host key checking is disabled and :attr:`log_level` is set to 'error' to silence warnings about automatically accepting host keys. If you set this to :data:`False` then :attr:`known_hosts_file`, :attr:`log_level` and :attr:`strict_host_key_checking` are reset to their default values. """ return self.known_hosts_file == os.devnull and self.strict_host_key_checking in (False, 'no') @ignore_known_hosts.setter def ignore_known_hosts(self, value): if value: self.known_hosts_file = os.devnull self.log_level = 'error' self.strict_host_key_checking = False else: del self.known_hosts_file del self.log_level del self.strict_host_key_checking @mutable_property def log_level(self): """ Control the SSH client option ``LogLevel`` (a string, defaults to 'info'). The following description is quoted from `man ssh_config`_: Gives the verbosity level that is used when logging messages from ``ssh``. The possible values are: QUIET, FATAL, ERROR, INFO, VERBOSE, DEBUG, DEBUG1, DEBUG2, and DEBUG3. The default is INFO. DEBUG and DEBUG1 are equivalent. DEBUG2 and DEBUG3 each specify higher levels of verbose output. """ return 'info' @mutable_property def ssh_command(self): """ The command used to run the SSH client program. This is a list of strings, by default the list contains just :data:`SSH_PROGRAM_NAME`. The :attr:`batch_mode`, :attr:`connect_timeout`, :attr:`log_level`, :attr:`.ssh_alias` and :attr:`strict_host_key_checking` properties also influence the SSH client command line used. """ return [SSH_PROGRAM_NAME] @mutable_property def port(self): """The port number of the SSH server (defaults to :data:`None` which means the SSH client program decides).""" @mutable_property def strict_host_key_checking(self): """ Control the SSH client option ``StrictHostKeyChecking``. This property accepts the values :data:`True` and :data:`False` and the strings 'yes', 'no' and 'ask'. The following description is quoted from `man ssh_config`_: If this flag is set to "yes", ``ssh`` will never automatically add host keys to the ``~/.ssh/known_hosts`` file, and refuses to connect to hosts whose host key has changed. This provides maximum protection against trojan horse attacks, though it can be annoying when the ``/etc/ssh/ssh_known_hosts`` file is poorly maintained or when connections to new hosts are frequently made. This option forces the user to manually add all new hosts. 
If this flag is set to "no", ssh will automatically add new host keys to the user known hosts files. If this flag is set to "ask", new host keys will be added to the user known host files only after the user has confirmed that is what they really want to do, and ssh will refuse to connect to hosts whose host key has changed. The host keys of known hosts will be verified
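# Illustrative sketch (not part of the class above) of how the command_line
# property composes the final client invocation: ssh options first, then the
# alias, then the remote command collapsed into one shell-quoted argument,
# optionally prefixed by a cd into the remote working directory. The alias,
# directory and command below are hypothetical, and '&&' stands in for the
# prefix_shell_command() helper used by the real implementation.
from shlex import quote

def build_ssh_command(alias, remote_command, port=None, batch_mode=True,
                      connect_timeout=10, remote_directory=None):
    cmd = ['ssh']
    if port:
        cmd += ['-p', '%i' % port]
    cmd += ['-o', 'BatchMode=%s' % ('yes' if batch_mode else 'no')]
    cmd += ['-o', 'ConnectTimeout=%i' % connect_timeout]
    cmd.append(alias)
    remote = ' '.join(quote(token) for token in remote_command)
    if remote_directory:
        remote = 'cd %s && %s' % (quote(remote_directory), remote)
    cmd.append(remote)
    return cmd

# build_ssh_command('db-server', ['ls', '-l'], port=2222, remote_directory='/tmp')
# -> ['ssh', '-p', '2222', '-o', 'BatchMode=yes', '-o', 'ConnectTimeout=10',
#     'db-server', 'cd /tmp && ls -l']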
import numpy as np import matplotlib matplotlib.use("Agg") # Must be before importing matplotlib.pyplot or pylab! from matplotlib import pyplot as plt from matplotlib.colors import to_rgb from matplotlib import cm from mpl_toolkits.mplot3d import proj3d, Axes3D from tqdm import tqdm from typing import Dict, Sequence def plot_video_with_surface( rods_history: Sequence[Dict], video_name="video.mp4", fps=60, step=1, vis2D=True, **kwargs, ): plt.rcParams.update({"font.size": 22}) folder_name = kwargs.get("folder_name", "") # 2d case <always 2d case for now> import matplotlib.animation as animation # simulation time sim_time = np.array(rods_history[0]["time"]) # Rod n_visualized_rods = len(rods_history) # should be one for now # Rod info rod_history_unpacker = lambda rod_idx, t_idx: ( rods_history[rod_idx]["position"][t_idx], rods_history[rod_idx]["radius"][t_idx], ) # Rod center of mass com_history_unpacker = lambda rod_idx, t_idx: rods_history[rod_idx]["com"][time_idx] # Generate target sphere data sphere_flag = False if kwargs.__contains__("sphere_history"): sphere_flag = True sphere_history = kwargs.get("sphere_history") n_visualized_spheres = len(sphere_history) # should be one for now sphere_history_unpacker = lambda sph_idx, t_idx: ( sphere_history[sph_idx]["position"][t_idx], sphere_history[sph_idx]["radius"][t_idx], ) # color mapping sphere_cmap = cm.get_cmap("Spectral", n_visualized_spheres) # video pre-processing print("plot scene visualization video") FFMpegWriter = animation.writers["ffmpeg"] metadata = dict(title="Movie Test", artist="Matplotlib", comment="Movie support!") writer = FFMpegWriter(fps=fps, metadata=metadata) dpi = kwargs.get("dpi", 100) xlim = kwargs.get("x_limits", (-1.0, 1.0)) ylim = kwargs.get("y_limits", (-1.0, 1.0)) zlim = kwargs.get("z_limits", (-0.05, 1.0)) difference = lambda x: x[1] - x[0] max_axis_length = max(difference(xlim), difference(ylim)) # The scaling factor from physical space to matplotlib space scaling_factor = (2 * 0.1) / max_axis_length # Octopus head dimension scaling_factor *= 2.6e3 # Along one-axis if kwargs.get("vis3D", True): fig = plt.figure(1, figsize=(10, 8), frameon=True, dpi=dpi) ax = plt.axes(projection="3d") ax.set_xlabel("x") ax.set_ylabel("y") ax.set_zlabel("z") ax.set_xlim(*xlim) ax.set_ylim(*ylim) ax.set_zlim(*zlim) time_idx = 0 rod_lines = [None for _ in range(n_visualized_rods)] rod_com_lines = [None for _ in range(n_visualized_rods)] rod_scatters = [None for _ in range(n_visualized_rods)] for rod_idx in range(n_visualized_rods): inst_position, inst_radius = rod_history_unpacker(rod_idx, time_idx) if not inst_position.shape[1] == inst_radius.shape[0]: inst_position = 0.5 * (inst_position[..., 1:] + inst_position[..., :-1]) rod_scatters[rod_idx] = ax.scatter( inst_position[0], inst_position[1], inst_position[2], s=np.pi * (scaling_factor * inst_radius) ** 2, ) if sphere_flag: sphere_artists = [None for _ in range(n_visualized_spheres)] for sphere_idx in range(n_visualized_spheres): sphere_position, sphere_radius = sphere_history_unpacker( sphere_idx, time_idx ) sphere_artists[sphere_idx] = ax.scatter( sphere_position[0], sphere_position[1], sphere_position[2], s=np.pi * (scaling_factor * sphere_radius) ** 2, ) # sphere_radius, # color=sphere_cmap(sphere_idx),) ax.add_artist(sphere_artists[sphere_idx]) # ax.set_aspect("equal") video_name_3D = folder_name + "3D_" + video_name with writer.saving(fig, video_name_3D, dpi): with plt.style.context("seaborn-whitegrid"): for time_idx in tqdm(range(0, sim_time.shape[0], int(step))): for 
rod_idx in range(n_visualized_rods): inst_position, inst_radius = rod_history_unpacker( rod_idx, time_idx ) if not inst_position.shape[1] == inst_radius.shape[0]: inst_position = 0.5 * ( inst_position[..., 1:] + inst_position[..., :-1] ) rod_scatters[rod_idx]._offsets3d = ( inst_position[0], inst_position[1], inst_position[2], ) # rod_scatters[rod_idx].set_offsets(inst_position[:2].T) rod_scatters[rod_idx].set_sizes( np.pi * (scaling_factor * inst_radius) ** 2 ) if sphere_flag: for sphere_idx in range(n_visualized_spheres): sphere_position, _ = sphere_history_unpacker( sphere_idx, time_idx ) sphere_artists[sphere_idx]._offsets3d = ( sphere_position[0], sphere_position[1], sphere_position[2], ) writer.grab_frame() # Be a good boy and close figures # https://stackoverflow.com/a/37451036 # plt.close(fig) alone does not suffice # See https://github.com/matplotlib/matplotlib/issues/8560/ plt.close(plt.gcf()) if kwargs.get("vis2D", True): max_axis_length = max(difference(xlim), difference(ylim)) # The scaling factor from physical space to matplotlib space scaling_factor = (2 * 0.1) / max_axis_length # Octopus head dimension scaling_factor *= 2.6e3 # Along one-axis fig = plt.figure(2, figsize=(10, 8), frameon=True, dpi=dpi) ax = fig.add_subplot(111) ax.set_xlim(*xlim) ax.set_ylim(*ylim) time_idx = 0 rod_lines = [None for _ in range(n_visualized_rods)] rod_com_lines = [None for _ in range(n_visualized_rods)] rod_scatters = [None for _ in range(n_visualized_rods)] for rod_idx in range(n_visualized_rods): inst_position, inst_radius = rod_history_unpacker(rod_idx, time_idx) if not inst_position.shape[1] == inst_radius.shape[0]: inst_position = 0.5 * (inst_position[..., 1:] + inst_position[..., :-1]) rod_lines[rod_idx] = ax.plot( inst_position[0], inst_position[1], "r", lw=0.5 )[0] inst_com = com_history_unpacker(rod_idx, time_idx) rod_com_lines[rod_idx] = ax.plot(inst_com[0], inst_com[1], "k--", lw=2.0)[0] rod_scatters[rod_idx] = ax.scatter( inst_position[0], inst_position[1], s=np.pi * (scaling_factor * inst_radius) ** 2, ) if sphere_flag: sphere_artists = [None for _ in range(n_visualized_spheres)] for sphere_idx in range(n_visualized_spheres): sphere_position, sphere_radius = sphere_history_unpacker( sphere_idx, time_idx ) sphere_artists[sphere_idx] = Circle( (sphere_position[0], sphere_position[1]), sphere_radius, color=sphere_cmap(sphere_idx), ) ax.add_artist(sphere_artists[sphere_idx]) ax.set_aspect("equal") video_name_2D = folder_name + "2D_xy_" + video_name with writer.saving(fig, video_name_2D, dpi): with plt.style.context("seaborn-whitegrid"): for time_idx in tqdm(range(0, sim_time.shape[0], int(step))): for rod_idx in range(n_visualized_rods): inst_position, inst_radius = rod_history_unpacker( rod_idx, time_idx ) if not inst_position.shape[1] == inst_radius.shape[0]: inst_position = 0.5 * ( inst_position[..., 1:] + inst_position[..., :-1] ) rod_lines[rod_idx].set_xdata(inst_position[0]) rod_lines[rod_idx].set_ydata(inst_position[1]) com = com_history_unpacker(rod_idx, time_idx) rod_com_lines[rod_idx].set_xdata(com[0]) rod_com_lines[rod_idx].set_ydata(com[1]) rod_scatters[rod_idx].set_offsets(inst_position[:2].T) rod_scatters[rod_idx].set_sizes( np.pi * (scaling_factor * inst_radius) ** 2 ) if sphere_flag: for sphere_idx in range(n_visualized_spheres): sphere_position, _ = sphere_history_unpacker( sphere_idx, time_idx ) sphere_artists[sphere_idx].center = ( sphere_position[0], sphere_position[1], ) writer.grab_frame() # Be a good boy and close figures # https://stackoverflow.com/a/37451036 
# plt.close(fig) alone does not suffice # See https://github.com/matplotlib/matplotlib/issues/8560/ plt.close(plt.gcf()) # Plot zy max_axis_length = max(difference(zlim), difference(ylim)) # The scaling factor from physical space to matplotlib space scaling_factor = (2 * 0.1) / max_axis_length # Octopus head dimension scaling_factor *= 2.6e3 # Along one-axis fig = plt.figure(2, figsize=(10, 8), frameon=True, dpi=dpi) ax = fig.add_subplot(111) ax.set_xlim(*zlim) ax.set_ylim(*ylim) time_idx = 0 rod_lines = [None for _ in range(n_visualized_rods)] rod_com_lines = [None for _ in range(n_visualized_rods)] rod_scatters = [None for _ in range(n_visualized_rods)] for rod_idx in range(n_visualized_rods): inst_position, inst_radius = rod_history_unpacker(rod_idx, time_idx) if not inst_position.shape[1] == inst_radius.shape[0]: inst_position = 0.5 * (inst_position[..., 1:] + inst_position[..., :-1]) rod_lines[rod_idx] = ax.plot( inst_position[2], inst_position[1], "r", lw=0.5 )[0] inst_com = com_history_unpacker(rod_idx, time_idx) rod_com_lines[rod_idx] = ax.plot(inst_com[2], inst_com[1], "k--", lw=2.0)[0] rod_scatters[rod_idx] = ax.scatter( inst_position[2], inst_position[1], s=np.pi * (scaling_factor * inst_radius) ** 2, ) if sphere_flag: sphere_artists = [None for _ in range(n_visualized_spheres)] for sphere_idx in range(n_visualized_spheres): sphere_position, sphere_radius = sphere_history_unpacker( sphere_idx, time_idx ) sphere_artists[sphere_idx] = Circle( (sphere_position[2], sphere_position[1]), sphere_radius, color=sphere_cmap(sphere_idx), ) ax.add_artist(sphere_artists[sphere_idx]) ax.set_aspect("equal") video_name_2D = folder_name + "2D_zy_" + video_name with writer.saving(fig, video_name_2D, dpi): with plt.style.context("seaborn-whitegrid"): for time_idx in tqdm(range(0, sim_time.shape[0], int(step))): for rod_idx in range(n_visualized_rods): inst_position, inst_radius = rod_history_unpacker( rod_idx, time_idx ) if not inst_position.shape[1] == inst_radius.shape[0]: inst_position = 0.5 * ( inst_position[..., 1:] + inst_position[..., :-1] ) rod_lines[rod_idx].set_xdata(inst_position[2]) rod_lines[rod_idx].set_ydata(inst_position[1]) com = com_history_unpacker(rod_idx, time_idx) rod_com_lines[rod_idx].set_xdata(com[2]) rod_com_lines[rod_idx].set_ydata(com[1]) rod_scatters[rod_idx].set_offsets( np.vstack((inst_position[2], inst_position[1])).T ) rod_scatters[rod_idx].set_sizes( np.pi * (scaling_factor * inst_radius) ** 2 ) if sphere_flag: for sphere_idx in range(n_visualized_spheres): sphere_position, _ = sphere_history_unpacker( sphere_idx, time_idx ) sphere_artists[sphere_idx].center = ( sphere_position[2], sphere_position[1], ) writer.grab_frame() # Be a good boy and close figures # https://stackoverflow.com/a/37451036 # plt.close(fig) alone does not suffice # See https://github.com/matplotlib/matplotlib/issues/8560/ plt.close(plt.gcf()) # Plot xz fig = plt.figure(2, figsize=(10, 8), frameon=True, dpi=dpi) ax = fig.add_subplot(111) ax.set_xlim(*xlim) ax.set_ylim(*zlim) # The scaling factor from physical space to matplotlib space max_axis_length = max(difference(zlim), difference(xlim)) scaling_factor = (2 * 0.1) / (max_axis_length) # Octopus head dimension scaling_factor *= 2.6e3 # Along one-axis time_idx = 0 rod_lines = [None for _ in range(n_visualized_rods)] rod_com_lines = [None for _ in range(n_visualized_rods)] rod_scatters = [None for _ in range(n_visualized_rods)] for rod_idx in range(n_visualized_rods): inst_position, inst_radius = rod_history_unpacker(rod_idx, time_idx) if not 
inst_position.shape[1] == inst_radius.shape[0]: inst_position = 0.5 * (inst_position[..., 1:] + inst_position[..., :-1]) rod_lines[rod_idx] = ax.plot( inst_position[0], inst_position[2], "r", lw=0.5 )[0] inst_com = com_history_unpacker(rod_idx, time_idx) rod_com_lines[rod_idx] = ax.plot(inst_com[0], inst_com[2], "k--", lw=2.0)[0] rod_scatters[rod_idx] = ax.scatter( inst_position[0], inst_position[2], s=np.pi * (scaling_factor * inst_radius) ** 2, ) if sphere_flag: sphere_artists = [None for _ in range(n_visualized_spheres)] for sphere_idx in range(n_visualized_spheres): sphere_position, sphere_radius = sphere_history_unpacker( sphere_idx, time_idx ) sphere_artists[sphere_idx] = Circle( (sphere_position[0], sphere_position[2]), sphere_radius, color=sphere_cmap(sphere_idx), ) ax.add_artist(sphere_artists[sphere_idx]) ax.set_aspect("equal") video_name_2D = folder_name + "2D_xz_" + video_name with writer.saving(fig, video_name_2D, dpi): with plt.style.context("seaborn-whitegrid"): for time_idx in tqdm(range(0, sim_time.shape[0], int(step))): for rod_idx in range(n_visualized_rods): inst_position, inst_radius = rod_history_unpacker( rod_idx, time_idx ) if not inst_position.shape[1] == inst_radius.shape[0]: inst_position = 0.5 * ( inst_position[..., 1:] + inst_position[..., :-1] ) rod_lines[rod_idx].set_xdata(inst_position[0]) rod_lines[rod_idx].set_ydata(inst_position[2]) com = com_history_unpacker(rod_idx, time_idx) rod_com_lines[rod_idx].set_xdata(com[0]) rod_com_lines[rod_idx].set_ydata(com[2]) rod_scatters[rod_idx].set_offsets( np.vstack((inst_position[0], inst_position[2])).T ) rod_scatters[rod_idx].set_sizes( np.pi * (scaling_factor * inst_radius) ** 2 ) if sphere_flag: for sphere_idx in range(n_visualized_spheres): sphere_position, _ = sphere_history_unpacker( sphere_idx, time_idx ) sphere_artists[sphere_idx].center = ( sphere_position[0], sphere_position[2], ) writer.grab_frame() # Be a good boy and close figures # https://stackoverflow.com/a/37451036 # plt.close(fig) alone does not suffice # See https://github.com/matplotlib/matplotlib/issues/8560/ plt.close(plt.gcf()) def plot_snake_velocity( plot_params: dict, period, filename="slithering_snake_velocity.png", ): time_per_period = np.array(plot_params["time"]) / period avg_velocity = np.array(plot_params["avg_velocity"]) [ velocity_in_direction_of_rod, velocity_in_rod_roll_dir, _, _, ] = compute_projected_velocity(plot_params, period) fig = plt.figure(figsize=(10, 8), frameon=True, dpi=150) ax = fig.add_subplot(111) ax.grid(b=True, which="minor", color="k", linestyle="--") ax.grid(b=True, which="major", color="k", linestyle="-") ax.plot( time_per_period[:], velocity_in_direction_of_rod[:, 0], "r-", label="forward" ) ax.plot( time_per_period[:], velocity_in_rod_roll_dir[:, 1], c=to_rgb("xkcd:bluish"), label="lateral", ) ax.plot(time_per_period[:],
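# A minimal helper sketch of the node-to-element conversion that the animation
# loops above repeat before every scatter update; the function name is
# illustrative and not taken from the original code.
import numpy as np

def positions_at_element_centers(position_collection, radius_collection):
    """Average adjacent nodes when positions are sampled at nodes
    (n_elements + 1) but radii at elements (n_elements), so both arrays
    line up for plotting."""
    if not position_collection.shape[1] == radius_collection.shape[0]:
        position_collection = 0.5 * (
            position_collection[..., 1:] + position_collection[..., :-1]
        )
    return position_collection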
<reponame>DeliciousLlama/MCturtle<gh_stars>0 from mcpi.minecraft import Minecraft from math import * import time import enum # ----enums---- class direction(enum.Enum): LEFT = 0 RIGHT = 1 UP = 2 DOWN = 3 KEEP_SAME = 4 class heading(enum.Enum): DOWN = 0 UP = 1 NORTH = 2 SOUTH = 3 WEST = 4 EAST = 5 class MCTurtle: def __init__(self, mc, posX, posY, posZ): self.mc = mc self.lifted = False self.yawDirections = [5, 3, 4, 2] # list of turn faces self.yaw = 0 # current pointer of yaw # v This you have to multiply by the current yaw direction v self.pitch = 1 # 1 = neural; 1/yaw = up; 0 = down self.rotation = self.yawDirections[self.yaw] * self.pitch self.strokeBlock = 0 self.isDown = True self.x = 0 self.y = 0 self.z = 0 self.homeX = 0 self.homeY = 0 self.HomeZ = 0 self.speed = 0.25 self.incomeStorage = None self.trailStorage = None self.createPen(posX, posY, posZ) @classmethod def initFromPlayerName(cls, mc, player): playerId = mc.getPlayerEntityId(player) pos = mc.entity.getPos(playerId) return cls(mc, pos) def headingToString(self, heading): if heading == 0 or heading == heading.UP: return "Facing Down" if heading == 1 or heading == heading.DOWN: return "Facing Up" if heading == 2 or heading == heading.NORTH: return "Facing North" if heading == 3 or heading == heading.SOUTH: return "Facing South" if heading == 4 or heading == heading.WEST: return "Facing West" if heading == 5 or heading == heading.EAST: return "Facing East" def createPen(self, cx, cy, cz): # make a pen facing east self.x = floor(cx) self.y = floor(cy) self.z = floor(cz) self.rotation = 5 self.trailStorage = self.mc.getBlockWithData(cx, cy, cz) self.incomeStorage = self.__get_block_on_side(self.rotation) self.homeX = self.x self.homeY = self.y self.homeZ = self.z self.mc.setBlock(cx, cy, cz, 33, 5) def updatePos(self, ux, uy, uz): self.x = ux self.y = uy self.z = uz def updateStroke(self, block): self.strokeBlock = block def __cycle_yaw(self, direction): assert direction in [direction.RIGHT, direction.LEFT, direction.KEEP_SAME], 'direction can only be LEFT, RIGHT, or KEEP_SAME.' if direction == direction.RIGHT: # cycle it to the RIGHT if self.yaw == 3: # reached end, reset self.yaw = 0 else: self.yaw += 1 elif direction == direction.LEFT: # cycle to left if self.yaw == 0: # reached end, reset self.yaw = 3 else: self.yaw -= 1 def __update_rotation(self, yawDirection, pitchDirection): # based on the yaw*pitch formula, update the master rotation value # first, we update the yaw self.__cycle_yaw(yawDirection) # second, we update the pitch if self.pitch == 1: # normal state if pitchDirection == direction.DOWN: # if user wants to face down self.pitch = 0 elif pitchDirection == direction.UP: # if user wants to face up self.pitch = 1/self.yawDirections[self.yaw] elif self.pitch == 0: # if it is currently facing down if pitchDirection == direction.UP: # then make it face up self.pitch = 1 else: # then it must be facing up, as it its not down or neutral if pitchDirection == direction.DOWN: self.pitch = 1 # third, we update the master rotational value self.rotation = self.yawDirections[self.yaw] * self.pitch # end of rotation update def __getBlockWithData(self, dx, dy, dz): return self.mc.getBlockWithData(self.x + dx, self.y + dy, self.z + dz) def __get_block_on_side(self, direction, onBack=False): # onBack's function serves to reverse the direction of where it is getting the block from. 
# Ex: direction = UP, onBack = True --> blockData(DOWN) if direction == 5 or direction == heading.EAST: if not onBack: return self.__getBlockWithData(1, 0, 0) else: return self.__getBlockWithData(-1, 0, 0) if direction == 4 or direction == heading.WEST: if not onBack: return self.__getBlockWithData(-1, 0, 0) else: return self.__getBlockWithData(1, 0, 0) if direction == 3 or direction == heading.SOUTH: if not onBack: return self.__getBlockWithData(0, 0, 1) else: return self.__getBlockWithData(0, 0, -1) if direction == 2 or direction == heading.NORTH: if not onBack: return self.__getBlockWithData(0, 0, -1) else: return self.__getBlockWithData(0, 0, 1) if direction == 1 or direction == heading.UP: if not onBack: return self.__getBlockWithData(0, 1, 0) else: return self.__getBlockWithData(0, -1, 0) if direction == 0 or direction == heading.DOWN: if not onBack: return self.__getBlockWithData(0, -1, 0) else: return self.__getBlockWithData(0, 1, 0) def home(self, offsetX=0, offsetY=0, offsetZ=0): # self.mc.setBlock(self.homeX + offsetX, self.homeY + # offsetY, self.homeZ + offsetZ, 33, self.rotation) # self.mc.setBlock(self.x, self.y, self.z, self.strokeBlock) # self.updatePos(self.homeX + offsetX, self.homeY + # offsetY, self.homeZ + offsetZ) self.goto(self.homeX+offsetX, self.homeY+offsetY, self.homeZ+offsetZ) def __setTurtle(self, dx, dy, dz): self.mc.setBlock(self.x + dx, self.y + dy, self.z + dz, 33, self.rotation) self.updatePos(self.x + dx, self.y + dy, self.z + dz) def __setBlock(self, dx, dy, dz, id): self.mc.setBlock(self.x + dx, self.y + dy, self.z + dz, id) def forward(self, amount): self.incomeStorage = self.__get_block_on_side(self.rotation) for i in range(0, amount): # move the piston in the specified rotation if not self.isDown: # store the block data infront into "incomeStorage" # move forward and place block in "trailStorage" # transfer the block data in "incomeStorage" into "trailStorage" self.__setBlock(0, 0, 0, self.trailStorage) else: self.__setBlock(0, 0, 0, self.strokeBlock) if self.rotation == 0: # down self.__setTurtle(0, -1, 0) if self.rotation == 1: # up self.__setTurtle(0, 1, 0) if self.rotation == 2: # north self.__setTurtle(0, 0, -1) if self.rotation == 3: # south self.__setTurtle(0, 0, 1) if self.rotation == 4: # west self.__setTurtle(-1, 0, 0) if self.rotation == 5: # east self.__setTurtle(1, 0, 0) time.sleep(self.speed) self.trailStorage = self.incomeStorage self.incomeStorage = self.__get_block_on_side(self.rotation) def fd(self, amount): self.forward(amount) def backward(self, amount): self.incomeStorage = self.__get_block_on_side(self.rotation, True) for i in range(0, amount): if not self.isDown: # store the block data infront into "incomeStorage" # move forward and place block in "trailStorage" # transfer the block data in "incomeStorage" into "trailStorage" self.__setBlock(0, 0, 0, self.trailStorage) else: self.__setBlock(0, 0, 0, self.strokeBlock) # move the piston in the specified rotation if self.rotation == 0: # down self.__setTurtle(0, 1, 0) if self.rotation == 1: # up self.__setTurtle(0, -1, 0) if self.rotation == 2: # north self.__setTurtle(0, 0, 1) if self.rotation == 3: # south self.__setTurtle(0, 0, -1) if self.rotation == 4: # west self.__setTurtle(1, 0, 0) if self.rotation == 5: # east self.__setTurtle(-1, 0, 0) time.sleep(self.speed) self.trailStorage = self.incomeStorage self.incomeStorage = self.__get_block_on_side(self.rotation, True) def bk(self, amount): self.backward(amount) def setx(self, newX): if newX > int(self.x): self.incomeStorage = 
self.__get_block_on_side(heading.EAST) for i in range(int(abs(newX-self.x))): if not self.isDown: self.__setBlock(0, 0, 0, self.trailStorage) else: self.__setBlock(0, 0, 0, self.strokeBlock) self.__setTurtle(1, 0, 0) self.trailStorage = self.incomeStorage self.incomeStorage = self.__get_block_on_side(heading.EAST) time.sleep(self.speed) else: self.incomeStorage = self.__get_block_on_side(heading.EAST, True) for i in range(int(abs(newX-self.x))): if not self.isDown: self.__setBlock(0, 0, 0, self.trailStorage) else: self.__setBlock(0, 0, 0, self.strokeBlock) self.__setTurtle(-1, 0, 0) self.trailStorage = self.incomeStorage self.incomeStorage = self.__get_block_on_side(heading.EAST, True) time.sleep(self.speed) def sety(self, newY): if newY > int(self.y): self.incomeStorage = self.__get_block_on_side(heading.UP) for i in range(int(abs(newY-self.y))): if not self.isDown: self.__setBlock(0, 0, 0, self.trailStorage) else: self.__setBlock(0, 0, 0, self.strokeBlock) self.__setTurtle(0, 1, 0) self.trailStorage = self.incomeStorage self.incomeStorage = self.__get_block_on_side(heading.UP) time.sleep(self.speed) else: self.incomeStorage = self.__get_block_on_side(heading.UP, True) for i in range(int(abs(newY-self.y))): if not self.isDown: self.__setBlock(0, 0, 0, self.trailStorage) else: self.__setBlock(0, 0, 0, self.strokeBlock) self.__setTurtle(0, -1, 0) self.trailStorage = self.incomeStorage self.incomeStorage = self.__get_block_on_side(heading.UP, True) time.sleep(self.speed) def setz(self, newZ): if newZ > int(self.z): self.incomeStorage = self.__get_block_on_side(heading.SOUTH) for i in range(int(abs(newZ-self.z))): if not self.isDown: self.__setBlock(0, 0, 0, self.trailStorage) else: self.__setBlock(0, 0, 0, self.strokeBlock) self.__setTurtle(0, 0, 1) self.trailStorage = self.incomeStorage self.incomeStorage = self.__get_block_on_side(heading.SOUTH) time.sleep(self.speed) else: self.incomeStorage = self.__get_block_on_side(heading.SOUTH, True) for i in range(int(abs(newZ-self.z))): if not self.isDown: self.__setBlock(0, 0, 0, self.trailStorage) else: self.__setBlock(0, 0, 0, self.strokeBlock) self.__setTurtle(0, 0, -1) self.trailStorage = self.incomeStorage self.incomeStorage = self.__get_block_on_side(heading.SOUTH, True) time.sleep(self.speed) def goto(self, newX, newY, newZ): # reference: https://en.wikipedia.org/wiki/Line_drawing_algorithm staticx = self.x staticy = self.y staticz = self.z deltax = newX - self.x deltay = newY - self.y deltaz = newZ - self.z for i in range(1, abs(newX - self.x)+1): if (newX-self.x) > 0: self.setx(staticx+i) self.sety(staticy+round(i*(deltay/deltax))) self.setz(staticz+round(i*(deltaz/deltax))) else: self.setx(staticx-i) self.sety(staticy-round(i*(deltay/deltax))) self.setz(staticz-round(i*(deltaz/deltax))) def setSpeed(self, newSpeed): # x blocks per second self.speed = 1/newSpeed def __rotate_turtle(self): self.mc.setBlock(self.x, self.y, self.z, 33, self.rotation) def turn(self, direction): # turn based on a circle of rotation assert direction in [direction.RIGHT, direction.LEFT, direction.UP, direction.DOWN, direction.KEEP_SAME], 'turn direction can only be RIGHT, LEFT, UP, DOWN, or KEEP_SAME.' if direction == direction.RIGHT or direction == direction.LEFT: self.__update_rotation(direction, direction.KEEP_SAME)
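# A hedged usage sketch for the turtle defined above, assuming a Minecraft
# server reachable through the mcpi API; the block id (35 = wool) and the
# square size are illustrative choices, not values from the original code.
if __name__ == "__main__":
    mc = Minecraft.create()
    start = mc.player.getTilePos()
    turtle = MCTurtle(mc, start.x, start.y + 1, start.z)
    turtle.setSpeed(4)        # move four blocks per second
    turtle.updateStroke(35)   # draw the trail with wool
    for _ in range(4):        # trace a square, turning right at each corner
        turtle.forward(5)
        turtle.turn(direction.RIGHT)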
<filename>openerp/addons/web/http.py # -*- coding: utf-8 -*- #---------------------------------------------------------- # OpenERP Web HTTP layer #---------------------------------------------------------- import ast import cgi import contextlib import functools import getpass import logging import mimetypes import os import pprint import random import sys import tempfile import threading import time import traceback import urlparse import uuid import xmlrpclib import errno import babel.core import simplejson import werkzeug.contrib.sessions import werkzeug.datastructures import werkzeug.exceptions import werkzeug.utils import werkzeug.wrappers import werkzeug.wsgi import openerp import session _logger = logging.getLogger(__name__) #---------------------------------------------------------- # RequestHandler #---------------------------------------------------------- class WebRequest(object): """ Parent class for all OpenERP Web request types, mostly deals with initialization and setup of the request object (the dispatching itself has to be handled by the subclasses) :param request: a wrapped werkzeug Request object :type request: :class:`werkzeug.wrappers.BaseRequest` .. attribute:: httprequest the original :class:`werkzeug.wrappers.Request` object provided to the request .. attribute:: httpsession a :class:`~collections.Mapping` holding the HTTP session data for the current http session .. attribute:: params :class:`~collections.Mapping` of request parameters, not generally useful as they're provided directly to the handler method as keyword arguments .. attribute:: session_id opaque identifier for the :class:`session.OpenERPSession` instance of the current request .. attribute:: session :class:`~session.OpenERPSession` instance for the current request .. attribute:: context :class:`~collections.Mapping` of context values for the current request .. attribute:: debug ``bool``, indicates whether the debug mode is active on the client """ def __init__(self, request): self.httprequest = request self.httpresponse = None self.httpsession = request.session def init(self, params): self.params = dict(params) # OpenERP session setup self.session_id = self.params.pop("session_id", None) or uuid.uuid4().hex self.session = self.httpsession.get(self.session_id) if not self.session: self.session = session.OpenERPSession() self.httpsession[self.session_id] = self.session # set db/uid trackers - they're cleaned up at the WSGI # dispatching phase in openerp.service.wsgi_server.application if self.session._db: threading.current_thread().dbname = self.session._db if self.session._uid: threading.current_thread().uid = self.session._uid self.context = self.params.pop('context', {}) self.debug = self.params.pop('debug', False) is not False # Determine self.lang lang = self.params.get('lang', None) if lang is None: lang = self.context.get('lang') if lang is None: lang = self.httprequest.cookies.get('lang') if lang is None: lang = self.httprequest.accept_languages.best if not lang: lang = 'en_US' # tranform 2 letters lang like 'en' into 5 letters like 'en_US' lang = babel.core.LOCALE_ALIASES.get(lang, lang) # we use _ as seprator where RFC2616 uses '-' self.lang = lang.replace('-', '_') def reject_nonliteral(dct): if '__ref' in dct: raise ValueError( "Non literal contexts can not be sent to the server anymore (%r)" % (dct,)) return dct class JsonRequest(WebRequest): """ JSON-RPC2 over HTTP. 
Sucessful request:: --> {"jsonrpc": "2.0", "method": "call", "params": {"session_id": "SID", "context": {}, "arg1": "val1" }, "id": null} <-- {"jsonrpc": "2.0", "result": { "res1": "val1" }, "id": null} Request producing a error:: --> {"jsonrpc": "2.0", "method": "call", "params": {"session_id": "SID", "context": {}, "arg1": "val1" }, "id": null} <-- {"jsonrpc": "2.0", "error": {"code": 1, "message": "End user error message.", "data": {"code": "codestring", "debug": "traceback" } }, "id": null} """ def dispatch(self, method): """ Calls the method asked for by the JSON-RPC2 or JSONP request :param method: the method which received the request :returns: an utf8 encoded JSON-RPC2 or JSONP reply """ args = self.httprequest.args jsonp = args.get('jsonp') requestf = None request = None request_id = args.get('id') if jsonp and self.httprequest.method == 'POST': # jsonp 2 steps step1 POST: save call self.init(args) self.session.jsonp_requests[request_id] = self.httprequest.form['r'] headers=[('Content-Type', 'text/plain; charset=utf-8')] r = werkzeug.wrappers.Response(request_id, headers=headers) return r elif jsonp and args.get('r'): # jsonp method GET request = args.get('r') elif jsonp and request_id: # jsonp 2 steps step2 GET: run and return result self.init(args) request = self.session.jsonp_requests.pop(request_id, "") else: # regular jsonrpc2 requestf = self.httprequest.stream response = {"jsonrpc": "2.0" } error = None try: # Read POST content or POST Form Data named "request" if requestf: self.jsonrequest = simplejson.load(requestf, object_hook=reject_nonliteral) else: self.jsonrequest = simplejson.loads(request, object_hook=reject_nonliteral) self.init(self.jsonrequest.get("params", {})) if _logger.isEnabledFor(logging.DEBUG): _logger.debug("--> %s.%s\n%s", method.im_class.__name__, method.__name__, pprint.pformat(self.jsonrequest)) response['id'] = self.jsonrequest.get('id') response["result"] = method(self, **self.params) except session.AuthenticationError: error = { 'code': 100, 'message': "OpenERP Session Invalid", 'data': { 'type': 'session_invalid', 'debug': traceback.format_exc() } } except xmlrpclib.Fault, e: error = { 'code': 200, 'message': "OpenERP Server Error", 'data': { 'type': 'server_exception', 'fault_code': e.faultCode, 'debug': "Client %s\nServer %s" % ( "".join(traceback.format_exception("", None, sys.exc_traceback)), e.faultString) } } except Exception: logging.getLogger(__name__ + '.JSONRequest.dispatch').exception\ ("An error occured while handling a json request") error = { 'code': 300, 'message': "OpenERP WebClient Error", 'data': { 'type': 'client_exception', 'debug': "Client %s" % traceback.format_exc() } } if error: response["error"] = error if _logger.isEnabledFor(logging.DEBUG): _logger.debug("<--\n%s", pprint.pformat(response)) if jsonp: # If we use jsonp, that's mean we are called from another host # Some browser (IE and Safari) do no allow third party cookies # We need then to manage http sessions manually. response['httpsessionid'] = self.httpsession.sid mime = 'application/javascript' body = "%s(%s);" % (jsonp, simplejson.dumps(response),) else: mime = 'application/json' body = simplejson.dumps(response) r = werkzeug.wrappers.Response(body, headers=[('Content-Type', mime), ('Content-Length', len(body))]) return r def jsonrequest(f): """ Decorator marking the decorated method as being a handler for a JSON-RPC request (the exact request path is specified via the ``$(Controller._cp_path)/$methodname`` combination. 
If the method is called, it will be provided with a :class:`JsonRequest` instance and all ``params`` sent during the JSON-RPC request, apart from the ``session_id``, ``context`` and ``debug`` keys (which are stripped out beforehand) """ f.exposed = 'json' return f class HttpRequest(WebRequest): """ Regular GET/POST request """ def dispatch(self, method): params = dict(self.httprequest.args) params.update(self.httprequest.form) params.update(self.httprequest.files) self.init(params) akw = {} for key, value in self.httprequest.args.iteritems(): if isinstance(value, basestring) and len(value) < 1024: akw[key] = value else: akw[key] = type(value) _logger.debug("%s --> %s.%s %r", self.httprequest.method, method.im_class.__name__, method.__name__, akw) try: r = method(self, **self.params) except xmlrpclib.Fault, e: r = werkzeug.exceptions.InternalServerError(cgi.escape(simplejson.dumps({ 'code': 200, 'message': "OpenERP Server Error", 'data': { 'type': 'server_exception', 'fault_code': e.faultCode, 'debug': "Server %s\nClient %s" % ( e.faultString, traceback.format_exc()) } }))) except Exception: logging.getLogger(__name__ + '.HttpRequest.dispatch').exception( "An error occurred while handling a json request") r = werkzeug.exceptions.InternalServerError(cgi.escape(simplejson.dumps({ 'code': 300, 'message': "OpenERP WebClient Error", 'data': { 'type': 'client_exception', 'debug': "Client %s" % traceback.format_exc() } }))) if self.debug or 1: if isinstance(r, (werkzeug.wrappers.BaseResponse, werkzeug.exceptions.HTTPException)): _logger.debug('<-- %s', r) else: _logger.debug("<-- size: %s", len(r)) return r def make_response(self, data, headers=None, cookies=None): """ Helper for non-HTML responses, or HTML responses with custom response headers or cookies. While handlers can just return the HTML markup of a page they want to send as a string if non-HTML data is returned they need to create a complete response object, or the returned data will not be correctly interpreted by the clients. :param basestring data: response body :param headers: HTTP headers to set on the response :type headers: ``[(name, value)]`` :param collections.Mapping cookies: cookies to set on the client """ response = werkzeug.wrappers.Response(data, headers=headers) if cookies: for k, v in cookies.iteritems(): response.set_cookie(k, v) return response def not_found(self, description=None): """ Helper for 404 response, return its result from the method """ return werkzeug.exceptions.NotFound(description) def httprequest(f): """ Decorator marking the decorated method as being a handler for a normal HTTP request (the exact request path is specified via the ``$(Controller._cp_path)/$methodname`` combination. 
If the method is called, it will be provided with a :class:`HttpRequest` instance and all ``params`` sent during the request (``GET`` and ``POST`` merged in the same dictionary), apart from the ``session_id``, ``context`` and ``debug`` keys (which are stripped out beforehand) """ f.exposed = 'http' return f #---------------------------------------------------------- # Controller registration with a metaclass #---------------------------------------------------------- addons_module = {} addons_manifest = {} controllers_class = [] controllers_class_path = {} controllers_object = {} controllers_object_path = {} controllers_path = {} class ControllerType(type): def __init__(cls, name, bases, attrs): super(ControllerType, cls).__init__(name, bases, attrs) name_class = ("%s.%s" % (cls.__module__, cls.__name__), cls) controllers_class.append(name_class) path = attrs.get('_cp_path') if path not in controllers_class_path: controllers_class_path[path] = name_class class Controller(object): __metaclass__ = ControllerType def __new__(cls, *args, **kwargs): subclasses = [c for c in cls.__subclasses__() if c._cp_path == cls._cp_path] if subclasses: name = "%s (extended by %s)" % (cls.__name__, ', '.join(sub.__name__ for sub in subclasses)) cls = type(name, tuple(reversed(subclasses)), {}) return object.__new__(cls) #---------------------------------------------------------- # Session context manager #---------------------------------------------------------- @contextlib.contextmanager def session_context(request, session_store, session_lock, sid): with session_lock: if sid: request.session = session_store.get(sid) else: request.session = session_store.new() try: yield request.session finally: # Remove all OpenERPSession instances with no uid, they're generated # either by login process or by HTTP requests without an OpenERP # session id, and are generally noise removed_sessions = set() for key, value in request.session.items(): if not isinstance(value, session.OpenERPSession): continue if getattr(value, '_suicide', False) or ( not value._uid and not value.jsonp_requests # FIXME
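# A hedged sketch of how a controller plugs into the machinery above,
# following the _cp_path / @jsonrequest conventions defined in this module;
# the route and the greeting payload are illustrative only.
class Greeting(Controller):
    _cp_path = "/web/greeting"

    @jsonrequest
    def hello(self, req, name="world"):
        # ``req`` is the JsonRequest instance built by the dispatcher; any
        # extra JSON-RPC ``params`` (session_id, context and debug already
        # stripped) arrive as keyword arguments.
        return {"message": "Hello, %s!" % name}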
"gifs": 0, "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) }) ] user_features = UserFeatures(user, tweets) self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["emoji_only_tweets_mean"]], np.mean([0, 1, 1])) def test_number_of_tweet_languages_nan(self): user_dic = { "id": 1, "name": "Test Account", "screen_name": "test_account", "location": "", "url": None, "expanded_url": None, "description": "", "protected": False, "verified": False, "followers_count": 10, "friends_count": 15, "listed_count": 2, "favourites_count": 50, "statuses_count": 9, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT), "profile_image_url_https": "", "default_profile": True, "default_profile_image": True, "withheld_in_countries": "", "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) } user = User.parse(api=None, json=user_dic) user_features = UserFeatures(user, []) self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["number_of_tweet_languages"]])) def test_number_of_tweet_languages(self): user_dic = { "id": 1, "name": "Test Account", "screen_name": "test_account", "location": "", "url": None, "expanded_url": None, "description": "", "protected": False, "verified": False, "followers_count": 10, "friends_count": 15, "listed_count": 2, "favourites_count": 50, "statuses_count": 9, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT), "profile_image_url_https": "", "default_profile": True, "default_profile_image": True, "withheld_in_countries": "", "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) } user = User.parse(api=None, json=user_dic) tweets = [ Status.parse(api=None, json={ "id": 0, "user_id": 1, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime( TWITTER_DATE_TIME_FORMAT), "text": "This is just a simple test tweet text.", "coordinates": None, "place": None, "in_reply_to_status_id": None, "in_reply_to_user_id": None, "quoted_status_id": None, "retweet_count": 2, "favorite_count": 3, "lang": "en", "withheld_copyright": False, "withheld_in_countries": None, "entities": { "urls": [] }, "source": "Twitter Web App", "videos": 0, "photos": 0, "gifs": 0, "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) }), Status.parse(api=None, json={ "id": 1, "user_id": 1, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime( TWITTER_DATE_TIME_FORMAT), "text": "This is just a simple test tweet text.", "coordinates": None, "place": None, "in_reply_to_status_id": None, "in_reply_to_user_id": None, "quoted_status_id": None, "retweet_count": 2, "favorite_count": 3, "lang": "en", "withheld_copyright": False, "withheld_in_countries": None, "entities": { "urls": [] }, "source": "Twitter Web App", "videos": 0, "photos": 0, "gifs": 0, "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) }), Status.parse(api=None, json={ "id": 2, "user_id": 1, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime( TWITTER_DATE_TIME_FORMAT), "text": "Dies ist ein Testext.", "coordinates": None, "place": None, "in_reply_to_status_id": None, "in_reply_to_user_id": None, "quoted_status_id": None, "retweet_count": 2, "favorite_count": 3, "lang": "de", "withheld_copyright": False, "withheld_in_countries": None, "entities": { "urls": [] }, "source": "Twitter Web App", "videos": 0, "photos": 0, "gifs": 0, "fetch_date": datetime.strptime("2000-01-01 
23:59:59", DATE_TIME_FORMAT) }) ] user_features = UserFeatures(user, tweets) self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["number_of_tweet_languages"]], 2) def test_most_used_tweet_language_nan(self): user_dic = { "id": 1, "name": "<NAME>", "screen_name": "test_account", "location": "", "url": None, "expanded_url": None, "description": "", "protected": False, "verified": False, "followers_count": 10, "friends_count": 15, "listed_count": 2, "favourites_count": 50, "statuses_count": 9, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT), "profile_image_url_https": "", "default_profile": True, "default_profile_image": True, "withheld_in_countries": "", "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) } user = User.parse(api=None, json=user_dic) user_features = UserFeatures(user, []) self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["most_used_tweet_language"]])) def test_most_used_tweet_language(self): user_dic = { "id": 1, "name": "Test Account", "screen_name": "test_account", "location": "", "url": None, "expanded_url": None, "description": "", "protected": False, "verified": False, "followers_count": 10, "friends_count": 15, "listed_count": 2, "favourites_count": 50, "statuses_count": 9, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT), "profile_image_url_https": "", "default_profile": True, "default_profile_image": True, "withheld_in_countries": "", "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) } user = User.parse(api=None, json=user_dic) tweets = [ Status.parse(api=None, json={ "id": 0, "user_id": 1, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime( TWITTER_DATE_TIME_FORMAT), "text": "This is just a simple test tweet text.", "coordinates": None, "place": None, "in_reply_to_status_id": None, "in_reply_to_user_id": None, "quoted_status_id": None, "retweet_count": 2, "favorite_count": 3, "lang": "en", "withheld_copyright": False, "withheld_in_countries": None, "entities": { "urls": [] }, "source": "Twitter Web App", "videos": 0, "photos": 0, "gifs": 0, "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) }), Status.parse(api=None, json={ "id": 1, "user_id": 1, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime( TWITTER_DATE_TIME_FORMAT), "text": "This is just a simple test tweet text.", "coordinates": None, "place": None, "in_reply_to_status_id": None, "in_reply_to_user_id": None, "quoted_status_id": None, "retweet_count": 2, "favorite_count": 3, "lang": "en", "withheld_copyright": False, "withheld_in_countries": None, "entities": { "urls": [] }, "source": "Twitter Web App", "videos": 0, "photos": 0, "gifs": 0, "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) }), Status.parse(api=None, json={ "id": 2, "user_id": 1, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime( TWITTER_DATE_TIME_FORMAT), "text": "Dies ist ein Testext.", "coordinates": None, "place": None, "in_reply_to_status_id": None, "in_reply_to_user_id": None, "quoted_status_id": None, "retweet_count": 2, "favorite_count": 3, "lang": "de", "withheld_copyright": False, "withheld_in_countries": None, "entities": { "urls": [] }, "source": "Twitter Web App", "videos": 0, "photos": 0, "gifs": 0, "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) }) ] user_features = UserFeatures(user, tweets) 
self.assertEqual(user_features[USER_FEATURES_INDEX["most_used_tweet_language"]], 41.0) def test_pagination_tweets_mean_nan(self): user_dic = { "id": 1, "name": "Test Account", "screen_name": "test_account", "location": "", "url": None, "expanded_url": None, "description": "", "protected": False, "verified": False, "followers_count": 10, "friends_count": 15, "listed_count": 2, "favourites_count": 50, "statuses_count": 9, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT), "profile_image_url_https": "", "default_profile": True, "default_profile_image": True, "withheld_in_countries": "", "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) } user = User.parse(api=None, json=user_dic) user_features = UserFeatures(user, []) self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["pagination_tweets_mean"]])) def test_pagination_tweets_mean(self): user_dic = { "id": 1, "name": "Test Account", "screen_name": "test_account", "location": "", "url": None, "expanded_url": None, "description": "", "protected": False, "verified": False, "followers_count": 10, "friends_count": 15, "listed_count": 2, "favourites_count": 50, "statuses_count": 9, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT), "profile_image_url_https": "", "default_profile": True, "default_profile_image": True, "withheld_in_countries": "", "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) } user = User.parse(api=None, json=user_dic) tweets = [ Status.parse(api=None, json={ "id": 0, "user_id": 1, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime( TWITTER_DATE_TIME_FORMAT), "text": "This is just a simple test tweet text.", "coordinates": None, "place": None, "in_reply_to_status_id": None, "in_reply_to_user_id": None, "quoted_status_id": None, "retweet_count": 2, "favorite_count": 3, "lang": "en", "withheld_copyright": False, "withheld_in_countries": None, "entities": { "urls": [] }, "source": "Twitter Web App", "videos": 0, "photos": 0, "gifs": 0, "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) }), Status.parse(api=None, json={ "id": 1, "user_id": 1, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime( TWITTER_DATE_TIME_FORMAT), "text": "2/2 This is just a simple test tweet text.", "coordinates": None, "place": None, "in_reply_to_status_id": None, "in_reply_to_user_id": None, "quoted_status_id": None, "retweet_count": 2, "favorite_count": 3, "lang": "en", "withheld_copyright": False, "withheld_in_countries": None, "entities": { "urls": [] }, "source": "Twitter Web App", "videos": 0, "photos": 0, "gifs": 0, "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) }), Status.parse(api=None, json={ "id": 2, "user_id": 1, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime( TWITTER_DATE_TIME_FORMAT), "text": "This is just a simple test tweet text. 
(2/2)", "coordinates": None, "place": None, "in_reply_to_status_id": None, "in_reply_to_user_id": None, "quoted_status_id": None, "retweet_count": 2, "favorite_count": 3, "lang": "en", "withheld_copyright": False, "withheld_in_countries": None, "entities": { "urls": [] }, "source": "Twitter Web App", "videos": 0, "photos": 0, "gifs": 0, "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) }) ] user_features = UserFeatures(user, tweets) self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["pagination_tweets_mean"]], np.mean([0, 1, 1])) def test_own_tweets_text_similarity_mean_nan(self): user_dic = { "id": 1, "name": "Test Account", "screen_name": "test_account", "location": "", "url": None, "expanded_url": None, "description": "", "protected": False, "verified": False, "followers_count": 10, "friends_count": 15, "listed_count": 2, "favourites_count": 50, "statuses_count": 9, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT), "profile_image_url_https": "", "default_profile": True, "default_profile_image": True, "withheld_in_countries": "", "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) } user = User.parse(api=None, json=user_dic) user_features = UserFeatures(user, []) self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["own_tweets_text_similarity_mean"]])) def test_own_tweets_text_similarity_mean(self): user_dic = { "id": 1, "name": "Test Account", "screen_name": "test_account", "location": "", "url": None, "expanded_url": None, "description": "", "protected": False, "verified": False, "followers_count": 10, "friends_count": 15, "listed_count": 2, "favourites_count": 50, "statuses_count": 9, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT), "profile_image_url_https": "", "default_profile": True, "default_profile_image": True, "withheld_in_countries": "", "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) } user = User.parse(api=None, json=user_dic) tweets = [ Status.parse(api=None, json={ "id": 0, "user_id": 1, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime( TWITTER_DATE_TIME_FORMAT), "text": "This is just a simple test tweet text.", "coordinates": None, "place": None, "in_reply_to_status_id": None, "in_reply_to_user_id": None, "quoted_status_id": None, "retweet_count": 2, "favorite_count": 3, "lang": "en", "withheld_copyright": False, "withheld_in_countries": None, "entities": { "urls": [] }, "source": "Twitter Web App", "videos": 0, "photos": 0, "gifs": 0, "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) }), Status.parse(api=None, json={ "id": 1, "user_id": 1, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime( TWITTER_DATE_TIME_FORMAT), "text": "This is just a simple test tweet text.", "coordinates": None, "place": None, "in_reply_to_status_id": None, "in_reply_to_user_id": None, "quoted_status_id": None, "retweet_count": 2, "favorite_count": 3, "lang": "en", "withheld_copyright": False, "withheld_in_countries": None, "entities": { "urls": [] }, "source": "Twitter Web App", "videos": 0, "photos": 0, "gifs": 0, "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) }), Status.parse(api=None, json={ "id": 2, "user_id": 1, "created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime( TWITTER_DATE_TIME_FORMAT), "text": "This is just a simple test tweet text. 
#test", "coordinates": None, "place": None, "in_reply_to_status_id": None, "in_reply_to_user_id": None, "quoted_status_id": None, "retweet_count": 2, "favorite_count": 3, "lang": "en", "withheld_copyright": False, "withheld_in_countries": None, "entities": { "urls": [] }, "source": "Twitter Web App", "videos": 0, "photos": 0, "gifs": 0, "fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT) }) ] tweet_features_1 = TweetFeatures(tweets[0], user)[TWEET_TEXT_SIMILARITY_FEATURES] tweet_features_2 = TweetFeatures(tweets[1], user)[TWEET_TEXT_SIMILARITY_FEATURES] tweet_features_3 = TweetFeatures(tweets[2], user)[TWEET_TEXT_SIMILARITY_FEATURES] similarity = cosine_similarity([tweet_features_1, tweet_features_2, tweet_features_3]) user_features = UserFeatures(user, tweets) self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["own_tweets_text_similarity_mean"]], similarity.mean()) def test_own_tweets_text_similarity_mean_2(self): user_dic = { "id": 1, "name": "Test Account", "screen_name": "test_account", "location": "", "url": None, "expanded_url": None, "description": "", "protected": False, "verified": False, "followers_count": 10, "friends_count": 15, "listed_count": 2, "favourites_count":
# MIT License # # Copyright (c) 2020 Sixshaman # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import urllib.request import re from bs4 import BeautifulSoup header_license = """\ /******************************************************************************** MIT License Copyright (c) 2020 Sixshaman Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
********************************************************************************/ """ header_start_h = """\ #ifndef VULKAN_GENERIC_STRUCTURES_H #define VULKAN_GENERIC_STRUCTURES_H #include <vulkan/vulkan.h> #include <vector> #include <unordered_map> #include <cassert> namespace vgs { using VulkanStructureType = VkStructureType; template<typename VkStruct> constexpr VulkanStructureType ValidStructureType = VK_STRUCTURE_TYPE_MAX_ENUM; """ header_start_hpp = """\ #ifndef VULKAN_GENERIC_STRUCTURES_HPP #define VULKAN_GENERIC_STRUCTURES_HPP #include <vulkan/vulkan.hpp> #include <vector> #include <unordered_map> #include <cassert> namespace vgs { using VulkanStructureType = vk::StructureType; template<typename VkStruct> constexpr VulkanStructureType ValidStructureType = VkStruct::structureType; """ header_stype_init_h = """\ inline void InitSType(void* ptr, ptrdiff_t offset, VulkanStructureType value) { memcpy((std::byte*)ptr + offset, &value, sizeof(VulkanStructureType)); } """ header_stype_init_hpp = """\ inline void InitSType(void* ptr, ptrdiff_t offset, VulkanStructureType value) { } """ header_end = """\ //========================================================================================================================== //Base interface for StructureBlob(owning type-erased structure) and GenericStructureView(non-owning type-erased structure) class GenericStructBase { protected: GenericStructBase(); GenericStructBase(std::byte* data, size_t dataSize, ptrdiff_t pNextOffset, ptrdiff_t sTypeOffset); ~GenericStructBase(); public: template<typename Struct> Struct& GetDataAs(); std::byte* GetStructureData() const; size_t GetStructureSize() const; ptrdiff_t GetPNextOffset() const; void* GetPNext() const; ptrdiff_t GetSTypeOffset() const; VulkanStructureType GetSType() const; protected: std::byte* StructureData; size_t StructureSize; ptrdiff_t PNextPointerOffset; ptrdiff_t STypeOffset; }; inline GenericStructBase::GenericStructBase(): StructureData(nullptr), StructureSize(0), PNextPointerOffset(0), STypeOffset(0) { } inline GenericStructBase::GenericStructBase(std::byte* data, size_t dataSize, ptrdiff_t pNextOffset, ptrdiff_t sTypeOffset): StructureData(data), StructureSize(dataSize), PNextPointerOffset(pNextOffset), STypeOffset(sTypeOffset) { } inline GenericStructBase::~GenericStructBase() { } template<typename Struct> inline Struct& GenericStructBase::GetDataAs() { assert(GetStructureSize() == sizeof(Struct)); assert(StructureData != nullptr); Struct* structureData = reinterpret_cast<Struct*>(StructureData); return *structureData; } inline std::byte* GenericStructBase::GetStructureData() const { assert(GetStructureSize() != 0); return StructureData; } inline size_t GenericStructBase::GetStructureSize() const { return StructureSize; } inline ptrdiff_t GenericStructBase::GetPNextOffset() const { return PNextPointerOffset; } inline void* GenericStructBase::GetPNext() const { assert(PNextPointerOffset + sizeof(void*) <= GetStructureSize()); void* pNext = nullptr; memcpy(&pNext, StructureData + PNextPointerOffset, sizeof(void*)); return pNext; } inline ptrdiff_t GenericStructBase::GetSTypeOffset() const { return STypeOffset; } inline VulkanStructureType GenericStructBase::GetSType() const { assert(STypeOffset + sizeof(VulkanStructureType) <= GetStructureSize()); VulkanStructureType sType; memcpy(&sType, StructureData + STypeOffset, sizeof(VulkanStructureType)); return sType; } 
//========================================================================================================================== //Non-owning version of a generic structure class GenericStruct: public GenericStructBase { public: template<typename Struct> GenericStruct(Struct& structure); //The copy constructor should be template-specialized, because <const GenericStructureView&> can be passed as a <Struct&> template<> GenericStruct(const GenericStruct& right); GenericStruct& operator=(const GenericStruct& right); }; template<typename Struct> inline GenericStruct::GenericStruct(Struct& structure): GenericStructBase(reinterpret_cast<std::byte*>(&structure), sizeof(Struct), offsetof(Struct, pNext), offsetof(Struct, sType)) { } template<> inline GenericStruct::GenericStruct(const GenericStruct& right): GenericStructBase(right.StructureData, right.StructureSize, right.PNextPointerOffset, right.STypeOffset) { } inline GenericStruct& GenericStruct::operator=(const GenericStruct& right) { StructureData = right.StructureData; StructureSize = right.StructureSize; STypeOffset = right.STypeOffset; PNextPointerOffset = right.PNextPointerOffset; return *this; } //Creates a GenericStruct and automatically fills in sType (a side effect which is undesireable in constructors) template<typename Struct> inline GenericStruct TransmuteTypeToSType(Struct& structure) { InitSType(&structure, offsetof(Struct, sType), ValidStructureType<Struct>); return GenericStruct(structure); } //========================================================================================================================== //Owning version of a generic structure class StructureBlob: public GenericStructBase { public: StructureBlob(); StructureBlob(const StructureBlob& right); StructureBlob& operator=(const StructureBlob& right); template<typename Struct> StructureBlob(const Struct& structure); private: std::vector<std::byte> StructureBlobData; }; inline StructureBlob::StructureBlob() { PNextPointerOffset = 0; STypeOffset = 0; StructureData = nullptr; StructureSize = 0; } template<typename Struct> inline StructureBlob::StructureBlob(const Struct& structure) { static_assert(std::is_trivially_destructible<Struct>::value, "Structure blob contents must be trivially destructible"); PNextPointerOffset = offsetof(Struct, pNext); STypeOffset = offsetof(Struct, sType); StructureBlobData.resize(sizeof(Struct)); memcpy(StructureBlobData.data(), &structure, sizeof(Struct)); StructureData = StructureBlobData.data(); StructureSize = StructureBlobData.size(); //Init sType and set pNext to null VulkanStructureType structureType = ValidStructureType<Struct>; InitSType(StructureBlobData.data(), STypeOffset, structureType); void* nullPNext = nullptr; memcpy(StructureBlobData.data() + PNextPointerOffset, &nullPNext, sizeof(void*)); } inline StructureBlob::StructureBlob(const StructureBlob& right) { *this = right; } inline StructureBlob& StructureBlob::operator=(const StructureBlob& right) { StructureBlobData.assign(right.StructureBlobData.begin(), right.StructureBlobData.end()); StructureData = StructureBlobData.data(); StructureSize = StructureBlobData.size(); STypeOffset = right.STypeOffset; PNextPointerOffset = right.PNextPointerOffset; assert(PNextPointerOffset + sizeof(void*) <= StructureBlobData.size()); //Zero out PNext memset(StructureBlobData.data() + PNextPointerOffset, 0, sizeof(void*)); return *this; } //========================================================================================================================== //Base class for a 
generic structure chain, hiding chain link type info template<typename HeadType> class GenericStructureChainBase { protected: GenericStructureChainBase(); ~GenericStructureChainBase(); public: HeadType& GetChainHead(); template<typename Struct> Struct& GetChainLinkDataAs(); public: GenericStructureChainBase(const GenericStructureChainBase& rhs) = delete; GenericStructureChainBase& operator=(const GenericStructureChainBase& rhs) = delete; protected: std::vector<std::byte*> StructureDataPointers; std::vector<ptrdiff_t> PNextPointerOffsets; std::vector<ptrdiff_t> STypeOffsets; std::unordered_map<VulkanStructureType, size_t> StructureDataIndices; }; template<typename HeadType> inline GenericStructureChainBase<HeadType>::GenericStructureChainBase() { } template<typename HeadType> inline GenericStructureChainBase<HeadType>::~GenericStructureChainBase() { } template<typename HeadType> inline HeadType& GenericStructureChainBase<HeadType>::GetChainHead() { assert(StructureDataPointers.size() > 0); assert(StructureDataPointers[0] != nullptr); HeadType* head = reinterpret_cast<HeadType*>(StructureDataPointers[0]); return *head; } template<typename HeadType> template<typename Struct> inline Struct& GenericStructureChainBase<HeadType>::GetChainLinkDataAs() { Struct* structPtr = reinterpret_cast<Struct*>(StructureDataPointers[StructureDataIndices.at(ValidStructureType<Struct>)]); return *structPtr; } //========================================================================================================================== //Generic structure chain, non-owning version template<typename HeadType> class GenericStructureChain: public GenericStructureChainBase<HeadType> { using GenericStructureChainBase<HeadType>::StructureDataPointers; using GenericStructureChainBase<HeadType>::STypeOffsets; using GenericStructureChainBase<HeadType>::PNextPointerOffsets; using GenericStructureChainBase<HeadType>::StructureDataIndices; public: GenericStructureChain(); GenericStructureChain(HeadType& head); ~GenericStructureChain(); //Clears everything EXCEPT head void Clear(); template<typename Struct> void AppendToChain(Struct& next); void AppendToChainGeneric(GenericStructBase& nextBlobData); public: GenericStructureChain(const GenericStructureChain& rhs) = delete; GenericStructureChain& operator=(const GenericStructureChain& rhs) = delete; private: void AppendDataToChain(void* dataPtr, size_t sTypeOffset, size_t pNextOffset, VulkanStructureType sType); protected: HeadType HeadData; }; template<typename HeadType> inline GenericStructureChain<HeadType>::GenericStructureChain() { //Init HeadData's sType and pNext InitSType(&HeadData, offsetof(HeadData, sType), ValidStructureType<HeadType>); HeadData.pNext = nullptr; StructureDataPointers.push_back(reinterpret_cast<std::byte*>(&HeadData)); STypeOffsets.push_back(offsetof(HeadType, sType)); PNextPointerOffsets.push_back(offsetof(HeadType, pNext)); StructureDataIndices[ValidStructureType<HeadType>] = 0; } template<typename HeadType> inline GenericStructureChain<HeadType>::GenericStructureChain(HeadType& head) { HeadData = head; InitSType(&HeadData, offsetof(HeadData, sType), ValidStructureType<HeadType>); HeadData.pNext = nullptr; //Head is always the first pointer StructureDataPointers.push_back(reinterpret_cast<std::byte*>(&HeadData)); STypeOffsets.push_back(offsetof(HeadType, sType)); PNextPointerOffsets.push_back(offsetof(HeadType, pNext)); StructureDataIndices[ValidStructureType<HeadType>] = 0; } template<typename HeadType> inline 
GenericStructureChain<HeadType>::~GenericStructureChain() { } template<typename HeadType> inline void GenericStructureChain<HeadType>::Clear() { //Just reset the pointers StructureDataPointers.clear(); PNextPointerOffsets.clear(); StructureDataIndices.clear(); HeadData.pNext = nullptr; StructureDataPointers.push_back(&HeadData); STypeOffsets.push_back(offsetof(HeadType, sType)); PNextPointerOffsets.push_back(offsetof(HeadType, pNext)); StructureDataIndices[ValidStructureType<HeadType>] = 0; } template<typename HeadType> template<typename Struct> inline void GenericStructureChain<HeadType>::AppendToChain(Struct& next) { AppendDataToChain(&next, offsetof(Struct, sType), offsetof(Struct, pNext), ValidStructureType<Struct>); } template<typename HeadType> inline void GenericStructureChain<HeadType>::AppendToChainGeneric(GenericStructBase& nextBlobData) { AppendDataToChain(nextBlobData.GetStructureData(), nextBlobData.GetSTypeOffset(), nextBlobData.GetPNextOffset(), nextBlobData.GetSType()); } template<typename HeadType> inline void GenericStructureChain<HeadType>::AppendDataToChain(void* dataPtr, size_t sTypeOffset, size_t pNextOffset, VulkanStructureType sType) { std::byte* prevLastStruct = StructureDataPointers.back(); ptrdiff_t prevPNextOffset = PNextPointerOffsets.back(); StructureDataPointers.push_back(reinterpret_cast<std::byte*>(dataPtr)); STypeOffsets.push_back(sTypeOffset); PNextPointerOffsets.push_back(pNextOffset); std::byte* currLastStructPtr = StructureDataPointers.back(); InitSType(dataPtr, sTypeOffset, sType); //Set sType of the current struct memcpy(prevLastStruct + prevPNextOffset, &currLastStructPtr, sizeof(std::byte*)); //Set pNext pointer of the previous struct StructureDataIndices[sType] = StructureDataPointers.size() - 1; } //========================================================================================================================== //Generic structure chain, owning version template<typename HeadType> class StructureChainBlob: public GenericStructureChainBase<HeadType> { using GenericStructureChainBase<HeadType>::StructureDataPointers; using GenericStructureChainBase<HeadType>::STypeOffsets; using GenericStructureChainBase<HeadType>::PNextPointerOffsets; using GenericStructureChainBase<HeadType>::StructureDataIndices; public: StructureChainBlob(); StructureChainBlob(const HeadType& head); ~StructureChainBlob(); //Clears everything EXCEPT head void Clear(); template<typename Struct> void AppendToChain(const Struct& next); void AppendToChainGeneric(const GenericStructBase& nextBlobData); public: StructureChainBlob(const StructureChainBlob& rhs) = delete; StructureChainBlob& operator=(const StructureChainBlob& rhs) = delete; private: void AppendDataToBlob(const std::byte* data, size_t dataSize, const void* dataPNext, ptrdiff_t dataSTypeOffset, ptrdiff_t dataPNextOffset, VulkanStructureType sType); private: std::vector<std::byte> StructureChainBlobData; }; template<typename HeadType> inline StructureChainBlob<HeadType>::StructureChainBlob() { static_assert(std::is_trivially_destructible<HeadType>::value, "All members of the structure chain blob must be trivially destructible"); //Store head in the blob StructureChainBlobData.resize(sizeof(HeadType)); HeadType head; InitSType(&head, offsetof(HeadType, sType), ValidStructureType<HeadType>); head.pNext = nullptr; memcpy(StructureChainBlobData.data(), &head, sizeof(HeadType)); StructureDataPointers.push_back(StructureChainBlobData.data()); STypeOffsets.push_back(offsetof(HeadType, sType)); 
PNextPointerOffsets.push_back(offsetof(HeadType, pNext)); StructureDataIndices[ValidStructureType<HeadType>] = 0; } template<typename HeadType> inline StructureChainBlob<HeadType>::StructureChainBlob(const HeadType& head) { static_assert(std::is_trivially_destructible<HeadType>::value, "All members of the structure chain blob must be trivially destructible"); //Store head in the blob StructureChainBlobData.resize(sizeof(HeadType)); memcpy(StructureChainBlobData.data(), &head, sizeof(HeadType)); StructureDataPointers.push_back(StructureChainBlobData.data()); STypeOffsets.push_back(offsetof(HeadType, sType)); PNextPointerOffsets.push_back(offsetof(HeadType, pNext)); VulkanStructureType headSType = ValidStructureType<HeadType>; void* headPNext = nullptr; InitSType(StructureDataPointers.back(), STypeOffsets.back(), headSType); memcpy(StructureDataPointers.back() + PNextPointerOffsets.back(), &headPNext, sizeof(void*)); StructureDataIndices[ValidStructureType<HeadType>] = 0; } template<typename HeadType> inline StructureChainBlob<HeadType>::~StructureChainBlob() { } template<typename HeadType> inline void StructureChainBlob<HeadType>::Clear() { //Save the head HeadType& headData = GenericStructureChainBase<HeadType>::GetChainHead(); std::vector<std::byte> oldHead(sizeof(HeadType)); memcpy(oldHead.data(), &headData, sizeof(HeadType)); //Clear everything StructureChainBlobData.clear(); StructureDataPointers.clear(); STypeOffsets.clear(); PNextPointerOffsets.clear(); StructureDataIndices.clear(); //Reinit StructureChainBlobData.resize(sizeof(HeadType)); memcpy(StructureChainBlobData.data(), oldHead.data(), sizeof(HeadType)); StructureDataPointers.push_back(StructureChainBlobData.data()); STypeOffsets.push_back(offsetof(HeadType, sType)); PNextPointerOffsets.push_back(offsetof(HeadType, pNext)); StructureDataIndices[ValidStructureType<HeadType>] = 0; } template<typename HeadType> template<typename Struct> inline void StructureChainBlob<HeadType>::AppendToChain(const Struct& next) { static_assert(std::is_trivially_destructible<Struct>::value, "All members of the structure chain blob must be trivially destructible"); AppendDataToBlob((const std::byte*)(&next), sizeof(Struct), next.pNext, offsetof(Struct, sType), offsetof(Struct, pNext), ValidStructureType<Struct>); } template<typename HeadType> inline void StructureChainBlob<HeadType>::AppendToChainGeneric(const GenericStructBase& nextBlobData) { AppendDataToBlob(nextBlobData.GetStructureData(), nextBlobData.GetStructureSize(), nextBlobData.GetPNext(), nextBlobData.GetSTypeOffset(), nextBlobData.GetPNextOffset(), nextBlobData.GetSType()); } template<typename HeadType> inline void StructureChainBlob<HeadType>::AppendDataToBlob(const std::byte* data, size_t dataSize, const void* dataPNext, ptrdiff_t dataSTypeOffset, ptrdiff_t dataPNextOffset, VulkanStructureType sType) { size_t prevDataSize = StructureChainBlobData.size(); size_t nextDataOffset = prevDataSize; //Copy all current structures to the new chain, and append new structure std::vector<std::byte> newStructureChainData(prevDataSize + dataSize); memcpy(newStructureChainData.data(), StructureChainBlobData.data(), prevDataSize); memcpy(newStructureChainData.data() + prevDataSize, data, dataSize); //Initialize sType InitSType(newStructureChainData.data() + prevDataSize, dataSTypeOffset, sType); //Rebuild StructureDataPointers std::vector<ptrdiff_t> structureDataOffsets(StructureDataPointers.size()); for(size_t i = 0; i < StructureDataPointers.size(); i++) { structureDataOffsets[i] = 
(StructureDataPointers[i] - &StructureChainBlobData[0]); } StructureDataPointers.clear(); for(size_t i = 0; i < structureDataOffsets.size(); i++) { StructureDataPointers.push_back(newStructureChainData.data() + structureDataOffsets[i]); } StructureDataPointers.push_back(newStructureChainData.data() + nextDataOffset); STypeOffsets.push_back(dataSTypeOffset); PNextPointerOffsets.push_back(dataPNextOffset); //Invalidate pNext pointers for(size_t i = 0; i < PNextPointerOffsets.size() - 1; i++) { void** currPPNext = (void**)(StructureDataPointers[i] + PNextPointerOffsets[i]); memcpy(currPPNext, &StructureDataPointers[i + 1], sizeof(void*)); } //Invalidate the last pNext pointer with the provided one std::byte* pLastStruct = StructureDataPointers.back(); memcpy(pLastStruct + dataPNextOffset, &dataPNext, sizeof(void*)); //Only use move semantics, because copy semantics will make pNext pointers invalid once again StructureChainBlobData = std::move(newStructureChainData); //Make sure all pNext point to inside of StructureChainBlobData. The last pointer can point to whatever the user specified for(size_t i = 0; i < PNextPointerOffsets.size() - 1; i++) { void* pNextPointer = nullptr; memcpy(&pNextPointer, StructureDataPointers[i] + PNextPointerOffsets[i], sizeof(void*)); //Init the pointer data assert(pNextPointer >= &StructureChainBlobData[0] && pNextPointer < (&StructureChainBlobData[0] + StructureChainBlobData.size())); //Move semantics should never break pNext pointers, they should always point to inside the blob } StructureDataIndices[sType] = StructureDataPointers.size() - 1; } } #endif """ def open_vk_spec(url): with urllib.request.urlopen(url) as response: spec_data = response.read() return spec_data.decode("utf8") def parse_stypes(spec_contents): spec_soup = BeautifulSoup(spec_contents, features="xml") spec_platform_defines = {} spec_platforms_block = spec_soup.find("platforms") if spec_platforms_block is not None: spec_platform_tags = spec_platforms_block.find_all("platform") for platform_tag in spec_platform_tags: spec_platform_defines[platform_tag["name"]] = platform_tag["protect"] spec_struct_extensions = {} extension_define_names = {} extension_blocks = spec_soup.find_all("extension") for extension_block in extension_blocks: extension_name = extension_block["name"] extension_define_tag = extension_block.find("enum", {"value": re.compile(".*" + extension_name +".*")}) if extension_define_tag is None: continue extension_define_name = extension_define_tag["name"] if extension_define_name is None: continue extension_define_names[extension_name] =
Rune if you have not detonated Runes in the past 1.5 seconds", "Frostblink has 75% increased maximum travel distance", "15% increased Rallying Cry Buff Effect", "Tectonic Slam deals 25% increased Damage", "25% increased Creeping Frost Damage", "Earthshatter deals 25% increased Damage", "Wintertide Brand deals 25% increased Damage", "Ancestral Cry has a minimum of 10 Power", "Penance Brand deals 25% increased Damage", "Sunder has 15% reduced delay between Areas in the Wave", "Rallying Cry Exerts 1 additional Attack", "Intimidating Cry has 16% increased Area of Effect", "Attacks Exerted by Seismic Cry deal 35% increased Damage ", "Earthquake deals 8% increased Damage per 0.1 seconds Duration", "Bear Trap has 15% increased Cooldown Recovery Rate", "General's Cry has 30% increased Cooldown Recovery Rate", "Mirror Arrow has 30% increased Cooldown Recovery Rate", "Flicker Strike has 20% increased Cooldown Recovery Rate", "General's Cry has 20% increased Cooldown Recovery Rate", "Sniper's Mark has 20% increased Curse Effect", "40% increased Lightning Trap Lightning Ailment Effect", "Enduring Cry has 20% increased Cooldown Recovery Rate", "Infernal Cry has 30% increased Cooldown Recovery Rate", "Mirror Arrow has 20% increased Cooldown Recovery Rate", "Crackling Lance has 24% increased branching angle", "Lancing Steel has 20% chance count as consumeing Steel Shards without Consuming them", "Splitting Steel has 16% increased Area of Effect", "Crackling Lance has 8% increased Cast Speed", "Discharge has +3 to Radius", "Void Sphere has 20% increased Cooldown Recovery Rate", "Enemies in Void Sphere's range take up to 6% increased Damage, based on distance from the Void Sphere", "Flame Dash has 20% increased Cooldown Recovery Rate", "Blazing Salvo deals 25% increased Damage", "Shattering Steel has 20% chance to not consume Steel Shards", "Glacial Cascade gains 6% of Physical Damage as Extra Cold Damage", "Enemies in Void Sphere's range take up to 10% increased Damage, based on distance from the Void Sphere", "Hexblast has 16% increased Area of Effect", "Splitting Steel has 20% chance to not consume Steel Shards", "Crackling Lance has 18% increased branching angle", "Lancing Steel has 30% chance count as consuming Steel Shards without Consuming them", "16% increased Storm Burst Area of Effect", "Wither has 16% increased Area of Effect", "Rune Blast teleports you to the detonated Rune if you have not detonated Runes in the past 1 second", "30% increased Warlord's Mark Duration", "Earthshatter has 16% increased Area of Effect", "Vengeance has 20% increased Cooldown Recovery Rate", "24% increased Searing Bond Totem Elemental Resistances", "6% chance to Dodge Attack Hits while at maximum Blade Flurry stages", "16% increased Ice Shot Area of Effect", "10% increased Whirling Blades Attack Speed", "30% increased Despair Duration", "16% increased Kinetic Blast Area of Effect", "20% increased Poacher's Mark Curse Effect", "40% increased Lightning Tendrils Critical Strike Chance", "Stone Golems deal 25% increased Damage", "Withering Step has 20% increased Elusive Effect", "25% increased Power Siphon Damage", "25% increased Cold Snap Damage", "25% increased Ice Crash Damage", "60% increased Flame Surge Critical Strike Chance", "10% increased Power Siphon Attack Speed", "25% increased Blade Flurry Damage", "10% increased Shockwave Totem Cast Speed", "Lancing Steel deals 25% increased Damage", "16% increased Shock Nova Area of Effect", "25% increased Kinetic Blast Damage", "25% increased Fireball Damage", "Charged 
Dash has +4 to Radius of each Wave's last damage Area", "Penance Brand has 16% increased Area of Effect", "20% increased Frost Blades Projectile Speed", "Blood Rage grants additional 20% chance to gain a Frenzy Charge on Kill", "24% increased Convocation Buff Effect", "30% increased Vigilant Strike Fortify Duration", "24% increased Frost Wall Duration", "20% chance to Summon an additional Skeleton Warrior with Summon Skeleton", "Lightning Strike pierces 2 additional Targets", "25% increased Detonate Dead Damage", "Venom Gyre deals 25% increased Damage", "10% of Galvanic Arrow Physical Damage gained as extra Lightning Damage", "25% increased Static Strike Damage", "25% increased Leap Slam Damage", "Elemental Hit deals 25% increased Damage", "16% increased Static Strike Area of Effect", "20% chance for Phase Run to increase Duration without removing Frenzy Charges", "Hexblast deals 25% increased Damage", "+24% to Lightning Golem Elemental Resistances", "Explosive Arrow deals 25% increased Damage", "16% increased Infernal Blow Area of Effect", "Flamethrower Trap has 20% increased Skill Effect Duration", "Armageddon Brand deals 25% increased Damage", "20% increased Elemental Weakness Curse Effect", "Explosive Trap deals 25% increased Damage", "Herald of Ice has 20% reduced Mana Reservation", "Conversion Trap 20% increased Cooldown Recovery Rate", "Purity of Lightning has 14% reduced Mana Reservation", "Scourge Arrow has 6% chance to Poison per Stage", "Earthquake deals 5% increased Damage per 0.1 seconds Duration", "8% increased Bodyswap Cast Speed", "Kinetic Bolt has 20% increased Projectile Speed", "25% increased Shock Nova Damage", "Split Arrow fires 2 additional Projectiles", "Frost Bomb has 20% increased Cooldown Recovery Rate", "20% reduced Storm Call Duration", "30% increased Punishment Duration", "Kinetic Bolt changes direction 1 additional time", "Frost Wall has 20% increased Cooldown Recovery Rate", "16% increased Detonate Dead Area of Effect", "Cobra Lash deals 25% increased Damage", "100% increased Effect of the Buff granted by your Lightning Golems", "Toxic Rain gains 6% of Physical Damage as Extra Chaos Damage", "25% increased Tornado Shot Damage", "Consecrated Path has 10% increased teleport range", "16% increased Cold Snap Area of Effect", "25% increased Burning Arrow Damage", "Dread Banner has 25% increased Aura Effect", "25% increased Volatile Dead Damage", "20% increased Conductivity Curse Effect", "Power Siphon fires an additional Projectile", "25% increased Barrage Damage", "16% increased Ancestral Warchief Totem Area of Effect", "8% increased Cremation Cast Speed", "Explosive Trap has 16% increased Area of Effect", "40% increased Decoy Totem Life", "Storm Brand deals 25% increased Damage", "Tempest Shield chains an additional 2 times", "Burning Arrow has 16% increased Debuff Effect", "24% increased Creeping Frost Duration", "Splitting Steel deals 25% increased Damage", "Cold Snap has 20% increased Cooldown Recovery Rate", "Hatred has 10% reduced Mana Reservation", "30% increased Flesh Offering Duration", "25% increased Sweep Damage", "20% increased Spectral Shield Throw Projectile Speed", "10% increased Dual Strike Attack Speed", "Blazing Salvo Projectiles land in a 20% increased Area", "Seismic Trap deals 25% increased Damage", "40% increased Lacerate Critical Strike Chance", "25% increased Split Arrow Damage", "Storm Brand Damage Penetrates 8% of Branded Enemy's Lightning Resistance", "20% increased Animate Weapon Duration", "Plague Bearer Buff grants +12% to Poison 
Damage over Time Multiplier while Infecting", "30% increased Conductivity Duration", "25% increased Searing Bond Damage", "24% increased Arctic Armour Buff Effect", "20% increased Smoke Mine Duration", "25% increased Ice Trap Damage", "20% increased Temporal Chains Curse Effect", "25% increased Herald of Thunder Damage", "25% increased Spectral Throw Damage", "25% increased Herald of Ash Damage", "10% increased Cleave Attack Speed", "25% increased Heavy Strike Damage", "Burning Arrow has +20% chance to Ignite", "25% increased Wild Strike Damage", "25% increased Cyclone Damage", "20% increased Viper Strike Duration", "25% increased Magma Orb Damage", "25% increased Shield Charge Damage", "Heavy Strike has a 8% chance to deal Double Damage", "25% increased Whirling Blades Damage", "25% increased Storm Call Damage", "24% increased Devouring Totem Leech per second", "25% increased Vengeance Damage", "25% increased Earthquake Damage", "25% increased Flame Surge Damage", "16% increased Decoy Totem Area of Effect", "Glacial Hammer has +20% chance to Freeze", "Animated Guardians deal 25% increased Damage", "+24% to Ancestral Protector Totem Elemental Resistances", "75% increased Effect of the Buff granted by your Chaos Golems", "25% increased Flameblast Damage", "30% increased Poacher's Mark Duration", "25% increased Incinerate Damage", "25% increased Flame Dash Damage", "8% increased Volatile Dead Cast Speed", "60% increased Split Arrow Critical Strike Chance", "Summon Raging Spirit has 12% chance to summon an extra Minion", "Fireball has +20% chance to Ignite", "10% of Infernal Blow Physical Damage gained as Extra Fire Damage", "40% increased Flame Surge Damage against Burning Enemies", "20% increased Despair Curse Effect", "8% increased Scorching Ray Cast Speed", "20% increased Spectral Throw Projectile Speed", "25%
documented below. :param pulumi.Input[str] fingerprint: Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. :param pulumi.Input[str] health_checks: The set of URLs to HealthCheck resources for health checking this RegionBackendService. Currently at most one health check can be specified. A health check must be specified unless the backend service uses an internet or serverless NEG as a backend. :param pulumi.Input[pulumi.InputType['RegionBackendServiceIapArgs']] iap: Settings for enabling Cloud Identity Aware Proxy Structure is documented below. :param pulumi.Input[str] load_balancing_scheme: Indicates what kind of load balancing this regional backend service will be used for. A backend service created for one type of load balancing cannot be used with the other(s). Default value is `INTERNAL`. Possible values are `EXTERNAL`, `INTERNAL`, and `INTERNAL_MANAGED`. :param pulumi.Input[str] locality_lb_policy: The load balancing algorithm used within the scope of the locality. The possible values are - * ROUND_ROBIN - This is a simple policy in which each healthy backend is selected in round robin order. * LEAST_REQUEST - An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. * RING_HASH - The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. * RANDOM - The load balancer selects a random healthy host. * ORIGINAL_DESTINATION - Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. * MAGLEV - used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, refer to https://ai.google/research/pubs/pub44824 This field is applicable only when the `load_balancing_scheme` is set to INTERNAL_MANAGED and the `protocol` is set to HTTP, HTTPS, or HTTP2. Possible values are `ROUND_ROBIN`, `LEAST_REQUEST`, `RING_HASH`, `RANDOM`, `ORIGINAL_DESTINATION`, and `MAGLEV`. :param pulumi.Input[pulumi.InputType['RegionBackendServiceLogConfigArgs']] log_config: This field denotes the logging options for the load balancer traffic served by this backend service. If logging is enabled, logs will be exported to Stackdriver. Structure is documented below. :param pulumi.Input[str] name: Name of the cookie. :param pulumi.Input[str] network: The URL of the network to which this backend service belongs. This field can only be specified when the load balancing scheme is set to INTERNAL. :param pulumi.Input[pulumi.InputType['RegionBackendServiceOutlierDetectionArgs']] outlier_detection: Settings controlling eviction of unhealthy hosts from the load balancing pool. This field is applicable only when the `load_balancing_scheme` is set to INTERNAL_MANAGED and the `protocol` is set to HTTP, HTTPS, or HTTP2. Structure is documented below. :param pulumi.Input[str] port_name: A named port on a backend instance group representing the port for communication to the backend VMs in that group. Required when the loadBalancingScheme is EXTERNAL, INTERNAL_MANAGED, or INTERNAL_SELF_MANAGED and the backends are instance groups. 
The named port must be defined on each backend instance group. This parameter has no meaning if the backends are NEGs. API sets a default of "http" if not given. Must be omitted when the loadBalancingScheme is INTERNAL (Internal TCP/UDP Load Balancing). :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] protocol: The protocol this RegionBackendService uses to communicate with backends. The default is HTTP. **NOTE**: HTTP2 is only valid for beta HTTP/2 load balancer types and may result in errors if used with the GA API. Possible values are `HTTP`, `HTTPS`, `HTTP2`, `SSL`, `TCP`, `UDP`, `GRPC`, and `UNSPECIFIED`. :param pulumi.Input[str] region: The Region in which the created backend service should reside. If it is not provided, the provider region is used. :param pulumi.Input[str] self_link: The URI of the created resource. :param pulumi.Input[str] session_affinity: Type of session affinity to use. The default is NONE. Session affinity is not applicable if the protocol is UDP. Possible values are `NONE`, `CLIENT_IP`, `CLIENT_IP_PORT_PROTO`, `CLIENT_IP_PROTO`, `GENERATED_COOKIE`, `HEADER_FIELD`, and `HTTP_COOKIE`. :param pulumi.Input[int] timeout_sec: How many seconds to wait for the backend before considering it a failed request. Default is 30 seconds. Valid range is [1, 86400]. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _RegionBackendServiceState.__new__(_RegionBackendServiceState) __props__.__dict__["affinity_cookie_ttl_sec"] = affinity_cookie_ttl_sec __props__.__dict__["backends"] = backends __props__.__dict__["cdn_policy"] = cdn_policy __props__.__dict__["circuit_breakers"] = circuit_breakers __props__.__dict__["connection_draining_timeout_sec"] = connection_draining_timeout_sec __props__.__dict__["consistent_hash"] = consistent_hash __props__.__dict__["creation_timestamp"] = creation_timestamp __props__.__dict__["description"] = description __props__.__dict__["enable_cdn"] = enable_cdn __props__.__dict__["failover_policy"] = failover_policy __props__.__dict__["fingerprint"] = fingerprint __props__.__dict__["health_checks"] = health_checks __props__.__dict__["iap"] = iap __props__.__dict__["load_balancing_scheme"] = load_balancing_scheme __props__.__dict__["locality_lb_policy"] = locality_lb_policy __props__.__dict__["log_config"] = log_config __props__.__dict__["name"] = name __props__.__dict__["network"] = network __props__.__dict__["outlier_detection"] = outlier_detection __props__.__dict__["port_name"] = port_name __props__.__dict__["project"] = project __props__.__dict__["protocol"] = protocol __props__.__dict__["region"] = region __props__.__dict__["self_link"] = self_link __props__.__dict__["session_affinity"] = session_affinity __props__.__dict__["timeout_sec"] = timeout_sec return RegionBackendService(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="affinityCookieTtlSec") def affinity_cookie_ttl_sec(self) -> pulumi.Output[Optional[int]]: """ Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value for TTL is one day. When the load balancing scheme is INTERNAL, this field is not used. 
""" return pulumi.get(self, "affinity_cookie_ttl_sec") @property @pulumi.getter def backends(self) -> pulumi.Output[Optional[Sequence['outputs.RegionBackendServiceBackend']]]: """ The set of backends that serve this RegionBackendService. Structure is documented below. """ return pulumi.get(self, "backends") @property @pulumi.getter(name="cdnPolicy") def cdn_policy(self) -> pulumi.Output['outputs.RegionBackendServiceCdnPolicy']: """ Cloud CDN configuration for this BackendService. Structure is documented below. """ return pulumi.get(self, "cdn_policy") @property @pulumi.getter(name="circuitBreakers") def circuit_breakers(self) -> pulumi.Output[Optional['outputs.RegionBackendServiceCircuitBreakers']]: """ Settings controlling the volume of connections to a backend service. This field is applicable only when the `load_balancing_scheme` is set to INTERNAL_MANAGED and the `protocol` is set to HTTP, HTTPS, or HTTP2. Structure is documented below. """ return pulumi.get(self, "circuit_breakers") @property @pulumi.getter(name="connectionDrainingTimeoutSec") def connection_draining_timeout_sec(self) -> pulumi.Output[Optional[int]]: """ Time for which instance will be drained (not accept new connections, but still work to finish started). """ return pulumi.get(self, "connection_draining_timeout_sec") @property @pulumi.getter(name="consistentHash") def consistent_hash(self) -> pulumi.Output[Optional['outputs.RegionBackendServiceConsistentHash']]: """ Consistent Hash-based load balancing can be used to provide soft session affinity based on HTTP headers, cookies or other properties. This load balancing policy is applicable only for HTTP connections. The affinity to a particular destination host will be lost when one or more hosts are added/removed from the destination service. This field specifies parameters that control consistent hashing. This field only applies when all of the following are true - """ return pulumi.get(self, "consistent_hash") @property @pulumi.getter(name="creationTimestamp") def creation_timestamp(self) -> pulumi.Output[str]: """ Creation timestamp in RFC3339 text format. """ return pulumi.get(self, "creation_timestamp") @property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: """ An optional description of this resource. Provide this property when you create the resource. """ return pulumi.get(self, "description") @property @pulumi.getter(name="enableCdn") def enable_cdn(self) -> pulumi.Output[Optional[bool]]: """ If true, enable Cloud CDN for this RegionBackendService. """ return pulumi.get(self, "enable_cdn") @property @pulumi.getter(name="failoverPolicy") def failover_policy(self) -> pulumi.Output[Optional['outputs.RegionBackendServiceFailoverPolicy']]: """ Policy for failovers. Structure is documented below. """ return pulumi.get(self, "failover_policy") @property @pulumi.getter def fingerprint(self) -> pulumi.Output[str]: """ Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. """ return pulumi.get(self, "fingerprint") @property @pulumi.getter(name="healthChecks") def health_checks(self) -> pulumi.Output[Optional[str]]: """ The set of URLs to HealthCheck resources for health checking this RegionBackendService. Currently at most one health check can be specified. A health check must be specified unless the backend service uses an internet or serverless NEG as a backend. 
""" return pulumi.get(self, "health_checks") @property @pulumi.getter def iap(self) -> pulumi.Output[Optional['outputs.RegionBackendServiceIap']]: """ Settings for enabling Cloud Identity Aware Proxy Structure is documented below. """ return pulumi.get(self, "iap") @property @pulumi.getter(name="loadBalancingScheme") def load_balancing_scheme(self) -> pulumi.Output[Optional[str]]: """ Indicates what kind of load balancing this regional backend service will be used for. A backend service created for one type of load balancing cannot be used with the other(s). Default value is `INTERNAL`. Possible values are `EXTERNAL`, `INTERNAL`, and
string by x number of characters.""" indented_string = '' for line in string.splitlines(): indented_string += '%s%s\n' % ((' ' * chars), line) # Strip the ending '\n' and return result. return indented_string[0:-1] def is_binary_file(file_path, bytes_to_read=1024): """Return true if the file looks like a binary file.""" file_extension = os.path.splitext(file_path)[1].lower() if file_extension in BINARY_EXTENSIONS: return True if file_extension in TEXT_EXTENSIONS: return False text_characters = list(map(chr, list(range(32, 128)))) + ['\r', '\n', '\t'] try: with open(file_path, 'rb') as file_handle: data = file_handle.read(bytes_to_read) except: logs.log_error('Could not read file %s in is_binary_file.' % file_path) return None binary_data = [char for char in data if char not in text_characters] return len(binary_data) > len(data) * 0.1 def is_recursive_call(): """Returns true if the caller function is called recursively.""" try: stack_frames = inspect.stack() caller_name = stack_frames[1][3] for stack_frame_index in range(2, len(stack_frames)): if caller_name == stack_frames[stack_frame_index][3]: return True except: pass return False def is_valid_testcase_file(file_path, check_if_exists=True, size_limit=None, allowed_extensions=None): """Return true if the file looks like a testcase file.""" filename = os.path.basename(file_path) if filename.startswith('.') or filename.startswith(FUZZ_PREFIX): return False if allowed_extensions: file_extension = os.path.splitext(file_path)[1].lower() if file_extension not in allowed_extensions: return False directories_to_ignore = ['.git', '.hg', '.svn'] for directory_to_ignore in directories_to_ignore: directory_string = '%s%s%s' % (os.sep, directory_to_ignore, os.sep) if directory_string in file_path: return False if (check_if_exists or size_limit) and not os.path.exists(file_path): return False if size_limit and os.path.getsize(file_path) > size_limit: return False return True def maximum_parallel_processes_allowed(): """Return maxium number of parallel processes allowed. Adjust it based on thread multiplier.""" if environment.is_trusted_host(): # gRPC only supports 1 thread/process. return 1 max_parallel_process_count = environment.get_value('MAX_FUZZ_THREADS', 1) thread_multiplier = environment.get_value('THREAD_MULTIPLIER', 1) max_parallel_process_count *= thread_multiplier return int(max_parallel_process_count) def normalize_path(path): """Normalize path. This is needed on windows because windows' paths are case-insensitive.""" return os.path.normcase(os.path.normpath(path)) def python_gc(): """Call python's garbage collector.""" # gc_collect isn't perfectly synchronous, because it may # break reference cycles that then take time to fully # finalize. Call it thrice and hope for the best. 
for _ in range(3): gc.collect() def random_element_from_list(element_list): """Returns a random element from list.""" return element_list[random.SystemRandom().randint(0, len(element_list) - 1)] def random_number(start, end): """Returns a random number between start and end.""" return random.SystemRandom().randint(start, end) # pylint: disable=inconsistent-return-statements def random_weighted_choice(element_list, weight_attribute='weight'): """Returns a random element from list taking its weight into account.""" total = sum(getattr(e, weight_attribute) for e in element_list) random_pick = random.SystemRandom().uniform(0, total) temp = 0 for element in element_list: element_weight = getattr(element, weight_attribute) if element_weight == 0: continue if temp + element_weight >= random_pick: return element temp += element_weight assert False, 'Failed to make a random weighted choice.' def read_data_from_file(file_path, eval_data=True, default=None): """Returns eval-ed data from file.""" if not os.path.exists(file_path): return default failure_wait_interval = environment.get_value('FAIL_WAIT') file_content = None retry_limit = environment.get_value('FAIL_RETRIES') for _ in range(retry_limit): try: with open(file_path, 'rb') as file_handle: file_content = file_handle.read() except: file_content = None logs.log_warn('Error occurred while reading %s, retrying.' % file_path) time.sleep(random.uniform(1, failure_wait_interval)) continue if file_content is None: logs.log_error('Failed to read data from file %s.' % file_path) return None if not eval_data: return file_content if not file_content: return default try: return ast.literal_eval(file_content) except (SyntaxError, TypeError): return None def remove_prefix(string, prefix): """Strips the prefix from a string.""" if string.startswith(prefix): return string[len(prefix):] return string def remove_sub_strings(string, substrings): """Strips substrings from a given string.""" result = string for substring in substrings: result = result.replace(substring, '') return result def restart_machine(): """Restart machine.""" if environment.platform() == 'WINDOWS': os.system('shutdown /f /r /t 0') else: # POSIX platforms. os.system('sudo shutdown -r now') def search_string_in_file(search_string, file_handle): """Helper to search for a string in a large binary file without memory issues. """ # TODO(aarya): This is too brittle and will fail if we have a very large line. for line in file_handle: if search_string in line: return True return False def string_hash(obj): """Returns a SHA-1 hash of the object. Not used for security purposes.""" return hashlib.sha1(str(obj)).hexdigest() def entity_hash(obj): """Returns a deterministic hash of a ndb entity. If an entity has been recently modified, put() must be called on it before this function will pick up the changes. 
""" hasher = hashlib.sha1() entity_dict = obj.to_dict() for key in sorted(entity_dict.keys()): hasher.update(str(entity_dict[key])) return hasher.hexdigest() def string_is_true(value): """Check to see if a string has a value that should be treated as True.""" return value and value != 'false' and value != 'False' and value != '0' def strip_from_left(string, prefix): """Strip a prefix from start from string.""" if not string.startswith(prefix): return string return string[len(prefix):] def strip_from_right(string, suffix): """Strip a suffix from end of string.""" if not string.endswith(suffix): return string return string[:len(string) - len(suffix)] def sub_string_exists_in(substring_list, string): """Return true if one of the substring in the list is found in |string|.""" for substring in substring_list: if substring in string: return True return False def time_difference_string(timestamp): """Return time difference as a string.""" if not timestamp: return '' delta = int((datetime.datetime.utcnow() - timestamp).total_seconds()) d_minutes = delta // 60 d_hours = d_minutes // 60 d_days = d_hours // 24 if d_days > 6: return '%s' % str(timestamp).split()[0] if d_days > 1: return '%s days ago' % d_days # starts at 2 days. if d_hours > 1: return '%s hours ago' % d_hours # starts at 2 hours. if d_minutes > 1: return '%s minutes ago' % d_minutes if d_minutes > 0: return '1 minute ago' if delta > -30: return 'moments ago' # Only say something is in the future if it is more than just clock skew. return 'in the future' def timeout(duration): """Timeout decorator for functions.""" def decorator(func): """Decorates the given function.""" if environment.is_running_on_app_engine(): # multiprocessing doesn't work on App Engine. return func @functools.wraps(func) def _wrapper(*args, **kwargs): """Wrapper.""" # FIXME: Weird exceptions in imports, might be something relating to our # reload module. Needs furthur investigation, try this as a temporary fix. import multiprocessing.pool import threading # Fix for Python < 2.7.2. if not hasattr(threading.current_thread(), '_children'): # pylint: disable=protected-access threading.current_thread()._children = weakref.WeakKeyDictionary() global THREAD_POOL if THREAD_POOL is None: THREAD_POOL = multiprocessing.pool.ThreadPool(processes=3) try: from datastore import ndb_init # Avoid circular import. async_result = THREAD_POOL.apply_async( ndb_init.thread_wrapper(func), args=args, kwds=kwargs) return async_result.get(timeout=duration) except multiprocessing.TimeoutError: # Sleep for some minutes in order to wait for flushing metrics. time.sleep(120) # If we don't exit here, we will cause threads to pile up and leading to # out-of-memory. Safe to just exit here. logs.log_fatal_and_exit( ('Exception occurred in function {0}: args: {1}, kwargs: {2}' ' exception: {3}').format(func, args, kwargs, sys.exc_info()[1])) return _wrapper return decorator def wait_until_timeout(threads, thread_timeout): """Wait for all threads to finish unless the given timeout is reached. If no thread is alive, it waits much shorter than the given timeout. Return True if timeout is exceeded, and return False otherwise. 
""" thread_alive_check_interval = environment.get_value( 'THREAD_ALIVE_CHECK_INTERVAL') if not thread_alive_check_interval: time.sleep(thread_timeout) return False wait_timeout = time.time() + thread_timeout while time.time() < wait_timeout: time.sleep(thread_alive_check_interval) thread_alive = False for thread in threads: if thread.is_alive(): thread_alive = True break if not thread_alive: return False return True def write_data_to_file(content, file_path, append=False): """Writes data to file.""" failure_wait_interval = environment.get_value('FAIL_WAIT') file_mode = 'ab' if append else 'wb' retry_limit = environment.get_value('FAIL_RETRIES') # TODO(mbarbella): One extra iteration is allowed for the type conversion hack # included here. Once this function is converted to only accept bytes-like # objects, it should be adjusted back to the normal retry limit. for _ in range(retry_limit + 1): try: with open(file_path, file_mode) as file_handle: file_handle.write(content) except TypeError: # If we saw a TypeError, content was not bytes-like. Convert it. content = str(content).encode('utf-8') continue except EnvironmentError: # An EnvironmentError signals a problem writing the file. Retry in case # it was a spurious error. logs.log_warn('Error occurred while writing %s, retrying.' % file_path) time.sleep(random.uniform(1, failure_wait_interval)) continue # Successfully written data file. return logs.log_error('Failed to write data to file %s.' % file_path) @memoize.wrap(memoize.FifoInMemory(1)) def default_backup_bucket(): """Return the default backup bucket for this instance of ClusterFuzz.""" # Do not use |BACKUP_BUCKET| environment variable as that is the overridden # backup bucket from job type and is not the default backup bucket. return local_config.ProjectConfig().get('env.BACKUP_BUCKET') @memoize.wrap(memoize.FifoInMemory(1)) def default_project_name(): """Return the default project name for this instance of ClusterFuzz.""" # Do not use |PROJECT_NAME| environment variable as that is the overridden # project name from job type and is not the default
check that formats are added only when CWL can resolve references # FIXME: no format is back-propagated from WPS format to CWL at the moment # (https://github.com/crim-ca/weaver/issues/50) "wps_only_format_exists": "File", "wps_only_format_not_exists": "File", "wps_only_format_both": "File", "cwl_only_format_exists": {"type": "File", "format": type_json}, # non-existing schema references should not be provided directly in CWL # since these would enforce raising the validation error directly... # "cwl_only_format_not_exists": {"type": "File", "format": ct_not_exists} }, "outputs": {"dont_care": "File"}, "$namespaces": dict(list(ns_json.items())) }}], } desc, pkg = self.deploy_process(body, describe_schema=PROCESS_SCHEMA_OLD) proc = desc["process"] assert proc["inputs"][0]["id"] == "wps_only_format_exists" assert len(proc["inputs"][0]["formats"]) == 1 assert proc["inputs"][0]["formats"][0]["mediaType"] == CONTENT_TYPE_APP_JSON assert pkg["inputs"][0]["id"] == "wps_only_format_exists" assert pkg["inputs"][0]["type"] == "File" # FIXME: back-propagate WPS format to CWL without format specified # (https://github.com/crim-ca/weaver/issues/50) # assert pkg["inputs"][0]["format"] == type_json assert proc["inputs"][1]["id"] == "wps_only_format_not_exists" assert len(proc["inputs"][1]["formats"]) == 1 assert proc["inputs"][1]["formats"][0]["mediaType"] == ct_not_exists assert pkg["inputs"][1]["id"] == "wps_only_format_not_exists" assert pkg["inputs"][1]["type"] == "File" assert "format" not in pkg["inputs"][1], "Non-existing CWL format reference should have been dropped." assert proc["inputs"][2]["id"] == "wps_only_format_both" assert len(proc["inputs"][2]["formats"]) == 2 assert proc["inputs"][2]["formats"][0]["mediaType"] == CONTENT_TYPE_APP_JSON assert proc["inputs"][2]["formats"][1]["mediaType"] == ct_not_exists assert pkg["inputs"][2]["id"] == "wps_only_format_both" assert pkg["inputs"][2]["type"] == "File" # FIXME: for now we don't even back-propagate, but if we did, must be none because one is unknown reference # (https://github.com/crim-ca/weaver/issues/50) assert "format" not in pkg["inputs"][2], "Any non-existing CWL format reference should drop all entries." assert proc["inputs"][3]["id"] == "cwl_only_format_exists" assert len(proc["inputs"][3]["formats"]) == 1 assert proc["inputs"][3]["formats"][0]["mediaType"] == CONTENT_TYPE_APP_JSON assert pkg["inputs"][3]["id"] == "cwl_only_format_exists" assert pkg["inputs"][3]["type"] == "File" assert pkg["inputs"][3]["format"] == type_json desc = self.describe_process(self._testMethodName, describe_schema=PROCESS_SCHEMA_OGC) assert len(desc["inputs"]["wps_only_format_exists"]["formats"]) == 1 assert desc["inputs"]["wps_only_format_exists"]["formats"][0]["mediaType"] == CONTENT_TYPE_APP_JSON assert len(desc["inputs"]["wps_only_format_not_exists"]["formats"]) == 1 assert desc["inputs"]["wps_only_format_not_exists"]["formats"][0]["mediaType"] == ct_not_exists assert len(desc["inputs"]["wps_only_format_both"]["formats"]) == 2 assert desc["inputs"]["wps_only_format_both"]["formats"][0]["mediaType"] == CONTENT_TYPE_APP_JSON assert desc["inputs"]["wps_only_format_both"]["formats"][1]["mediaType"] == ct_not_exists def test_deploy_merge_mediatype_io_format_references(self): """ Test to validates ``mimeType`` is replaced by ``mediaType`` for all descriptions. Also we validate that processes that use ``mimeType`` or ``mediaType`` can be deployed successfully. 
""" ns_json, type_json = get_cwl_file_format(CONTENT_TYPE_APP_JSON) namespaces = dict(list(ns_json.items())) body = { "processDescription": { "process": { "id": self._testMethodName, "title": "some title", "abstract": "this is a test", "inputs": [ { "id": "wps_format_mimeType", "formats": [ { "mimeType": CONTENT_TYPE_APP_JSON, "default": True, } ] }, { "id": "wps_format_mediaType", "formats": [ { "mediaType": CONTENT_TYPE_APP_JSON, "default": True, } ] }, ], "outputs": [ { "id": "wps_format_mimeType", "formats": [{"mediaType": CONTENT_TYPE_APP_JSON}], }, { "id": "wps_format_mediaType", "formats": [{"mediaType": CONTENT_TYPE_APP_JSON}], }, ], }, }, "deploymentProfileName": "http://www.opengis.net/profiles/eoc/wpsApplication", "executionUnit": [{ "unit": { "cwlVersion": "v1.0", "class": "CommandLineTool", "inputs": [ { "id": "wps_format_mimeType", "type": "File", "format": type_json, }, { "id": "wps_format_mediaType", "type": "File", "format": type_json, }, ], "outputs": [ { "id": "wps_format_mimeType", "type": "File", "format": type_json, }, { "id": "wps_format_mediaType", "type": "File", "format": type_json, }, ], "$namespaces": namespaces } }] } desc, _ = self.deploy_process(body, describe_schema=PROCESS_SCHEMA_OLD) proc = desc["process"] assert proc["inputs"][0]["id"] == "wps_format_mimeType" assert proc["inputs"][0]["formats"][0]["mediaType"] == CONTENT_TYPE_APP_JSON assert proc["inputs"][1]["id"] == "wps_format_mediaType" assert proc["inputs"][1]["formats"][0]["mediaType"] == CONTENT_TYPE_APP_JSON assert proc["outputs"][0]["id"] == "wps_format_mimeType" assert proc["outputs"][0]["formats"][0]["mediaType"] == CONTENT_TYPE_APP_JSON assert proc["outputs"][1]["id"] == "wps_format_mediaType" assert proc["outputs"][1]["formats"][0]["mediaType"] == CONTENT_TYPE_APP_JSON desc = self.describe_process(self._testMethodName, describe_schema=PROCESS_SCHEMA_OGC) assert desc["inputs"]["wps_format_mimeType"]["formats"][0]["mediaType"] == CONTENT_TYPE_APP_JSON assert desc["inputs"]["wps_format_mediaType"]["formats"][0]["mediaType"] == CONTENT_TYPE_APP_JSON assert desc["outputs"]["wps_format_mimeType"]["formats"][0]["mediaType"] == CONTENT_TYPE_APP_JSON assert desc["outputs"]["wps_format_mediaType"]["formats"][0]["mediaType"] == CONTENT_TYPE_APP_JSON def test_deploy_block_builtin_processes_from_api(self): """ Test to validates if ``builtin`` process type is explicitly blocked during deployment from API. .. 
versionchanged:: 4.2 """ cwl = { "cwlVersion": "v1.0", "class": "CommandLineTool", "baseCommand": ["python3"], "inputs": { "stringInput": "string" }, "requirements": { CWL_REQUIREMENT_APP_DOCKER: { "dockerPull": "python:3.7-alpine" }, }, "outputs": [], } body = { "processDescription": { "process": { "id": self._testMethodName, "title": "some title", "abstract": "this is a test", "type": PROCESS_BUILTIN, }, }, "deploymentProfileName": "http://www.opengis.net/profiles/eoc/wpsApplication", "executionUnit": [{"unit": cwl}], } with contextlib.ExitStack() as stack_exec: for mock_exec in mocked_execute_process(): stack_exec.enter_context(mock_exec) resp = mocked_sub_requests(self.app, "post_json", "/processes", data=body, timeout=5, headers=self.json_headers, only_local=True, expect_errors=True) # With Weaver<=4.1.x, the 'type' was explicitly checked to block it since Deploy payload was kept as is # This field was allowed to trickle all they way down to the instantiation of Process object # assert resp.status_code == 200 # With Weaver>4.1.x, the deserialized result from Deploy payload is employed, which drops unknown 'type' # Ensure that deploy now succeeds, but the obtained Process is not 'builtin' (just a regular application) assert resp.status_code == 201 assert PROCESS_BUILTIN not in resp.json["processSummary"]["keywords"] process = self.process_store.fetch_by_id(self._testMethodName) assert process.type == PROCESS_APPLICATION def test_deploy_block_unknown_processes(self): """ Test to validates that any process that cannot be resolved against one of known :py:data:`weaver.processes.constants.CWL_REQUIREMENT_APP_TYPES` is explicitly blocked. """ cwl = { "cwlVersion": "v1.0", "class": "CommandLineTool", "baseCommand": ["python3"], "inputs": { "stringInput": "string" }, "requirements": { CWL_REQUIREMENT_APP_DOCKER: {"dockerPull": "python:3.7-alpine"}, "InlineJavascriptRequirement": {}, "ResourceRequirement": {"ramMin": 10240, "coresMin": 3} }, "outputs": [], } body = { "processDescription": { "process": { "id": self._testMethodName, "title": "some title", "abstract": "this is a test", }, }, "deploymentProfileName": "http://www.opengis.net/profiles/eoc/wpsApplication", "executionUnit": [{"unit": cwl}], } with contextlib.ExitStack() as stack_exec: for mock_exec in mocked_execute_process(): stack_exec.enter_context(mock_exec) resp = mocked_sub_requests(self.app, "post_json", "/processes", data=body, timeout=5, headers=self.json_headers, only_local=True, expect_errors=True) assert resp.status_code == 422 def test_deploy_merge_complex_io_with_multiple_formats_and_defaults(self): """ Test validates that different format types are set on different input variations simultaneously: - input with 1 format, single value, no default value - input with 1 format, array values, no default value - input with 1 format, single value, 1 default value - input with 1 format, array values, 1 default value - input with many formats, single value, no default value - input with many formats, array values, no default value - input with many formats, single value, 1 default value - input with many formats, array values, 1 default value In the case of outputs, CWL 'format' refers to 'applied' format instead of 'supported' format. Therefore, 'format' field is omitted if >1 supported format is specified in WPS to avoid incompatibilities. 
- output with 1 format, single value (has format in CWL and WPS) - output with 1 format, array values (has format in CWL and WPS) - output with many formats, single value (no format in CWL, WPS formats must be provided) - output with many formats, array values (no format in CWL, WPS formats must be provided) In addition, the test evaluates that: - CWL I/O specified as list preserves the specified ordering - CWL 'default' "value" doesn't interfere with WPS 'default' "format" and vice-versa - partial WPS definition of I/O format to indicate 'default' are resolved with additional CWL I/O formats - min/max occurrences are solved accordingly to single/array values and 'default' if not overridden by WPS NOTE: field 'default' in CWL refers to default "value", in WPS refers to default "format" for complex inputs """ ns_json, type_json = get_cwl_file_format(CONTENT_TYPE_APP_JSON) ns_text, type_text = get_cwl_file_format(CONTENT_TYPE_TEXT_PLAIN) ns_ncdf, type_ncdf = get_cwl_file_format(CONTENT_TYPE_APP_NETCDF) namespaces = dict(list(ns_json.items()) + list(ns_text.items()) + list(ns_ncdf.items())) default_file = "https://server.com/file" cwl = { "cwlVersion": "v1.0", "class": "CommandLineTool", "inputs": [ { "id": "single_value_single_format", "type": "File", "format": type_json, }, { "id": "multi_value_single_format", "type": { "type": "array", "items": "File", }, "format": type_text, }, { "id": "single_value_single_format_default", "type": "File", "format": type_ncdf, "default": default_file, }, { "id": "multi_value_single_format_default", "type": { "type": "array", "items": "File", }, "format": type_text, "default": default_file, }, { "id": "single_value_multi_format", "type": "File", "format": [type_json, type_text, type_ncdf], }, { "id": "multi_value_multi_format", "type": { "type": "array", "items": "File", }, "format": [type_ncdf, type_text, type_json], }, { "id": "single_value_multi_format_default", "type": "File", "format": [type_json, type_text, type_ncdf], "default": default_file, }, { "id": "multi_value_multi_format_default", "type": { "type": "array", "items": "File", }, "format": [type_json, type_text, type_ncdf], "default": default_file, }, ], "outputs": [ { "id": "single_value_single_format", "type": "File", "format": type_json, }, { "id": "single_value_multi_format", "type": "File", # NOTE: # not valid to have array of format for output as per: # https://github.com/common-workflow-language/common-workflow-language/issues/482 # WPS payload must specify them # "format": [type_json, type2, type3] }, # FIXME: multiple output (array) not implemented (https://github.com/crim-ca/weaver/issues/25) # { # "id": "multi_value_single_format", # "type": { # "type": "array", # "items": "File", # }, # "format": type3, # }, # { # "id": "multi_value_multi_format", # "type": { # "type": "array", # "items": "File", # }, # # NOTE: # # not valid to have array of format for output as per: # # https://github.com/common-workflow-language/common-workflow-language/issues/482 # # WPS payload must specify them # "format": [type3, type2, type_json], # }, ],
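# A minimal sketch, assuming get_cwl_file_format() returns a (namespace-mapping,
# format-reference) pair as its usage in the tests above suggests; it shows how that
# pair is spliced into a CWL document fragment. Input/output ids are illustrative.
ns_json, type_json = get_cwl_file_format(CONTENT_TYPE_APP_JSON)

example_cwl_fragment = {
    "cwlVersion": "v1.0",
    "class": "CommandLineTool",
    "inputs": [{"id": "data", "type": "File", "format": type_json}],
    "outputs": [{"id": "result", "type": "File", "format": type_json}],
    "$namespaces": dict(ns_json),
}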
<filename>src/LineageTree/lineageTree.py #!python # This file is subject to the terms and conditions defined in # file 'LICENCE', which is part of this source code package. # Author: <NAME> (<EMAIL>) from scipy.spatial import cKDTree as KDTree import os import xml.etree.ElementTree as ET from copy import copy from scipy import spatial import numpy as np from multiprocessing import Pool from scipy.spatial import Delaunay from itertools import combinations from numbers import Number import struct import sys from scipy.spatial.distance import cdist class lineageTree(object): def get_next_id(self): """ Computes the next authorized id. Returns: int: next authorized id """ if self.next_id == []: self.max_id += 1 return self.max_id else: return self.next_id.pop() def add_node(self, t=None, succ=None, pos=None, id=None, reverse=False): """ Adds a node to the lineageTree and update it accordingly. Args: t (int): int, time to which to add the node succ (int): id of the node the new node is a successor to pos ([float, ]): list of three floats representing the 3D spatial position of the node id (int): id value of the new node, to be used carefully, if None is provided the new id is automatically computed. reverse (bool): True if in this lineageTree the predecessors are the successors and reciprocally. This is there for bacward compatibility, should be left at False. Returns: int: id of the new node. """ if id is None: C_next = self.get_next_id() else: C_next = id self.time_nodes.setdefault(t, []).append(C_next) if not succ is None and not reverse: self.successor.setdefault(succ, []).append(C_next) self.predecessor.setdefault(C_next, []).append(succ) self.edges.append((succ, C_next)) elif not succ is None: self.predecessor.setdefault(succ, []).append(C_next) self.successor.setdefault(C_next, []).append(succ) self.edges.append((C_next, succ)) else: self.roots.append(C_next) self.nodes.append(C_next) self.pos[C_next] = pos self.progeny[C_next] = 0 self.time[C_next] = t return C_next def remove_node(self, c): """ Removes a node and update the lineageTree accordingly Args: c (int): id of the node to remove """ self.nodes.remove(c) self.time_nodes[self.time[c]].remove(c) # self.time_nodes.pop(c, 0) pos = self.pos.pop(c, 0) e_to_remove = [e for e in self.edges if c in e] for e in e_to_remove: self.edges.remove(e) if c in self.roots: self.roots.remove(c) succ = self.successor.pop(c, []) s_to_remove = [s for s, ci in self.successor.items() if c in ci] for s in s_to_remove: self.successor[s].remove(c) pred = self.predecessor.pop(c, []) p_to_remove = [s for s, ci in self.predecessor.items() if ci == c] for s in p_to_remove: self.predecessor[s].remove(c) self.time.pop(c, 0) self.spatial_density.pop(c, 0) self.next_id.append(c) return e_to_remove, succ, s_to_remove, pred, p_to_remove, pos def fuse_nodes(self, c1, c2): """ Fuses together two nodes that belong to the same time point and update the lineageTree accordingly. 
Args: c1 (int): id of the first node to fuse c2 (int): id of the second node to fuse """ e_to_remove, succ, s_to_remove, pred, p_to_remove, c2_pos = self.remove_node(c2) for e in e_to_remove: new_e = [c1] + [other_c for other_c in e if other_c != c2] self.edges.append(new_e) self.successor.setdefault(c1, []).extend(succ) self.predecessor.setdefault(c1, []).extend(pred) for s in s_to_remove: self.successor[s].append(c1) for p in p_to_remove: self.predecessor[p].append(c1) self.pos[c1] = np.mean([self.pos[c1], c2_pos], axis = 0) self.progeny[c1] += 1 def _write_header_am(self, f, nb_points, length): """ Header for Amira .am files """ f.write('# AmiraMesh 3D ASCII 2.0\n') f.write('define VERTEX %d\n'%(nb_points*2)) f.write('define EDGE %d\n'%nb_points) f.write('define POINT %d\n'%((length)*nb_points)) f.write('Parameters {\n') f.write('\tContentType "HxSpatialGraph"\n') f.write('}\n') f.write('VERTEX { float[3] VertexCoordinates } @1\n') f.write('EDGE { int[2] EdgeConnectivity } @2\n') f.write('EDGE { int NumEdgePoints } @3\n') f.write('POINT { float[3] EdgePointCoordinates } @4\n') f.write('VERTEX { float Vcolor } @5\n') f.write('VERTEX { int Vbool } @6\n') f.write('EDGE { float Ecolor } @7\n') f.write('VERTEX { int Vbool2 } @8\n') def write_to_am(self, path_format, t_b=None, t_e=None, length=5, manual_labels=None, default_label=5, new_pos=None): """ Writes a lineageTree into Amira-readable data (.am format). Args: path_format (str): path to the output. It should contain 1 %03d where the time step will be entered t_b (int): first time point to write (if None, min(LT.to_take_time) is taken) t_e (int): last time point to write (if None, max(LT.to_take_time) is taken) note, if there is no 'to_take_time' attribute, self.time_nodes is considered instead (historical) length (int): length of the track to print (how many time points back). manual_labels ({id: label, }): dictionary that maps cell ids to manual labels default_label (int): default value for the manual label new_pos ({id: [x, y, z]}): dictionary that maps a cell ID to a 3D position. if new_pos == None (default) then self.pos is considered. 
""" if not hasattr(self, 'to_take_time'): self.to_take_time = self.time_nodes if t_b is None: t_b = min(self.to_take_time.keys()) if t_e is None: t_e = max(self.to_take_time.keys()) if new_pos is None: new_pos = self.pos if manual_labels is None: manual_labels = {} for t in range(t_b, t_e + 1): f = open(path_format%t, 'w') nb_points = len(self.to_take_time[t]) self._write_header_am(f, nb_points, length) points_v = {} for C in self.to_take_time[t]: C_tmp = C positions = [] for i in range(length): C_tmp = self.predecessor.get(C_tmp, [C_tmp])[0] positions.append(new_pos[C_tmp]) points_v[C] = positions f.write('@1\n') for i, C in enumerate(self.to_take_time[t]): f.write('%f %f %f\n'%tuple(points_v[C][0])) f.write('%f %f %f\n'%tuple(points_v[C][-1])) f.write('@2\n') for i, C in enumerate(self.to_take_time[t]): f.write('%d %d\n'%(2*i, 2*i+1)) f.write('@3\n') for i, C in enumerate(self.to_take_time[t]): f.write('%d\n'%(length)) f.write('@4\n') tmp_velocity = {} for i, C in enumerate(self.to_take_time[t]): for p in points_v[C]: f.write('%f %f %f\n'%tuple(p)) f.write('@5\n') for i, C in enumerate(self.to_take_time[t]): f.write('%f\n'%(manual_labels.get(C, default_label))) f.write('%f\n'%(0)) f.write('@6\n') for i, C in enumerate(self.to_take_time[t]): f.write('%d\n'%(int(manual_labels.get(C, default_label) != default_label))) f.write('%d\n'%(0)) f.write('@7\n') for i, C in enumerate(self.to_take_time[t]): f.write('%f\n'%(np.linalg.norm(points_v[C][0] - points_v[C][-1]))) f.write('@8\n') for i, C in enumerate(self.to_take_time[t]): f.write('%d\n'%(1)) f.write('%d\n'%(0)) f.close() def _get_height(self, c, done): """ Recursively computes the height of a cell within a tree * a space factor. This function is specific to the function write_to_svg. Args: c (int): id of a cell in a lineage tree from which the height will be computed from done ({int: [int, int]}): a dictionary that maps a cell id to its vertical and horizontal position Returns: float: """ if c in done: return done[c][0] else: P = np.mean([self._get_height(di, done) for di in self.successor[c]]) done[c] = [P, self.vert_space_factor*self.time[c]] return P def write_to_svg(self, file_name, roots=None, draw_nodes=True, draw_edges=True, order_key=None, vert_space_factor=.5, horizontal_space=1, node_size=None, stroke_width=None, factor=1., node_color=None, stroke_color=None, positions=None): """ Writes the lineage tree to an SVG file. Node and edges coloring and size can be provided. Args: file_name: str, filesystem filename valid for `open()` roots: [int, ...], list of node ids to be drawn. If `None` all the nodes will be drawn. Default `None` draw_nodes: bool, wether to print the nodes or not, default `True` draw_edges: bool, wether to print the edges or not, default `True` order_key: function that would work for the attribute `key=` for the `sort`/`sorted` function vert_space_factor: float, the vertical position of a node is its time. `vert_space_factor` is a multiplier to space more or less nodes in time horizontal_space: float, space between two consecutive nodes node_size: func, a function that maps a node id to a `float` value that will determine the radius of the node. The default function return the constant value `vertical_space_factor/2.1` stroke_width: func, a function that maps a node id to a `float` value that will determine the width of the daughter edge. 
The default function return the constant value `vertical_space_factor/2.1` factor: float, scaling factor for nodes positions, default 1 node_color: func, a function that maps a node id to a triplet between 0 and 255. The triplet will determine the color of the node. stroke_color: func, a function that maps a node id to a triplet between 0 and 255. The triplet will determine the color of the stroke of the inward edge. positions: {int: [float, float], ...}, dictionary that maps a node id to a 2D position. Default `None`. If provided it will be used to position the nodes. """ import svgwrite if roots is None: roots = list(set(self.successor).difference(self.predecessor)) if node_size is None: node_size = lambda x: vert_space_factor/2.1 if stroke_width is None: stroke_width = lambda x: vert_space_factor/2.2 if node_color is None: node_color = lambda x: (0, 0, 0) coloring_edges = not stroke_color is None if not coloring_edges: stroke_color = lambda x: (0, 0, 0) prev_x = 0 self.vert_space_factor = vert_space_factor if order_key is not None: roots.sort(key=order_key) treated_cells = [] pos_given = not positions is None if not pos_given: positions = dict(zip(self.nodes, [[0., 0.],]*len(self.nodes))) for i, r in enumerate(roots): r_leaves = [] to_do = [r] while len(to_do) != 0: curr = to_do.pop(0)
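# A self-contained toy of the bookkeeping that add_node() maintains (successor/predecessor
# maps plus per-time bins). It does not use the class above; the helper name, node ids and
# time points are made up for illustration.
toy_successor, toy_predecessor, toy_time_nodes = {}, {}, {}

def toy_add(node_id, t, parent=None):
    toy_time_nodes.setdefault(t, []).append(node_id)
    if parent is not None:
        toy_successor.setdefault(parent, []).append(node_id)
        toy_predecessor.setdefault(node_id, []).append(parent)

toy_add(1, t=0)            # a root cell at t=0
toy_add(2, t=1, parent=1)  # its two daughters at t=1
toy_add(3, t=1, parent=1)
assert toy_successor[1] == [2, 3]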
# -*- coding: utf-8 -*- """ Created on Wed Nov 11 14:01:00 2020 @author: hvf811 """ seed_val = 1234 import os import tensorflow as tf from tensorflow.keras.layers import Dense, Input, Dropout,Multiply, LSTM, Add, Concatenate, TimeDistributed from tensorflow.keras.layers import Conv1D, Flatten, Lambda, MaxPooling1D, GRU, SimpleRNN, PReLU from tensorflow.keras.layers import Reshape from tensorflow.keras.models import Model from keras.regularizers import l2, l1 from tensorflow.keras.losses import BinaryCrossentropy from tensorflow.keras.initializers import RandomNormal, RandomUniform, Constant from tensorflow.keras import backend as K from tensorflow.keras.constraints import non_neg from tensorflow.keras.models import load_model from tensorflow.keras.activations import relu from tensorflow.keras.optimizers import Adam import random import pickle import numpy as np import matplotlib.pyplot as plt from sklearn.metrics import matthews_corrcoef, roc_auc_score, precision_score, recall_score,f1_score,cohen_kappa_score,accuracy_score import time ### Controlling randomness tf.random.set_seed(seed_val) np.random.seed(seed_val) random.seed(seed_val) #### Readaing peptide sequence data with open("total_classes_seq_bins32.pkl","rb") as f: tot_bins = pickle.load(f) with open("total_classes_seq32.pkl","rb") as f: encoder = pickle.load(f) with open("branches32.pkl","rb") as f: branches = pickle.load(f) branches = [list(i) for i in branches] lvl1 = [list(branches[0]+branches[1]), list(branches[2]+branches[3]+branches[4])] lvl2_1 = [list(branches[0]),list(branches[1])] lvl2_2 = [list(branches[2]),list(branches[3]+branches[4])] lvl3 = [list(branches[3]),list(branches[4])] lvl4_1 = [list(branches[0])] lvl4_2 = [list(branches[1])] lvl4_3 = [list(branches[2])] lvl4_4 = [list(branches[3])] lvl4_5 = [list(branches[4])] levels = [lvl1, lvl2_1, lvl2_2, lvl3, lvl4_1, lvl4_2, lvl4_3, lvl4_4, lvl4_5] vocab = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V',] folds = [ [[0], [1], [2, 3, 4, 5, 6, 7, 8, 9]], [[1], [2], [0, 3, 4, 5, 6, 7, 8, 9]], [[2], [3], [0, 1, 4, 5, 6, 7, 8, 9]], [[3], [4], [0, 1, 2, 5, 6, 7, 8, 9]], [[4], [5], [0, 1, 2, 3, 6, 7, 8, 9]], [[5], [6], [0, 1, 2, 3, 4, 7, 8, 9]], [[6], [7], [0, 1, 2, 3, 4, 5, 8, 9]], [[7], [8], [0, 1, 2, 3, 4, 5, 6, 9]], [[8], [9], [0, 1, 2, 3, 4, 5, 6, 7]], [[9], [0], [1, 2, 3, 4, 5, 6, 7, 8]] ] ## Getting the labels labels1 = set() for i in list(encoder.values()): labels1.update(i) labels1 = list(labels1) labels1.sort() labels = set() for k,v in tot_bins.items(): #print(v) tmp = k.split(" | ") tmp.remove("") labels.update(tmp) labels = list(labels) labels.sort() print(labels1) print("------") print(labels) assert labels == labels1 del labels1 ############################################################# ## Functions def calc_real_acc(yt,tmp_val): return np.sum(yt == np.round(tmp_val))/(yt.shape[0] * yt.shape[1]) def print_function(name,x,y): print(name,"||",sep=" ", end=" ") for i in range(len(x)): print(x[i], np.round(y[i],4),"|",sep=" ", end=" ") print("") try: print("average:", np.average(y)) except: print("average:", np.average(y[1:])) print("\n") def calc_score(yt,tmp_val,funk): out = [] for i in range(len(yt)): indx = np.sum(yt[i],axis=-1) ind = indx > 0 out.append(np.average(funk(tmp_val[i][ind], yt[i][ind]))) return out def calc_score_wzero(yt,tmp_val,funk): out = [] out0 = [] for i in range(len(yt)): indx = np.sum(yt[i],axis=-1) ind = indx > 0 ind0 = indx == 0 out.append(funk(yt[i][ind], tmp_val[i][ind])) if 
np.sum(ind0) > 0: out0.append(funk(yt[i][ind0], tmp_val[i][ind0])) if np.sum(ind0) == 0: out0.append([]) return out, out0 def calc_roc(yt,tmp_val,funk): out = [] out0 = [] for i in range(len(yt)): indx = np.sum(yt[i],axis=-1) ind = indx > 0 out.append(funk(yt[i][ind], tmp_val[i][ind])) out0.append(funk(yt[i], tmp_val[i])) return out, out0 def calc_score_wzero_round(yt,tmp_val,funk): out = [] out0 = [] for i in range(len(yt)): indx = np.sum(yt[i],axis=-1) ind = indx > 0 ind0 = indx == 0 out.append(funk(yt[i][ind], np.round(tmp_val[i][ind]))) if np.sum(ind0) > 0: out0.append(funk(yt[i][ind0], np.round(tmp_val[i][ind0]))) if np.sum(ind0) == 0: out0.append([]) return out, out0 def per_pred(yv,tmp_val,funk, name): mccs = [] for iq in range(len(yv)): mccs.append([funk(yv[iq][:,iqq],np.round(tmp_val[iq][:,iqq])) for iqq in range(len(yv[iq][0]))]) for iq in range(len(mccs)): print(level_names[iq]) for iqq in mccs[iq]: print(np.round(iqq,4), sep=" ", end=" ") print("") all_mcc = [] for iq in mccs: all_mcc += iq all_mcc1 = np.prod(all_mcc) all_mcc2 = np.average(all_mcc) print("\naverage {}:".format(name), all_mcc2, "| prod", all_mcc1) print("\naverage {} for leaves:".format(name), np.average(all_mcc[-len(labels):]), "| prod", np.prod(all_mcc[-len(labels):])) return all_mcc2 def per_pred2(yv,tmp_val,funk, name): mccs = [] for iq in range(len(yv)): mccs.append([funk(yv[iq][:,iqq],np.round(tmp_val[iq][:,iqq])) for iqq in range(len(yv[iq][0]))]) for iq in range(len(mccs)): print(level_names[iq]) for iqq in mccs[iq]: print(np.round(iqq,4), sep=" ", end=" ") print("") all_mcc = [] for iq in mccs: all_mcc += iq all_mcc1 = np.prod(all_mcc) all_mcc2 = np.average(all_mcc) #print("\naverage {}:".format(name), all_mcc2, "| prod", all_mcc1) #print("\naverage {} for leaves:".format(name), np.average(all_mcc[-len(labels):]), "| prod", np.prod(all_mcc[-len(labels):])) return all_mcc2 def printer_stuff(yv,xv,modd): tmp_val = modd.predict(xv) val_loss = [losser(yv[iq],tmp_val[iq]).numpy() for iq in range(len(yv))] print_function("val_loss",level_names,val_loss) val_acc = [accuracy_score(yv[iq],np.round(tmp_val[iq])) for iq in range(len(yv))] print_function("val_exact_ACC",level_names,val_acc) ac1, ac2 = calc_score_wzero_round(yv,tmp_val,accuracy_score) print_function("exact_acc_labels",level_names,ac1) print_function("exact_acc_zeros",level_names,ac2) print_function("TP_ranked",level_names,calc_score(yv,tmp_val,estimate_acc)) ac1, ac2 = calc_score_wzero(yv,tmp_val,calc_real_acc) print_function("real_acc_labels",level_names,ac1) print_function("real_acc_zeros",level_names,ac2) roc1, roc0 = calc_roc(yv,tmp_val,roc_auc_score) print_function("roc_labels",level_names,roc1) print_function("roc_with_zeros",level_names,roc0) _ = per_pred(yv,tmp_val,precision_score,"PREC") _ = per_pred(yv,tmp_val,recall_score,"REC") _ = per_pred(yv,tmp_val,f1_score,"F1") _ = per_pred(yv,tmp_val,cohen_kappa_score,"KAPPA") all_mcc2 = per_pred(yv,tmp_val,matthews_corrcoef,"MCC") ### Functions for sequence encoding def making_ys(activity,levels): lvls = [] for l in levels: lvls.append([]) for j,l in enumerate(levels): if len(l) == 2: lab = np.zeros((len(l),)) for jj,ll in enumerate(l): if activity in ll: lab[jj] = 1 lvls[j].append(lab) if len(l) == 1: lab = np.zeros((len(l[0]),)) if activity in l[0]: lab[l[0].index(activity)] = 1 lvls[j].append(lab) return lvls def encode_seqs(sq_dct, encoder, max_, voc, ac_over, levels): lnv = len(voc) #dims = len(ac_over) alsqs = [] y_list = [] x_list = [] for sq in sq_dct: tmp = [] for ii in encoder[sq]: tmp2 
= making_ys(ii, levels) if len(tmp) == 0: for iii in tmp2: tmp.append(iii) else: for iii in range(len(tmp2)): #print(sq,tmp[iii]) tmp[iii][0] += tmp2[iii][0] for ii in tmp: ii[0][ii[0] > 0] = 1 y_list.append(tmp) diff = max_ - len(sq) if diff % 2 == 0: tmps = "9"*int(diff/2) + sq + "9"*int(diff/2) if diff % 2 != 0: tmps = "9"*int((diff-1)/2) + sq + "9"*int((diff-1)/2 + 1) #tmps = sq + "9"*int(diff) alsqs.append(sq) tmp_x = np.zeros((max_,lnv)) for ii in range(len(tmp_x)): if tmps[ii] in voc: tmp_x[ii][voc.index(tmps[ii])] = 1. x_list.append([tmp_x.flatten()]) return np.concatenate(x_list,axis=0), y_list, alsqs def folder(folder, tot_bins): train_to_encode = [] val_to_encode = [] test_to_encode = [] for k,v in tot_bins.items(): for i in folder[-1]: train_to_encode += v[i] val_to_encode += v[folder[0][0]] test_to_encode += v[folder[1][0]] return train_to_encode, val_to_encode, test_to_encode #### def make_rn(x): rn = np.arange(len(x)) np.random.shuffle(rn) return rn def res(X): X = X.reshape((len(X),len(X[0]),1)) return X def y_sets(y,levels): lvls = [[] for i in levels] for j,i in enumerate(y): for jj,ii in enumerate(i): lvls[jj].append(ii) for j,i in enumerate(lvls): lvls[j] = np.concatenate(lvls[j],axis=0) return lvls def sorter(a,b): c = list(zip(a,b)) c.sort() a1,b1 = zip(*c) b1 = list(b1[::-1]) a1 = list(a1[::-1]) return a1, b1 def estimate_acc(pred,y): acc = [] for i in range(len(y)): a,b = sorter(pred[i],np.arange(len(pred[i]))) a1,b1 = sorter(y[i],np.arange(len(y[i]))) ln = len(y[i][y[i] > 0]) ac = len(set(b1[:ln]).intersection(set(b[:ln])))/len(b1[:ln]) acc.append(ac) return acc binary_cross = BinaryCrossentropy()#reduction="sum") binnz = K.binary_crossentropy def mcc_norm_rev_sumXxX(y_true, y_pred): def mcc_loss_binary_mean_cor(y_true, y_pred): y = K.sum(K.cast(y_true, 'float32'), axis=0) q = K.cast(K.equal(y/K.cast(K.shape(y_true)[0],'float32'),1),'float32') q_ = K.cast(K.equal(y/K.cast(K.shape(y_true)[0],'float32'),0),'float32') yh = K.sum(K.cast(y_pred, 'float32'), axis=0) qq = K.cast(K.equal(yh/K.cast(K.shape(y_true)[0],'float32'),1),'float32') qq_ = K.cast(K.equal(yh/K.cast(K.shape(y_true)[0],'float32'),0),'float32') e_ = K.sum(K.cast(K.abs(y_true-y_pred), 'float32'), axis=0) e = K.cast(K.not_equal(e_,0),'float32') tp = K.clip(K.sum(K.cast(y_true*y_pred, 'float32'), axis=0),K.clip(q_,0,1), K.cast(K.shape(y_true)[0],'float32')) tn = K.clip(K.sum(K.cast((1-y_true)*(1-y_pred), 'float32'), axis=0),K.clip(q,0,1), K.cast(K.shape(y_true)[0],'float32')) fp = K.clip(K.sum(K.cast((1-y_true)*y_pred, 'float32'), axis=0),K.clip(qq_,0,1), K.cast(K.shape(y_true)[0],'float32')) fn = K.clip(K.sum(K.cast(y_true*(1-y_pred), 'float32'), axis=0),K.clip(qq,0,1), K.cast(K.shape(y_true)[0],'float32')) up = tp*tn - fp*fn down = K.sqrt((tp+fp) * (tp+fn) * (tn+fp) * (tn+fn)) mcc = up / down return (1-mcc)*e e_ = K.sum(K.cast(K.abs(y_true-y_pred), 'float32'), axis=0) e = K.cast(K.equal(e_,K.cast(K.shape(y_true)[0],'float32')),'float32') e = e * 2 m1 = mcc_loss_binary_mean_cor(y_true, y_pred) return K.clip(m1,e,2) def upper_loss(y_true, y_pred): y_pred = K.cast(y_pred, 'float32') y_true = K.cast(y_true, 'float32') return K.sum(mcc_norm_rev_sumXxX(y_true, y_pred)) def loss_1(y_true, y_pred): y_pred = K.cast(y_pred, 'float32') y_true = K.cast(y_true, 'float32') return K.sum(K.mean(binnz(y_true, y_pred),axis=0)+mcc_norm_rev_sumXxX(y_true, y_pred)) #return K.sum(K.mean(binnz(y_true, y_pred)+mcc_norm_rev_sumXxX(y_true, y_pred),axis=0)) #return K.square(mcc_norm_rev_sumXxX(y_true, y_pred)) 
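# A hedged, stripped-down illustration of the loss terms defined above: the
# custom losses add a (1 - MCC)-style penalty, computed per output column from
# soft confusion-matrix counts, on top of binary cross-entropy.  The numpy
# version and the toy label arrays below are illustrative only.
import numpy as np

def _soft_mcc_demo(y_true, y_pred, eps=1e-7):
    tp = np.sum(y_true * y_pred, axis=0)
    tn = np.sum((1 - y_true) * (1 - y_pred), axis=0)
    fp = np.sum((1 - y_true) * y_pred, axis=0)
    fn = np.sum(y_true * (1 - y_pred), axis=0)
    num = tp * tn - fp * fn
    den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) + eps
    return num / den

_yt_demo = np.array([[1., 0.], [0., 1.], [1., 1.], [0., 0.]])
print("1 - MCC, perfect predictions:", 1.0 - _soft_mcc_demo(_yt_demo, _yt_demo))         # ~[0, 0]
print("1 - MCC, inverted predictions:", 1.0 - _soft_mcc_demo(_yt_demo, 1.0 - _yt_demo))  # ~[2, 2]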
############################################################# #### Intializing the network init3 = RandomUniform(minval=-0.001, maxval=0.001, seed=seed_val) init4 = Constant(1) init5 = RandomUniform(minval=0.001, maxval=0.05, seed=seed_val) init6 = RandomUniform(minval=-1, maxval=1, seed=seed_val) init7 = Constant(0.001) def max_sec_ax_keep_dims(inp): return K.max(inp,axis=-2, keepdims=True) def divider(inp): #return inp / (K.max(K.abs(inp),axis=-1, keepdims=True) + K.epsilon()) return inp / (K.sum(K.abs(inp),axis=-1, keepdims=True) + K.epsilon()) def bottum_up(inp): pred = K.max(inp, axis=-1, keepdims=True) return pred def small_cnn(x,kernels,LR,init2): l1x = [] l2_reg = 0.0 drop = 0.5 for i in [4,6,10,16,22,30,40]: cxc = len(vocab) x4 = Conv1D(kernels,kernel_size=cxc*i, strides=cxc, padding="valid", activation=LR,kernel_initializer=init2, use_bias=False)(x) x4 = PReLU(alpha_initializer=Constant(value=0.3))(x4) x4 = Lambda(max_sec_ax_keep_dims)(x4) l1x.append(x4) x4 = Concatenate(axis=-2)(l1x) z41x = Flatten()(x4) z41x = Lambda(divider)(z41x) z41 = Dropout(0.2)(z41x) z42 = Dense(500, activation='linear',kernel_initializer=init6, use_bias=True)(z41) z42 = PReLU(alpha_initializer=Constant(value=0.3))(z42) #z42 = Lambda(divider)(z42) #z42x = Concatenate()([z41x, z42]) z42 = Dropout(drop)(z42) z43 = Dense(500, activation='linear',kernel_initializer=init6, use_bias=True)(z42) z43 = PReLU(alpha_initializer=Constant(value=0.3))(z43) #z43 = Lambda(divider)(z43) #z43x = Concatenate()([z42x, z43]) z43 = Dropout(drop)(z43) z44 = Dense(500, activation='linear',kernel_initializer=init6, use_bias=True)(z43) z44 = PReLU(alpha_initializer=Constant(value=0.3))(z44) #z44 = Lambda(divider)(z44) #z44x = Concatenate()([z43x, z44]) z4 = Dropout(drop)(z44) return z4 def activation3(x): #return K.relu((x-0.5)*2) #return K.relu(x, threshold=0.5) return K.relu(x/0.5-0.5,max_value=1) inputs = Input(shape=(len(vocab)*200,1)) x = inputs xx = Flatten()(x) init2 = "orthogonal" name1 = "fold1_" kernels = 40 LR = "linear" l2_reg = 0.0 z4 = small_cnn(x,kernels,LR,init2) lvl3_1 = Dense(len(levels[4][0]), activation='sigmoid',kernel_initializer=init5, use_bias=True, name="outputs5") outputs5 = lvl3_1(z4) #outputs5 = Lambda(activation3)(outputs5) z4 = small_cnn(x,kernels,LR,init2) lvl3_1 = Dense(len(levels[5][0]), activation='sigmoid',kernel_initializer=init5, use_bias=True, name="outputs6") outputs6 = lvl3_1(z4) #outputs6 = Lambda(activation3)(outputs6) z7 = small_cnn(x,kernels,LR,init2) lvl3_2 =
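# A hedged illustration of the sequence encoding used by this script:
# encode_seqs() centre-pads each peptide with the placeholder character '9' up
# to max_ characters, one-hot encodes it over the 20-letter vocabulary and
# flattens the result, which is why the model input above is len(vocab)*200
# wide.  The toy max_len and sequence below are illustrative only.
import numpy as np

voc = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I',
       'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
max_len = 10                 # the script itself uses 200
seq = "ARNDC"
diff = max_len - len(seq)
padded = "9" * (diff // 2) + seq + "9" * (diff - diff // 2)   # centre padding
one_hot = np.zeros((max_len, len(voc)))
for i, ch in enumerate(padded):
    if ch in voc:            # padding characters stay all-zero
        one_hot[i, voc.index(ch)] = 1.0
print(padded, one_hot.flatten().shape)   # -> 99ARNDC999 (200,)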
import re from typing import Optional, Dict, List, Union import functools def select_most_frequent_shingles(matches: List[str], db: Dict[str, int], min_count_split: int, threshold: float): """Select the most frequent shingles that matches the wildcard shingle Parameters: ----------- matches : List[str] A list of shingles from the database (`db`) that matches the current wildcard shingle in the `expandshingle` function. db: dict Database with shingles as `key` and their frequencies/counts as `value` Assumptions: - Python's `dict` are automatically in alphabetic order but not key length, i.e. we still need to filter keys by length initially. - The database `db` is immutable. Preprocessing: Make sure that each shingle has a sufficient number of counts/frequencies, e.g. remove shingles that occur less than 20x. threshold: float (Default: 0.80) Replace max. `1.0 - threshold` of the least frequent shingles with the wildcard shingle. min_count_split: int (Default: 2) If the combined frequency of all shingles covered by one wildcard shingle (count sum of the regex query results) is less than the specified minimum total frequency, then the recursion aborts. Returns: -------- selected_shingles : List[str] The selected most frequent shingles residual_count : int The residual counts (of the unselected shingles) that will be assigned to the wildcard shingle in `expandshingle` Example: -------- selected_shingles, residual_count = select_most_frequent_shingles( matches, db, min_count_split, threshold) """ # read only matches candidates = [item for item in db.items() if item[0] in matches] # proceed if there at least 2 candidates if len(candidates) == 0: return [], 0 if len(candidates) == 1: return [], candidates[0][1] # sort by descending frequency candidates = sorted(candidates, key=lambda item: -item[1]) # compute total counts total = sum([val for _, val in candidates]) if total < min_count_split: return [], total # find most frequent (`val`) shingles (`key`) up to 90% of all matches # always ignore the least frequent shingle and use the wildcard version cumpct = 0.0 cumcnt = 0 selected = [] for key, val in candidates[:-1]: # abort if the sum of selected shingles reached threshold cumpct += val / total if cumpct > threshold: break # select shingle cumcnt += val selected.append(key) # done return selected, total - cumcnt def expandshingle(s: str, db: Dict[str, int], memo: Optional[dict] = {}, wildcard: Optional[str] = '\uFFFF', threshold: Optional[float] = 0.8, min_count_split: Optional[int] = 2, max_wildcards: Optional[int] = 3): """Recursive algorithm to select given k-shingles Parameters: ----------- s: str A shingle, i.e. a string of text db: dict Database with shingles as `key` and their frequencies/counts as `value` Assumptions: - Python's `dict` are automatically in alphabetic order but not key length, i.e. we still need to filter keys by length initially. - The database `db` is immutable. Preprocessing: Make sure that each shingle has a sufficient number of counts/frequencies, e.g. remove shingles that occur less than 20x. memo: dict (default: {}) Database for memoization, i.e. `memo[shingle]=count`. In case of the wildcarded shingle, the residual counts that are not covered by the selected set of shingles (Basically the `1 - p`). The keys in the memoization cache are selected shingles of the CEWS algorithm. wildcard: str (Default: '\uFFFF') An unicode char that is not actually used by any natural language, or the text that you analyzing, e.g. 
U+FFFE or U+FFFF See https://en.wikipedia.org/wiki/Specials_(Unicode_block) threshold: float (Default: 0.80) Replace max. `1.0 - threshold` of the least frequent shingles with the wildcard shingle. min_count_split: int (Default: 2) If the combined frequency of all shingles covered by one wildcard shingle (count sum of the regex query results) is less than the specified minimum total frequency, then the recursion aborts. max_wildcards: int (Default: 3) If an input string `s` contains more than the specified maximum number of wildcard characters, the recursion aborts. Returns: -------- memo: dict The updated memoization cache. """ # (0a) stop if the shingle reached the maximum number of wildcards if s.count(wildcard) >= max_wildcards: return memo # (0b) abort if min_count_split < 1: raise Exception( f"min_count_split={min_count_split} but must greater equal 1.") # (1a) Prefix/Suffix Wildcards # - Expand on the left side (prefix wildcard) and right side (suffix w.) # - This will lengthen the shingle by 1 character (k+1) tmp = [] tmp.append(f"{wildcard}{s}") # "_[shingle]" tmp.append(f"{s}{wildcard}") # "[shingle]_" # (1b) Infix Wildcard # - Check all wildcard variants of a shingle. # - This will NOT expand the length of the shingles directly n_len = len(s) if n_len >= 3: # never replace 1st and last with wildcard ("1") # and avoid consequtive wildcards at the left and right end off_start = 1 + int(s[0] == wildcard) off_end = 1 + int(s[-1] == wildcard) # create each infix wildcard combination for i in range(off_start, n_len - off_end): # Find all matches "[s1]_[s2]" tmp.append(f"{s[:i]}{wildcard}{s[(i + 1):]}") # (2a) Find all matches for snew in tmp: # memoization trick if snew not in memo: # regex search reg = snew.replace(wildcard, r"\w{1}") pat = re.compile(f"^{reg}$") matches = list(filter(pat.match, db.keys())) # (2b) Find and select the most frequent shingles selected_shingles, residual_count = select_most_frequent_shingles( matches, db, min_count_split, threshold) # (2c) Assign the counts of the unselected shingles to the new # wildcard-shingle (`residual_count`), store it the database # (`db`) and memoization cache (`memo`), and traverse to the next # knot if residual_count >= min_count_split: memo[snew] = residual_count memo = expandshingle(snew, db=db, memo=memo, wildcard=wildcard, threshold=threshold, min_count_split=min_count_split, max_wildcards=max_wildcards) # (2d) Store the selected shingles to the memoization cache # (`memo`), and trigger the next recursion step (traverse # down the tree) for key in selected_shingles: if key not in memo: # memoization trick memo[key] = db[key] memo = expandshingle(key, db=db, memo=memo, wildcard=wildcard, threshold=threshold, min_count_split=min_count_split, max_wildcards=max_wildcards) # done return memo def cews(db: Dict[str, int], memo: Optional[dict] = {}, wildcard: Optional[str] = '\uFFFF', threshold: Optional[float] = 0.8, min_count_split: Optional[int] = 2, max_wildcards: Optional[int] = 3): """Collectively Exhaustive Wildcard Shingling (CEWS) Parameters: ----------- db: dict Database with shingles as `key` and their frequencies/counts as `value` Assumptions: - Python's `dict` are automatically in alphabetic order but not key length, i.e. we still need to filter keys by length initially. - The database `db` is immutable. Preprocessing: Make sure that each shingle has a sufficient number of counts/frequencies, e.g. remove shingles that occur less than 20x. 
memo: dict Add specific shingles to the memoization cache to make sure that these part of subword pattern list lateron. These shingles might certain keywords, common stopwords, all chars, emojis, abbreviations. Call the function as follows: import kshingle as ks memo = {k: db[k] for k in ["i.e.", "e.g."]} memo = ks.cews(db, memo=memo) wildcard : str An unicode char that is not actually used by any natural language, or the text that you analyzing, e.g. U+FFFE or U+FFFF See https://en.wikipedia.org/wiki/Specials_(Unicode_block) threshold: float (Default: 0.80) Replace max. `1.0 - threshold` of the least frequent shingles with the wildcard shingle. min_count_split: int (Default: 2) If the combined frequency of all shingles covered by one wildcard shingle (count sum of the regex query results) is less than the specified minimum total frequency, then the recursion aborts. max_wildcards: int (Default: 3) If an input string `s` contains more than the specified maximum number of wildcard characters, the recursion aborts. Return: ------- memo: dict Database for memoization, i.e. `memo[shingle]=count`. In case of the wildcarded shingle, the residual counts that are not covered by the selected set of shingles (Basically the `1 - p`). The keys in the memoization cache are selected shingles of the CEWS algorithm. """ # add single-chars to memo automatically if len(memo) == 0: memo = {k: v for k, v in db.items() if len(k) == 1} # loop over all db entries shingles = list(db.keys()) for s in shingles: memo = expandshingle( s, db=db, memo=memo, wildcard=wildcard, threshold=threshold, min_count_split=min_count_split, max_wildcards=max_wildcards) # done return memo @functools.cmp_to_key def sort_by_patterns(a, b): """Comparision function for PATTERNS.sort Example: -------- PATTERNS.sort(key=sort_by_patterns) """ # (1) Prefer exact matches (no wildcards) or resp. less wilcards a_wild = len(a.pattern.split(r"\w{1}")) -
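# A hedged usage sketch for the CEWS helpers defined above: cews() seeds the
# memoization cache with all single-character shingles and then expands every
# key with expandshingle(), folding rare shingles into wildcarded patterns.
# The shingle database below is a made-up toy example.
_toy_db = {"a": 12, "b": 9, "c": 7, "ab": 6, "ac": 5, "bc": 3}
_patterns = cews(_toy_db, memo={}, threshold=0.8, min_count_split=2)
print(sorted(_patterns.items()))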
import os import sys import importlib import pkg_resources import json import argparse import yaml import re import copy from datetime import datetime from .config_loader import ConfigLoader from .dependency_grapher import DependencyGrapher from .git import Git from .run import run from .token_interpolator import TokenInterpolator from .logger import Logger class Servicer(): def __init__(self, args=None, init=True): logger_params = {} if os.getenv('DEBUG'): logger_params['level'] = 'debug' self.logger = Logger(**logger_params) if not init: return self.datetime = datetime self.version = pkg_resources.get_distribution('servicer').version self.run = run self.token_interpolator = TokenInterpolator(logger=self.logger) if args == None: args = vars(self.load_arguments()) if 'version' in args and args['version']: self.logger.log(self.version) sys.exit(0) self.logger.log('servicer version: %s' % self.version) self.load_environment(args) self.config_loader = ConfigLoader(args, logger=self.logger) self.config = self.config_loader.load_config() self.active_services = None self.normalize_ci_environment() self.determine_service_environment() self.config_loader.interpolate_config(self.config) self.logger.log('Services Config:', level='debug') self.logger.log(json.dumps(self.config, indent=4, sort_keys=True, default=str), level='debug') if 'show_config' in self.config['args'] and self.config['args']['show_config']: self.logger.log('Services Config:') self.logger.log(json.dumps(self.config, indent=4, sort_keys=True, default=str)) sys.exit(0) self.git_init() self.decide_service_step_order() def decide_service_step_order(self): self.load_steps() if not self.active_services: self.active_services = self.load_service_modules() self.dependency_grapher = DependencyGrapher( self.config, self.active_services, self.steps, self.step_order, self.active_steps, logger=self.logger, ) self.service_step_order = self.dependency_grapher.order_service_steps(self.active_services) def load_arguments(self): parser = argparse.ArgumentParser(description='Process deployment options.') parser.add_argument('--generate_ci', action='store_true', help='generate a ci config file, do not run any deploy options') parser.add_argument('--service', help='deploy only the provided service') parser.add_argument('--services_file', default='services.yaml', help='custom path to your services config file (default is services.yaml)') parser.add_argument('--servicer_config_path', default='%s/.servicer' % os.getcwd(), help='path to your servicer directory (default is ./servicer)') parser.add_argument('--env_file_paths', default='%s/.servicer/.env.yaml:%s/.servicer/.env.yaml' % (os.getenv('HOME'), os.getcwd()), help='paths to your local .env files, colon-separated') parser.add_argument('--step', help='perform the comma-separated build steps, defaults to all steps') parser.add_argument('--show_config', action='store_true', help='prints the interpolated config file') parser.add_argument('--no_ignore_unchanged', action='store_true', help='disables ignoring services through change detection') parser.add_argument('--no_tag', action='store_true', help='disables build tagging') parser.add_argument('--no_auth', action='store_true', help='disables build authentication, useful if you are already authenticated locally') parser.add_argument('--ignore_dependencies', action='store_true', help='disables automatic dependency execution') parser.add_argument('--tag', action='store_true', help='generate a git tag') parser.add_argument('--version', action='store_true', 
help='display the package version') return parser.parse_args() def load_environment(self, args): os.environ['PROJECT_PATH'] = os.environ['PWD'] os.environ['BUILD_DATETIME'] = str(self.datetime.utcnow()) os.environ['BUILD_DATE'] = self.datetime.now().strftime('%Y-%m-%d') if 'env_file_paths' not in args: return for path in args['env_file_paths'].split(':'): self.load_env_file(path) def load_env_file(self, path): self.logger.log('checking for (.env.yaml) at (%s)' % path) if os.path.exists(path): self.logger.log('(.env.yaml) found, including these arguments:') yaml_dict = yaml.load(open(path)) for key, value in yaml_dict.items(): os.environ[key] = value self.logger.log(key) self.logger.log() def normalize_ci_environment(self): if 'ci' not in self.config: return self.print_title('normalizing CI environment') self.config['ci']['adapters'] = {} for p in self.config['ci']['providers']: self.logger.log('CI Adapter: %s' % p) ci_adapter_modules = [ { 'name': 'ci_adapters.%s' % p, 'package': 'ci_adapters', 'file_path': '%s/ci_adapters/%s.py' % (self.config['config_path'], p), }, { 'name': 'servicer.builtin.ci_adapters.%s' % p, 'package': 'servicer.builtin.ci_adapters', 'file_path': '%s/builtin/ci_adapters/%s.py' % (self.config['module_path'], p), }, ] module = self.load_module_from_paths(ci_adapter_modules) self.config['ci']['adapters'][p] = module.CIAdapter(logger=self.logger) if 'generate_ci' in self.config['args'] and self.config['args']['generate_ci']: for ci_adapter in self.config['ci']['adapters'].values(): self.active_services = self.config['services'].keys() self.decide_service_step_order() ci_adapter.generate_ci_config(self.config, self.service_step_order) sys.exit(0) for ci_adapter in self.config['ci']['adapters'].values(): ci_adapter.convert_environment_variables() def determine_service_environment(self): self.logger.log() self.service_environment = os.getenv('SERVICE_ENVIRONMENT') or self.get_service_environment(os.getenv('BRANCH', 'local')) self.logger.log('branch: %s' % os.getenv('BRANCH')) self.logger.log('service environment: %s' % self.service_environment) if self.service_environment: os.environ['SERVICE_ENVIRONMENT'] = self.service_environment def get_service_environment(self, branch): service_environment = None if 'environment' not in self.config or 'mappings' not in self.config['environment']: return service_environment mappings = self.config['environment']['mappings'] service_environment, self.service_environment_config = self.map_service_environment(branch, mappings) if service_environment: for ch in ['\\', '/', '_']: if ch in service_environment: service_environment = service_environment.replace(ch, '-') if 'variables' in self.service_environment_config: self.config_loader.load_environment_variables(self.service_environment_config['variables']) return service_environment def map_service_environment(self, branch, mappings=[]): for m in mappings: if 'branch' in m: if m['branch'].startswith('/') and m['branch'].endswith('/'): regex = m['branch'][1:-1] else: regex = '^%s$' % m['branch'].replace('*', '.*') result = re.match(regex, branch) if result: return m.get('environment', branch), m elif 'tag' in m: # TODO: support tag mapping pass return None, None def git_init(self): if not 'git' in self.config or not self.config['git']['enabled']: return self.print_title('initializing git integration') git_args = { 'hide_output': 'DEBUG' not in os.environ, 'logger': self.logger } if 'protocol' in self.config['git']: git_args['protocol'] = self.config['git']['protocol'] self.git = 
Git(**git_args) self.git.config = self.config['git'] if 'auto-set-branch' in self.config['git'] and self.config['git']['auto-set-branch']: if 'BRANCH' not in os.environ: os.environ['BRANCH'] = self.git.current_branch() if 'auto-set-commit' in self.config['git'] and self.config['git']['auto-set-commit']: if 'COMMIT_SHORT' not in os.environ: os.environ['COMMIT_SHORT'] = self.git.current_commit(min_length=self.config['git']['commit-min-length']) if 'COMMIT_LONG' not in os.environ: os.environ['COMMIT_LONG'] = self.git.current_commit() if 'COMMIT' not in os.environ: os.environ['COMMIT'] = os.environ['COMMIT_SHORT'] if 'config' in self.config['git']: for key, value in self.config['git']['config'].items(): result = self.run('git config %s' % key, check=False)['stdout'].strip() if result == '': self.run('git config %s "%s"' % (key, value)) if 'fetch-all' in self.config['git'] and self.config['git']['fetch-all']: self.run('git fetch') elif 'fetch-tags' in self.config['git'] and self.config['git']['fetch-tags']: self.run('git fetch --tags') if 'tag' in self.config['args'] and self.config['args']['tag']: self.tag_build(check_git=False) sys.exit(0) if 'GIT_DIFF_REF' in os.environ: result = self.run('git cat-file -t %s' % os.environ['GIT_DIFF_REF'], check=False) if result['status'] != 0: self.logger.log('Invalid GIT_DIFF_REF provided!') else: self.config['git']['diff-ref'] = os.environ['GIT_DIFF_REF'] if 'BRANCH' in os.environ: if self.config['git']['diff-tagging-enabled'] and 'diff-ref' not in self.config['git']: servicer_tag_part = 'servicer-%s' % self.git.sanitize_tag(os.environ['BRANCH']) self.build_tags = [t for t in self.git.list_tags() if t.startswith(servicer_tag_part)] self.logger.log('branch tag: %s' % servicer_tag_part, level='debug') self.logger.log('matching tags:', level='debug') self.logger.log('\n'.join(self.build_tags), level='debug') if len(self.build_tags) > 0: self.config['git']['diff-ref'] = self.build_tags[-1] if 'diff-defaults-to-latest-tag' in self.config['git'] and self.config['git']['diff-defaults-to-latest-tag'] and 'diff-ref' not in self.config['git']: result = self.run('git describe --tags --abbrev=0 --match "servicer-*" HEAD', check=False) if result['status'] == 0: latest_tag = result['stdout'].strip() if latest_tag: self.logger.log('defaulting to latest servicer git tag') self.config['git']['diff-ref'] = latest_tag # TODO: remove this feature in next breaking update if 'default-branch' in self.config['git'] and self.config['git']['default-branch'] and 'diff-ref' not in self.config['git']: if os.environ['BRANCH'] != self.config['git']['default-branch']: self.logger.log('defaulting Git Diff Ref to default-branch') self.config['git']['diff-ref'] = 'origin/%s' % self.config['git']['default-branch'] if 'diff-ref' in self.config['git']: self.logger.log('Git Diff Ref: %s\n' % self.config['git']['diff-ref']) if self.config['git']['ignore-servicer-commits'] and 'diff-ref' in self.config['git']: authors = self.git.authors_for_changes_ahead_of_ref(self.config['git']['diff-ref']) self.logger.log('Commit authors: %s' % authors) if 'servicer' in authors: self.logger.log('Automated servicer changes were detected, skipping this build.') sys.exit(0) def tag_build(self, check_git=True): if check_git: if not 'git' in self.config or not self.config['git']['enabled']: return if not self.config['git']['auto-tag'] or 'BUILD_NUMBER' not in os.environ: return if 'no_tag' in self.config['args'] and self.config['args']['no_tag']: return self.remove_stale_tags() servicer_tag = 
self.servicer_git_tag() if servicer_tag: self.logger.log('Tagging: %s' % servicer_tag) self.git.tag(servicer_tag, push=True) def remove_stale_tags(self): self.logger.log('Removing old tags...') build_tags = [t for t in self.git.list_tags() if t.startswith('servicer-')] self.logger.log('\nexisting servicer tags:', level='debug') self.logger.log('\n'.join(build_tags), level='debug') branches = ['/'.join(b.split('/')[1:]) for b in self.git.list_remote_branches()] tag_prefixes = [self.git.sanitize_tag(b) for b in branches] self.logger.log('\nexisting branch tag prefixes:', level='debug') self.logger.log('\n'.join(tag_prefixes), level='debug') valid_tags = set() for tp in tag_prefixes: prefix = 'servicer-%s' % tp for bt in build_tags: if bt.startswith(prefix): valid_tags.add(bt) tags_to_delete = list(set(build_tags) - valid_tags) self.logger.log('\nstale tags for other branches:', level='debug') self.logger.log('\n'.join(tags_to_delete), level='debug') self.git.delete_tag(tags_to_delete) tag_prefix = self.git.sanitize_tag(os.environ['BRANCH']) build_tags_for_branch = [bt for bt in build_tags if bt.startswith('servicer-%s' % tag_prefix)] self.logger.log('\nstale tags for this branch: %s' % tag_prefix, level='debug') self.logger.log('\n'.join(build_tags_for_branch), level='debug') self.git.delete_tag(build_tags_for_branch) def servicer_git_tag(self): if 'BRANCH' not in os.environ: return sanitized_tag = self.git.sanitize_tag(os.environ['BRANCH']) return 'servicer-%s-%s-%s' % (sanitized_tag, os.environ['BUILD_DATE'], os.environ['BUILD_NUMBER']) def load_service_modules(self): self.print_title('loading service modules') if 'services' in self.config: for name, service in self.config['services'].items(): service['name'] = name active_services = [] if 'service' in self.config['args'] and self.config['args']['service']: active_services.extend(self.config['args']['service'].split(',')) elif 'services' in self.config: active_services.extend(self.config['services'].keys()) self.logger.log('Active Services:\n%s\n' % '\n'.join(active_services)) self.ignore_unchanged_services(active_services) for service_name in active_services: service = self.config['services'][service_name] self.load_service_module(service) return active_services def load_service_module(self, service): service['module'] = None if 'config' not in service: service['config'] = {} if 'service_type' not in service: service['service_type'] = 'service' adapter_path = service['service_type'] if 'provider' in service: adapter_path = '%s/%s' % (service['provider'], adapter_path) self.try_initialize_provider(service['provider'], service) if 'providers' in service: for provider in service['providers']: self.try_initialize_provider(provider, service) adapter_name = adapter_path.replace('/', '.') service_modules = [ { 'name': 'service_adapters.%s' % adapter_name, 'package': 'service_adapters', 'file_path': '%s/service_adapters/%s.py' % (self.config['config_path'], adapter_path), }, { 'file_path': '%s/service_adapters/%s.sh' % (self.config['config_path'], adapter_path), }, { 'name': 'servicer.builtin.service_adapters.%s' % adapter_name, 'package': 'servicer.builtin.service_adapters', 'file_path': '%s/builtin/service_adapters/%s.py' % (self.config['module_path'], adapter_path), }, { 'file_path': '%s/builtin/service_adapters/%s.sh' % (self.config['module_path'], adapter_path), }, ] module = self.load_module_from_paths(service_modules) if isinstance(module, str): service['shell_script'] = module else: service['module'] = module def 
ignore_unchanged_services(self, services): if 'no_ignore_unchanged' in self.config['args'] and self.config['args']['no_ignore_unchanged']: return if not 'git' in self.config or not self.config['git']['enabled'] or not self.config['git']['ignore-unchanged']: return if 'diff-ref' not in self.config['git']: self.logger.log('No GIT_DIFF_REF found, aborting change detection.') return diff_files = self.git.diff(self.config['git']['diff-ref'], name_only=True, merge_base=True) self.logger.log('\nChanged Files:') self.logger.log('\n'.join(diff_files)) # TODO: think through what top level 'watch_paths' means if 'ignore_paths' in self.config['git']: regexes = [self.sanitize_regex(matcher) for matcher in self.config['git']['ignore_paths']] matched_files, diff_files = self.match_regexes(diff_files, regexes) ignored_services = [] for service_name in services: service = self.config['services'][service_name] service_changed_files = diff_files if 'git' in service: if 'watch_paths' in service['git']: watch_regexes = [self.sanitize_regex(matcher) for matcher in service['git']['watch_paths']] self.logger.log('\nService: %s' % service_name, level='debug') self.logger.log('Matchers:', level='debug') service_changed_files, _ = self.match_regexes(diff_files, watch_regexes) if 'ignore_paths' in service['git']: ignore_regexes = [self.sanitize_regex(matcher) for matcher in service['git']['ignore_paths']] _, service_changed_files = self.match_regexes(service_changed_files, ignore_regexes) if len(service_changed_files) > 0: self.logger.log('\nChanged Files:', level='debug') self.logger.log('\n'.join(service_changed_files), level='debug')
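# A hedged illustration of the branch-to-environment mapping implemented by
# map_service_environment() above: a 'branch' entry wrapped in slashes is used
# as a regex verbatim, otherwise '*' becomes '.*' and the pattern is anchored.
# The mappings and branch names below are hypothetical services.yaml content.
import re

mappings = [
    {"branch": "master", "environment": "production"},
    {"branch": "release/*", "environment": "staging"},
    {"branch": "/^hotfix-.*/", "environment": "hotfix"},
]

def _to_regex(branch_pattern):
    if branch_pattern.startswith("/") and branch_pattern.endswith("/"):
        return branch_pattern[1:-1]
    return "^%s$" % branch_pattern.replace("*", ".*")

for branch in ("master", "release/1.4", "hotfix-login", "feature/x"):
    match = next((m for m in mappings if re.match(_to_regex(m["branch"]), branch)), None)
    print(branch, "->", match["environment"] if match else None)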
#!/usr/bin/python3.5 class Playing: clicks = 9 def __init__(self, random, app, who_winner): self.app = app self.random = random self.who_winner = who_winner self.first_player = random.randint(1,2)# فعلا از این قابلیت استفاده نشود self.check_button_list = ['B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8', 'B9'] def onclick_event_B1(self): if self.clicks%2 == 1: self.app.B1['text'] = 'X' self.app.B1['bg'] = 'blue' self.clicks =self.clicks - 1 self.check_button_list.remove('B1') self.who_winner.performance() def onclick_event_B2(self): if self.clicks%2 == 1: self.app.B2['text'] = 'X' self.app.B2['bg'] = 'blue' self.clicks =self.clicks - 1 self.check_button_list.remove('B2') self.who_winner.performance() def onclick_event_B3(self): if self.clicks%2 == 1: self.app.B3['text'] = 'X' self.app.B3['bg'] = 'blue' self.clicks =self.clicks - 1 self.check_button_list.remove('B3') self.who_winner.performance() def onclick_event_B4(self): if self.clicks%2 == 1: self.app.B4['text'] = 'X' self.app.B4['bg'] = 'blue' self.clicks =self.clicks - 1 self.check_button_list.remove('B4') self.who_winner.performance() def onclick_event_B5(self): if self.clicks%2 == 1: self.app.B5['text'] = 'X' self.app.B5['bg'] = 'blue' self.clicks =self.clicks - 1 self.check_button_list.remove('B5') self.who_winner.performance() def onclick_event_B6(self): if self.clicks%2 == 1: self.app.B6['text'] = 'X' self.app.B6['bg'] = 'blue' self.clicks =self.clicks - 1 self.check_button_list.remove('B6') self.who_winner.performance() def onclick_event_B7(self): if self.clicks%2 == 1: self.app.B7['text'] = 'X' self.app.B7['bg'] = 'blue' self.clicks =self.clicks - 1 self.check_button_list.remove('B7') self.who_winner.performance() def onclick_event_B8(self): if self.clicks%2 == 1: self.app.B8['text'] = 'X' self.app.B8['bg'] = 'blue' self.clicks =self.clicks - 1 self.check_button_list.remove('B8') self.who_winner.performance() def onclick_event_B9(self): if self.clicks%2 == 1: self.app.B9['text'] = 'X' self.app.B9['bg'] = 'blue' self.clicks =self.clicks - 1 self.check_button_list.remove('B9') self.who_winner.performance() def compyter_event(self): # ways to win computer if ((self.app.B1['text'] == self.app.B2['text'] == 'O') and (self.app.B3['text'] == '')) and (self.clicks%2 == 0):# row1.1 self.app.B3['text'] = 'O' self.app.B3['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B3') self.who_winner.performance() elif ((self.app.B4['text'] == self.app.B5['text'] == 'O') and (self.app.B6['text'] == '')) and (self.clicks%2 == 0):# row2.1 self.app.B6['text'] = 'O' self.app.B6['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B6') self.who_winner.performance() elif ((self.app.B7['text'] == self.app.B8['text'] == 'O') and (self.app.B9['text'] == '')) and (self.clicks%2 == 0):# row3.1 self.app.B9['text'] = 'O' self.app.B9['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B9') self.who_winner.performance() elif ((self.app.B2['text'] == self.app.B3['text'] == 'O') and (self.app.B1['text'] == '')) and (self.clicks%2 == 0):# row1.2 self.app.B1['text'] = 'O' self.app.B1['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B1') self.who_winner.performance() elif ((self.app.B5['text'] == self.app.B6['text'] == 'O') and (self.app.B4['text'] == '')) and (self.clicks%2 == 0):# row2.2 self.app.B4['text'] = 'O' self.app.B4['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B4') self.who_winner.performance() elif ((self.app.B8['text'] == 
self.app.B9['text'] == 'O') and (self.app.B7['text'] == '')) and (self.clicks%2 == 0):# row3.2 self.app.B7['text'] = 'O' self.app.B7['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B7') self.who_winner.performance() elif ((self.app.B1['text'] == self.app.B3['text'] == 'O') and (self.app.B2['text'] == '')) and (self.clicks%2 == 0):# row1.3 self.app.B2['text'] = 'O' self.app.B2['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B2') self.who_winner.performance() elif ((self.app.B4['text'] == self.app.B6['text'] == 'O') and (self.app.B5['text'] == '')) and (self.clicks%2 == 0):# row2.3 self.app.B5['text'] = 'O' self.app.B5['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B5') self.who_winner.performance() elif ((self.app.B7['text'] == self.app.B9['text'] == 'O') and (self.app.B8['text'] == '')) and (self.clicks%2 == 0):# row3.3 self.app.B8['text'] = 'O' self.app.B8['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B8') self.who_winner.performance() elif ((self.app.B1['text'] == self.app.B4['text'] == 'O') and (self.app.B7['text'] == '')) and (self.clicks%2 == 0):# column1.1 self.app.B7['text'] = 'O' self.app.B7['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B7') self.who_winner.performance() elif ((self.app.B2['text'] == self.app.B5['text'] == 'O') and (self.app.B8['text'] == '')) and (self.clicks%2 == 0):# column1.2 self.app.B8['text'] = 'O' self.app.B8['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B8') self.who_winner.performance() elif ((self.app.B3['text'] == self.app.B6['text'] == 'O') and (self.app.B9['text'] == '')) and (self.clicks%2 == 0):# column1.3 self.app.B9['text'] = 'O' self.app.B9['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B9') self.who_winner.performance() elif ((self.app.B4['text'] == self.app.B7['text'] == 'O') and (self.app.B1['text'] == '')) and (self.clicks%2 == 0):# column1.2 self.app.B1['text'] = 'O' self.app.B1['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B1') self.who_winner.performance() elif ((self.app.B5['text'] == self.app.B8['text'] == 'O') and (self.app.B2['text'] == '')) and (self.clicks%2 == 0):# column2.2 self.app.B2['text'] = 'O' self.app.B2['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B2') self.who_winner.performance() elif ((self.app.B6['text'] == self.app.B9['text'] == 'O') and (self.app.B3['text'] == '')) and (self.clicks%2 == 0):# column3.2 self.app.B3['text'] = 'O' self.app.B3['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B3') self.who_winner.performance() elif ((self.app.B1['text'] == self.app.B7['text'] == 'O') and (self.app.B4['text'] == '')) and (self.clicks%2 == 0):# column1.3 self.app.B4['text'] = 'O' self.app.B4['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B4') self.who_winner.performance() elif ((self.app.B2['text'] == self.app.B8['text'] == 'O') and (self.app.B5['text'] == '')) and (self.clicks%2 == 0):# column2.3 self.app.B5['text'] = 'O' self.app.B5['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B5') self.who_winner.performance() elif ((self.app.B3['text'] == self.app.B9['text'] == 'O') and (self.app.B6['text'] == '')) and (self.clicks%2 == 0):# column3.3 self.app.B6['text'] = 'O' self.app.B6['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B6') self.who_winner.performance() elif 
((self.app.B1['text'] == self.app.B5['text'] == 'O') and (self.app.B9['text'] == '')) and (self.clicks%2 == 0):# diameter1.1 self.app.B9['text'] = 'O' self.app.B9['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B9') self.who_winner.performance() elif ((self.app.B3['text'] == self.app.B5['text'] == 'O') and (self.app.B7['text'] == '')) and (self.clicks%2 == 0):# diameter2.1 self.app.B7['text'] = 'O' self.app.B7['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B7') self.who_winner.performance() elif ((self.app.B5['text'] == self.app.B9['text'] == 'O') and (self.app.B1['text'] == '')) and (self.clicks%2 == 0):# diameter1.2 self.app.B1['text'] = 'O' self.app.B1['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B1') self.who_winner.performance() elif ((self.app.B5['text'] == self.app.B7['text'] == 'O') and (self.app.B3['text'] == '')) and (self.clicks%2 == 0):# diameter2.2 self.app.B3['text'] = 'O' self.app.B3['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B3') self.who_winner.performance() # ways to lose computer elif ((self.app.B1['text'] == self.app.B2['text'] == 'X') and (self.app.B3['text'] == '')) and (self.clicks%2==0):# row1.1 self.app.B3['text'] = 'O' self.app.B3['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B3') self.who_winner.performance() elif ((self.app.B4['text'] == self.app.B5['text'] == 'X') and (self.app.B6['text'] == '')) and (self.clicks%2 == 0):# row2.1 self.app.B6['text'] = 'O' self.app.B6['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B6') self.who_winner.performance() elif ((self.app.B7['text'] == self.app.B8['text'] == 'X') and (self.app.B9['text'] == '')) and (self.clicks%2 == 0):# row3.1 self.app.B9['text'] = 'O' self.app.B9['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B9') self.who_winner.performance() elif ((self.app.B2['text'] == self.app.B3['text'] == 'X') and (self.app.B1['text'] == '')) and (self.clicks%2 == 0):# row1.2 self.app.B1['text'] = 'O' self.app.B1['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B1') self.who_winner.performance() elif ((self.app.B5['text'] == self.app.B6['text'] == 'X') and (self.app.B4['text'] == '')) and (self.clicks%2 == 0):# row2.2 self.app.B4['text'] = 'O' self.app.B4['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B4') self.who_winner.performance() elif ((self.app.B8['text'] == self.app.B9['text'] == 'X') and (self.app.B7['text'] == '')) and (self.clicks%2 == 0):# row3.2 self.app.B7['text'] = 'O' self.app.B7['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B7') self.who_winner.performance() elif ((self.app.B1['text'] == self.app.B3['text'] == 'X') and (self.app.B2['text'] == '')) and (self.clicks%2 == 0):# row1.3 self.app.B2['text'] = 'O' self.app.B2['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B2') self.who_winner.performance() elif ((self.app.B4['text'] == self.app.B6['text'] == 'X') and (self.app.B5['text'] == '')) and (self.clicks%2 == 0):# row2.3 self.app.B5['text'] = 'O' self.app.B5['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B5') self.who_winner.performance() elif ((self.app.B7['text'] == self.app.B9['text'] == 'X') and (self.app.B8['text'] == '')) and (self.clicks%2 == 0):# row3.3 self.app.B8['text'] = 'O' self.app.B8['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B8') 
self.who_winner.performance() elif ((self.app.B1['text'] == self.app.B4['text'] == 'X') and (self.app.B7['text'] == '')) and (self.clicks%2 == 0):# column1.1 self.app.B7['text'] = 'O' self.app.B7['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B7') self.who_winner.performance() elif ((self.app.B2['text'] == self.app.B5['text'] == 'X') and (self.app.B8['text'] == '')) and (self.clicks%2 == 0):# column1.2 self.app.B8['text'] = 'O' self.app.B8['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B8') self.who_winner.performance() elif ((self.app.B3['text'] == self.app.B6['text'] == 'X') and (self.app.B9['text'] == '')) and (self.clicks%2 == 0):# column1.3 self.app.B9['text'] = 'O' self.app.B9['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B9') self.who_winner.performance() elif ((self.app.B4['text'] == self.app.B7['text'] == 'X') and (self.app.B1['text'] == '')) and (self.clicks%2 == 0):# column1.2 self.app.B1['text'] = 'O' self.app.B1['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B1') self.who_winner.performance() elif ((self.app.B5['text'] == self.app.B8['text'] == 'X') and (self.app.B2['text'] == '')) and (self.clicks%2 == 0):# column2.2 self.app.B2['text'] = 'O' self.app.B2['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B2') self.who_winner.performance() elif ((self.app.B6['text'] == self.app.B9['text'] == 'X') and (self.app.B3['text'] == '')) and (self.clicks%2 == 0):# column3.2 self.app.B3['text'] = 'O' self.app.B3['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B3') self.who_winner.performance() elif ((self.app.B1['text'] == self.app.B7['text'] == 'X') and (self.app.B4['text'] == '')) and (self.clicks%2 == 0):# column1.3 self.app.B4['text'] = 'O' self.app.B4['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B4') self.who_winner.performance() elif ((self.app.B2['text'] == self.app.B8['text'] == 'X') and (self.app.B5['text'] == '')) and (self.clicks%2 == 0):# column2.3 self.app.B5['text'] = 'O' self.app.B5['bg'] = 'red' self.clicks =self.clicks - 1 self.check_button_list.remove('B5') self.who_winner.performance() elif ((self.app.B3['text'] == self.app.B9['text'] == 'X') and (self.app.B6['text'] == '')) and (self.clicks%2 == 0):# column3.3 self.app.B6['text']
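# A hedged refactoring sketch (not part of the original game): the long
# if/elif chain above checks every row, column and diagonal twice, first to
# complete a line of 'O' and then to block a line of 'X'.  The same logic can
# be expressed with the eight winning lines as data plus one helper; the
# `board` dictionary below is a toy stand-in for the button widgets.
LINES = [
    ("B1", "B2", "B3"), ("B4", "B5", "B6"), ("B7", "B8", "B9"),   # rows
    ("B1", "B4", "B7"), ("B2", "B5", "B8"), ("B3", "B6", "B9"),   # columns
    ("B1", "B5", "B9"), ("B3", "B5", "B7"),                       # diagonals
]

def find_move(board, mark):
    """Return the empty cell completing a line of `mark`, or None."""
    for line in LINES:
        values = [board[name] for name in line]
        if values.count(mark) == 2 and values.count("") == 1:
            return line[values.index("")]
    return None

_board = {"B1": "O", "B2": "O", "B3": "", "B4": "", "B5": "",
          "B6": "", "B7": "", "B8": "", "B9": ""}
print(find_move(_board, "O"))   # -> B3 (winning move for the computer)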
<filename>dorado/lagrangian_walker.py<gh_stars>0 # -*- coding: utf-8 -*- """ Core functions to handle the Lagrangian random walk movement of the particles. Project Homepage: https://github.com/passaH2O/dorado """ from __future__ import division, print_function, absolute_import from builtins import range, map from math import cos import numpy as np from numpy.random import random def random_pick_seed(choices, probs=None): """Randomly pick a number from array of choices. **Inputs** : choices : `ndarray` Array of possible values to draw from probs : `ndarray` *Optional*, can add weighted probabilities to draw **Outputs** : choices[idx] : `int` The randomly chosen value """ # randomly pick tracer drop cell to use given a list of potential spots if not probs: probs = np.array([1 for i in list(range(len(choices)))]) # find the corresp. index value from input 'choices' list of indices cutoffs = np.cumsum(probs) idx = cutoffs.searchsorted(np.random.uniform(0, cutoffs[-1])) return choices[idx] def get_weight(Particles, ind): """Assign weights to cells surrounding current index. Function to assign weights to the surrounding 8 cells around the current index and randomly choose one of those cells. **Inputs** : Particles : :obj:`dorado.particle_track.Particles` A :obj:`dorado.particle_track.Particles` object ind : `tuple` Tuple (x,y) with the current location indices **Outputs** : new_cell : `int` New location given as a value between 1 and 8 (inclusive) """ # pull surrounding cell values from stage array stage_ind = Particles.stage[ind[0]-1:ind[0]+2, ind[1]-1:ind[1]+2] # define water surface gradient weight component (minimum of 0) weight_sfc = np.maximum(0, (Particles.stage[ind] - stage_ind) / Particles.distances) # define flow inertial weighting component (minimum of 0) weight_int = np.maximum(0, (Particles.qx[ind] * Particles.jvec + Particles.qy[ind] * Particles.ivec) / Particles.distances) # pull surrounding cell values from depth and cell type arrays depth_ind = Particles.depth[ind[0]-1:ind[0]+2, ind[1]-1:ind[1]+2] ct_ind = Particles.cell_type[ind[0]-1:ind[0]+2, ind[1]-1:ind[1]+2] # if the depth is below minimum depth for cell to be weight # or it is a cell type that is not water, then make it impossible for # the parcel to travel there by setting associated weight to 0 weight_sfc[(depth_ind <= Particles.dry_depth) | (ct_ind == 2)] = 0 weight_int[(depth_ind <= Particles.dry_depth) | (ct_ind == 2)] = 0 # if sum of weights is above 0 normalize by sum of weights if np.nansum(weight_sfc) > 0: weight_sfc = weight_sfc / np.nansum(weight_sfc) # if sum of weight is above 0 normalize by sum of weights if np.nansum(weight_int) > 0: weight_int = weight_int / np.nansum(weight_int) # define actual weight by using gamma, and defined weight components Particles.weight = Particles.gamma * weight_sfc + \ (1 - Particles.gamma) * weight_int # modify the weight by the depth and theta weighting parameter Particles.weight = depth_ind ** Particles.theta * Particles.weight # if the depth is below the minimum depth then location is not # considered therefore set the associated weight to nan Particles.weight[(depth_ind <= Particles.dry_depth) | (ct_ind == 2)] \ = np.nan # if it's a dead end with only nans and 0's, choose deepest cell if np.nansum(Particles.weight) <= 0: Particles.weight = np.zeros_like(Particles.weight) Particles.weight[depth_ind == np.max(depth_ind)] = 1.0 # randomly pick the new cell for the particle to move to using the # random_pick function and the set of weights just defined if 
Particles.steepest_descent is not True: new_cell = random_pick(Particles.weight) elif Particles.steepest_descent is True: new_cell = steep_descent(Particles.weight) return new_cell def calculate_new_ind(ind, new_cell, iwalk, jwalk): """Add new cell location (1-8 value) to the previous index. **Inputs** : ind : `tuple` Tuple (x,y) of old particle location new_cell : `int` Integer 1-8 indicating new cell location relative to the old one in a D-8 sense iwalk : `ndarray` A D8 array with the positive and negative x directions jwalk : `ndarray` A D8 array with the positive and negative y directions **Outputs** : new_ind : `tuple` tuple (x,y) of the new particle location """ # add the index and the flattened x and y walk component # x,y walk component is related to the next cell chosen as a # 1-8 location new_ind = (ind[0] + jwalk.flat[new_cell], ind[1] + iwalk.flat[new_cell]) return new_ind def step_update(new_cell, distances, dx): """Get distance to new particle location. Function to check new location is some distance away from old one, also provides way to track the travel distance of the particles **Inputs** : new_cell : `int` Integer 1-8 indicating new location in D-8 way distances : `ndarray` D8 distances between cells dx : `float` Length along one square cell face **Outputs** : dist : `float` Distance between current (old) and new particle location """ # compute the step distance to be taken dist = distances.flat[new_cell]*float(dx) return dist def calc_travel_times(Particles, new_cell, ind, new_ind, dist): """Calculate travel time for particle to get to the new location. Function to calculate the travel time for the particle to get from the current location to the new location. Calculated by taking the inverse of the velocity at the old and new locations. **Inputs** : Particles : :obj:`dorado.particle_track.Particles` A :obj:`dorado.particle_track.Particles` object new_cell : `int` Integer 1-8 indicating new location in D-8 way ind : `tuple` Tuple (x,y) of the current location new_ind : `tuple` Tuple (x,y) of the new location dist : `float` Distance between current (old) and new particle location **Outputs** : trav_time : `float` Travel time it takes the particle to get from the current location to the new proposed location using the inverse of the average velocity """ # make sure the new location is different from the current one if ind != new_ind: # get old position velocity value old_vel = Particles.velocity[ind[0], ind[1]] # new position velocity value new_vel = Particles.velocity[new_ind[0], new_ind[1]] # Compute diffusion term # (sample uniform distribution centered at 0 and diff_coeff wide) if Particles.diff_coeff > 0: diff = (0.5 - random())*Particles.diff_coeff else: diff = 0.0 # Compute distance traveled in the orientation of mean flow path # If we moved backwards/orthogonal, step was instantaneous projected_dist = dist*max(0, cos(Particles.angles.flat[new_cell] - Particles.velocity_angle[ind[0], ind[1]])) # Compute average velocity over step trav_time = 0.5*projected_dist*(1/old_vel + 1/new_vel)*(1+diff) else: trav_time = 0 # particle did not move return trav_time def check_for_boundary(new_inds, current_inds, cell_type): """Ensure new location is not a boundary cell. Function to make sure particle is not exiting the boundary with the proposed new location. 
**Inputs** : new_inds : `list` List [] of tuples (x,y) of new indices current_inds : `list` List [] of tuples (x,y) of old indices cell_type : `numpy.ndarray` Array of the different types of cells in the domain where 2 = land, 1 = channel, 0 = ocean, and -1 = edge. If not explicitly defined then the values are estimated based on the depth array and the defined dry_depth **Outputs** : new_inds : `list` list [] of tuples (x,y) of new indices where any proposed indices outside of the domain have been replaced by the old indices so those particles will not travel this iteration """ # Check if the new indices are on an edge (type==-1) # If so, then stop moving particle for i in range(0, len(new_inds)): # If particle borders an edge, cancel out any additional steps if -1 in cell_type[current_inds[i][0]-1:current_inds[i][0]+2, current_inds[i][1]-1:current_inds[i][1]+2]: new_inds[i][0] = current_inds[i][0] new_inds[i][1] = current_inds[i][1] return new_inds def random_pick(probs): """Pick value from weighted probability array. Randomly pick a number weighted by array probs (len 8) Return the index of the selected weight in array probs **Inputs** : probs : `list` 8 values indicating the probability (weight) associated with the surrounding cells for the random walk **Outputs** : idx : `int` 1-8 value chosen randomly based on the weighted probabilities """ probs[np.isnan(probs)] = 0 # any nans are assigned as 0 cutoffs = np.cumsum(probs) # cumulative sum of all probabilities # randomly pick indices from cutoffs based on uniform distribution idx = cutoffs.searchsorted(np.random.uniform(0, cutoffs[-1])) return idx def steep_descent(probs): """Choose value with greatest probability, no longer random. Pick the array value with the greatest probability, no longer a stochastic process, instead just choosing the steepest descent **Inputs** : probs : `float` 8 values indicating probability (weight) associated with the surrounding cells **Outputs** : idx : `int` 1-8 value chosen by greatest probs """ max_val = np.nanmax(probs) #
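# A hedged worked example of the weighted draw used by random_pick() above:
# NaNs are zeroed, the neighbourhood weights (stored as a flattened 3x3
# stencil) are turned into a cumulative sum, and searchsorted maps a uniform
# draw back to a cell index, so each cell is picked with probability
# weight/total.  The weights below are made-up toy values.
import numpy as np

weights = np.array([0., 0.1, np.nan, 0.4, 0., 0.3, 0.2, 0., 0.])
weights[np.isnan(weights)] = 0
cutoffs = np.cumsum(weights)
draws = cutoffs.searchsorted(np.random.uniform(0, cutoffs[-1], size=10000))
print(np.bincount(draws, minlength=len(weights)) / 10000.)   # ~[0, .1, 0, .4, 0, .3, .2, 0, 0]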
MA', 'pt': 'Lajeado Novo - MA'}, '55993586':{'en': 'Ribamar Fiquene - MA', 'pt': 'Ribamar Fiquene - MA'}, '55993587':{'en': u('S\u00e3o Francisco do Brej\u00e3o - MA'), 'pt': u('S\u00e3o Francisco do Brej\u00e3o - MA')}, '55993592':{'en': u('A\u00e7ail\u00e2ndia - MA'), 'pt': u('A\u00e7ail\u00e2ndia - MA')}, '55993601':{'en': u('Feira Nova do Maranh\u00e3o - MA'), 'pt': u('Feira Nova do Maranh\u00e3o - MA')}, '55993602':{'en': 'Nova Colinas - MA', 'pt': 'Nova Colinas - MA'}, '55993604':{'en': u('S\u00e3o Pedro dos Crentes - MA'), 'pt': u('S\u00e3o Pedro dos Crentes - MA')}, '55993613':{'en': u('Graja\u00fa - MA'), 'pt': u('Graja\u00fa - MA')}, '55993614':{'en': u('Itaipava do Graja\u00fa - MA'), 'pt': u('Itaipava do Graja\u00fa - MA')}, '55993621':{'en': 'Bacabal - MA', 'pt': 'Bacabal - MA'}, '55993622':{'en': 'Bacabal - MA', 'pt': 'Bacabal - MA'}, '55993623':{'en': 'Bom Lugar - MA', 'pt': 'Bom Lugar - MA'}, '55993626':{'en': 'Pedreiras - MA', 'pt': 'Pedreiras - MA'}, '55993627':{'en': 'Bacabal - MA', 'pt': 'Bacabal - MA'}, '55993631':{'en': u('S\u00e3o Lu\u00eds Gonzaga do Maranh\u00e3o - MA'), 'pt': u('S\u00e3o Lu\u00eds Gonzaga do Maranh\u00e3o - MA')}, '55993632':{'en': '<NAME> - MA', 'pt': 'Lago dos Rodrigues - MA'}, '55993633':{'en': u('Lagoa Grande do Maranh\u00e3o - MA'), 'pt': u('Lagoa Grande do Maranh\u00e3o - MA')}, '55993634':{'en': 'Lago do Junco - MA', 'pt': 'Lago do Junco - MA'}, '55993635':{'en': '<NAME> - MA', 'pt': 'Lago Verde - MA'}, '55993636':{'en': u('Po\u00e7\u00e3o de Pedras - MA'), 'pt': u('Po\u00e7\u00e3o de Pedras - MA')}, '55993637':{'en': u('Josel\u00e2ndia - MA'), 'pt': u('Josel\u00e2ndia - MA')}, '55993638':{'en': u('Alto Alegre do Maranh\u00e3o - MA'), 'pt': u('Alto Alegre do Maranh\u00e3o - MA')}, '55993639':{'en': u('S\u00e3o Mateus do Maranh\u00e3o - MA'), 'pt': u('S\u00e3o Mateus do Maranh\u00e3o - MA')}, '55993641':{'en': u('Coroat\u00e1 - MA'), 'pt': u('Coroat\u00e1 - MA')}, '55993642':{'en': 'Pedreiras - MA', 'pt': 'Pedreiras - MA'}, '55993643':{'en': 'Barra do Corda - MA', 'pt': 'Barra do Corda - MA'}, '55993644':{'en': 'Lago da Pedra - MA', 'pt': 'Lago da Pedra - MA'}, '55993645':{'en': u('Esperantin\u00f3polis - MA'), 'pt': u('Esperantin\u00f3polis - MA')}, '55993646':{'en': 'Lima Campos - MA', 'pt': 'Lima Campos - MA'}, '55993647':{'en': u('Igarap\u00e9 Grande - MA'), 'pt': u('Igarap\u00e9 Grande - MA')}, '55993648':{'en': '<NAME> Mearim - MA', 'pt': 'B<NAME> Mearim - MA'}, '55993649':{'en': u('Peritor\u00f3 - MA'), 'pt': u('Peritor\u00f3 - MA')}, '55993661':{'en': u('Cod\u00f3 - MA'), 'pt': u('Cod\u00f3 - MA')}, '55993662':{'en': '<NAME> - MA', 'pt': 'Dom Pedro - MA'}, '55993663':{'en': 'Presidente Dutra - MA', 'pt': 'Presidente Dutra - MA'}, '55993665':{'en': 'Capinzal do Norte - MA', 'pt': 'Capinzal do Norte - MA'}, '55993666':{'en': u('Santo Ant\u00f4nio dos Lopes - MA'), 'pt': u('Santo Ant\u00f4nio dos Lopes - MA')}, '55993667':{'en': 'Governador Archer - MA', 'pt': 'Governador Archer - MA'}, '55993668':{'en': 'Timbiras - MA', 'pt': 'Timbiras - MA'}, '55994102':{'en': 'Imperatriz - MA', 'pt': 'Imperatriz - MA'}, '56211':{'en': 'Santiago, Metropolitan Region', 'es': u('Santiago, Regi\u00f3n Metropolitana')}, '562198':{'en': 'Santiago, Metropolitan Region', 'es': u('Santiago, Regi\u00f3n Metropolitana')}, '5622':{'en': 'Santiago, Metropolitan Region', 'es': u('Santiago, Regi\u00f3n Metropolitana')}, '5623':{'en': 'Santiago, Metropolitan Region', 'es': u('Santiago, Regi\u00f3n Metropolitana')}, '5632':{'en': u('Valpara\u00edso'), 'es': 
u('Valpara\u00edso')}, '5633':{'en': u('Quillota, Valpara\u00edso'), 'es': u('Quillota, Valpara\u00edso')}, '5634':{'en': u('San Felipe, Valpara\u00edso'), 'es': u('San Felipe, Valpara\u00edso')}, '5635':{'en': u('San Antonio, Valpara\u00edso'), 'es': u('San Antonio, Valpara\u00edso')}, '5641':{'en': u('Concepci\u00f3n, Biob\u00edo'), 'es': u('Concepci\u00f3n, Biob\u00edo')}, '5642':{'en': u('Chill\u00e1n, Biob\u00edo'), 'es': u('Chill\u00e1n, Biob\u00edo')}, '5643':{'en': u('Los Angeles, Biob\u00edo'), 'es': u('Los Angeles, Biob\u00edo')}, '5645':{'en': u('Temuco, Araucan\u00eda'), 'es': u('Temuco, Araucan\u00eda')}, '5651':{'en': 'La Serena, Coquimbo', 'es': 'La Serena, Coquimbo'}, '5652':{'en': u('Copiap\u00f3, Atacama'), 'es': u('Copiap\u00f3, Atacama')}, '56530':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '56531':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '565320':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '565321':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '565322':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '565323':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '5653240':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '5653241':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '5653242':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '5653243':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '5653244':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '56532452':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '56532453':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '56532454':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '56532455':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '56532456':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '56532457':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '56532458':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '56532459':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '5653246':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '5653247':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '5653248':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '5653249':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '565325':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '565326':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '565327':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '565328':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '565329':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '56533':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '56534':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '56535':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '56536':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '56537':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '56538':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '56539':{'en': 'Ovalle, Coquimbo', 'es': 'Ovalle, Coquimbo'}, '5655':{'en': 'Antofagasta', 'es': 'Antofagasta'}, '5657':{'en': u('Iquique, Tarapac\u00e1'), 'es': u('Iquique, Tarapac\u00e1')}, '5658':{'en': 'Arica, Arica and Parinacota', 'es': 'Arica, Arica y Parinacota'}, '5661':{'en': u('Punta Arenas, Magallanes and Ant\u00e1rtica Chilena'), 'es': 'Punta Arenas, Magallanes'}, '5663':{'en': u('Valdivia, Los R\u00edos'), 'es': u('Valdivia, Los R\u00edos')}, '5664':{'en': 'Osorno, Los Lagos', 'es': 'Osorno, Los Lagos'}, '5665':{'en': 'Puerto Montt, Los Lagos', 'es': 'Puerto Montt, Los Lagos'}, '5667':{'en': 
u('Coyhaique, Ais\u00e9n'), 'es': u('Coihaique, Ays\u00e9n')}, '5671':{'en': 'Talca, Maule', 'es': 'Talca, Maule'}, '5672':{'en': 'Rancagua, O\'Higgins', 'es': 'Rancagua, O\'Higgins'}, '5673':{'en': 'Linares, Maule', 'es': 'Linares, Maule'}, '5675':{'en': u('Curic\u00f3, Maule'), 'es': u('Curic\u00f3, Maule')}, '5712':{'en': u('Bogot\u00e1'), 'es': u('Bogot\u00e1')}, '5713':{'en': u('Bogot\u00e1'), 'es': u('Bogot\u00e1')}, '5714':{'en': u('Bogot\u00e1'), 'es': u('Bogot\u00e1')}, '5715':{'en': u('Bogot\u00e1'), 'es': u('Bogot\u00e1')}, '5716':{'en': u('Bogot\u00e1'), 'es': u('Bogot\u00e1')}, '5717':{'en': u('Bogot\u00e1'), 'es': u('Bogot\u00e1')}, '571820':{'en': 'Madrid', 'es': 'Madrid'}, '571821':{'en': 'Funza', 'es': 'Funza'}, '571822':{'en': 'Funza', 'es': 'Funza'}, '5718230':{'en': 'Subachoque', 'es': 'Subachoque'}, '5718232':{'en': 'Funza', 'es': 'Funza'}, '5718240':{'en': 'El Rosal', 'es': 'El Rosal'}, '5718241':{'en': 'El Rosal', 'es': 'El Rosal'}, '57182420':{'en': 'La Pradera', 'es': 'La Pradera'}, '57182428':{'en': 'Subachoque', 'es': 'Subachoque'}, '57182429':{'en': 'Subachique', 'es': 'Subachique'}, '5718243':{'en': 'Bojaca', 'es': 'Bojaca'}, '5718245':{'en': 'Subachoque', 'es': 'Subachoque'}, '5718246':{'en': 'Puente Piedra', 'es': 'Puente Piedra'}, '5718247':{'en': 'La Punta', 'es': 'La Punta'}, '5718249':{'en': 'Zipacon', 'es': 'Zipacon'}, '5718250':{'en': 'Madrid', 'es': 'Madrid'}, '5718251':{'en': 'Madrid', 'es': 'Madrid'}, '5718252':{'en': 'Madrid', 'es': 'Madrid'}, '5718253':{'en': 'Madrid', 'es': 'Madrid'}, '5718254':{'en': 'Madrid', 'es': 'Madrid'}, '5718255':{'en': 'Madrid', 'es': 'Madrid'}, '5718256':{'en': 'Madrid', 'es': 'Madrid'}, '5718257':{'en': 'Funza', 'es': 'Funza'}, '571826':{'en': 'Funza', 'es': 'Funza'}, '571827':{'en': 'Mosquera', 'es': 'Mosquera'}, '5718283':{'en': 'Mosquera', 'es': 'Mosquera'}, '5718288':{'en': 'Madrid', 'es': 'Madrid'}, '5718289':{'en': 'Madrid', 'es': 'Madrid'}, '571830':{'en': 'Girardot', 'es': 'Girardot'}, '571831':{'en': 'Girardot', 'es': 'Girardot'}, '571832':{'en': 'Girardot', 'es': 'Girardot'}, '571833':{'en': 'Girardot', 'es': 'Girardot'}, '5718370':{'en': u('Jerusal\u00e9n'), 'es': u('Jerusal\u00e9n')}, '5718371':{'en': 'Guataqui', 'es': 'Guataqui'}, '5718373':{'en': u('Beltr\u00e1n'), 'es': u('Beltr\u00e1n')}, '5718375':{'en': u('Nari\u00f1o'), 'es': u('Nari\u00f1o')}, '5718376':{'en': 'Tocaima', 'es': 'Tocaima'}, '5718381':{'en': '<NAME>', 'es': 'Agua de Dios'}, '5718383':{'en': 'Nilo', 'es': 'Nilo'}, '5718384':{'en': 'Viota', 'es': 'Viota'}, '5718385':{'en': u('Nari\u00f1o'), 'es': u('Nari\u00f1o')}, '5718386':{'en': 'Apulo', 'es': 'Apulo'}, '57183925':{'en': 'Nilo', 'es': 'Nilo'}, '57183926':{'en': 'Nilo', 'es': 'Nilo'}, '57183927':{'en': 'Nilo', 'es': 'Nilo'}, '57183928':{'en': 'Nilo', 'es': 'Nilo'}, '57183929':{'en': 'La Esmeralda', 'es': 'La Esmeralda'}, '5718393':{'en': 'Girardot', 'es': 'Girardot'}, '5718397':{'en': 'Apulo', 'es': 'Apulo'}, '5718398':{'en': 'Apulo', 'es': 'Apulo'}, '5718402':{'en': 'San Antonio de Tequendama', 'es': 'San Antonio de Tequendama'}, '5718403':{'en': 'Choachi', 'es': 'Choachi'}, '5718404':{'en': 'Fomeque', 'es': 'Fomeque'}, '5718412':{'en': u('Santa In\u00e9s'), 'es': u('Santa In\u00e9s')}, '5718416':{'en': 'Guaduas', 'es': 'Guaduas'}, '5718417':{'en': 'Guaduas', 'es': 'Guaduas'}, '5718419':{'en': 'Pandi', 'es': 'Pandi'}, '571842':{'en': 'Facatativa', 'es': 'Facatativa'}, '5718430':{'en': 'Facatativa', 'es': 'Facatativa'}, '5718431':{'en': 'Facatativa', 'es': 'Facatativa'}, 
'57184330':{'en': 'Ninaima', 'es': 'Ninaima'}, '57184331':{'en': 'Ninaima', 'es': 'Ninaima'}, '57184332':{'en': 'Ninaima', 'es': 'Ninaima'}, '57184333':{'en': 'Tobia', 'es': 'Tobia'}, '57184334':{'en': 'Tobia', 'es': 'Tobia'}, '5718434':{'en': 'Cartagenita', 'es': 'Cartagenita'}, '5718435':{'en': 'Cartagenita', 'es': 'Cartagenita'}, '5718436':{'en': 'Facatativa', 'es': 'Facatativa'}, '5718437':{'en': 'Facatativa', 'es': 'Facatativa'}, '5718438':{'en': 'Facatativa', 'es': 'Facatativa'}, '5718439':{'en': 'Facatativa', 'es': 'Facatativa'}, '5718440':{'en': 'Facatativa', 'es': 'Facatativa'}, '5718441':{'en': 'Viani', 'es': 'Viani'}, '5718442':{'en': 'Cachipay', 'es': 'Cachipay'}, '5718443':{'en': 'Cachipay', 'es': 'Cachipay'}, '5718444':{'en': 'Villeta', 'es': 'Villeta'}, '5718445':{'en': 'Villeta', 'es': 'Villeta'}, '5718446':{'en': 'Villeta', 'es': 'Villeta'}, '5718447':{'en': 'Villeta', 'es': 'Villeta'}, '5718449':{'en': u('La Pe\u00f1a'), 'es': u('La Pe\u00f1a')}, '5718450':{'en': 'San Antonio de Tequendama', 'es': 'San Antonio de Tequendama'}, '5718451':{'en': 'Nocaima', 'es': 'Nocaima'}, '571845340':{'en': 'La Florida', 'es': 'La Florida'}, '571845341':{'en': 'La Florida', 'es': 'La Florida'}, '571845342':{'en': 'La Florida', 'es': 'La Florida'}, '571845343':{'en': 'La Florida', 'es': 'La Florida'}, '571845344':{'en': 'La Florida', 'es': 'La Florida'}, '571845345':{'en': 'La Florida', 'es': 'La Florida'}, '5718480':{'en': 'Quebradanegra', 'es': 'Quebradanegra'}, '5718481':{'en': 'Quebradanegra', 'es': 'Quebradanegra'}, '5718482':{'en': 'La Magdalena', 'es': 'La Magdalena'}, '57230':{'en': 'Cali', 'es': 'Cali'}, '57231':{'en': 'Cali', 'es': 'Cali'}, '57232':{'en': 'Cali', 'es': 'Cali'}, '57233':{'en': 'Cali', 'es': 'Cali'}, '57234':{'en': 'Cali', 'es': 'Cali'}, '57235':{'en': 'Cali', 'es': 'Cali'}, '57236':{'en': 'Cali', 'es': 'Cali'}, '57272':{'en': 'Pasto', 'es': 'Pasto'}, '57273':{'en': 'Pasto', 'es': 'Pasto'}, '57288':{'en': 'Cali', 'es': 'Cali'}, '57289':{'en':
with ' 'repositories that support server-side ' 'changesets.', }, 'submit_as': { 'type': str, 'description': 'The optional user to submit the review ' 'request as. This requires that the actual ' 'logged in user is either a superuser or has ' 'the "reviews.can_submit_as_another_user" ' 'permission.', }, }) def create(self, request, repository, submit_as=None, changenum=None, local_site_name=None, *args, **kwargs): """Creates a new review request. The new review request will start off as private and pending, and will normally be blank. However, if ``changenum`` is passed and the given repository both supports server-side changesets and has changeset support in Review Board, some details (Summary, Description and Testing Done sections, for instance) may be automatically filled in from the server. Any new review request will have an associated draft (reachable through the ``draft`` link). All the details of the review request must be set through the draft. The new review request will be public when that first draft is published. The only requirement when creating a review request is that a valid repository is passed. This can be a numeric repository ID, the name of a repository, or the path to a repository (matching exactly the registered repository's Path or Mirror Path fields in the adminstration interface). Failing to pass a valid repository will result in an error. Clients can create review requests on behalf of another user by setting the ``submit_as`` parameter to the username of the desired user. This requires that the client is currently logged in as a user that has the ``reviews.can_submit_as_another_user`` permission set. This capability is useful when writing automation scripts, such as post-commit hooks, that need to create review requests for another user. """ user = request.user local_site = _get_local_site(local_site_name) if submit_as and user.username != submit_as: if not user.has_perm('reviews.can_submit_as_another_user'): return _no_access_error(request.user) try: user = User.objects.get(username=submit_as) except User.DoesNotExist: return INVALID_USER try: try: repository = Repository.objects.get(pk=int(repository), local_site=local_site) except ValueError: # The repository is not an ID. repository = Repository.objects.get( (Q(path=repository) | Q(mirror_path=repository) | Q(name=repository)) & Q(local_site=local_site)) except Repository.DoesNotExist, e: return INVALID_REPOSITORY, { 'repository': repository } if not repository.is_accessible_by(request.user): return _no_access_error(request.user) try: review_request = ReviewRequest.objects.create(user, repository, changenum, local_site) return 201, { self.item_result_key: review_request } except AuthenticationError: return REPO_AUTHENTICATION_ERROR except RepositoryNotFoundError: return MISSING_REPOSITORY except ChangeNumberInUseError, e: return CHANGE_NUMBER_IN_USE, { 'review_request': e.review_request } except InvalidChangeNumberError: return INVALID_CHANGE_NUMBER except EmptyChangeSetError: return EMPTY_CHANGESET except SCMError, e: logging.error("Got unexpected SCMError when creating repository: %s" % e, exc_info=1) return REPO_INFO_ERROR @webapi_check_local_site @webapi_login_required @webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED) @webapi_request_fields( optional={ 'status': { 'type': ('discarded', 'pending', 'submitted'), 'description': 'The status of the review request. 
This can ' 'be changed to close or reopen the review ' 'request', }, 'changenum': { 'type': int, 'description': 'The optional changenumber to set or update. ' 'This can be used to re-associate with a new ' 'change number, or to create/update a draft ' 'with new information from the current ' 'change number. This only works with ' 'repositories that support server-side ' 'changesets.', }, 'description': { 'type': str, 'description': 'The description of the update. Should only be ' 'used if the review request have been submitted ' 'or discarded.', }, }, ) def update(self, request, status=None, changenum=None, description=None, *args, **kwargs): """Updates the status of the review request. The only supported update to a review request's resource is to change the status, the associated server-side, change number, or to update information from the existing change number. The status can be set in order to close the review request as discarded or submitted, or to reopen as pending. The change number can either be changed to a new number, or the current change number can be passed. In either case, a new draft will be created or an existing one updated to include information from the server based on the change number. Changes to a review request's fields, such as the summary or the list of reviewers, is made on the Review Request Draft resource. This can be accessed through the ``draft`` link. Only when that draft is published will the changes end up back in this resource. """ try: review_request = \ review_request_resource.get_object(request, *args, **kwargs) except ObjectDoesNotExist: return DOES_NOT_EXIST if not self.has_modify_permissions(request, review_request): return _no_access_error(request.user) if (status is not None and (review_request.status != string_to_status(status) or review_request.status != ReviewRequest.PENDING_REVIEW)): try: if status in self._close_type_map: review_request.close(self._close_type_map[status], request.user, description) elif status == 'pending': review_request.reopen(request.user) else: raise AssertionError("Code path for invalid status '%s' " "should never be reached." % status) except PermissionError: return _no_access_error(request.user) if changenum is not None: if changenum != review_request.changenum: review_request.update_changenum(changenum, request.user) try: draft = ReviewRequestDraftResource.prepare_draft( request, review_request) except PermissionDenied: return PERMISSION_DENIED try: draft.update_from_changenum(changenum) except InvalidChangeNumberError: return INVALID_CHANGE_NUMBER draft.save() review_request.reopen() return 200, { self.item_result_key: review_request, } @webapi_check_local_site @augment_method_from(WebAPIResource) def delete(self, *args, **kwargs): """Deletes the review request permanently. This is a dangerous call to make, as it will delete the review request, associated screenshots, diffs, and reviews. There is no going back after this call is made. Only users who have been granted the ``reviews.delete_reviewrequest`` permission (which includes administrators) can perform a delete on the review request. After a successful delete, this will return :http:`204`. """ pass @webapi_check_local_site @webapi_request_fields( optional={ 'changenum': { 'type': str, 'description': 'The change number the review requests must ' 'have set. 
This will only return one review ' 'request per repository, and only works for ' 'repository types that support server-side ' 'changesets.', }, 'time-added-to': { 'type': str, 'description': 'The date/time that all review requests must ' 'be added before. This is compared against the ' 'review request\'s ``time_added`` field. This ' 'must be a valid :term:`date/time format`.', }, 'time-added-from': { 'type': str, 'description': 'The earliest date/time the review request ' 'could be added. This is compared against the ' 'review request\'s ``time_added`` field. This ' 'must be a valid :term:`date/time format`.', }, 'last-updated-to': { 'type': str, 'description': 'The date/time that all review requests must ' 'be last updated before. This is compared ' 'against the review request\'s ' '``last_updated`` field. This must be a valid ' ':term:`date/time format`.', }, 'last-updated-from': { 'type': str, 'description': 'The earliest date/time the review request ' 'could be last updated. This is compared ' 'against the review request\'s ``last_updated`` ' 'field. This must be a valid ' ':term:`date/time format`.', }, 'from-user': { 'type': str, 'description': 'The username that the review requests must ' 'be owned by.', }, 'repository': { 'type': int, 'description': 'The ID of the repository that the review ' 'requests must be on.', }, 'ship-it': { 'type': bool, 'description': 'The review request must have at least one ' 'review with Ship It set, if this is 1. ' 'Otherwise, if 0, it must not have any marked ' 'Ship It.', }, 'status': { 'type': ('all', 'discarded', 'pending', 'submitted'), 'description': 'The status of the review requests.' }, 'to-groups': { 'type': str, 'description': 'A comma-separated list of review group names ' 'that the review requests must have in the ' 'reviewer list.', }, 'to-user-groups': { 'type': str, 'description': 'A comma-separated list of usernames who are ' 'in groups that the review requests must have ' 'in the reviewer list.', }, 'to-users': { 'type': str, 'description': 'A comma-separated list of usernames that the ' 'review requests must either have in the ' 'reviewer list specifically or by way of ' 'a group.', }, 'to-users-directly': { 'type': str, 'description': 'A comma-separated list of usernames that the ' 'review requests must have in the reviewer ' 'list specifically.', } }, allow_unknown=True ) @augment_method_from(WebAPIResource) def get_list(self, *args, **kwargs): """Returns all review requests that the user has read access to. By default, this returns all published or formerly published review requests. The resulting list can be filtered down through the many request parameters. """ pass @augment_method_from(WebAPIResource) def get(self, *args, **kwargs): """Returns information on a particular review request. This contains full information on the latest published review request. If the review request is not public, then
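
# A minimal client-side sketch of the create/update/get_list handlers above.
# The base URL, credentials, and response layout are assumptions made for
# illustration; only the field names (repository, submit_as, status,
# description, from-user, ...) come from the docstrings and
# webapi_request_fields declarations of this resource.
import requests

BASE = "https://reviews.example.com/api/review-requests/"   # hypothetical deployment
AUTH = ("automation-bot", "secret")                         # hypothetical credentials

# Create a review request on behalf of another user (needs the
# reviews.can_submit_as_another_user permission, as documented above).
created = requests.post(BASE, auth=AUTH,
                        data={"repository": "main-repo", "submit_as": "alice"})
rr_id = created.json()["review_request"]["id"]              # assumed payload shape

# Close it as submitted through the update handler's `status` field.
requests.put("%s%d/" % (BASE, rr_id), auth=AUTH,
             data={"status": "submitted",
                   "description": "Pushed as r1234."})

# List alice's pending review requests using get_list's query filters.
pending = requests.get(BASE, auth=AUTH,
                       params={"from-user": "alice", "status": "pending"})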
<reponame>kperrynrel/rdtools '''Functions for normalizing, rescaling, and regularizing PV system data.''' import pandas as pd import pvlib import numpy as np from scipy.optimize import minimize import warnings from rdtools._deprecation import deprecated class ConvergenceError(Exception): '''Rescale optimization did not converge''' pass def normalize_with_expected_power(pv, power_expected, poa_global, pv_input='power'): ''' Normalize PV power or energy based on expected PV power. Parameters ---------- pv : pandas.Series Right-labeled time series PV energy or power. If energy, should *not* be cumulative, but only for preceding time step. Type (energy or power) must be specified in the ``pv_input`` parameter. power_expected : pandas.Series Right-labeled time series of expected PV power. (Note: Expected energy is not supported.) poa_global : pandas.Series Right-labeled time series of plane-of-array irradiance associated with ``expected_power`` pv_input : str, {'power' or 'energy'} Specifies the type of input used for ``pv`` parameter. Default: 'power' Returns ------- energy_normalized : pandas.Series Energy normalized based on ``power_expected`` insolation : pandas.Series Insolation associated with each normalized point ''' freq = _check_series_frequency(pv, 'pv') if pv_input == 'power': energy = energy_from_power(pv, freq, power_type='right_labeled') elif pv_input == 'energy': energy = pv.copy() energy.name = 'energy_Wh' else: raise ValueError("Unexpected value for pv_input. pv_input should be 'power' or 'energy'.") model_tds, mean_model_td = _delta_index(power_expected) measure_tds, mean_measure_td = _delta_index(energy) # Case in which the model less frequent than the measurements if mean_model_td > mean_measure_td: power_expected = interpolate(power_expected, pv.index) poa_global = interpolate(poa_global, pv.index) energy_expected = energy_from_power(power_expected, freq, power_type='right_labeled') insolation = energy_from_power(poa_global, freq, power_type='right_labeled') energy_normalized = energy / energy_expected index_union = energy_normalized.index.union(insolation.index) energy_normalized = energy_normalized.reindex(index_union) insolation = insolation.reindex(index_union) return energy_normalized, insolation def pvwatts_dc_power(poa_global, power_dc_rated, temperature_cell=None, poa_global_ref=1000, temperature_cell_ref=25, gamma_pdc=None): ''' PVWatts v5 Module Model: DC power given effective poa poa_global, module nameplate power, and cell temperature. This function differs from the PVLIB implementation by allowing cell temperature to be an optional parameter. Parameters ---------- poa_global : pandas.Series Total effective plane of array irradiance [W/m**2]. power_dc_rated : float Rated DC power of array [W] temperature_cell : pandas.Series, optional Measured or derived cell temperature [degrees Celsius]. Time series assumed to be same frequency as ``poa_global``. If omitted, the temperature term will be ignored. poa_global_ref : float, default 1000 Reference irradiance at standard test condition [W/m**2]. temperature_cell_ref : float, default 25 Reference temperature at standard test condition [degrees Celsius]. gamma_pdc : float, default None Linear array efficiency temperature coefficient [1 / degree Celsius]. If omitted, the temperature term will be ignored. 
Note ---- All series are assumed to be right-labeled, meaning that the recorded value at a given timestamp refers to the previous time interval Returns ------- power_dc : pandas.Series DC power determined by PVWatts v5 equation [W]. ''' power_dc = power_dc_rated * poa_global / poa_global_ref if temperature_cell is not None and gamma_pdc is not None: temperature_factor = ( 1 + gamma_pdc * (temperature_cell - temperature_cell_ref) ) power_dc = power_dc * temperature_factor return power_dc def normalize_with_pvwatts(energy, pvwatts_kws): ''' Normalize system AC energy output given measured poa_global and meteorological data. This method uses the PVWatts V5 module model. Energy timeseries and poa_global timeseries can be different granularities. Parameters ---------- energy : pandas.Series Energy time series to be normalized [Wh]. Must be a right-labeled regular time series. pvwatts_kws : dict Dictionary of parameters used in the pvwatts_dc_power function. See Other Parameters. Other Parameters ---------------- poa_global : pandas.Series Total effective plane of array irradiance [W/m**2]. power_dc_rated : float Rated DC power of array [W] temperature_cell : pandas.Series, optional Measured or derived cell temperature [degrees Celsius]. Time series assumed to be same frequency as `poa_global`. If omitted, the temperature term will be ignored. poa_global_ref : float, default 1000 Reference irradiance at standard test condition [W/m**2]. temperature_cell_ref : float, default 25 Reference temperature at standard test condition [degrees Celsius]. gamma_pdc : float, default None Linear array efficiency temperature coefficient [1 / degree Celsius]. If omitted, the temperature term will be ignored. Note ---- All series are assumed to be right-labeled, meaning that the recorded value at a given timestamp refers to the previous time interval Returns ------- energy_normalized : pandas.Series Energy divided by PVWatts DC energy [unitless]. insolation : pandas.Series Insolation associated with each normalized point [Wh/m**2] ''' power_dc = pvwatts_dc_power(**pvwatts_kws) irrad = pvwatts_kws['poa_global'] energy_normalized, insolation = normalize_with_expected_power(energy, power_dc, irrad, pv_input='energy') return energy_normalized, insolation @deprecated(since='2.0.0', removal='3.0.0', alternative='normalize_with_expected_power') def sapm_dc_power(pvlib_pvsystem, met_data): ''' Use Sandia Array Performance Model (SAPM) and PVWatts to compute the effective DC power using measured irradiance, ambient temperature, and wind speed. Effective irradiance and cell temperature are calculated with SAPM, and DC power with PVWatts. .. warning:: The ``pvlib_pvsystem`` argument must be a ``pvlib.pvsystem.LocalizedPVSystem`` object, which is no longer available as of pvlib 0.9.0. To use this function you'll need to use an older version of pvlib. Parameters ---------- pvlib_pvsystem : pvlib.pvsystem.LocalizedPVSystem Object contains orientation, geographic coordinates, equipment constants (including DC rated power in watts). The object must also specify either the ``temperature_model_parameters`` attribute or both ``racking_model`` and ``module_type`` attributes to infer the temperature model parameters. met_data : pandas.DataFrame Measured irradiance components, ambient temperature, and wind speed. 
Expected met_data DataFrame column names: ['DNI', 'GHI', 'DHI', 'Temperature', 'Wind Speed'] Note ---- All series are assumed to be right-labeled, meaning that the recorded value at a given timestamp refers to the previous time interval Returns ------- power_dc : pandas.Series DC power in watts derived using Sandia Array Performance Model and PVWatts. effective_poa : pandas.Series Effective irradiance calculated with SAPM ''' solar_position = pvlib_pvsystem.get_solarposition(met_data.index) total_irradiance = pvlib_pvsystem\ .get_irradiance(solar_position['zenith'], solar_position['azimuth'], met_data['DNI'], met_data['GHI'], met_data['DHI']) aoi = pvlib_pvsystem.get_aoi(solar_position['zenith'], solar_position['azimuth']) airmass = pvlib_pvsystem\ .get_airmass(solar_position=solar_position, model='kastenyoung1989') airmass_absolute = airmass['airmass_absolute'] effective_irradiance = pvlib.pvsystem\ .sapm_effective_irradiance(poa_direct=total_irradiance['poa_direct'], poa_diffuse=total_irradiance['poa_diffuse'], airmass_absolute=airmass_absolute, aoi=aoi, module=pvlib_pvsystem.module) temp_cell = pvlib_pvsystem\ .sapm_celltemp(total_irradiance['poa_global'], met_data['Temperature'], met_data['Wind Speed']) power_dc = pvlib_pvsystem\ .pvwatts_dc(g_poa_effective=effective_irradiance, temp_cell=temp_cell) return power_dc, effective_irradiance @deprecated(since='2.0.0', removal='3.0.0', alternative='normalize_with_expected_power') def normalize_with_sapm(energy, sapm_kws): ''' Normalize system AC energy output given measured met_data and meteorological data. This method relies on the Sandia Array Performance Model (SAPM) to compute the effective DC energy using measured irradiance, ambient temperature, and wind speed. Energy timeseries and met_data timeseries can be different granularities. .. warning:: The ``pvlib_pvsystem`` argument must be a ``pvlib.pvsystem.LocalizedPVSystem`` object, which is no longer available as of pvlib 0.9.0. To use this function you'll need to use an older version of pvlib. Parameters ---------- energy : pandas.Series Energy time series to be normalized in watt hours. Must be a right-labeled regular time series. sapm_kws : dict Dictionary of parameters required for sapm_dc_power function. See Other Parameters. Other Parameters --------------- pvlib_pvsystem : pvlib.pvsystem.LocalizedPVSystem object Object contains orientation, geographic coordinates, equipment constants (including DC rated power in watts). The object must also specify either the ``temperature_model_parameters`` attribute or both ``racking_model`` and ``module_type`` to infer the model parameters. met_data : pandas.DataFrame Measured met_data, ambient temperature, and wind speed. Expected column names are ['DNI', 'GHI', 'DHI', 'Temperature', 'Wind Speed'] Note ---- All series are assumed to be right-labeled, meaning that the recorded value at a given timestamp refers to the previous time interval Returns ------- energy_normalized : pandas.Series Energy divided by Sandia Model DC energy. 
insolation : pandas.Series Insolation associated with each normalized point ''' power_dc, irrad = sapm_dc_power(**sapm_kws) energy_normalized, insolation = normalize_with_expected_power(energy, power_dc, irrad, pv_input='energy') return energy_normalized, insolation def _delta_index(series): ''' Takes a pandas series with a DatetimeIndex as input and returns (time step sizes, average time step size) in hours Parameters ---------- series : pandas.Series A pandas timeseries Returns ------- deltas : pandas.Series A timeseries representing the timestep sizes of ``series`` mean : float The average timestep ''' if series.index.freq is None: # If there is no frequency information, explicitly calculate interval # sizes. Length of each interval calculated by using 'int64' to convert # to nanoseconds. hours = pd.Series(series.index.view('int64') / (10.0**9 * 3600.0)) hours.index = series.index deltas = hours.diff() else: # If there is frequency information, pandas shift can be used to gain # a meaningful interval for the first element of the timeseries # Length of each interval calculated by using 'int64' to convert to # nanoseconds. deltas = (series.index - series.index.shift(-1)).view('int64') / \ (10.0**9 * 3600.0) return deltas, np.mean(deltas[~np.isnan(deltas)]) delta_index = deprecated('2.0.0',
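
# A minimal usage sketch for the PVWatts-based normalization above. The
# timestamps, irradiance values, and system parameters are made up for
# illustration; in practice `energy_wh` and `poa` come from measured,
# right-labeled time series (possibly at different frequencies).
import numpy as np
import pandas as pd

times = pd.date_range("2023-06-01 06:00", periods=12, freq="1H", tz="UTC")
poa = pd.Series(np.sin(np.linspace(0.1, np.pi - 0.1, 12)) * 950, index=times)
temp_cell = pd.Series(25 + poa / 40, index=times)      # crude cell-temperature proxy
energy_wh = pd.Series(poa * 4.2, index=times)          # fake measured AC energy [Wh]

normalized, insolation = normalize_with_pvwatts(
    energy_wh,
    {
        "poa_global": poa,
        "power_dc_rated": 5000.0,    # 5 kW array
        "temperature_cell": temp_cell,
        "gamma_pdc": -0.004,         # -0.4 %/degC temperature coefficient
    },
)
# `normalized` is unitless (measured energy / modeled PVWatts energy);
# `insolation` is the plane-of-array insolation per interval [Wh/m**2].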
#!/usr/bin/env python """Test suite for :py:mod:`plastid.readers.bigbed` Notes ----- Several of these tests are tested against |GenomeHash|, and so will fail if |GenomeHash| is malfunctioning """ import unittest import copy import warnings from random import shuffle from pkg_resources import resource_filename, cleanup_resources from nose.plugins.attrib import attr from nose.tools import assert_almost_equal from collections import OrderedDict from plastid.genomics.roitools import SegmentChain, GenomicSegment, Transcript from plastid.genomics.genome_hash import GenomeHash from plastid.readers.bed import BED_Reader from plastid.readers.bigbed import BigBedReader warnings.simplefilter("ignore", DeprecationWarning) #=============================================================================== # INDEX: helper functions #=============================================================================== def tearDownModule(): """Remove test dataset files after unit tests are complete""" cleanup_resources() def transcript_identical(ivc1, ivc2): """Test for identity between positions of two Transcripts""" position_test = ivc1.get_position_set() == ivc2.get_position_set() strand_test = ivc1.spanning_segment.strand == ivc2.spanning_segment.strand chrom_test = ivc1.spanning_segment.chrom == ivc2.spanning_segment.chrom start_test = (ivc1.cds_start is None and ivc2.cds_start is None) or\ (ivc1.cds_start == ivc2.cds_start) end_test = (ivc1.cds_end is None and ivc2.cds_end is None) or\ (ivc1.cds_end == ivc2.cds_end) return position_test & strand_test & chrom_test & start_test & end_test #=============================================================================== # INDEX: test suites #=============================================================================== @attr(test="unit") class test_BigBedReader(unittest.TestCase): @classmethod def setUpClass(cls): cls.cols = [3, 4, 5, 6, 8, 9, 12] cls.bedfiles = {} cls.bbfiles = {} for col in cls.cols: cls.bedfiles[col] = resource_filename( "plastid", "test/data/annotations/100transcripts_bed%s.bed" % col ) cls.bbfiles[col] = resource_filename( "plastid", "test/data/annotations/100transcripts_bed%s.bb" % col ) cls.chrom_sizes = {} with open(resource_filename("plastid", "test/data/annotations/sacCer3.sizes")) as fh: for line in fh: chrom, size = line.strip().split("\t") cls.chrom_sizes[chrom] = int(size) cls.bbs = {K: BigBedReader(cls.bbfiles[K], return_type=Transcript) for K in cls.cols} # comparisons against genome hash cls.binsize = 10000 with open(cls.bedfiles[12]) as fh: transcripts = list(BED_Reader(fh, return_type=Transcript)) cls.tx_dict = {} cls.cds_dict = {} cls.as_cds_dict = {} for tx in transcripts: txid = tx.get_name() cls.tx_dict[txid] = tx cds_ivc = tx.get_cds() cds_ivc.attr["ID"] = txid if cds_ivc.length > 0: cls.cds_dict[txid] = tx.get_cds() cls.as_cds_dict[txid] = tx.get_cds().get_antisense() cls.as_cds_dict[txid].attr["ID"] = txid cls.tx_hash = GenomeHash(cls.tx_dict, do_copy=False, binsize=cls.binsize) cls.cds_hash = GenomeHash(cls.cds_dict, do_copy=False, binsize=cls.binsize) cls.as_cds_hash = GenomeHash(cls.as_cds_dict, do_copy=False, binsize=cls.binsize) cls.shuffled_indices = list(range(len(transcripts))) shuffle(cls.shuffled_indices) cls.flybbfile = resource_filename( "plastid", "test/data/annotations/dmel-all-no-analysis-r5.54.bb" ) cls.flybedfile = resource_filename( "plastid", "test/data/annotations/dmel-all-no-analysis-r5.54.bed" ) # BigBed files with and without extra columns, with and without autoSql descriptions cls.bb_bonuscols = 
{ "bb4as": resource_filename( "plastid", "test/data/annotations/100transcripts_bed4plus_bonus_as.bb" ), "bb12as": resource_filename( "plastid", "test/data/annotations/100transcripts_bed12plus_bonus_as.bb" ), "bb4no_as": resource_filename( "plastid", "test/data/annotations/100transcripts_bed4plus_bonus_no_as.bb" ), "bb12no_as": resource_filename( "plastid", "test/data/annotations/100transcripts_bed12plus_bonus_no_as.bb" ), } cls.bonus_col_file = resource_filename( "plastid", "test/data/annotations/bonus_bed_columns.txt" ) # BigBed file with indexes cls.bb_indexed = resource_filename("plastid", "test/data/annotations/dmel-bonus-cols.bb") def test_count_records(self): for _, my_reader in self.bbs.items(): # make sure we have all records self.assertEqual(my_reader.num_records, 100) def test_num_chroms(self): for _, my_reader in self.bbs.items(): self.assertEqual(my_reader.num_chroms, 17) def test_chrom_sizes(self): for _, my_reader in self.bbs.items(): for k, v in self.chrom_sizes.items(): self.assertEqual(my_reader.chroms[k], v) def test_iter_same_as_bed_reader_various_columns(self): # implicitly tests iterate_over_chunk over all bed files, too # this tests BigBed equality with various ranges of columns # and various custom columns for col in self.cols: bigbed = self.bbs[col] with open(self.bedfiles[col]) as fh: bed = BED_Reader(fh, return_type=Transcript) for n, (tx1, tx2) in enumerate(zip(bed, bigbed)): msg = "Transcript mismatch in BigBed file at record %s. Expected '%s'. Got '%s'." % ( n, tx1, tx2 ) self.assertTrue(transcript_identical(tx1, tx2), msg) self.assertEqual(n, 100 - 1) def test_iter_same_as_bed_reader_flydata(self): # test more complex transcript models # we cast them to lists, sadly, because Python's lexical chromosome sorting # differs from unix command-line sort; so even though the records are # in the same order in both files, they are returned with different sorts flybb = BigBedReader(self.flybbfile, return_type=Transcript) with open(self.flybedfile) as fh: flybed = BED_Reader(fh, return_type=Transcript) for n, (tx1, tx2) in enumerate(zip(flybed, flybb)): msg = "Transcript mismatch in BigBed file at record %s. Expected '%s'. Got '%s'." % ( n, tx1, tx2 ) self.assertTrue(transcript_identical(tx1, tx2), msg) self.assertEqual(n, 32682 - 1) def test_getitem_stranded(self): """Test fetching of overlapping features, minding strand 1. Make sure each feature can fetch its own subregion from its own neighborhood 2. Make sure each feature cannot fetch its own antisense subregion 3. Make sure each features fetches exactly the same features as a GenomeHash """ # make sure we can fetch each transcript's own CDS bb = self.bbs[12] u = 0 for txid, cds in list(self.cds_dict.items())[:100]: gh_ol_features = self.tx_hash.get_overlapping_features(cds, stranded=True) bb_ol_features = bb[cds] self.assertIn( txid, (X.get_name() for X in gh_ol_features), msg="%s failed to fetch its own CDS on correct strand" % txid ) # make sure bb fetch matches GenomeHash fetch self.assertSetEqual( set([str(X) for X in gh_ol_features]), set([str(X) for X in bb_ol_features]) ) u += 1 self.assertGreater(u, 0) # make sure we don't fetch each transcript's own antisense CDS # on opposite strand for txid, cds in list(self.as_cds_dict.items())[:100]: gh_ol_features = self.tx_hash.get_overlapping_features(cds, stranded=True) bb_ol_features = bb[cds] self.assertNotIn( txid, (X.get_name() for X in gh_ol_features), msg="%s fetched its own name on wrong strand!" 
% txid ) self.assertSetEqual( set([str(X) for X in gh_ol_features]), set([str(X) for X in bb_ol_features]) ) def test_get_stranded(self): """Test fetching of overlapping features, minding strand 1. Make sure each feature can fetch its own subregion from its own neighborhood 2. Make sure each feature cannot fetch its own antisense subregion 3. Make sure each features fetches exactly the same features as a GenomeHash """ # make sure we can fetch each transcript's own CDS bb = self.bbs[12] u = 0 for txid, cds in list(self.cds_dict.items())[:100]: gh_ol_features = self.tx_hash.get_overlapping_features(cds, stranded=True) bb_ol_features = bb.get(cds, stranded=True) self.assertIn( txid, (X.get_name() for X in gh_ol_features), msg="%s failed to fetch its own CDS on correct strand" % txid ) # make sure bb fetch matches GenomeHash fetch self.assertSetEqual( set([str(X) for X in gh_ol_features]), set([str(X) for X in bb_ol_features]) ) u += 1 self.assertGreater(u, 0) # make sure we don't fetch each transcript's own antisense CDS # on opposite strand for txid, cds in list(self.as_cds_dict.items())[:100]: gh_ol_features = self.tx_hash.get_overlapping_features(cds, stranded=True) bb_ol_features = bb[cds] self.assertNotIn( txid, (X.get_name() for X in gh_ol_features), msg="%s fetched its own name on wrong strand!" % txid ) self.assertSetEqual( set([str(X) for X in gh_ol_features]), set([str(X) for X in bb_ol_features]) ) def test_get_unstranded(self): """Test fetching of overlapping features, disregarding strand 1. Make sure each feature can fetch its own subregion from its own neighborhood 2. Make sure each feature can fetch its own antisense subregion 3. Make sure each features fetches exactly the same features as a GenomeHash """ # make sure we can fetch each transcript's from its own CDS on same strand bb = self.bbs[12] u = 0 for txid, cds in list(self.cds_dict.items())[:100]: gh_ol_features = self.tx_hash.get_overlapping_features(cds, stranded=False) bb_ol_features = bb.get(cds, stranded=False) self.assertIn( txid, (X.get_name() for X in gh_ol_features), msg="%s failed to fetch its own CDS on same strand" % txid ) # make sure bb fetch matches GenomeHash fetch self.assertSetEqual( set([str(X) + X.get_name() for X in gh_ol_features]), set([str(X) + X.get_name() for X in bb_ol_features]) ) u += 1 self.assertGreater(u, 0) # make sure we can fetch each transcript's from its own antisense CDS # on opposite strand for txid, cds in list(self.as_cds_dict.items())[:100]: gh_ol_features = self.tx_hash.get_overlapping_features(cds, stranded=False) bb_ol_features = bb.get(cds, stranded=False) self.assertIn( txid, (X.get_name() for X in gh_ol_features), msg="%s failed to fetched its own name on opposite strand!" 
% txid ) s1 = set([str(X) + X.get_name() for X in gh_ol_features]) s2 = set([str(X) + X.get_name() for X in bb_ol_features]) self.assertSetEqual( s1, s2, msg="%s failure:\n Only in first set: %s\n Only in second set: %s" % (txid, s1 - s2, s2 - s1) ) def test_return_type(self): bb = self.bbs[12] i = iter(bb) for _ in range(5): self.assertTrue(isinstance(next(i), Transcript)) ivcbb = BigBedReader(self.bbfiles[12], return_type=SegmentChain) i = iter(ivcbb) for _ in range(5): self.assertTrue(isinstance(next(i), SegmentChain)) def test_get_autosql_str(self): for k in (4, 12): bbplus_as = BigBedReader(self.bb_bonuscols["bb%sas" % k]) with open(resource_filename( "plastid", "test/data/annotations/bed%s_bonus_bed_columns.as" % k)) as fh: expected_as = fh.read() self.assertEqual(bbplus_as._get_autosql_str(), expected_as) def test_get_no_autosql_str(self): for k in (4, 12): bbplus_noas = BigBedReader(self.bb_bonuscols["bb%sno_as" % k]) self.assertEqual(bbplus_noas._get_autosql_str(), "") def test_custom_columns_names_with_autosql(self): expected = OrderedDict( [ ("my_floats", "some float values"), ("my_sets", "some set options"), ("my_ints", "signed integer values"), ("my_strs", "str representation of transcripts"), ("my_colors", "r,g,b colors"), ] ) for k in (4, 12): fn = "bb%sas" % k bb = BigBedReader(self.bb_bonuscols[fn]) self.assertEqual(bb.extension_fields, expected) def test_custom_columns_names_without_autosql(self): expected = OrderedDict( [ ("custom_0", "no description"), ("custom_1", "no description"), ("custom_2", "no description"), ("custom_3", "no description"), ("custom_4", "no description"), ] ) for k in (4, 12): fn = "bb%sno_as" % k bb = BigBedReader(self.bb_bonuscols[fn]) self.assertEqual(bb.extension_fields, expected) def test_custom_columns_retval_type_with_autosql(self):
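
# A brief usage sketch of `BigBedReader`, mirroring what the tests above
# exercise. The file path and coordinates are hypothetical; the attribute and
# method names (num_records, num_chroms, extension_fields, get, __getitem__)
# are the ones this suite tests.
from plastid.genomics.roitools import GenomicSegment, SegmentChain, Transcript
from plastid.readers.bigbed import BigBedReader

bb = BigBedReader("annotations.bb", return_type=Transcript)   # hypothetical file
print(bb.num_records, bb.num_chroms, dict(bb.extension_fields))

# Iterate over all records, as in test_iter_same_as_bed_reader_*
for tx in bb:
    print(tx.get_name())
    break

# Fetch features overlapping a region of interest, minding strand,
# as in test_getitem_stranded / test_get_stranded
roi = SegmentChain(GenomicSegment("chrI", 10000, 12000, "+"))
same_strand = bb.get(roi, stranded=True)     # equivalent to bb[roi]
either_strand = bb.get(roi, stranded=False)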
import numpy as np class PlanarPauli: """ Defines a Pauli operator on a planar lattice. Notes: * This is a utility class used by planar implementations of the core models. * It is typically instantiated using :meth:`qecsim.models.planar.PlanarCode.new_pauli` Use cases: * Construct a planar Pauli operator by applying site, plaquette, path and logical operators: :meth:`site`, :meth:`plaquette`, :meth:`path`, :meth:`logical_x`, :meth:`logical_z`. * Get the single Pauli operator applied to a given site: :meth:`operator` * Convert to binary symplectic form: :meth:`to_bsf`. * Copy a planar Pauli operator: :meth:`copy`. """ def __init__(self, code, bsf=None): """ Initialise new planar Pauli. Notes: * For performance reasons, the new Pauli is a view of the given bsf. Modifying one will modify the other. :param code: The planar code. :type code: PlanarCode :param bsf: Binary symplectic representation of Pauli. (Optional. Defaults to identity.) :type bsf: numpy.array (1d) """ self._code = code self._from_bsf(bsf) def _from_bsf(self, bsf): # initialise lattices for X and Z operators from bsf n_qubits = self.code.n_k_d[0] if bsf is None: # initialise identity lattices for X and Z operators self._xs = np.zeros(n_qubits, dtype=int) self._zs = np.zeros(n_qubits, dtype=int) else: assert len(bsf) == 2 * n_qubits, 'BSF {} has incompatible length'.format(bsf) assert np.array_equal(bsf % 2, bsf), 'BSF {} is not in binary form'.format(bsf) # initialise lattices for X and Z operators from bsf self._xs, self._zs = np.hsplit(bsf, 2) # split out Xs and Zs def _flatten_site_index(self, index): """Return 1-d index from 2-d index for internal storage.""" r, c = index assert self.code.is_site(index), 'Invalid site index: {}.'.format(index) assert self.code.is_in_bounds(index), 'Out of bounds index: {}.'.format(index) rows, cols = self.code.size # row_in_lattice * lattice_cols + col_in_lattice + lattice_offset return (r // 2) * (cols - c % 2) + (c // 2) + (r % 2 * rows * cols) @property def code(self): """ The planar code. :rtype: PlanarCode """ return self._code def copy(self): """ Returns a copy of this Pauli that references the same code but is backed by a copy of the bsf. :return: A copy of this Pauli. :rtype: PlanarPauli """ return self.code.new_pauli(bsf=np.copy(self.to_bsf())) def operator(self, index): """ Returns the operator on the site identified by the index. :param index: Index identifying a site in the format (row, column). :type index: 2-tuple of int :return: Pauli operator. One of 'I', 'X', 'Y', 'Z'. :rtype: str :raises IndexError: If index is not an in-bounds site index. """ # check valid in-bounds index if not (self.code.is_site(index) and self.code.is_in_bounds(index)): raise IndexError('{} is not an in-bounds site index for code of size {}.'.format(index, self.code.size)) # extract binary x and z flat_index = self._flatten_site_index(index) x = self._xs[flat_index] z = self._zs[flat_index] # return Pauli if x == 1 and z == 1: return 'Y' if x == 1: return 'X' if z == 1: return 'Z' else: return 'I' def site(self, operator, *indices): """ Apply the operator to site identified by the index. Notes: * Index is in the format (row, column). * Operations on sites that lie outside the lattice have no effect on the lattice. :param operator: Pauli operator. One of 'I', 'X', 'Y', 'Z'. :type operator: str :param indices: Any number of indices identifying a site in the format (row, column). 
:type indices: Any number of 2-tuple of int :return: self (to allow chaining) :rtype: PlanarPauli :raises IndexError: If index is not a site index. """ for index in indices: # check valid index if not self.code.is_site(index): raise IndexError('{} is not a site index.'.format(index)) # apply if index within lattice if self.code.is_in_bounds(index): # flip sites flat_index = self._flatten_site_index(index) if operator in ('X', 'Y'): self._xs[flat_index] ^= 1 if operator in ('Z', 'Y'): self._zs[flat_index] ^= 1 return self def plaquette(self, index): """ Apply a plaquette operator at the given index. Notes: * Index is in the format (row, column). * If the primal lattice is indexed (i.e. row % 2 = 1), then Z operators are applied around the plaquette. (This is equivalent to a vertex operator on the dual lattice.) * If the dual lattice is indexed (i.e. row % 2 = 0), then X operators are applied around the plaquette. (This is equivalent to a vertex operator on the primal lattice.) * Parts of plaquettes that lie outside the lattice have no effect on the lattice. :param index: Index identifying the plaquette in the format (row, column). :type index: 2-tuple of int :return: self (to allow chaining) :rtype: PlanarPauli :raises IndexError: If index is not a plaquette index. """ r, c = index # check valid index if not self.code.is_plaquette(index): raise IndexError('{} is not a plaquette index.'.format(index)) # apply Zs if primal lattice, or Xs otherwise operator = 'Z' if self.code.is_primal(index) else 'X' # flip plaquette sites self.site(operator, (r - 1, c)) # North self.site(operator, (r + 1, c)) # South self.site(operator, (r, c - 1)) # West self.site(operator, (r, c + 1)) # East return self def path(self, a_index, b_index): """ Apply the shortest taxi-cab path of operators between the plaquettes indexed by A and B. Notes: * Indices are in the format (row, column). * Both indices must index the same lattice, see :meth:`qecsim.models.planar.PlanarCode.is_primal` / :meth:`qecsim.models.planar.PlanarCode.is_dual`. * Plaquettes not indexed within the lattice are said to be virtual, see :meth:`qecsim.models.planar.PlanarCode.bounds`. * Paths proceed in the following directions in order: North/South, West/East. Therefore if one plaquette lies beyond both boundaries the path will meet the boundary as dictated by the directions defined here. * If both plaquettes are virtual then they are considered connected by a zero length path. * Parts of paths that lie outside the lattice have no effect on the lattice. :param a_index: Index identifying a plaquette in the format (row, column). :type a_index: 2-tuple of int :param b_index: Index identifying a plaquette in the format (row, column). :type b_index: 2-tuple of int :return: self (to allow chaining) :rtype: PlanarPauli :raises IndexError: If indices are not plaquette indices on the same lattice. 
""" # steps from A to B row_steps, col_steps = self.code.translation(a_index, b_index) # apply Xs if plaquette on primal lattice, or Zs otherwise operator = 'X' if self.code.is_primal(a_index) else 'Z' # current index c_r, c_c = a_index while row_steps < 0: # heading north # flip current then decrement row self.site(operator, (c_r - 1, c_c)) c_r -= 2 row_steps += 1 while row_steps > 0: # heading south # flip current then increment row self.site(operator, (c_r + 1, c_c)) c_r += 2 row_steps -= 1 while col_steps < 0: # heading west # flip current then decrement col self.site(operator, (c_r, c_c - 1)) c_c -= 2 col_steps += 1 while col_steps > 0: # heading east # flip current then increment col self.site(operator, (c_r, c_c + 1)) c_c += 2 col_steps -= 1 return self def logical_x(self): """ Apply a logical X operator, i.e. column of X on horizontal-edge sites of primal lattice. Notes: * The column of X is applied to the rightmost column to allow optimisation of the MPS decoder. :return: self (to allow chaining) :rtype: PlanarPauli """ max_row, max_col = self.code.bounds self.site('X', *((row, max_col) for row in range(0, max_row + 1, 2))) return self def logical_z(self): """ Apply a logical Z operator, i.e. row of Z on horizontal-edge sites of primal lattice. Notes: * The row of Z is applied to the bottom row to allow optimisation of the MPS decoder. :return: self (to allow chaining) :rtype: PlanarPauli """ max_row, max_col = self.code.bounds self.site('Z', *((max_row, col) for col in range(0, max_col + 1, 2))) return self def __eq__(self, other): if type(other) is type(self): return np.array_equal(self._xs, other._xs) and np.array_equal(self._zs, other._zs) return NotImplemented def __repr__(self): return '{}({!r}, {!r})'.format(type(self).__name__, self.code, self.to_bsf()) def __str__(self): """ ASCII art style lattice showing primal lattice lines and Pauli operators. :return: Informal string representation. :rtype: str """ return self.code.ascii_art(pauli=self) def to_bsf(self): """ Binary symplectic representation of Pauli. Notes: * For performance reasons, the
sets and follow sets ********************************************** EMPTY = "(empty)" END = None TerminalOrEmpty = str TerminalOrEmptyOrErrorToken = typing.Union[str, ErrorTokenClass] StartSets = typing.Dict[Nt, OrderedFrozenSet[TerminalOrEmptyOrErrorToken]] def start_sets(grammar: Grammar) -> StartSets: """Compute the start sets for nonterminals in a grammar. A nonterminal's start set is the set of tokens that a match for that nonterminal may start with, plus EMPTY if it can match the empty string and ErrorToken if it can start with an error. """ # How this works: Note that we can replace the words "match" and "start # with" in the definition above with more queries about start sets. # # 1. A nonterminal's start set contains a terminal `t` if any of its # productions contains either `t` or a nonterminal with `t` in *its* # start set, preceded only by zero or more nonterminals that have EMPTY # in *their* start sets. Plus: # # 2. A nonterminal's start set contains EMPTY if any of its productions # consists entirely of nonterminals that have EMPTY in *their* start # sets. # # This definition is rather circular. We want the smallest collection of # start sets satisfying these rules, and we get that by iterating to a # fixed point. assert all(isinstance(nt, Nt) for nt in grammar.nonterminals) start: StartSets start = {typing.cast(Nt, nt): OrderedFrozenSet() for nt in grammar.nonterminals} done = False while not done: done = True for nt, nt_def in grammar.nonterminals.items(): assert isinstance(nt, Nt) # Compute start set for each `prod` based on `start` so far. # Could be incomplete, but we'll ratchet up as we iterate. nt_start = OrderedFrozenSet( t for p in nt_def.rhs_list for t in seq_start(grammar, start, p.body)) if nt_start != start[nt]: start[nt] = nt_start done = False return start def seq_start( grammar: Grammar, start: StartSets, seq: typing.List[Element] ) -> OrderedFrozenSet[TerminalOrEmptyOrErrorToken]: """Compute the start set for a sequence of elements.""" s: OrderedSet[TerminalOrEmptyOrErrorToken] = OrderedSet([EMPTY]) for i, e in enumerate(seq): if EMPTY not in s: # preceding elements never match the empty string break s.remove(EMPTY) if grammar.is_terminal(e): assert isinstance(e, str) s.add(e) elif isinstance(e, ErrorSymbol): s.add(ErrorToken) elif isinstance(e, Nt): s |= start[e] elif e is NoLineTerminatorHere: s.add(EMPTY) else: assert isinstance(e, LookaheadRule) future = seq_start(grammar, start, seq[i + 1:]) if e.positive: future &= e.set else: future -= e.set return OrderedFrozenSet(future) return OrderedFrozenSet(s) StartSetCache = typing.List[typing.List[OrderedFrozenSet[TerminalOrEmptyOrErrorToken]]] def make_start_set_cache( grammar: Grammar, prods: typing.List[Prod], start: StartSets ) -> StartSetCache: """Compute start sets for all suffixes of productions in the grammar. Returns a list of lists `cache` such that `cache[n][i] == seq_start(grammar, start, prods[n][i:])`. (The cache is for speed, since seq_start was being called millions of times.) 
""" def suffix_start_list( rhs: typing.List[Element] ) -> typing.List[OrderedFrozenSet[TerminalOrEmptyOrErrorToken]]: sets: typing.List[OrderedFrozenSet[TerminalOrEmptyOrErrorToken]] sets = [OrderedFrozenSet([EMPTY])] for e in reversed(rhs): s: OrderedFrozenSet[TerminalOrEmptyOrErrorToken] if grammar.is_terminal(e): assert isinstance(e, str) s = OrderedFrozenSet([e]) elif isinstance(e, ErrorSymbol): s = OrderedFrozenSet([ErrorToken]) elif isinstance(e, Nt): s = start[e] if EMPTY in s: s = OrderedFrozenSet((s - {EMPTY}) | sets[-1]) elif e is NoLineTerminatorHere: s = sets[-1] else: assert isinstance(e, LookaheadRule) if e.positive: s = OrderedFrozenSet(sets[-1] & e.set) else: s = OrderedFrozenSet(sets[-1] - e.set) assert isinstance(s, OrderedFrozenSet) assert s == seq_start(grammar, start, rhs[len(rhs) - len(sets):]) sets.append(s) sets.reverse() assert sets == [seq_start(grammar, start, rhs[i:]) for i in range(len(rhs) + 1)] return sets return [suffix_start_list(prod.rhs) for prod in prods] FollowSet = OrderedSet[typing.Union[TerminalOrEmptyOrErrorToken, None]] FollowSets = typing.DefaultDict[Nt, FollowSet] def follow_sets( grammar: Grammar, prods_with_indexes_by_nt: typing.DefaultDict[ LenientNt, typing.List[typing.Tuple[int, typing.List[Element]]] ], start_set_cache: StartSetCache ) -> FollowSets: """Compute all follow sets for nonterminals in a grammar. The follow set for a nonterminal `A`, as defined in the book, is "the set of terminals that can appear immediately to the right of `A` in some sentential form"; plus, "If `A` can be the rightmost symbol in some sentential form, then $ is in FOLLOW(A)." Returns a default-dictionary mapping nts to follow sets. """ # Set of nonterminals already seen, including those we are in the middle of # analyzing. The algorithm starts at `goal` and walks all reachable # nonterminals, recursively. visited = set() # The results. By definition, nonterminals that are not reachable from the # goal nt have empty follow sets. follow: FollowSets = collections.defaultdict(OrderedSet) # If `(x, y) in subsumes_relation`, then x can appear at the end of a # production of y, and therefore follow[x] should be <= follow[y]. # (We could maintain that invariant throughout, but at present we # brute-force iterate to a fixed point at the end.) subsumes_relation: OrderedSet[typing.Tuple[Nt, Nt]] subsumes_relation = OrderedSet() # `END` is $. It is, of course, in follow[each goal nonterminal]. It gets # into other nonterminals' follow sets through the subsumes relation. for init_nt in grammar.init_nts: assert isinstance(init_nt, Nt) follow[init_nt].add(END) def visit(nt: Nt) -> None: if nt in visited: return visited.add(nt) for prod_index, rhs in prods_with_indexes_by_nt[nt]: for i, symbol in enumerate(rhs): if isinstance(symbol, Nt): visit(symbol) after = start_set_cache[prod_index][i + 1] if EMPTY in after: after -= {EMPTY} subsumes_relation.add((symbol, nt)) follow[symbol] |= after for nt in grammar.init_nts: assert isinstance(nt, Nt) visit(nt) # Now iterate to a fixed point on the subsumes relation. done = False while not done: done = True # optimistically for target, source in subsumes_relation: if follow[source] - follow[target]: follow[target] |= follow[source] done = False return follow # *** Lowering **************************************************************** # At this point, lowered productions start getting farther from the original # source. 
We need to associate them with the original grammar in order to # produce correct output, so we use Prod values to represent productions. # # - `nt` is the name of the nonterminal as it appears in the original # grammar. # # - `index` is the index of the source production, within nt's productions, # in the original grammar. # # - `rhs` is the fully lowered/expanded right-hand-side of the production. # # There may be many productions in a grammar that all have the same `nt` and # `index` because they were all produced from the same source production. @dataclass class Prod: nt: Nt index: int rhs: typing.List reducer: ReduceExprOrAccept def expand_optional_symbols_in_rhs( rhs: typing.List[Element], grammar: Grammar, empties: typing.Dict[LenientNt, ReduceExprOrAccept], start_index: int = 0 ) -> typing.Iterable[typing.Tuple[typing.List[Element], typing.Dict[int, ReduceExpr]]]: """Expand a sequence with optional symbols into sequences that have none. rhs is a list of symbols, possibly containing optional elements. This yields every list that can be made by replacing each optional element either with its .inner value, or with nothing. Each list is accompanied by the list of the indices of optional elements in `rhs` that were dropped. For example, `expand_optional_symbols_in_rhs(["if", Optional("else")])` yields the two pairs `(["if"], [1])` and `["if", "else"], []`. """ replacement: ReduceExpr for i in range(start_index, len(rhs)): e = rhs[i] if isinstance(e, Optional): if isinstance(e.inner, Nt) and e.inner in empties: # If this is already possibly-empty in the input grammar, it's an # error! The grammar is ambiguous. raise ValueError( "ambiguous grammar: {} is ambiguous because {} can match " "the empty string" .format(grammar.element_to_str(e), grammar.element_to_str(e.inner))) replacement = None break elif isinstance(e, Nt) and e in empties: empty_expr = empties[e] # The replacement can't be 'accept' because that only happens with # InitNt nonterminals, which are never used in productions. assert not isinstance(empty_expr, str) replacement = empty_expr break else: yield rhs[start_index:], {} return for expanded, r in expand_optional_symbols_in_rhs(rhs, grammar, empties, i + 1): e = rhs[i] rhs_inner = e.inner if isinstance(e, Optional) else e # without rhs[i] r2 = r.copy() r2[i] = replacement yield rhs[start_index:i] + expanded, r2 # with rhs[i] yield rhs[start_index:i] + [rhs_inner] + expanded, r def expand_all_optional_elements(grammar: Grammar) -> typing.Tuple[ Grammar, typing.List[Prod], typing.DefaultDict[LenientNt, typing.List[typing.Tuple[int, typing.List[Element]]]] ]: """Expand optional elements in the grammar. We replace each production that contains an optional element with two productions: one with and one without. Downstream of this step, we can ignore the possibility of optional elements. """ expanded_grammar: typing.Dict[LenientNt, NtDef] = {} # This was capturing the set of empty production to simplify the work of # the previous algorithm which was trying to determine the lookahead. # However, with the LR0Generator this is no longer needed as we are # generating deliberatly inconsistent parse table
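
# A standalone toy version of the start-set fixed point computed by
# start_sets() above, using plain dicts and frozensets instead of the
# Grammar / Nt / OrderedFrozenSet machinery (and ignoring error symbols and
# lookahead rules). Toy grammar: E -> T | T "+" E ;  T -> "x" | (nothing).
toy_grammar = {
    "E": [["T"], ["T", "+", "E"]],
    "T": [["x"], []],
}
EMPTY_TOK = "(empty)"

def toy_seq_start(start, seq):
    s = {EMPTY_TOK}
    for sym in seq:
        if EMPTY_TOK not in s:        # preceding elements never match empty
            break
        s.remove(EMPTY_TOK)
        if sym in toy_grammar:        # nonterminal: add its start set
            s |= start[sym]
        else:                         # terminal: it can start the sequence
            s.add(sym)
    return frozenset(s)

start = {nt: frozenset() for nt in toy_grammar}
changed = True
while changed:                        # ratchet up to a fixed point
    changed = False
    for nt, rhs_list in toy_grammar.items():
        new = frozenset(t for rhs in rhs_list for t in toy_seq_start(start, rhs))
        if new != start[nt]:
            start[nt] = new
            changed = True

# Result: start == {"E": {"x", "+", "(empty)"}, "T": {"x", "(empty)"}}
# "+" is in start(E) because T can match the empty string.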
song_properties = dict() # Calculate the average 20D feature vector for the mfccs for song_file in self.song_files: filename, _ = os.path.splitext(os.path.basename(song_file)) l.debug("Currently loading %s.", filename) if cache_dir and os.path.isfile( os.path.join(cache_dir, filename + "_done")): l.debug("Loading our song from cache.") mfcc = numpy.load( os.path.join(cache_dir, filename + "_mfcc") + os.extsep + 'npy') tempo = numpy.load( os.path.join(cache_dir, filename + "_tempo") + os.extsep + 'npy') else: l.debug("Song not found in cache, processing it.") if cache_dir: mfcc, tempo = self.process_song_file(mfcc_amount, cache_dir, song_file) else: mfcc, tempo = self.get_mfcc_and_tempo(song_file, mfcc_amount) mfccs[song_file] = mfcc tempos[song_file] = tempo average += mfcc.mean(1) # NOTE: We don't use the length of the songs as weights. Because we # prefer to weigh each song equally. This is also influenced by the # fact that we don't know how long each song will be played so using # the entire length doesn't really make any sense. average = average / len(self.song_files) average_covariance = numpy.array( [numpy.zeros(mfcc_amount) for _ in range(mfcc_amount)]) # Now calculate the centered mfcc and covariance matrix for each song # and keep a running average of the average covariance matrix. for song_file, mfcc in mfccs.items(): mfcc = (mfcc.T - average).T covariance = numpy.cov(mfcc) average_covariance += covariance props = (numpy.linalg.cholesky(covariance), numpy.mean(mfcc, 1), tempos[song_file]) song_properties[song_file] = props # Do PCA on the average covariance matrix average_covariance = average_covariance / len(self.song_files) pca = PCA(self.weight_amount) pca.fit(average_covariance.T) # Initialize the weights to the explained variance ratio if the weights # are not yet set. if self.weights is None: weights = pca.explained_variance_ratio_ else: weights = self.weights return pca.components_.T, song_properties, weights def reset_songs(self): """Reset ``self.song_files`` to its original value. This does not alter the weights calculated till now. :rtype: None """ l.debug("Resetting songs.") self.song_files = copy(self._song_files) @staticmethod def get_mfcc_and_tempo(song_file, mfcc_amount): """Calculate the mfcc and estimated BPM. :param str song_file: This file to calculate for. :param int mfcc_amount: The amount of mfccs to calculate. :returns: A tuple of the mfccs and tempo in BPM in this order. :rtype: tuple """ song, sr = librosa.load(song_file) tempo, _ = librosa.beat.beat_track(song, sr) return librosa.feature.mfcc(song, sr, None, mfcc_amount), tempo @staticmethod def get_w_vector(pca, weights): """Get a weighted pca matrix. :param numpy.array(numpy.array) pca: The PCA to apply the weights on. :param numpy.array(len(pca)) weights: The weights to apply. :returns: A square matrix of the same size as the PCA. :rtype: numpy.array """ return numpy.array([ sum((elem * weights[i] for i, elem in enumerate(row))) for row in pca ]) def covariance(self, song_file, weights): """Calculate a (approximation) of the covariance matrix. This is done by using a PCA and a cholesky decomposition. :param str song_file: The path to the song that should be used. Please note that this path should be in ``self.song_properties``. :param numpy.array weights: The weights to be used. :returns: A square matrix of the same size as the weights vector. 
:rtype: numpy.array """ cholesky, _, _unused = self.song_properties[song_file] d = numpy.dot( numpy.diag(self.get_w_vector(self.pca, weights)), cholesky) return numpy.dot(d, d.T) def distance(self, song_q, song_p, weights=None): """Calculate the distance between two MFCCs. This is done based on this paper: http://cs229.stanford.edu/proj2009/RajaniEkkizogloy.pdf :param str song_q: The first song used. :param str song_p: The second song used. :param numpy.array weights: The weights vector to use. :returns: The distance between the two given songs. This distance is symmetric. :rtype: float """ if weights is None: weights = self.weights def kl(p, q): cov_p = self.covariance(p, weights) cov_q = self.covariance(q, weights) cov_q_inv = numpy.linalg.inv(cov_q) m_p = self.song_properties[p][1] m_q = self.song_properties[q][1] d = cov_p.shape[0] return ( numpy.log(numpy.linalg.det(cov_q) / numpy.linalg.det(cov_p)) + numpy.trace(numpy.dot(cov_q_inv, cov_p)) + numpy.dot( numpy.transpose(m_p - m_q), numpy.dot(cov_q_inv, (m_p - m_q))) - d) / 2 return (kl(song_q, song_p) + kl(song_p, song_q)) / 2 def get_next_song(self, user_feedback, force=False): """Get the next song to play. Do this randomly for the first pick and otherwise use :func:`_find_next_song`. This also calls :func:`_optimize_weights` if we got useful feedback (based on the current song we are mixing). If ``force`` was ``True`` too many times in a row we reset the available songs and pick again. :param dict user_feedback: The user feedback between the songs of five and four picks ago. :param bool force: Indicate that the previous pick was not expectable. :return: The song to play next. :rtype: Song """ # We have only one song remaining so we won't be able to pick good new # songs. So reset all the available songs. if len(self.song_files) == 1: self.reset_songs() if (not force) and len(self.picked_songs) == 5: old = self.picked_songs.pop(0) if old != self.picked_songs[-4]: new = self.picked_songs[-4] l.debug("Trying to interpret user feedback from %s to %s.", old, new) user_feedback.update({"old": old, "new": new}) self.done_transitions.append( (old, new, self.get_feedback(user_feedback))) self._optimize_weights() if force: self.force_streak += 1 else: self.force_streak = 0 if self.force_streak > self.max_force_streak: self.reset_songs() if self.current_song is None: # First pick, simply select randomly next_song = random.choice(self.song_files) else: next_song = self._find_next_song(force) if force: self.picked_songs[-1] = next_song else: self.picked_songs.append(next_song) if (not force) and self.current_song == next_song: # Kept same song self.streak += 1 elif self.current_song is not None: # Remove the old song from the available songs so we have fresh tunes self.song_files.remove(self.current_song) self.streak = 0 self.current_song = next_song return Song(next_song) def _find_next_song(self, force): """Find the next song by doing song analysis. This gets the distance between the current and the potential song, does softmax with these distances and gets one by chance. Based on this paper: http://www.cs.cornell.edu/~kilian/papers/Slaney2008-MusicSimilarityMetricsISMIR.pdf :param bool force: Make it impossible to pick the current song. :returns: The filename of the next song.
:rtype: str """ l.debug("Finding song by using NCA.") max_dst = 0 filter_songs = self.force_streak < 2 for song_file in self.all_but_current_song(filter_songs=filter_songs): # calc distance between song_file and current_song dst = self.song_distances[self.current_song][song_file] if dst is None: dst = self.distance(self.current_song, song_file) self.song_distances[self.current_song][song_file] = dst self.song_distances[song_file][self.current_song] = dst max_dst = max(max_dst, dst) # Find the max distance and normalize it to 50. This is because of # floating point errors when raising something to the power of a very # large negative number factor = 50 / max(max_dst, 1) # Now calculate the distance sum needed for softmax distance_sum = 0 for song_file in self.all_but_current_song(filter_songs=filter_songs): dst = self.song_distances[self.current_song][song_file] distance_sum += numpy.power(numpy.e, -(dst * factor)) chances = [] for song_file in self.all_but_current_song(filter_songs=filter_songs): # Append the softmax chances to the chances list if song_file != self.current_song: dst = self.song_distances[self.current_song][song_file] chance = numpy.power(numpy.e, -(dst * factor)) / distance_sum chances.append((song_file, chance)) chances = list( zip([x for x, _ in chances], self.normalize_chances([x for _, x in chances]))) if not force: chances.append( (self.current_song, 1 / (1 + self.streak * self.multiplier))) if not chances: self.reset_songs() self.force_streak += 1 return self._find_next_song(force) # Sort the chances by descending chance chances.sort(key=lambda x: x[1], reverse=True) # We do a range of 10 so we are almost certain to find a match within the # loop; however we won't crash or slow down too much if this doesn't # happen. next_song = chances[0][0] for _ in range(10): for song_file, chance in chances: if random.random() < chance: next_song = song_file l.debug("Found next_song %s, its chance was %f", next_song, chance) break else: continue # Make sure we actually break the OUTER loop break else: l.critical("Terminated simulating odds without finding a match," + " picking song with highest odds (%s).", next_song) return next_song @staticmethod def normalize_chances(original_chances): """Normalize the chances by squaring them and normalizing them again. Square the chances and then normalize them again. This has the advantage of giving relatively high chances (similar songs) a higher chance while the vector still sums to 1. :param list(float) original_chances: The original chances of the songs. :returns: The new chances in the same order the input was. :rtype: list(float) """ chances = list() for chance in original_chances: chances.append(chance**2) chance_sum = sum(chances) for i in range(len(chances)): chances[i] = chances[i] / chance_sum return chances def _optimize_weights(self): """Optimize the weights of the picker. This function optimizes the weights based on the saved feedback between two songs using the :func:`scipy.optimize.minimize` function. It constrains the weights to sum to 1. :returns: Nothing of value. :rtype: None """ l.debug("Optimize the current weights.") def
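# A small numpy-only sketch (illustrative, not part of the picker above) of
# the distance used in `distance`: the KL divergence between two multivariate
# Gaussians, evaluated in both directions and averaged so the result is
# symmetric. The means and covariances below are toy values.
import numpy


def gaussian_kl(mean_p, cov_p, mean_q, cov_q):
    """KL(p || q) for two Gaussians given their means and covariances."""
    cov_q_inv = numpy.linalg.inv(cov_q)
    diff = mean_p - mean_q
    d = cov_p.shape[0]
    return (numpy.log(numpy.linalg.det(cov_q) / numpy.linalg.det(cov_p))
            + numpy.trace(cov_q_inv @ cov_p)
            + diff @ cov_q_inv @ diff
            - d) / 2


def symmetric_distance(mean_p, cov_p, mean_q, cov_q):
    """Symmetrized KL, mirroring the `(kl(q, p) + kl(p, q)) / 2` form above."""
    return (gaussian_kl(mean_p, cov_p, mean_q, cov_q)
            + gaussian_kl(mean_q, cov_q, mean_p, cov_p)) / 2


# Toy usage with 2-dimensional Gaussians.
m_p, c_p = numpy.zeros(2), numpy.eye(2)
m_q, c_q = numpy.ones(2), 2 * numpy.eye(2)
print(symmetric_distance(m_p, c_p, m_q, c_q))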
# This program is free software; you can redistribute it and/or modify # it under the terms of the (LGPL) GNU Lesser General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library Lesser General Public License for more details at # ( http://www.gnu.org/licenses/lgpl.html ). # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # written by: <NAME> ( <EMAIL> ) """ The I{sxbasic} module provides classes that represent I{basic} schema objects. """ from suds import * from suds.xsd import * from suds.xsd.sxbase import * from suds.xsd.query import * from suds.sax import splitPrefix, Namespace from suds.transport import TransportError from suds.reader import DocumentReader from urllib.parse import urljoin log = getLogger(__name__) class RestrictionMatcher: """ For use with L{NodeFinder} to match restriction. """ def match(self, n): return isinstance(n, Restriction) class TypedContent(Content): """ Represents any I{typed} content. """ def resolve(self, nobuiltin=False): qref = self.qref() if qref is None: return self key = 'resolved:nb=%s' % nobuiltin cached = self.cache.get(key) if cached is not None: return cached result = self query = TypeQuery(qref) query.history = [self] log.debug('%s, resolving: %s\n using:%s', self.id, qref, query) resolved = query.execute(self.schema) if resolved is None: log.debug(self.schema) raise TypeNotFound(qref) self.cache[key] = resolved if resolved.builtin(): if nobuiltin: result = self else: result = resolved else: result = resolved.resolve(nobuiltin) return result def qref(self): """ Get the I{type} qualified reference to the referenced xsd type. This method takes into account simple types defined through restriction with are detected by determining that self is simple (len=0) and by finding a restriction child. @return: The I{type} qualified reference. @rtype: qref """ qref = self.type if qref is None and len(self) == 0: ls = [] m = RestrictionMatcher() finder = NodeFinder(m, 1) finder.find(self, ls) if len(ls): return ls[0].ref return qref class Complex(SchemaObject): """ Represents an (xsd) schema <xs:complexType/> node. @cvar childtags: A list of valid child node names @type childtags: (I{str},...) """ def childtags(self): return ( 'attribute', 'attributeGroup', 'sequence', 'all', 'choice', 'complexContent', 'simpleContent', 'any', 'group') def description(self): return ('name',) def extension(self): for c in self.rawchildren: if c.extension(): return True return False def mixed(self): for c in self.rawchildren: if isinstance(c, SimpleContent) and c.mixed(): return True return False class Group(SchemaObject): """ Represents an (xsd) schema <xs:group/> node. @cvar childtags: A list of valid child node names @type childtags: (I{str},...) 
""" def childtags(self): return ('sequence', 'all', 'choice') def dependencies(self): deps = [] midx = None if self.ref is not None: query = GroupQuery(self.ref) g = query.execute(self.schema) if g is None: log.debug(self.schema) raise TypeNotFound(self.ref) deps.append(g) midx = 0 return (midx, deps) def merge(self, other): SchemaObject.merge(self, other) self.rawchildren = other.rawchildren def description(self): return ('name', 'ref',) class AttributeGroup(SchemaObject): """ Represents an (xsd) schema <xs:attributeGroup/> node. @cvar childtags: A list of valid child node names @type childtags: (I{str},...) """ def childtags(self): return ('attribute', 'attributeGroup') def dependencies(self): deps = [] midx = None if self.ref is not None: query = AttrGroupQuery(self.ref) ag = query.execute(self.schema) if ag is None: log.debug(self.schema) raise TypeNotFound(self.ref) deps.append(ag) midx = 0 return (midx, deps) def merge(self, other): SchemaObject.merge(self, other) self.rawchildren = other.rawchildren def description(self): return ('name', 'ref',) class Simple(SchemaObject): """ Represents an (xsd) schema <xs:simpleType/> node """ def childtags(self): return ('restriction', 'any', 'list',) def enum(self): for child, ancestry in self.children(): if isinstance(child, Enumeration): return True return False def mixed(self): return len(self) def description(self): return ('name',) def extension(self): for c in self.rawchildren: if c.extension(): return True return False def restriction(self): for c in self.rawchildren: if c.restriction(): return True return False class List(SchemaObject): """ Represents an (xsd) schema <xs:list/> node """ def childtags(self): return () def description(self): return ('name',) def xslist(self): return True class Restriction(SchemaObject): """ Represents an (xsd) schema <xs:restriction/> node """ def __init__(self, schema, root): SchemaObject.__init__(self, schema, root) self.ref = root.get('base') def childtags(self): return ('enumeration', 'attribute', 'attributeGroup') def dependencies(self): deps = [] midx = None if self.ref is not None: query = TypeQuery(self.ref) super = query.execute(self.schema) if super is None: log.debug(self.schema) raise TypeNotFound(self.ref) if not super.builtin(): deps.append(super) midx = 0 return (midx, deps) def restriction(self): return True def merge(self, other): SchemaObject.merge(self, other) filter = Filter(False, self.rawchildren) self.prepend(self.rawchildren, other.rawchildren, filter) def description(self): return ('ref',) class Collection(SchemaObject): """ Represents an (xsd) schema collection node: - sequence - choice - all """ def childtags(self): return ('element', 'sequence', 'all', 'choice', 'any', 'group') class Sequence(Collection): """ Represents an (xsd) schema <xs:sequence/> node. """ def sequence(self): return True class All(Collection): """ Represents an (xsd) schema <xs:all/> node. """ def all(self): return True class Choice(Collection): """ Represents an (xsd) schema <xs:choice/> node. """ def choice(self): return True class ComplexContent(SchemaObject): """ Represents an (xsd) schema <xs:complexContent/> node. """ def childtags(self): return ('attribute', 'attributeGroup', 'extension', 'restriction') def extension(self): for c in self.rawchildren: if c.extension(): return True return False def restriction(self): for c in self.rawchildren: if c.restriction(): return True return False class SimpleContent(SchemaObject): """ Represents an (xsd) schema <xs:simpleContent/> node. 
""" def childtags(self): return ('extension', 'restriction') def extension(self): for c in self.rawchildren: if c.extension(): return True return False def restriction(self): for c in self.rawchildren: if c.restriction(): return True return False def mixed(self): return len(self) class Enumeration(Content): """ Represents an (xsd) schema <xs:enumeration/> node """ def __init__(self, schema, root): Content.__init__(self, schema, root) self.name = root.get('value') def enum(self): return True class Element(TypedContent): """ Represents an (xsd) schema <xs:element/> node. """ def __init__(self, schema, root): TypedContent.__init__(self, schema, root) a = root.get('form') if a is not None: self.form_qualified = a == 'qualified' a = self.root.get('nillable') if a is not None: self.nillable = a in ('1', 'true') self.implany() def implany(self): """ Set the type as any when implicit. An implicit <xs:any/> is when an element has not body and no type defined. @return: self @rtype: L{Element} """ if self.type is None and self.ref is None and self.root.isempty(): self.type = self.anytype() return self def childtags(self): return ('attribute', 'simpleType', 'complexType', 'any',) def extension(self): for c in self.rawchildren: if c.extension(): return True return False def restriction(self): for c in self.rawchildren: if c.restriction(): return True return False def dependencies(self): deps = [] midx = None if self.ref is not None: query = ElementQuery(self.ref) e = query.execute(self.schema) if e is None: log.debug(self.schema) raise TypeNotFound(self.ref) deps.append(e) midx = 0 return (midx, deps) def merge(self, other): SchemaObject.merge(self, other) self.rawchildren = other.rawchildren def description(self): return ('name', 'ref', 'type') def anytype(self): """ create an xsd:anyType reference """ p, u = Namespace.xsdns mp = self.root.findPrefix(u) if mp is None: mp = p self.root.addPrefix(p, u) return ':'.join((mp, 'anyType')) class Extension(SchemaObject): """ Represents an (xsd) schema <xs:extension/> node. """ def __init__(self, schema, root): SchemaObject.__init__(self, schema, root) self.ref = root.get('base') def childtags(self): return ('attribute', 'attributeGroup', 'sequence', 'all', 'choice', 'group') def dependencies(self): deps = [] midx = None if self.ref is not None: query = TypeQuery(self.ref) super = query.execute(self.schema) if super is None: log.debug(self.schema) raise TypeNotFound(self.ref) if not super.builtin(): deps.append(super) midx = 0 return (midx, deps) def merge(self, other): SchemaObject.merge(self, other) filter = Filter(False, self.rawchildren) self.prepend(self.rawchildren, other.rawchildren, filter) def extension(self): return self.ref is not None def description(self): return ('ref',) class Import(SchemaObject): """ Represents an (xsd) schema <xs:import/> node @cvar locations: A dictionary of namespace locations. @type locations: dict @ivar ns: The imported namespace. @type ns: str @ivar location: The (optional) location. @type location: namespace-uri @ivar opened: Opened and I{imported} flag. @type opened: boolean """ locations = {} @classmethod def bind(cls, ns, location=None): """ Bind a namespace to a schema location (URI). This is used for imports that don't specify a schemaLocation. @param ns: A namespace-uri. @type ns: str @param location: The (optional) schema location for the namespace. (default=ns). 
@type location: str """ if location is None: location = ns cls.locations[ns] = location def __init__(self, schema, root): SchemaObject.__init__(self, schema, root) self.ns = (None, root.get('namespace')) self.location = root.get('schemaLocation') if self.location is None: self.location =
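# Hedged usage sketch for `Import.bind` defined above: some WSDLs import a
# namespace (commonly the SOAP encoding namespace) without a schemaLocation,
# so a location has to be bound up front. Per the classmethod above, the
# location defaults to the namespace URI itself when omitted.
from suds.xsd.sxbasic import Import

Import.bind('http://schemas.xmlsoap.org/soap/encoding/')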
1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0], [1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0], [1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1], [0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1], [0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0], [1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1], [1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0], [1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0], [0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1], [1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1], [0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0], [1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1], [0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0], [0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0], [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0], [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 
1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1], [1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1,
"""Contains eventful dict and list implementations.""" # void function used as a callback placeholder. def _void(*p, **k): return None class EventfulDict(dict): """Eventful dictionary. This class inherits from the Python intrinsic dictionary class, dict. It adds events to the get, set, and del actions and optionally allows you to intercept and cancel these actions. The eventfulness isn't recursive. In other words, if you add a dict as a child, the events of that dict won't be listened to. If you find you need something recursive, listen to the `add` and `set` methods, and then cancel `dict` values from being set, and instead set EventfulDicts that wrap those dicts. Then you can wire the events to the same handlers if necessary. See the on_events, on_add, on_set, and on_del methods for registering event handlers.""" def __init__(self, *args, **kwargs): """Public constructor""" self._add_callback = _void self._del_callback = _void self._set_callback = _void dict.__init__(self, *args, **kwargs) def on_events(self, add_callback=None, set_callback=None, del_callback=None): """Register callbacks for add, set, and del actions. See the doctstrings for on_(add/set/del) for details about each callback. add_callback: [callback = None] set_callback: [callback = None] del_callback: [callback = None]""" self.on_add(add_callback) self.on_set(set_callback) self.on_del(del_callback) def on_add(self, callback): """Register a callback for when an item is added to the dict. Allows the listener to detect when items are added to the dictionary and optionally cancel the addition. callback: callable or None If you want to ignore the addition event, pass None as the callback. The callback should have a signature of callback(key, value). The callback should return a boolean True if the additon should be canceled, False or None otherwise.""" self._add_callback = callback if callable(callback) else _void def on_del(self, callback): """Register a callback for when an item is deleted from the dict. Allows the listener to detect when items are deleted from the dictionary and optionally cancel the deletion. callback: callable or None If you want to ignore the deletion event, pass None as the callback. The callback should have a signature of callback(key). The callback should return a boolean True if the deletion should be canceled, False or None otherwise.""" self._del_callback = callback if callable(callback) else _void def on_set(self, callback): """Register a callback for when an item is changed in the dict. Allows the listener to detect when items are changed in the dictionary and optionally cancel the change. callback: callable or None If you want to ignore the change event, pass None as the callback. The callback should have a signature of callback(key, value). 
The callback should return a boolean True if the change should be canceled, False or None otherwise.""" self._set_callback = callback if callable(callback) else _void def pop(self, key): """Returns the value of an item in the dictionary and then deletes the item from the dictionary.""" if self._can_del(key): return dict.pop(self, key) else: raise Exception('Cannot `pop`, deletion of key "{}" failed.'.format(key)) def popitem(self): """Pop the next key/value pair from the dictionary.""" key = next(iter(self)) return key, self.pop(key) def update(self, other_dict): """Copy the key/value pairs from another dictionary into this dictionary, overwriting any conflicting keys in this dictionary.""" for (key, value) in other_dict.items(): self[key] = value def clear(self): """Clear the dictionary.""" for key in list(self.keys()): del self[key] def __setitem__(self, key, value): if (key in self and self._can_set(key, value)) or \ (key not in self and self._can_add(key, value)): return dict.__setitem__(self, key, value) def __delitem__(self, key): if self._can_del(key): return dict.__delitem__(self, key) def _can_add(self, key, value): """Check if the item can be added to the dict.""" return not bool(self._add_callback(key, value)) def _can_del(self, key): """Check if the item can be deleted from the dict.""" return not bool(self._del_callback(key)) def _can_set(self, key, value): """Check if the item can be changed in the dict.""" return not bool(self._set_callback(key, value)) class EventfulList(list): """Eventful list. This class inherits from the Python intrinsic `list` class. It adds events that allow you to listen for actions that modify the list. You can optionally cancel the actions. See the on_del, on_set, on_insert, on_sort, and on_reverse methods for registering an event handler. Some of the method docstrings were taken from the Python documentation at https://docs.python.org/2/tutorial/datastructures.html""" def __init__(self, *pargs, **kwargs): """Public constructor""" self._insert_callback = _void self._set_callback = _void self._del_callback = _void self._sort_callback = _void self._reverse_callback = _void list.__init__(self, *pargs, **kwargs) def on_events(self, insert_callback=None, set_callback=None, del_callback=None, reverse_callback=None, sort_callback=None): """Register callbacks for add, set, and del actions. See the doctstrings for on_(insert/set/del/reverse/sort) for details about each callback. insert_callback: [callback = None] set_callback: [callback = None] del_callback: [callback = None] reverse_callback: [callback = None] sort_callback: [callback = None]""" self.on_insert(insert_callback) self.on_set(set_callback) self.on_del(del_callback) self.on_reverse(reverse_callback) self.on_sort(sort_callback) def on_insert(self, callback): """Register a callback for when an item is inserted into the list. Allows the listener to detect when items are inserted into the list and optionally cancel the insertion. callback: callable or None If you want to ignore the insertion event, pass None as the callback. The callback should have a signature of callback(index, value). The callback should return a boolean True if the insertion should be canceled, False or None otherwise.""" self._insert_callback = callback if callable(callback) else _void def on_del(self, callback): """Register a callback for item deletion. Allows the listener to detect when items are deleted from the list and optionally cancel the deletion. 
callback: callable or None If you want to ignore the deletion event, pass None as the callback. The callback should have a signature of callback(index). The callback should return a boolean True if the deletion should be canceled, False or None otherwise.""" self._del_callback = callback if callable(callback) else _void def on_set(self, callback): """Register a callback for when items are set. Allows the listener to detect when items are set and optionally cancel the setting. Note, `set` is also called when one or more items are added to the end of the list. callback: callable or None If you want to ignore the set event, pass None as the callback. The callback should have a signature of callback(index, value). The callback should return a boolean True if the set should be canceled, False or None otherwise.""" self._set_callback = callback if callable(callback) else _void def on_reverse(self, callback): """Register a callback for list reversal. callback: callable or None If you want to ignore the reverse event, pass None as the callback. The callback should have a signature of callback(). The callback should return a boolean True if the reverse should be canceled, False or None otherwise.""" self._reverse_callback = callback if callable(callback) else _void def on_sort(self, callback): """Register a callback for sorting of the list. callback: callable or None If you want to ignore the sort event, pass None as the callback. The callback signature should match that of Python list's `.sort` method or `callback(*pargs, **kwargs)` as a catch-all. The callback should return a boolean True if the sort should be canceled, False or None otherwise.""" self._sort_callback = callback if callable(callback) else _void def append(self, x): """Add an item to the end of the list.""" self[len(self):] = [x] def extend(self, L): """Extend the list by appending all the items in the given list.""" self[len(self):] = L def remove(self, x): """Remove the first item from the list whose value is x. It is an error if there is no such item.""" del self[self.index(x)] def pop(self, i=None): """Remove the item at the given position in the list, and return it. If no index is specified, a.pop() removes and returns the last item in the list.""" if i is None: i = len(self) - 1 val = self[i] del self[i] return val def reverse(self): """Reverse the elements of the list, in place.""" if self._can_reverse(): list.reverse(self) def insert(self, index, value): """Insert an item at a given position. The first argument is the index of the element before which to insert,
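# Hedged usage sketch for EventfulDict above: a callback that returns True
# cancels the action, while False/None lets it through. The key prefix used
# here is purely illustrative.
def reject_non_config(key, value):
    """Cancel additions whose key does not start with 'cfg_'."""
    return not str(key).startswith('cfg_')


d = EventfulDict()
d.on_add(reject_non_config)
d['cfg_timeout'] = 30    # allowed: callback returns False
d['color'] = 'red'       # silently cancelled: callback returns True
assert 'cfg_timeout' in d and 'color' not in d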
import os, sys import re CONFIG_BAK_PATH = ".important.bak" AOS_MAKEFILE = "aos.mk" COMPONENT_KEYWORD = "KEYWORD: COMPONENT NAME IS " def find_comp_mkfile(dirname): """ Find component makefile (aos.mk) from dirname and its subdirectory, exclude out, build, publish folder """ mklist = [] for root, dirs, files in os.walk(dirname): tmp = (root + '/').replace("\\", "/") if '/out/' in tmp or '/build/' in tmp or '/publish/' in tmp: continue if 'aos.mk' in files: mklist += ["%s/aos.mk" % root] continue return mklist def get_comp_name(mkfile): """ Get comp name from mkfile by searching lines started with NAME """ name = None patten = re.compile(r'^NAME.*=\s*(.*)\s*') with open(mkfile, 'r') as f: for line in f.readlines(): match = patten.match(line) if match: name = match.group(1) return name def update_comp_optional_deps(comp_names, comp_cond, mandatory_deps, optional_deps): """ put the component and condition into optional_deps, if it is not in the mandatory_deps, subroutine for process $(NAME)_COMPONENTS-$(comp_cond) += compa compb compc, comp_names is the list of [compa compb compc] """ for comp in comp_names: if comp not in mandatory_deps: existed = False if len(optional_deps) > 0: for dep in optional_deps: if comp == dep["comp_name"]: existed = True if comp_cond[0] not in dep["condition"]: dep["condition"].append(comp_cond[0]) if not existed: dep_info = {} dep_info["comp_name"] = comp dep_info["condition"] = comp_cond optional_deps.append(dep_info) def get_comp_deps(mkfile): """ Get component's mandatory and optional dependencies from aos.mk by searching $(NAME)_COMPONENTS and $(NAME)_COMPONENTS-$( """ abs_mkfile = os.path.abspath(mkfile) mandatory_deps = [] optional_deps = [] host_mcu_family = "" p1 = re.compile(r'^HOST_MCU_FAMILY.*=\s*(.*)\s*') p2 = re.compile(r'^\$\(NAME\)_COMPONENTS.*=\s*(.*)\s*') p3 = re.compile(r'^\$\(NAME\)_COMPONENTS-\$\((.*)\)') with open(mkfile, "r") as f: for line in f: line = line.strip() if not line or "#" in line: continue while line.endswith('\\'): line = line[:-1] + next(f).rstrip('\n') if line.startswith("HOST_MCU_FAMILY"): match = p1.match(line) if match: host_mcu_family = match.group(1) if line.startswith("$(NAME)_COMPONENTS") and not line.startswith("$(NAME)_COMPONENTS-n"): match = p2.match(line) if match: orig_comp_names = match.group(1).split() tmp = " ".join(orig_comp_names) if host_mcu_family: tmp = tmp.replace("$(HOST_MCU_FAMILY)", host_mcu_family) comp_names = tmp.split() if line.startswith("$(NAME)_COMPONENTS-$("): match = p3.match(line) if match: # only one condition comp_cond = [match.group(1)] update_comp_optional_deps(comp_names, comp_cond, mandatory_deps, optional_deps) else: mandatory_deps += comp_names mandatory_deps = list(set(mandatory_deps)) return mandatory_deps, optional_deps def get_comp_mandatory_depends(comp_info, comps): """ Get comp mandatory depends from comp index """ depends = [] for comp in comps: if comp in comp_info: depends += comp_info[comp]["dependencies"] # print("add mandatory depend:", comp_info[comp]["dependencies"], "for", comp) if depends: depends += get_comp_mandatory_depends(comp_info, depends) return list(set(depends)) def get_comp_optional_depends_r(comp_info, comps, mandatory_comps): """ Get comp optional depends recursively from comp index """ depends = [] """ comps are optional dependency list from last layer """ for comp in comps: # print("comp name is:", comp["comp_name"]) if comp["comp_name"] not in comp_info: continue """ get mandatory dependency list for this optional component """ for dep_info in 
comp_info[comp["comp_name"]]["dependencies"]: if dep_info not in mandatory_comps: """ add to the list with the inherrited dependency""" tmp = {} tmp["comp_name"] = dep_info tmp["condition"] = comp["condition"] depends.append(tmp) # print("add mandatory depend r:", tmp, "for", comp["comp_name"]) """ get optional dependency list for this optional component """ for dep_info in comp_info[comp["comp_name"]]["optional_dependencies"]: if dep_info["comp_name"] not in mandatory_comps: """ add to the list with (the inherrited dependency && this condition) """ tmp = {} tmp["comp_name"] = dep_info["comp_name"] tmp["condition"] = comp["condition"] + ["and"] + dep_info["condition"] depends.append(tmp) # print("add optional depend r:", tmp, "for", comp["comp_name"]) if depends: depends += get_comp_optional_depends_r(comp_info, depends, mandatory_comps) return depends def merge_comp_optional_depends(optional_deps): """ merge the condition for the dependency of same component name """ merge_depends = [] if optional_deps: optional_deps.sort(key=lambda x: x["comp_name"]) last_dep = "" for dep in optional_deps: # print("optional dependency is", dep) if dep["comp_name"] != last_dep: """ new deps """ tmp = {} tmp["comp_name"] = dep["comp_name"] tmp["condition"] = [] tmp["condition"].append(dep["condition"]) last_dep = dep["comp_name"] merge_depends.append(tmp) else: """ deps with the prio one """ duplicated = False for cond in merge_depends[-1]["condition"]: if cond == dep["condition"]: duplicated = True break if not duplicated: merge_depends[-1]["condition"].append(dep["condition"]) return merge_depends def get_comp_optional_depends(comp_info, comps): """ Get comp optional depends from comp index """ depends = [] """ comps are mandatory components got by get_comp_mandatory_depends, here is to find all optional dependencies for comp""" for comp in comps: if comp in comp_info: for dep_info in comp_info[comp]["optional_dependencies"]: """ if optional dependency(dep_info) is in mandatory components, ignore it """ if dep_info["comp_name"] not in comps: depends.append(dep_info) # print("add depend:", dep_info, "for", comp) merge_depends = [] if depends: depends += get_comp_optional_depends_r(comp_info, depends, comps) merge_depends = merge_comp_optional_depends(depends) # for dep in merge_depends: # print("dep is", dep) return merge_depends def get_comp_optional_depends_text(conditions_list, config_file): """ format optional Config.in string, like if (((cond1 || cond2) && cond3) || (cond4 || cond5)) source $AOS_SDK_PATH/core/cli/Config.in endif condition_list is [[cond1, cond2, and cond3], [cond4, cond5]] config_file is filename of Config.in """ line = "if (" conds_line = "" for conds in conditions_list: conds_line += "(" cond_line = "" for cond in conds: if cond == "and": cond_line = "(" + cond_line[:-4] + ") && " else: cond_line += "%s || " % cond conds_line += cond_line[:-4] conds_line += ") || " conds_line = conds_line[:-4] line += conds_line line += ")\n" + 'source "$AOS_SDK_PATH/%s"\n' % config_file + "endif\n" return line, conds_line def find_config_in_file(app_config_in): """find Config.in files in application's Config.in """ config_in_list = [] if not os.path.isfile(app_config_in): return config_in_list aos_sdk_path = os.environ["AOS_SDK_PATH"] user_app_path = os.path.dirname(app_config_in) pattern = re.compile(r'source \"\$(AOS_SDK_PATH|USER_APP_PATH)\/([\w\-\.\/]+)\"') with open (app_config_in, 'r') as f: for line in f.readlines(): line = line.strip() if line.startswith("source"): match = 
pattern.match(line) if match: if "AOS_SDK_PATH" == match.group(1): config_in_list.append(os.path.join(aos_sdk_path, match.group(2))) elif "USER_APP_PATH" == match.group(1): config_in_list.append(os.path.join(user_app_path, match.group(2))) config_in_list.append(app_config_in) return config_in_list def get_comp_name_from_configin(config_in_list): """ read aos.mk file in the same directory of Config.in, and get component name from aos.mk """ comp_list = [] for config_in_file in config_in_list: dirname = os.path.dirname(config_in_file) mkfile = os.path.join(dirname, "aos.mk") if(os.path.isfile(mkfile)): comp = {} comp["comp_name"] = get_comp_name(mkfile) comp["config_file"] = config_in_file comp_list.append(comp) return comp_list def from_y_n_to_0_1(from_y_n, type): if type == "bool": if from_y_n == "y": to_0_1 = "1" else: to_0_1 = "0" elif type == "tristate": if from_y_n == "m": to_0_1 = "1" elif from_y_n == "y": to_0_1 = "2" else: to_0_1 = "0" else: to_0_1 = from_y_n return to_0_1 def from_0_1_to_y_n(from_0_1, type): if type == "bool": if from_0_1 != "0": to_y_n = "y" else: to_y_n = "n" elif type == "tristate": if from_0_1 == "1": to_y_n = "m" elif from_0_1 == "2": to_y_n = "y" else: to_y_n = "n" else: to_y_n = from_0_1 return to_y_n def parse_block_of_configin(lines): """ parse a block of Config.in file to get macro name, type, value """ p1 = re.compile(r"(config|menuconfig)\s+(\w*)$") p2 = re.compile(r"(bool|int|string|hex|tristate)(\s+\"(.*)\")?") p3 = re.compile(r"default\s+(\w*)") p4 = re.compile(r"default\s+\"(.*)\"") new_macro = {} depends_on = "" for line in lines: if line.startswith("config") or line.startswith("menuconfig"): match = p1.match(line) if match: new_macro["name"] = match.group(2) # add default data type new_macro["type"] = "bool" new_macro["hint"] = "" new_macro["value"] = "" else: return new_macro elif line.startswith("bool") or line.startswith("int") or line.startswith("string") \ or line.startswith("hex") or line.startswith("tristate"): match = p2.match(line) if match: new_macro["type"] = match.group(1) if new_macro["type"] == "string": new_macro["value"] = "\"\"" else: new_macro["value"] = "0" if match.group(3): new_macro["hint"] = match.group(3) else: new_macro["hint"] = "CAN NOT BE MODIFIED" elif line.startswith("default"): if new_macro["type"] == "string": match = p4.match(line) if match: new_macro["value"] = "\"" + match.group(1) + "\"" else: match = p3.match(line) if match: val = from_y_n_to_0_1(match.group(1), new_macro["type"]) new_macro["value"] = val elif line.startswith("depends on"): depends_on = ", " + line if depends_on: new_macro["hint"] += depends_on return new_macro def append_a_block_to_header(fn, lines): macro = parse_block_of_configin(lines) if macro: fn.write("// description:%s\n" % macro["hint"]) fn.write("// #define %s %s // type: %s\n\n" % (macro["name"], macro["value"], macro["type"])) def convert_configin_to_header(config_in_file, comp_name, destdir): """ read Config.in file, and convert to C header file """ if not os.path.isfile(config_in_file): return False if not os.path.isdir(destdir): return False if not comp_name: return False macro_list = [] filename = os.path.join(destdir, "comp_%s.h" % comp_name) fn = open (filename, 'w+') fn.write("//================This is split line================\n") fn.write("// %s %s\n\n" % (COMPONENT_KEYWORD, comp_name)) with open (config_in_file, 'r') as f: lines = [] new_block = False p1 = re.compile(r"if (.*)=\s*(y|n)") for line in f.readlines(): line = line.strip() if line: if line.startswith("if "): if 
new_block: append_a_block_to_header(fn, lines) new_block = False lines = [] match = p1.match(line) if match: if match.group(2) == "y": fn.write("// #if " + match.group(1) + "= 1\n\n") else: fn.write("// #if " + match.group(1) + "=
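# Hedged usage sketch for the helpers above: Config.in values move between the
# kconfig letters (y/m/n) and the numeric form written into the generated C
# header. For tristate symbols, "m" maps to 1 and "y" maps to 2.
assert from_y_n_to_0_1("y", "bool") == "1"
assert from_y_n_to_0_1("n", "bool") == "0"
assert from_y_n_to_0_1("m", "tristate") == "1"
assert from_y_n_to_0_1("y", "tristate") == "2"
assert from_0_1_to_y_n("2", "tristate") == "y"
assert from_0_1_to_y_n("0", "bool") == "n"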
# # For licensing see accompanying LICENSE file. # Copyright (C) 2020 Apple Inc. All rights reserved. # '''Train CIFAR10 with PyTorch.''' # import os # os.environ['CUDA_LAUNCH_BLOCKING'] = "1" # pip install pytorch-warmup # Num epochs=600, lr scheduler after every 100 epochs # CUDA_VISIBLE_DEVICES=0 python3 main1.py --dataset CIFAR100 --model GlobalLinformer --config ./config_linformer/Global/CIFAR100/Global_resnet_backbone_CIFAR100_capsdim256.json --seed 0 --train_bs 32 --accumulation_steps 4 # resnet_backbone_FashionMNIST_capsdim64v3 # THIS IS THE MAIN FILE FOR CIFAR100-CIFAR10 experiments import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torch.backends.cudnn as cudnn import pytorch_warmup as warmup # Mixup augmentation import numpy as np from torch.autograd import Variable # import torchvision import torchvision.transforms as transforms import os import argparse from src import capsule_model_invert as capsule_model from utils import progress_bar import pickle import json from datetime import datetime from utils import seed_torch from get_dataset import get_dataset # + parser = argparse.ArgumentParser(description='Training Capsules using Inverted Dot-Product Attention Routing') parser.add_argument('--resume_dir', '-r', default='', type=str, help='dir where we resume from checkpoint') parser.add_argument('--num_routing', default=2, type=int, help='number of routing. Recommended: 0,1,2,3.') parser.add_argument('--dataset', default='CIFAR100', type=str, help='dataset. CIFAR10, CIFAR100 or MNIST') parser.add_argument('--backbone', default='resnet', type=str, help='type of backbone. simple or resnet') parser.add_argument('--num_workers', default=2, type=int, help='number of workers. 0 or 2') parser.add_argument('--config_path', default='./configs/resnet_backbone_CIFAR100_capsdim1024.json', type=str, help='path of the config') parser.add_argument('--debug', action='store_true', help='use debug mode (without saving to a directory)') parser.add_argument('--sequential_routing', action='store_true', help='not using concurrent_routing') parser.add_argument('--kernel_transformation', action='store_true', help='transform each 3*3 to 4 transformations with local linformer') parser.add_argument('--multi_transforms', action='store_true', help='transform 288->128 using this number of matrices (say 4, then 4 matrices to 32 dimensions and then concatenate before attention)') parser.add_argument('--train_bs', default=64, type=int, help='Batch size for train') parser.add_argument('--mixup', default=False, type=bool, help='Mixup Augmentation') parser.add_argument('--mixup_alpha', default=1, type=int, help='mixup interpolation coefficient (default: 1)') parser.add_argument('--test_bs', default=100, type=int, help='Batch size for test') parser.add_argument('--seed', default=0, type=int, help='Random seed value') parser.add_argument('--accumulation_steps', default=2, type=float, help='Number of gradient accumulation steps') parser.add_argument('--lr', default=0.1, type=float, help='learning rate: 0.1 for SGD') parser.add_argument('--gamma', default=0.1, type=float, help='learning rate decay: 0.1') parser.add_argument('--dp', default=0.0, type=float, help='dropout rate') parser.add_argument('--weight_decay', default=5e-4, type=float, help='weight decay') parser.add_argument('--total_epochs', default=350, type=int, help='Total epochs for training') parser.add_argument('--model', default='sinkhorn', type=str, help='default or sinkhorn or bilinear')
parser.add_argument('--optimizer', default='SGD', type=str, help='SGD or Adams') parser.add_argument('--lr_decay', default='MultiStep150', type=str, help='SGD or Adams') parser.add_argument('--warmup', action='store_true', help='Use warmup?') # parser.add_argument('--save_dir', default='CIFAR10', type=str, help='dir to save results') # - args = parser.parse_args() assert args.num_routing > 0 if 'Linformer' in args.model: assert ('config_linformer' in args.config_path), "Wrong configuration file, choose linformer configs" if 'Local' in args.model: assert ('Local' in args.config_path), "Local linformer model, but wrong config file" if 'Global' in args.model: assert ('Global' in args.config_path), "Global linformer model, but wrong config file" accumulation_steps=args.accumulation_steps seed_torch(args.seed) device = 'cuda' if torch.cuda.is_available() else 'cpu' best_acc = 0 # best test accuracy start_epoch = 0 # start from epoch 0 or last checkpoint epoch # Data print('==> Preparing data..') assert args.dataset == 'CIFAR10' or args.dataset == 'CIFAR100' or args.dataset == 'MNIST' or args.dataset == 'MultiMNIST' or args.dataset == 'ExpandedMNIST' or args.dataset == 'AffNIST' or args.dataset=="Expanded_AffNISTv2" trainset, testset, num_class, image_dim_size = get_dataset(args.dataset, args.seed) trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.train_bs, shuffle=True, num_workers=args.num_workers) testloader = torch.utils.data.DataLoader(testset, batch_size=args.test_bs, shuffle=False, num_workers=args.num_workers) print("Training dataset: ", len(trainloader)," Validation dataset: " , len(testloader)) print('==> Building model..') # Model parameters with open(args.config_path, 'rb') as file: params = json.load(file) if args.model=='default': net = capsule_model.CapsModel(image_dim_size, params, args.dataset, args.backbone, args.dp, args.num_routing, sequential_routing=args.sequential_routing, seed = args.seed) elif args.model=='sinkhorn': net = capsule_model.CapsSAModel(image_dim_size, params, args.dataset, args.backbone, args.dp, args.num_routing, sequential_routing=args.sequential_routing, seed = args.seed) elif args.model=='BilinearRandomInit': net = capsule_model.CapsRandomInitBAModel(image_dim_size, params, args.dataset, args.backbone, args.dp, args.num_routing, sequential_routing=args.sequential_routing, seed = args.seed) elif args.model=='bilinear': net = capsule_model.CapsBAModel(image_dim_size, params, args.dataset, args.backbone, args.dp, args.num_routing, sequential_routing=args.sequential_routing, seed = args.seed) elif args.model=='HintonDynamic': print("Using Sara Sabour's Dynamic Routing") assert args.sequential_routing == True net = capsule_model.CapsDRModel(image_dim_size, params, args.dataset, args.backbone, args.dp, args.num_routing, sequential_routing=args.sequential_routing, seed = args.seed) elif args.model=='DynamicBilinear': assert args.sequential_routing == True net = capsule_model.CapsDBAModel(image_dim_size, params, args.dataset, args.backbone, args.dp, args.num_routing, sequential_routing=args.sequential_routing, seed = args.seed) elif args.model=='MultiHeadBilinear': net = capsule_model.CapsMultiHeadBAModel(image_dim_size, params, args.dataset, args.backbone, args.dp, args.num_routing, multi_transforms = args.multi_transforms, sequential_routing=args.sequential_routing, seed = args.seed) if args.model=='LocalLinformer': net = capsule_model.CapsBilinearLocalLinformer(image_dim_size, params, args.dataset, args.backbone, args.dp, args.num_routing, 
multi_transforms = args.multi_transforms, kernel_transformation = args.kernel_transformation, sequential_routing=args.sequential_routing, seed = args.seed) if args.model=='MultiHeadLocalLinformer': net = capsule_model.CapsMultiHeadBilinearLocalLinformer(image_dim_size, params, args.dataset, args.backbone, args.dp, args.num_routing, kernel_transformation = args.kernel_transformation, sequential_routing=args.sequential_routing, seed = args.seed) if args.model=='GlobalLinformer': net = capsule_model.CapsBilinearGlobalLinformerModel(image_dim_size, params, args.dataset, args.backbone, args.dp, args.num_routing, sequential_routing=args.sequential_routing, seed = args.seed) elif args.model=='resnet18': net = torchvision.models.resnet18(pretrained=True) num_ftrs = net.fc.in_features net.fc = nn.Linear(num_ftrs, num_class) # + if(args.optimizer=="SGD"): optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.weight_decay) else: print("Changed optimizer to Adams, Learning Rate 0.001") optimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-07, weight_decay=0, amsgrad=False) if args.lr_decay == 'MultiStep150': lr_scheduler_name = "MultiStepLR_150_250" lr_decay = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[150, 250, 350], gamma=0.1) if args.warmup: warmup_scheduler = warmup.LinearWarmup(optimizer,warmup_period=1) warmup_scheduler.last_step = -1 # if 'NIST' in args.dataset and args.optimizer !="SGD": # print("Setting LR Decay for Adams on MNIST...") # gamma = 0.1 # lr_scheduler_name = "Exponential_" + str(gamma) # lr_decay = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma = gamma) # elif 'NIST' in args.dataset and args.optimizer =="SGD": # print("Setting LR Decay for SGD on MNIST...") # gamma = 0.1 # step_size = 5 # lr_scheduler_name = "StepLR_steps_"+ str(step_size) + "_gamma_" + str(gamma) # lr_decay = torch.optim.lr_scheduler.StepLR(optimizer=optimizer , step_size=5, gamma = gamma) # - def count_parameters(model): ssum=0 for name, param in model.named_parameters(): # if param.requires_grad: if param.requires_grad and 'capsule_layer' in name: # .numel() returns total number of elements print(name, param.numel()) ssum += param.numel() print('Caps sum ', ssum) return sum(p.numel() for p in model.parameters() if p.requires_grad) # print(net) total_params = count_parameters(net) print("Total model parameters: ",total_params) # Get configuration info capsdim = args.config_path.split('capsdim')[1].split(".")[0] if 'capsdim' in args.config_path else 'normal' print(capsdim) save_dir_name = 'Invert_model_' + str(args.model)+ '_dataset_' + str(args.dataset) + '_batch_' +str(args.train_bs)+'_acc_'+str(args.accumulation_steps) + '_optimizer_' +str(args.optimizer) +'_scheduler_' + lr_scheduler_name +'_num_routing_' + str(args.num_routing) + '_backbone_' + args.backbone + '_config_'+capsdim + '_sequential_'+str(args.sequential_routing) + '_alpha_' +str(args.mixup_alpha) + '_mixup_'+str(args.mixup)+'_warmup_'+str(args.warmup)+ '_KernelTransform_' + str(args.kernel_transformation)+ '_MultiTransforms_'+ str(args.multi_transforms)+'_seed_'+str(args.seed) print(save_dir_name) if 'Linformer' in args.model: print("Linformer directory it is") if not os.path.isdir('results/Linformer/'+args.dataset + '/CapsDim' + str(capsdim)) and not args.debug: os.makedirs('results/Linformer/'+args.dataset + '/CapsDim' + str(capsdim)) store_dir = os.path.join('results/Linformer/'+args.dataset + '/CapsDim' + str(capsdim), save_dir_name) if not 
os.path.isdir(store_dir) : os.mkdir(store_dir) else: if not os.path.isdir('results/'+args.dataset + '/CapsDim' + str(capsdim)) and not args.debug: os.makedirs('results/'+args.dataset + '/CapsDim' + str(capsdim)) store_dir = os.path.join('results/'+args.dataset + '/CapsDim' + str(capsdim), save_dir_name) if not os.path.isdir(store_dir) : os.mkdir(store_dir) net = net.to(device) if device == 'cuda': use_cuda = True net = torch.nn.DataParallel(net) cudnn.benchmark = True loss_func = nn.CrossEntropyLoss() if args.resume_dir and not args.debug: # Load checkpoint. print('==> Resuming from checkpoint..') checkpoint = torch.load(os.path.join(args.resume_dir, 'ckpt.pth')) net.load_state_dict(checkpoint['net']) optimizer.load_state_dict(checkpoint['optimizer']) best_acc = checkpoint['acc'] start_epoch = checkpoint['epoch'] # Mixup Augmentation def mixup_data(x, y, alpha=1.0, use_cuda=True): '''Returns mixed inputs, pairs of targets, and lambda''' if alpha > 0: lam = np.random.beta(alpha, alpha) else: lam = 1 batch_size = x.size()[0] if use_cuda: index = torch.randperm(batch_size).cuda() else: index = torch.randperm(batch_size) mixed_x = lam * x + (1 - lam) * x[index, :] y_a, y_b = y, y[index] return mixed_x, y_a, y_b, lam def mixup_criterion(criterion, pred, y_a, y_b, lam): return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b) # Training def train(epoch): global accumulation_steps if(accumulation_steps!=1): print("TRAINING WITH GRADIENT ACCUMULATION") if args.mixup == True: print("Training with Mixup Augmentation") print('\nEpoch: %d' % epoch) net.train() train_loss = 0 correct = 0 total = 0 optimizer.zero_grad() for batch_idx, (inputs, targets) in enumerate(trainloader): inputs = inputs.to(device) targets = targets.to(device) # Mixup augmentation based Training if args.mixup == True: inputs, targets_a, targets_b, lam = mixup_data(inputs, targets, args.mixup_alpha, use_cuda) inputs, targets_a, targets_b = map(Variable, (inputs, targets_a, targets_b)) outputs = net(inputs) loss = mixup_criterion(loss_func, outputs, targets_a, targets_b, lam) _, predicted = torch.max(outputs.data, 1) correct += (lam * predicted.eq(targets_a.data).cpu().sum().float() + (1 - lam) * predicted.eq(targets_b.data).cpu().sum().float()) else: v = net(inputs) loss = loss_func(v, targets) _, predicted = v.max(dim=1) correct += predicted.eq(targets).sum().item() loss = loss / accumulation_steps loss.backward() if (batch_idx+1) % accumulation_steps == 0: # print("Performed Gradient update") optimizer.step() optimizer.zero_grad() # optimizer.step() train_loss += loss.item() total += targets.size(0) progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) return 100.*correct/total def train_withoutgradacc(epoch): print('\nEpoch: %d' % epoch) net.train() train_loss = 0 correct = 0 total = 0 import time for batch_idx, (inputs, targets) in enumerate(trainloader): inputs = inputs.to(device) targets = targets.to(device) optimizer.zero_grad() time1=time.time() v = net(inputs) time2=time.time() print(time2-time1) loss = loss_func(v, targets) loss.backward() optimizer.step() train_loss += loss.item() _, predicted = v.max(dim=1) total += targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) return 100.*correct/total def test(epoch): global best_acc net.eval() test_loss = 0 correct = 0 total = 
0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): inputs = inputs.to(device) targets = targets.to(device) v = net(inputs) loss = loss_func(v, targets) test_loss += loss.item() _, predicted = v.max(dim=1) total += targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) # Save checkpoint. acc = 100.*correct/total
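The training script above combines mixup augmentation with gradient accumulation. A minimal, self-contained sketch of those two pieces follows; it uses only NumPy and PyTorch, and the batch shapes, alpha value, and accumulation_steps below are illustrative rather than taken from the original configuration.

# Minimal sketch of the mixup step and gradient-accumulation pattern used in train() above.
# Shapes, alpha, and accumulation_steps are illustrative.
import numpy as np
import torch
import torch.nn as nn

def mixup_data(x, y, alpha=1.0):
    # Blend the batch with a shuffled copy of itself; return both label sets and lambda.
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
    index = torch.randperm(x.size(0))
    return lam * x + (1 - lam) * x[index], y, y[index], lam

def mixup_criterion(criterion, pred, y_a, y_b, lam):
    # The loss is the same convex combination applied to the two label sets.
    return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)

# Toy model and data to show the accumulation pattern: the loss is divided by
# accumulation_steps and the optimizer only steps every accumulation_steps batches.
net = nn.Linear(10, 3)
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
criterion = nn.CrossEntropyLoss()
accumulation_steps = 2

optimizer.zero_grad()
for batch_idx in range(4):
    x = torch.randn(8, 10)
    y = torch.randint(0, 3, (8,))
    mixed_x, y_a, y_b, lam = mixup_data(x, y, alpha=1.0)
    loss = mixup_criterion(criterion, net(mixed_x), y_a, y_b, lam) / accumulation_steps
    loss.backward()
    if (batch_idx + 1) % accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()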
<filename>pbd/system_enums.py<gh_stars>1-10 class EnumInfo(object): def __init__(self, name, value, m_6): self.name = name self.value = value self.m_6 = m_6 def __str__(self): pass enum_main = dict() e_4000 = dict() e_4000[0] = EnumInfo('defaultrole', 0, 0) e_4000[1] = EnumInfo('titlebarrole', 1, 0) e_4000[2] = EnumInfo('menubarrole', 2, 0) e_4000[3] = EnumInfo('scrollbarrole', 3, 0) e_4000[4] = EnumInfo('griprole', 4, 0) e_4000[5] = EnumInfo('soundrole', 5, 0) e_4000[6] = EnumInfo('cursorrole', 6, 0) e_4000[7] = EnumInfo('caretrole', 7, 0) e_4000[8] = EnumInfo('alertrole', 8, 0) e_4000[9] = EnumInfo('windowrole', 9, 0) e_4000[10] = EnumInfo('clientrole', 10, 0) e_4000[11] = EnumInfo('menupopuprole', 11, 0) e_4000[12] = EnumInfo('menuitemrole', 12, 0) e_4000[13] = EnumInfo('tooltiprole', 13, 0) e_4000[14] = EnumInfo('applicationrole', 14, 0) e_4000[15] = EnumInfo('documentrole', 15, 0) e_4000[16] = EnumInfo('panerole', 16, 0) e_4000[17] = EnumInfo('chartrole', 17, 0) e_4000[18] = EnumInfo('dialogrole', 18, 0) e_4000[19] = EnumInfo('borderrole', 19, 0) e_4000[20] = EnumInfo('groupingrole', 20, 0) e_4000[21] = EnumInfo('separatorrole', 21, 0) e_4000[22] = EnumInfo('toolbarrole', 22, 0) e_4000[23] = EnumInfo('statusbarrole', 23, 0) e_4000[24] = EnumInfo('tablerole', 24, 0) e_4000[25] = EnumInfo('columnheaderrole', 25, 0) e_4000[26] = EnumInfo('rowheaderrole', 26, 0) e_4000[27] = EnumInfo('columnrole', 27, 0) e_4000[28] = EnumInfo('rowrole', 28, 0) e_4000[29] = EnumInfo('cellrole', 29, 0) e_4000[30] = EnumInfo('linkrole', 30, 0) e_4000[31] = EnumInfo('helpballoonrole', 31, 0) e_4000[32] = EnumInfo('characterrole', 32, 0) e_4000[33] = EnumInfo('listrole', 33, 0) e_4000[34] = EnumInfo('listitemrole', 34, 0) e_4000[35] = EnumInfo('outlinerole', 35, 0) e_4000[36] = EnumInfo('outlineitemrole', 36, 0) e_4000[37] = EnumInfo('pagetabrole', 37, 0) e_4000[38] = EnumInfo('propertypagerole', 38, 0) e_4000[39] = EnumInfo('indicatorrole', 39, 0) e_4000[40] = EnumInfo('graphicrole', 40, 0) e_4000[41] = EnumInfo('statictextrole', 41, 0) e_4000[42] = EnumInfo('textrole', 42, 0) e_4000[43] = EnumInfo('pushbuttonrole', 43, 0) e_4000[44] = EnumInfo('checkbuttonrole', 44, 0) e_4000[45] = EnumInfo('radiobuttonrole', 45, 0) e_4000[46] = EnumInfo('comboboxrole', 46, 0) e_4000[47] = EnumInfo('droplistrole', 47, 0) e_4000[48] = EnumInfo('progressbarrole', 48, 0) e_4000[49] = EnumInfo('dialrole', 49, 0) e_4000[50] = EnumInfo('hotkeyfieldrole', 50, 0) e_4000[51] = EnumInfo('sliderrole', 51, 0) e_4000[52] = EnumInfo('spinbuttonrole', 52, 0) e_4000[53] = EnumInfo('diagramrole', 53, 0) e_4000[54] = EnumInfo('animationrole', 54, 0) e_4000[55] = EnumInfo('equationrole', 55, 0) e_4000[56] = EnumInfo('buttondropdownrole', 56, 0) e_4000[57] = EnumInfo('buttonmenurole', 57, 0) e_4000[58] = EnumInfo('buttondropdowngridrole', 58, 0) e_4000[59] = EnumInfo('whitespacerole', 59, 0) e_4000[60] = EnumInfo('pagetablistrole', 60, 0) e_4000[61] = EnumInfo('clockrole', 61, 0) e_4000[62] = EnumInfo('splitbuttonrole', 62, 0) e_4000[63] = EnumInfo('ipaddressrole', 63, 0) e_4000[64] = EnumInfo('outlinebuttonrole', 64, 0) enum_main[0x4000] = e_4000 e_4001 = dict() e_4001[0] = EnumInfo('left', 0, 0) e_4001[1] = EnumInfo('center', 1, 0) e_4001[2] = EnumInfo('right', 2, 0) e_4001[3] = EnumInfo('justify', 3, 0) enum_main[0x4001] = e_4001 e_4002 = dict() e_4002[0] = EnumInfo('cascaded', 0, 0) e_4002[1] = EnumInfo('layered', 1, 0) e_4002[2] = EnumInfo('original', 2, 0) enum_main[0x4002] = e_4002 e_4003 = dict() e_4003[0] = EnumInfo('tile', 0, 0) 
e_4003[1] = EnumInfo('layer', 1, 0) e_4003[2] = EnumInfo('cascade', 2, 0) e_4003[3] = EnumInfo('icons', 3, 0) e_4003[4] = EnumInfo('tilehorizontal', 4, 0) enum_main[0x4003] = e_4003 e_4004 = dict() e_4004[0] = EnumInfo('detail', 0, 0) e_4004[1] = EnumInfo('header', 1, 0) e_4004[2] = EnumInfo('footer', 2, 0) enum_main[0x4004] = e_4004 e_4005 = dict() e_4005[0] = EnumInfo('noborder', 0, 0) e_4005[1] = EnumInfo('shadowbox', 1, 0) e_4005[2] = EnumInfo('box', 2, 0) e_4005[3] = EnumInfo('resizeborder', 3, 0) e_4005[4] = EnumInfo('underline', 4, 0) e_4005[5] = EnumInfo('lowered', 5, 0) e_4005[6] = EnumInfo('raised', 6, 0) enum_main[0x4005] = e_4005 e_4006 = dict() e_4006[2] = EnumInfo('stylebox', 2, 0) e_4006[6] = EnumInfo('styleraised', 6, 0) e_4006[5] = EnumInfo('stylelowered', 5, 0) e_4006[1] = EnumInfo('styleshadowbox', 1, 0) enum_main[0x4006] = e_4006 e_4007 = dict() e_4007[0] = EnumInfo('ok', 0, 0) e_4007[1] = EnumInfo('okcancel', 1, 0) e_4007[2] = EnumInfo('retrycancel', 2, 0) e_4007[3] = EnumInfo('abortretryignore', 3, 0) e_4007[4] = EnumInfo('yesno', 4, 0) e_4007[5] = EnumInfo('yesnocancel', 5, 0) enum_main[0x4007] = e_4007 e_4008 = dict() e_4008[0] = EnumInfo('charsetansi', 0, 0) e_4008[1] = EnumInfo('charsetunicode', 1, 0) e_4008[2] = EnumInfo('charsetansihebrew', 2, 0) e_4008[3] = EnumInfo('charsetansiarabic', 3, 0) e_4008[4] = EnumInfo('charsetdbcsjapanese', 4, 0) e_4008[5] = EnumInfo('charsetdbcs', 5, 0) enum_main[0x4008] = e_4008 e_4009 = dict() e_4009[1] = EnumInfo('clipformattext', 1, 0) e_4009[2] = EnumInfo('clipformatbitmap', 2, 0) e_4009[3] = EnumInfo('clipformatmetafilepict', 3, 0) e_4009[4] = EnumInfo('clipformatsylk', 4, 0) e_4009[5] = EnumInfo('clipformatdif', 5, 0) e_4009[6] = EnumInfo('clipformattiff', 6, 0) e_4009[7] = EnumInfo('clipformatoemtext', 7, 0) e_4009[8] = EnumInfo('clipformatdib', 8, 0) e_4009[9] = EnumInfo('clipformatpalette', 9, 0) e_4009[10] = EnumInfo('clipformatpendata', 10, 0) e_4009[11] = EnumInfo('clipformatriff', 11, 0) e_4009[12] = EnumInfo('clipformatwave', 12, 0) e_4009[13] = EnumInfo('clipformatunicodetext', 13, 0) e_4009[14] = EnumInfo('clipformatenhmetafile', 14, 0) e_4009[15] = EnumInfo('clipformathdrop', 15, 0) e_4009[16] = EnumInfo('clipformatlocale', 16, 0) enum_main[0x4009] = e_4009 e_400a = dict() e_400a[0] = EnumInfo('noconnectprivilege', 0, 0) e_400a[1] = EnumInfo('connectprivilege', 1, 0) e_400a[2] = EnumInfo('connectwithadminprivilege', 2, 0) enum_main[0x400a] = e_400a e_400b = dict() e_400b[1] = EnumInfo('xpixelstounits', 1, 0) e_400b[2] = EnumInfo('ypixelstounits', 2, 0) e_400b[3] = EnumInfo('xunitstopixels', 3, 0) e_400b[4] = EnumInfo('yunitstopixels', 4, 0) enum_main[0x400b] = e_400b e_400c = dict() e_400c[0] = EnumInfo('i286', 0, 0) e_400c[1] = EnumInfo('i386', 1, 0) e_400c[2] = EnumInfo('i486', 2, 0) e_400c[3] = EnumInfo('pentium', 3, 0) e_400c[4] = EnumInfo('alpha', 4, 0) e_400c[5] = EnumInfo('mips', 5, 0) e_400c[6] = EnumInfo('m68000', 6, 0) e_400c[7] = EnumInfo('m68020', 7, 0) e_400c[8] = EnumInfo('m68030', 8, 0) e_400c[9] = EnumInfo('m68040', 9, 0) e_400c[10] = EnumInfo('powerpc', 10, 0) e_400c[11] = EnumInfo('sparc', 11, 0) e_400c[12] = EnumInfo('hppa', 12, 0) e_400c[13] = EnumInfo('powerrs', 13, 0) e_400c[14] = EnumInfo('ppc601', 14, 0) e_400c[15] = EnumInfo('ppc603', 15, 0) e_400c[16] = EnumInfo('ppc604', 16, 0) e_400c[17] = EnumInfo('ultrasparc', 17, 0) e_400c[18] = EnumInfo('ia64', 18, 0) e_400c[19] = EnumInfo('arm', 19, 0) enum_main[0x400c] = e_400c e_400d = dict() e_400d[0] = EnumInfo('dbnonotification', 0, 0) 
e_400d[1] = EnumInfo('dbserverdown', 1, 0) e_400d[2] = EnumInfo('dbfailover', 2, 0) e_400d[3] = EnumInfo('dbdatatruncate', 3, 0) enum_main[0x400d] = e_400d e_400e = dict() e_400e[0] = EnumInfo('directionleft', 0, 0) e_400e[1] = EnumInfo('directionup', 1, 0) e_400e[2] = EnumInfo('directionright', 2, 0) e_400e[3] = EnumInfo('directiondown', 3, 0) e_400e[4] = EnumInfo('directionall', 4, 0) enum_main[0x400e] = e_400e e_400f = dict() e_400f[0] = EnumInfo('inkpicautosize', 0, 0) e_400f[1] = EnumInfo('inkpiccentered', 1, 0) e_400f[2] = EnumInfo('inkpicnormal', 2, 0) e_400f[3] = EnumInfo('inkpicstretched', 3, 0) enum_main[0x400f] = e_400f e_4010 = dict() e_4010[0] = EnumInfo('partlydisplayed', 0, 0) e_4010[1] = EnumInfo('entirelydisplayed', 1, 0) enum_main[0x4010] = e_4010 e_4011 = dict() e_4011[0] = EnumInfo('begin', 0, 0) e_4011[1] = EnumInfo('end', 1, 0) e_4011[2] = EnumInfo('cancel', 2, 0) enum_main[0x4011] = e_4011 e_4012 = dict() e_4012[0] = EnumInfo('primary', 0, 0) e_4012[1] = EnumInfo('delete', 1, 0) e_4012[2] = EnumInfo('filter', 2, 0) enum_main[0x4012] = e_4012 e_4013 = dict() e_4013[0] = EnumInfo('failonanyconflict', 0, 0) e_4013[1] = EnumInfo('allowpartialchanges', 1, 0) enum_main[0x4013] = e_4013 e_4014 = dict() e_4014[0] = EnumInfo('notmodified', 0, 0) e_4014[1] = EnumInfo('datamodified', 1, 0) e_4014[2] = EnumInfo('new', 2, 0) e_4014[3] = EnumInfo('newmodified', 3, 0) enum_main[0x4014] = e_4014 e_4015 = dict() e_4015[0] = EnumInfo('encodingansi', 0, 0) e_4015[1] = EnumInfo('encodingutf16le', 1, 0) e_4015[2] = EnumInfo('encodingutf8', 2, 0) e_4015[3] = EnumInfo('encodingutf16be', 3, 0) enum_main[0x4015] = e_4015 e_4016 = dict() e_4016[0] = EnumInfo('exceptionfail', 0, 0) e_4016[1] = EnumInfo('exceptionignore', 1, 0) e_4016[2] = EnumInfo('exceptionretry', 2, 0) e_4016[3] = EnumInfo('exceptionsubstitutereturnvalue', 3, 0) enum_main[0x4016] = e_4016 e_4017 = dict() e_4017[0] = EnumInfo('read', 0, 0) e_4017[1] = EnumInfo('write', 1, 0) enum_main[0x4017] = e_4017 e_4018 = dict() e_4018[0] = EnumInfo('lockreadwrite', 0, 0) e_4018[1] = EnumInfo('lockread', 1, 0) e_4018[2] = EnumInfo('lockwrite', 2, 0) e_4018[3] = EnumInfo('shared', 3, 0) enum_main[0x4018] = e_4018 e_4019 = dict() e_4019[0] = EnumInfo('linemode', 0, 0) e_4019[1] = EnumInfo('streammode', 1, 0) e_4019[2] = EnumInfo('textmode', 2, 0) enum_main[0x4019] = e_4019 e_401a = dict() e_401a[0] = EnumInfo('solid', 0, 0) e_401a[1] = EnumInfo('horizontal', 1, 0) e_401a[2] = EnumInfo('vertical', 2, 0) e_401a[3] = EnumInfo('fdiagonal', 3, 0) e_401a[4] = EnumInfo('bdiagonal', 4, 0) e_401a[5] = EnumInfo('square', 5, 0) e_401a[6] = EnumInfo('diamond', 6, 0) enum_main[0x401a] = e_401a e_401b = dict() e_401b[0] = EnumInfo('filetypetext', 0, 0) e_401b[1] = EnumInfo('filetyperichtext', 1, 0) e_401b[2] = EnumInfo('filetypehtml', 2, 0) e_401b[3] = EnumInfo('filetypedoc', 3, 0) e_401b[4] = EnumInfo('filetypepdf', 4, 0) enum_main[0x401b] = e_401b e_401c = dict() e_401c[0] = EnumInfo('paperportrait', 0, 0) e_401c[1] = EnumInfo('paperlandscape', 1, 0) enum_main[0x401c] = e_401c e_401d = dict() e_401d[0] = EnumInfo('ansi', 0, 0) e_401d[1] = EnumInfo('defaultcharset', 1, 0) e_401d[2] = EnumInfo('symbol', 2, 0) e_401d[128] = EnumInfo('shiftjis', 128, 0) e_401d[129] = EnumInfo('hangeul', 129, 0) e_401d[136] = EnumInfo('chinesebig5', 136, 0) e_401d[255] = EnumInfo('oem', 255, 0) e_401d[134] = EnumInfo('gb2312charset', 134, 0) e_401d[130] = EnumInfo('johabcharset', 130, 0) e_401d[177] = EnumInfo('hebrewcharset', 177, 0) e_401d[178] = 
EnumInfo('arabiccharset', 178, 0) e_401d[161] = EnumInfo('greekcharset', 161, 0) e_401d[162] = EnumInfo('turkishcharset', 162, 0) e_401d[163] = EnumInfo('vietnamesecharset', 163, 0) e_401d[222] = EnumInfo('thaicharset', 222, 0) e_401d[238] = EnumInfo('easteuropecharset', 238, 0) e_401d[204] = EnumInfo('russiancharset', 204, 0) e_401d[77] = EnumInfo('maccharset', 77, 0) e_401d[186] = EnumInfo('balticcharset', 186, 0) enum_main[0x401d] = e_401d e_401e = dict() e_401e[0] = EnumInfo('anyfont', 0, 0) e_401e[1] = EnumInfo('roman', 1, 0) e_401e[2] = EnumInfo('swiss', 2, 0) e_401e[3] = EnumInfo('modern', 3, 0) e_401e[4] = EnumInfo('script', 4, 0) e_401e[5] = EnumInfo('decorative', 5, 0) enum_main[0x401e] = e_401e e_401f = dict() e_401f[0] = EnumInfo('default', 0, 0) e_401f[1] = EnumInfo('fixed', 1, 0) e_401f[2] = EnumInfo('variable', 2, 0) enum_main[0x401f] = e_401f e_4020 = dict() e_4020[0] = EnumInfo('adtdefault', 0, 0) e_4020[1] = EnumInfo('adttext', 1, 0) e_4020[2] = EnumInfo('adtdouble', 2, 0) e_4020[3] = EnumInfo('adtdatetime', 3, 0) e_4020[4] = EnumInfo('adtdate', 4, 0) e_4020[5] = EnumInfo('adttime', 5, 0) enum_main[0x4020] = e_4020 e_4021 = dict() e_4021[0] = EnumInfo('foreground', 0, 0) e_4021[1] = EnumInfo('background', 1, 0) e_4021[2] = EnumInfo('shade', 2, 0) e_4021[3] = EnumInfo('linecolor', 3, 0) enum_main[0x4021] = e_4021 e_4022 = dict() e_4022[0] = EnumInfo('xvalue', 0, 0) e_4022[1] = EnumInfo('yvalue', 1, 0) enum_main[0x4022] = e_4022 e_4023 = dict() e_4023[0] = EnumInfo('areagraph', 0, 0) e_4023[1] = EnumInfo('bargraph', 1, 0) e_4023[2] = EnumInfo('bar3dgraph', 2, 0) e_4023[3] = EnumInfo('bar3dobjgraph', 3, 0) e_4023[4] = EnumInfo('barstackgraph', 4, 0) e_4023[5] = EnumInfo('barstack3dobjgraph', 5, 0) e_4023[6] = EnumInfo('colgraph', 6, 0) e_4023[7] = EnumInfo('col3dgraph', 7, 0) e_4023[8] = EnumInfo('col3dobjgraph', 8, 0) e_4023[9] = EnumInfo('colstackgraph', 9, 0) e_4023[10] = EnumInfo('colstack3dobjgraph', 10, 0) e_4023[11] = EnumInfo('linegraph', 11, 0) e_4023[12] = EnumInfo('piegraph', 12, 0) e_4023[13] = EnumInfo('scattergraph', 13, 0) e_4023[14] = EnumInfo('pie3d', 14, 0) e_4023[15] = EnumInfo('area3d', 15, 0) e_4023[16] = EnumInfo('line3d', 16, 0) enum_main[0x4023] = e_4023 e_4024 = dict() e_4024[0] = EnumInfo('nolegend', 0, 0) e_4024[1] = EnumInfo('atleft', 1, 0) e_4024[2] = EnumInfo('atright', 2, 0) e_4024[3] = EnumInfo('attop', 3, 0) e_4024[4] = EnumInfo('atbottom', 4, 0) enum_main[0x4024] = e_4024 e_4025 = dict() e_4025[0] = EnumInfo('typegraph', 0, 0) e_4025[1] = EnumInfo('typeseries', 1, 0) e_4025[2] = EnumInfo('typedata', 2, 0) e_4025[3] = EnumInfo('typecategory', 3, 0) e_4025[4] = EnumInfo('typetitle', 4, 0) e_4025[5] = EnumInfo('typeserieslabel', 5, 0) e_4025[6] = EnumInfo('typecategorylabel', 6, 0) e_4025[7] = EnumInfo('typevaluelabel', 7, 0) e_4025[8] = EnumInfo('typelegend', 8, 0) e_4025[9] = EnumInfo('typeseriesaxis', 9, 0) e_4025[10] = EnumInfo('typecategoryaxis', 10, 0) e_4025[11] =
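The module above is a flat set of lookup tables keyed first by an enum group id (0x4000, 0x4001, ...) and then by the member's numeric value. A hedged sketch of how such tables might be queried follows; enum_name_for() is a hypothetical helper and is not part of the original pbd/system_enums.py.

# Hypothetical helper (not in the original module) showing how enum_main could
# be queried: group id (e.g. 0x4007) -> numeric value -> member name.
def enum_name_for(group_id, value, default=None):
    group = enum_main.get(group_id)
    if group is None:
        return default
    info = group.get(value)
    return info.name if info is not None else default

# Examples, using values that appear in the tables above:
assert enum_name_for(0x4007, 4) == 'yesno'            # message-box style group
assert enum_name_for(0x4007, 99, default='?') == '?'  # unknown value falls back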
<filename>tests/unittests/test_mock_network_plugin_public_nat.py # Copyright (c) 2014-2020 Cloudify Platform Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock import unittest from cloudify import exceptions as cfy_exc from tests.unittests import test_mock_base from vcloud_network_plugin import public_nat from vcloud_network_plugin import utils import vcloud_network_plugin import vcloud_plugin_common from IPy import IP class NetworkPluginPublicNatMockTestCase(test_mock_base.TestBase): def test_is_rule_exists(self): rule_inlist = self.generate_nat_rule( 'SNAT', 'external', '22', 'internal', '11', 'TCP' ) # exist self.assertTrue( public_nat._is_rule_exists( [rule_inlist], 'SNAT', 'external', '22', 'internal', '11', 'TCP') ) # not exist self.assertFalse( public_nat._is_rule_exists( [rule_inlist], 'DNAT', 'external', '22', 'internal', '11', 'UDP') ) def test_get_original_port_for_delete(self): # no replacement fake_ctx = self.generate_relation_context_with_current_ctx() fake_ctx._target.instance.runtime_properties = { public_nat.PORT_REPLACEMENT: {}} self.assertEqual( public_nat._get_original_port_for_delete( fake_ctx, "10.1.1.1", "11"), "11" ) # replacement for other fake_ctx = self.generate_relation_context_with_current_ctx() fake_ctx._target.instance.runtime_properties = { public_nat.PORT_REPLACEMENT: { "10.1.1.2:11": '12' } } self.assertEqual( public_nat._get_original_port_for_delete( fake_ctx, "10.1.1.1", "11"), "11" ) # replacement for other fake_ctx = self.generate_relation_context_with_current_ctx() fake_ctx._target.instance.runtime_properties = { public_nat.PORT_REPLACEMENT: { "10.1.1.2:11": '12' } } self.assertEqual( public_nat._get_original_port_for_delete( fake_ctx, "10.1.1.2", "11"), "12" ) def test_get_original_port_for_create(self): gateway = mock.Mock() fake_ctx = self.generate_relation_context_with_current_ctx() rule_inlist = self.generate_nat_rule( 'DNAT', 'external', 'any', 'internal', '11', 'TCP') gateway.get_nat_rules = mock.MagicMock(return_value=[rule_inlist]) # exeption about same port with self.assertRaises(cfy_exc.NonRecoverableError): public_nat._get_original_port_for_create( fake_ctx, gateway, 'DNAT', 'external', 'any', 'internal', '11', 'TCP' ) # everythiong fine with different port self.assertEqual( public_nat._get_original_port_for_create( fake_ctx, gateway, 'DNAT', 'external', '12', 'internal', '12', 'TCP' ), 12) # relink some port to other # port have not used yet self.assertEqual( public_nat._get_original_port_for_create( fake_ctx, gateway, 'SNAT', 'external', 13, 'internal', '12', 'TCP'), 13) def test_get_original_port_for_create_with_ctx(self): # with replace, but without replace table - up port +1 fake_ctx = self.generate_relation_context_with_current_ctx() fake_ctx._target.instance.runtime_properties = { public_nat.PORT_REPLACEMENT: {} } gateway = mock.Mock() rule_inlist = self.generate_nat_rule( 'SNAT', 'external', 10, 'internal', 11, 'TCP' ) gateway.get_nat_rules = mock.MagicMock(return_value=[rule_inlist]) self.assertEqual( 
public_nat._get_original_port_for_create( fake_ctx, gateway, 'SNAT', 'external', '10', 'internal', '11', 'TCP' ), 11 ) self.assertEqual( fake_ctx._target.instance.runtime_properties, { public_nat.PORT_REPLACEMENT: { 'external:10': 11 } } ) # same but without replacement at all fake_ctx._target.instance.runtime_properties = {} self.assertEqual( public_nat._get_original_port_for_create( fake_ctx, gateway, 'SNAT', 'external', '10', 'internal', '11', 'TCP' ), 11 ) self.assertEqual( fake_ctx._target.instance.runtime_properties, { public_nat.PORT_REPLACEMENT: { 'external:10': 11 } } ) # we dont have enought ports rule_inlist = self.generate_nat_rule( 'SNAT', 'external', utils.MAX_PORT_NUMBER, 'internal', 11, 'TCP' ) gateway.get_nat_rules = mock.MagicMock(return_value=[rule_inlist]) fake_ctx._target.instance.runtime_properties = {} with self.assertRaises(cfy_exc.NonRecoverableError): public_nat._get_original_port_for_create( fake_ctx, gateway, 'SNAT', 'external', utils.MAX_PORT_NUMBER, 'internal', '11', 'TCP' ) def test_get_gateway_ip_range(self): gate = mock.Mock() # empty list of networks gate.get_dhcp_pools = mock.MagicMock(return_value=[]) self.assertEqual( public_nat._get_gateway_ip_range(gate, 'something'), None ) # exist other network gate.get_dhcp_pools = mock.MagicMock(return_value=[ self.genarate_pool( 'test_network', '127.0.0.1', '127.0.0.255' ) ]) self.assertEqual( public_nat._get_gateway_ip_range(gate, 'something'), None ) # exist correct network self.assertEqual( public_nat._get_gateway_ip_range(gate, 'test_network'), (IP('127.0.0.1'), IP('127.0.0.255')) ) def test_obtain_public_ip(self): fake_ctx = self.generate_relation_context_with_current_ctx() fake_ctx._target.instance.runtime_properties = { vcloud_network_plugin.PUBLIC_IP: '192.168.1.1' } gateway = mock.Mock() fake_client = mock.Mock() # exist some ip for delete self.assertEqual( public_nat._obtain_public_ip( fake_client, fake_ctx, gateway, vcloud_network_plugin.DELETE ), '192.168.1.1' ) # no ip for delete fake_ctx._target.instance.runtime_properties = {} with self.assertRaises(cfy_exc.NonRecoverableError): public_nat._obtain_public_ip( fake_client, fake_ctx, gateway, vcloud_network_plugin.DELETE ) # unknow operation with self.assertRaises(cfy_exc.NonRecoverableError): public_nat._obtain_public_ip( fake_client, fake_ctx, gateway, 'unknow operation' ) # exist some public ip fake_ctx._target.node.properties = { 'nat': { vcloud_network_plugin.PUBLIC_IP: '192.168.1.1' } } self.assertEqual( public_nat._obtain_public_ip( fake_client, fake_ctx, gateway, vcloud_network_plugin.CREATE ), '192.168.1.1' ) # no public ip yet fake_ctx._target.node.properties = { 'nat': {} } fake_ctx._source.node.properties = { 'vcloud_config': { 'vdc': 'vdc_name', 'service_type': vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE } } gateway.get_public_ips = mock.MagicMock(return_value=[ '10.18.1.1', '10.18.1.2' ]) rule_inlist = self.generate_nat_rule( 'DNAT', '10.18.1.1', 'any', 'internal', '11', 'TCP' ) gateway.get_nat_rules = mock.MagicMock( return_value=[rule_inlist] ) with mock.patch( 'vcloud_plugin_common.ctx', fake_ctx ): self.assertEqual( public_nat._obtain_public_ip( fake_client, fake_ctx, gateway, vcloud_network_plugin.CREATE ), '10.18.1.2' ) def test_get_network_ip_range(self): # dont have ip range for this network fake_client = self.generate_client() self.assertEqual( public_nat._get_network_ip_range( fake_client, "some_org", "some_network" ), None ) fake_client.get_networks.assert_called_with("some_org") # different network network = 
self.generate_fake_client_network( name="some", start_ip="127.1.1.1", end_ip="127.1.1.255" ) fake_client.get_networks = mock.MagicMock(return_value=[network]) self.assertEqual( public_nat._get_network_ip_range( fake_client, "some_org", "some_network" ), None ) # correct network name fake_client.get_networks = mock.MagicMock(return_value=[network]) self.assertEqual( public_nat._get_network_ip_range( fake_client, "some_org", "some" ), (IP('127.1.1.1'), IP('127.1.1.255')) ) def test_create_ip_range(self): # context fake_ctx = self.generate_relation_context_with_current_ctx() fake_ctx._source.instance.runtime_properties = { vcloud_network_plugin.network.VCLOUD_NETWORK_NAME: "some" } fake_ctx._source.node.properties = { 'vcloud_config': { 'org': 'some_org', 'vdc': 'some_vdc' } } fake_ctx._target.instance.runtime_properties = {} # vca client fake_client = self.generate_client() # gateway gate = fake_client._vdc_gateway gate.get_dhcp_pools = mock.MagicMock(return_value=[]) network = self.generate_fake_client_network( name="some", start_ip="127.1.1.100", end_ip="127.1.1.200" ) fake_client.get_networks = mock.MagicMock(return_value=[network]) with mock.patch( 'vcloud_plugin_common.ctx', fake_ctx ): # empty gateway dhcp pool # vca pool: 127.1.1.100..127.1.1.200 self.assertEqual( public_nat._create_ip_range(fake_ctx, fake_client, gate), '127.1.1.100 - 127.1.1.200' ) fake_client.get_networks.assert_called_with("some_vdc") # network from gate gate.get_dhcp_pools = mock.MagicMock(return_value=[ self.genarate_pool( "some", '127.1.1.1', '127.1.1.255' ) ]) self.assertEqual( public_nat._create_ip_range(fake_ctx, fake_client, gate), '127.1.1.1 - 127.1.1.255' ) # network not exist network = self.generate_fake_client_network( name="other", start_ip="127.1.1.100", end_ip="127.1.1.200" ) fake_client.get_networks = mock.MagicMock( return_value=[network] ) with self.assertRaises(cfy_exc.NonRecoverableError): public_nat._create_ip_range(fake_ctx, fake_client, gate) def test_save_configuration(self): def _context_for_delete(service_type): """ create correct context for delete """ fake_ctx = self.generate_relation_context_with_current_ctx() self.set_services_conf_result( gateway, vcloud_plugin_common.TASK_STATUS_SUCCESS ) fake_ctx._target.instance.runtime_properties = { vcloud_network_plugin.PUBLIC_IP: "1.2.3.4", public_nat.PORT_REPLACEMENT: { '127.0.0.1:10': '100' }, vcloud_network_plugin.SSH_PORT: '23', vcloud_network_plugin.SSH_PUBLIC_IP: '10.1.1.1' } properties = { 'vcloud_config': { 'edge_gateway': 'gateway', 'vdc': 'vdc', 'org': 'some_org' } } if service_type: properties['vcloud_config']['service_type'] = service_type fake_ctx._source.node.properties = properties return fake_ctx def _ip_exist_in_runtime(fake_ctx): """ ip still exist in ctx """ runtime_properties = fake_ctx._target.instance.runtime_properties return vcloud_network_plugin.PUBLIC_IP in runtime_properties fake_client = self.generate_client() gateway = fake_client._vdc_gateway # cant save configuration: server busy self.set_services_conf_result( gateway, None ) self.set_gateway_busy(gateway) fake_ctx = self.generate_relation_context_with_current_ctx() self.assertFalse(public_nat._save_configuration( fake_ctx, gateway, fake_client, vcloud_network_plugin.CREATE, "1.2.3.4" )) # operation create fake_ctx = self.generate_relation_context_with_current_ctx() self.set_services_conf_result( gateway, vcloud_plugin_common.TASK_STATUS_SUCCESS ) # success save configuration with mock.patch('vcloud_plugin_common.ctx', fake_ctx): public_nat._save_configuration( 
fake_ctx, gateway, fake_client, vcloud_network_plugin.CREATE, "1.2.3.4") self.assertEqual( fake_ctx._target.instance.runtime_properties, { vcloud_network_plugin.PUBLIC_IP: "1.2.3.4" } ) # delete - subscription service fake_ctx = _context_for_delete( vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE ) with mock.patch( 'vcloud_plugin_common.ctx', fake_ctx ): public_nat._save_configuration( fake_ctx, gateway, fake_client, vcloud_network_plugin.DELETE, "1.2.3.4" ) self.assertFalse(_ip_exist_in_runtime(fake_ctx)) # delete - without service fake_ctx = _context_for_delete(None) with mock.patch( 'vcloud_plugin_common.ctx', fake_ctx ): public_nat._save_configuration( fake_ctx, gateway, fake_client, vcloud_network_plugin.DELETE, "1.2.3.4" ) self.assertFalse(_ip_exist_in_runtime(fake_ctx)) # delete - ondemand service - nat fake_ctx = _context_for_delete( vcloud_plugin_common.ONDEMAND_SERVICE_TYPE ) fake_ctx._target.node.properties = { 'nat': { vcloud_network_plugin.PUBLIC_IP: "1.2.3.4" } } with mock.patch( 'vcloud_plugin_common.ctx', fake_ctx ): public_nat._save_configuration( fake_ctx, gateway, fake_client, vcloud_network_plugin.DELETE, "1.2.3.4" ) self.assertFalse(_ip_exist_in_runtime(fake_ctx)) # delete - ondemand - not nat gateway.deallocate_public_ip = mock.MagicMock( return_value=self.generate_task( vcloud_plugin_common.TASK_STATUS_SUCCESS ) ) fake_ctx = _context_for_delete( vcloud_plugin_common.ONDEMAND_SERVICE_TYPE ) fake_ctx._target.node.properties = { 'nat': {} } with mock.patch( 'vcloud_plugin_common.ctx', fake_ctx ): # import pdb;pdb.set_trace() public_nat._save_configuration( fake_ctx, gateway, fake_client, vcloud_network_plugin.DELETE, "1.2.3.4" ) gateway.deallocate_public_ip.assert_called_with("1.2.3.4") self.assertFalse(_ip_exist_in_runtime(fake_ctx)) runtime_properties = fake_ctx._target.instance.runtime_properties self.assertFalse( public_nat.PORT_REPLACEMENT in runtime_properties ) self.assertFalse( vcloud_network_plugin.SSH_PORT in runtime_properties ) self.assertFalse( vcloud_network_plugin.SSH_PUBLIC_IP in runtime_properties ) def test_nat_network_operation(self): fake_client = self.generate_client() fake_ctx = self.generate_relation_context_with_current_ctx() gateway = fake_client._vdc_gateway # used wrong operation with self.assertRaises(cfy_exc.NonRecoverableError): public_nat.nat_network_operation( fake_ctx, fake_client, gateway, "unknow", "DNAT", "1.2.3.4", "2.3.4.5", "11", "11", "TCP" ) # run correct operation/rule for operation in [ vcloud_network_plugin.DELETE, vcloud_network_plugin.CREATE ]: for rule_type in ["SNAT", "DNAT"]: # cleanup properties fake_ctx = self.generate_relation_context_with_current_ctx() fake_ctx._target.instance.runtime_properties = { public_nat.PORT_REPLACEMENT: {}} fake_ctx._source.instance.runtime_properties = {} # checks with mock.patch( 'vcloud_plugin_common.ctx', fake_ctx ): public_nat.nat_network_operation( fake_ctx, fake_client, gateway, operation, rule_type, "1.2.3.4", "2.3.4.5", "11", "11", "TCP" ) if rule_type == "DNAT": if operation == vcloud_network_plugin.DELETE: gateway.del_nat_rule.assert_called_with( 'DNAT', '1.2.3.4', '11', '2.3.4.5', '11', 'TCP' ) else: gateway.add_nat_rule.assert_called_with( 'DNAT', '1.2.3.4', '11', '2.3.4.5', '11', 'TCP' ) else: if operation == vcloud_network_plugin.DELETE: gateway.del_nat_rule.assert_called_with( 'SNAT', '2.3.4.5', 'any', '1.2.3.4', 'any', 'any' ) else: gateway.add_nat_rule.assert_called_with( 'SNAT', '2.3.4.5', 'any', '1.2.3.4', 'any', 'any' ) # cleanup properties fake_ctx = 
self.generate_relation_context_with_current_ctx() fake_ctx._target.instance.runtime_properties = { public_nat.PORT_REPLACEMENT: {}} fake_ctx._source.instance.runtime_properties = {} # save ssh port with mock.patch( 'vcloud_plugin_common.ctx', fake_ctx ): public_nat.nat_network_operation( fake_ctx, fake_client, gateway, vcloud_network_plugin.CREATE, "DNAT", "1.2.3.4", "2.3.4.5", "43", "22", "TCP" ) self.assertEqual( {'port_replacement': {'1.2.3.4:43': 43}}, fake_ctx._target.instance.runtime_properties ) self.assertEqual( {'ssh_port': '43', 'ssh_public_ip': '1.2.3.4'}, fake_ctx._source.instance.runtime_properties ) # error with type with self.assertRaises(cfy_exc.NonRecoverableError): public_nat.nat_network_operation( fake_ctx, fake_client, gateway, vcloud_network_plugin.CREATE, "QNAT", "1.2.3.4", "2.3.4.5", "43", "22", "TCP" ) def generate_client_and_context_server(self, no_vmip=False): """ for test prepare_server_operation based operations """ vm_ip = '1.1.1.1' if not no_vmip else None fake_client = self.generate_client(vms_networks=[{ 'is_connected': True, 'network_name': 'network_name', 'is_primary': True, 'ip': vm_ip }]) self.set_network_routed_in_client(fake_client) fake_ctx = self.generate_relation_context_with_current_ctx() fake_ctx._target.node.properties = { 'nat': { 'edge_gateway': 'gateway' }
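These tests exercise the public NAT helpers purely through unittest.mock: the vCloud gateway is a Mock whose get_nat_rules / add_nat_rule / del_nat_rule calls are stubbed and then asserted on. A minimal sketch of that pattern follows; add_rule_if_missing() and its rule arguments are illustrative stand-ins, not the plugin's real internals.

# Sketch of the gateway-mocking pattern used above (standard library only).
# add_rule_if_missing() is an illustrative stand-in for the plugin's own logic.
from unittest import mock

def add_rule_if_missing(gateway, rule_type, original_ip, oport, translated_ip, tport, proto):
    # Only add the rule when the gateway does not already report any rules.
    if not gateway.get_nat_rules():
        gateway.add_nat_rule(rule_type, original_ip, oport, translated_ip, tport, proto)

gateway = mock.Mock()
gateway.get_nat_rules = mock.MagicMock(return_value=[])   # pretend no rules exist yet
add_rule_if_missing(gateway, 'DNAT', '1.2.3.4', '11', '2.3.4.5', '11', 'TCP')

# Assert the forwarded call, exactly as the tests above do with assert_called_with.
gateway.add_nat_rule.assert_called_with('DNAT', '1.2.3.4', '11', '2.3.4.5', '11', 'TCP')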
<filename>mockupdb/__init__.py<gh_stars>0 # -*- coding: utf-8 -*- # Copyright 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Simulate a MongoDB server, for use in unittests.""" from __future__ import print_function __author__ = '<NAME>' __email__ = '<EMAIL>' __version__ = '1.8.0.dev0' import atexit import contextlib import datetime import errno import functools import inspect import os import random import select import ssl as _ssl import socket import struct import traceback import threading import time import weakref import sys from codecs import utf_8_decode as _utf_8_decode try: from queue import Queue, Empty except ImportError: from Queue import Queue, Empty try: from collections.abc import Mapping except: from collections import Mapping try: from collections import OrderedDict except: from ordereddict import OrderedDict # Python 2.6, "pip install ordereddict" try: from io import StringIO except ImportError: from cStringIO import StringIO try: from urllib.parse import quote_plus except ImportError: # Python 2 from urllib import quote_plus import bson from bson import codec_options, json_util CODEC_OPTIONS = codec_options.CodecOptions(document_class=OrderedDict) PY3 = sys.version_info[0] == 3 if PY3: string_type = str text_type = str def reraise(exctype, value, trace=None): raise exctype(str(value)).with_traceback(trace) else: string_type = basestring text_type = unicode # "raise x, y, z" raises SyntaxError in Python 3. exec ("""def reraise(exctype, value, trace=None): raise exctype, str(value), trace """) __all__ = [ 'MockupDB', 'go', 'going', 'Future', 'wait_until', 'interactive_server', 'OP_REPLY', 'OP_UPDATE', 'OP_INSERT', 'OP_QUERY', 'OP_GET_MORE', 'OP_DELETE', 'OP_KILL_CURSORS', 'OP_MSG', 'QUERY_FLAGS', 'UPDATE_FLAGS', 'INSERT_FLAGS', 'DELETE_FLAGS', 'REPLY_FLAGS', 'OP_MSG_FLAGS', 'Request', 'Command', 'OpQuery', 'OpGetMore', 'OpKillCursors', 'OpInsert', 'OpUpdate', 'OpDelete', 'OpReply', 'OpMsg', 'Matcher', 'absent', ] def go(fn, *args, **kwargs): """Launch an operation on a thread and get a handle to its future result. >>> from time import sleep >>> def print_sleep_print(duration): ... sleep(duration) ... print('hello from background thread') ... sleep(duration) ... print('goodbye from background thread') ... return 'return value' ... >>> future = go(print_sleep_print, 0.1) >>> sleep(0.15) hello from background thread >>> print('main thread') main thread >>> result = future() goodbye from background thread >>> result 'return value' """ if not callable(fn): raise TypeError('go() requires a function, not %r' % (fn,)) result = [None] error = [] def target(): try: result[0] = fn(*args, **kwargs) except Exception: # Are we in interpreter shutdown? 
if sys: error.extend(sys.exc_info()) t = threading.Thread(target=target) t.daemon = True t.start() def get_result(timeout=10): t.join(timeout) if t.is_alive(): raise AssertionError('timed out waiting for %r' % fn) if error: reraise(*error) return result[0] return get_result @contextlib.contextmanager def going(fn, *args, **kwargs): """Launch a thread and wait for its result before exiting the code block. >>> with going(lambda: 'return value') as future: ... pass >>> future() # Won't block, the future is ready by now. 'return value' Or discard the result: >>> with going(lambda: "don't care"): ... pass If an exception is raised within the context, the result is lost: >>> with going(lambda: 'return value') as future: ... assert 1 == 0 Traceback (most recent call last): ... AssertionError """ future = go(fn, *args, **kwargs) try: yield future except: # We are raising an exception, just try to clean up the future. exc_info = sys.exc_info() try: # Shorter than normal timeout. future(timeout=1) except: log_message = ('\nerror in %s:\n' % format_call(inspect.currentframe())) sys.stderr.write(log_message) traceback.print_exc() # sys.stderr.write('exc in %s' % format_call(inspect.currentframe())) reraise(*exc_info) else: # Raise exception or discard result. future(timeout=10) class Future(object): def __init__(self): self._result = None self._event = threading.Event() def result(self, timeout=None): self._event.wait(timeout) # wait() always returns None in Python 2.6. if not self._event.is_set(): raise AssertionError('timed out waiting for Future') return self._result def set_result(self, result): if self._event.is_set(): raise RuntimeError("Future is already resolved") self._result = result self._event.set() def wait_until(predicate, success_description, timeout=10): """Wait up to 10 seconds (by default) for predicate to be true. E.g.: wait_until(lambda: client.primary == ('a', 1), 'connect to the primary') If the lambda-expression isn't true after 10 seconds, we raise AssertionError("Didn't ever connect to the primary"). Returns the predicate's first true value. 
""" start = time.time() while True: retval = predicate() if retval: return retval if time.time() - start > timeout: raise AssertionError("Didn't ever %s" % success_description) time.sleep(0.1) OP_REPLY = 1 OP_UPDATE = 2001 OP_INSERT = 2002 OP_QUERY = 2004 OP_GET_MORE = 2005 OP_DELETE = 2006 OP_KILL_CURSORS = 2007 OP_MSG = 2013 QUERY_FLAGS = OrderedDict([ ('TailableCursor', 2), ('SlaveOkay', 4), ('OplogReplay', 8), ('NoTimeout', 16), ('AwaitData', 32), ('Exhaust', 64), ('Partial', 128)]) UPDATE_FLAGS = OrderedDict([ ('Upsert', 1), ('MultiUpdate', 2)]) INSERT_FLAGS = OrderedDict([ ('ContinueOnError', 1)]) DELETE_FLAGS = OrderedDict([ ('SingleRemove', 1)]) REPLY_FLAGS = OrderedDict([ ('CursorNotFound', 1), ('QueryFailure', 2)]) OP_MSG_FLAGS = OrderedDict([ ('checksumPresent', 1), ('moreToCome', 2)]) _UNPACK_BYTE = struct.Struct("<b").unpack _UNPACK_INT = struct.Struct("<i").unpack _UNPACK_UINT = struct.Struct("<I").unpack _UNPACK_LONG = struct.Struct("<q").unpack def _get_c_string(data, position): """Decode a BSON 'C' string to python unicode string.""" end = data.index(b"\x00", position) return _utf_8_decode(data[position:end], None, True)[0], end + 1 class _PeekableQueue(Queue): """Only safe from one consumer thread at a time.""" _NO_ITEM = object() def __init__(self, *args, **kwargs): Queue.__init__(self, *args, **kwargs) self._item = _PeekableQueue._NO_ITEM def peek(self, block=True, timeout=None): if self._item is not _PeekableQueue._NO_ITEM: return self._item else: self._item = self.get(block, timeout) return self._item def get(self, block=True, timeout=None): if self._item is not _PeekableQueue._NO_ITEM: item = self._item self._item = _PeekableQueue._NO_ITEM return item else: return Queue.get(self, block, timeout) def _ismap(obj): return isinstance(obj, Mapping) def _islist(obj): return isinstance(obj, list) def _dt_rounded(dt): """Python datetimes have microsecond precision, BSON only milliseconds.""" return dt.replace(microsecond=dt.microsecond - dt.microsecond % 1000) class Request(object): """Base class for `Command`, `OpMsg`, and so on. Some useful asserts you can do in tests: >>> {'_id': 0} in OpInsert({'_id': 0}) True >>> {'_id': 1} in OpInsert({'_id': 0}) False >>> {'_id': 1} in OpInsert([{'_id': 0}, {'_id': 1}]) True >>> {'_id': 1} == OpInsert([{'_id': 0}, {'_id': 1}])[1] True >>> 'field' in OpMsg(field=1) True >>> 'field' in OpMsg() False >>> 'field' in OpMsg('ismaster') False >>> OpMsg(ismaster=False)['ismaster'] is False True """ opcode = None is_command = None _non_matched_attrs = 'doc', 'docs' _flags_map = None def __init__(self, *args, **kwargs): self._flags = kwargs.pop('flags', None) self._namespace = kwargs.pop('namespace', None) self._client = kwargs.pop('_client', None) self._request_id = kwargs.pop('request_id', None) self._server = kwargs.pop('_server', None) self._verbose = self._server and self._server.verbose self._server_port = kwargs.pop('server_port', None) self._docs = make_docs(*args, **kwargs) if not all(_ismap(doc) for doc in self._docs): raise_args_err() @property def doc(self): """The request document, if there is exactly one. Use this for queries, commands, and legacy deletes. Legacy writes may have many documents, OP_GET_MORE and OP_KILL_CURSORS have none. 
""" assert len(self.docs) == 1, '%r has more than one document' % self return self.docs[0] @property def docs(self): """The request documents, if any.""" return self._docs @property def namespace(self): """The operation namespace or None.""" return self._namespace @property def flags(self): """The request flags or None.""" return self._flags @property def slave_ok(self): """True if the SlaveOkay wire protocol flag is set.""" return self._flags and bool( self._flags & QUERY_FLAGS['SlaveOkay']) slave_okay = slave_ok """Synonym for `.slave_ok`.""" @property def request_id(self): """The request id or None.""" return self._request_id @property def client_port(self): """Client connection's TCP port.""" address = self._client.getpeername() if isinstance(address, tuple): return address[1] # Maybe a Unix domain socket connection. return 0 @property def server(self): """The `.MockupDB` server.""" return self._server def assert_matches(self, *args, **kwargs): """Assert this matches a :ref:`message spec <message spec>`. Returns self. """ matcher = make_matcher(*args, **kwargs) if not matcher.matches(self): raise AssertionError('%r does not match %r' % (self, matcher)) return self def matches(self, *args, **kwargs): """True if this matches a :ref:`message spec <message spec>`.""" return make_matcher(*args, **kwargs).matches(self) def replies(self, *args, **kwargs): """Send an `OpReply` to the client. The default reply to a command is ``{'ok': 1}``, otherwise the default is empty (no documents). Returns True so it is suitable as an `~MockupDB.autoresponds` handler. """ self._replies(*args, **kwargs) return True ok = send = sends = reply = replies """Synonym for `.replies`.""" def fail(self, err='MockupDB query failure', *args, **kwargs): """Reply to a query with the QueryFailure flag and an '$err' key. Returns True so it is suitable as an `~MockupDB.autoresponds` handler. """ kwargs.setdefault('flags', 0) kwargs['flags'] |= REPLY_FLAGS['QueryFailure'] kwargs['$err'] = err self.replies(*args, **kwargs) return True def command_err(self, code=1, errmsg='MockupDB command failure', *args, **kwargs): """Error reply to a command. Returns True so it is suitable as an `~MockupDB.autoresponds` handler. """ kwargs.setdefault('ok', 0) kwargs['code'] = code kwargs['errmsg'] = errmsg self.replies(*args, **kwargs) return
<gh_stars>0 from .models import * from datetime import datetime from django.core.exceptions import ObjectDoesNotExist import json from WebAppsMain.settings import TEST_WINDOWS_USERNAME, TEST_PMS, TEST_SUPERVISOR_PMS, TEST_COMMISSIONER_PMS from WebAppsMain.testing_utils import HttpPostTestCase, HttpGetTestCase from django.db.models import Max, Q import copy ### DO NOT RUN THIS IN PROD ENVIRONMENT DEFAULT_WORK_UNIT = '1600' def get_or_create_user(windows_username=TEST_WINDOWS_USERNAME): """create or get an user and return the user object. Defaults to TEST_WINDOWS_USERNAME as the user""" try: wu = TblWorkUnits.objects.using('OrgChartWrite').get( wu__exact=DEFAULT_WORK_UNIT ) pms = TblEmployees.objects.using('OrgChartWrite').get_or_create( pms=TEST_PMS )[0] pms.lv='B' pms.wu=wu pms.save(using='OrgChartWrite') user = TblUsers.objects.using('OrgChartWrite').get_or_create( windows_username=windows_username ,pms=pms )[0] user.active = True user.save(using='OrgChartWrite') return user except Exception as e: raise ValueError(f"get_or_create_user(): {e}") def grant_admin_status(windows_username=TEST_WINDOWS_USERNAME): """create or get an user and set it up with admin status and return the user object. Defaults to TEST_WINDOWS_USERNAME as the user""" try: user = get_or_create_user(windows_username=windows_username) user.is_admin=True user.active=True user.save(using='OrgChartWrite') return user except Exception as e: raise ValueError(f"grant_admin_status(): {e}") def remove_admin_status(windows_username=TEST_WINDOWS_USERNAME): """removes the admin status of an user""" try: user = get_or_create_user(windows_username=windows_username) user.is_admin=False user.save(using='OrgChartWrite') return user except Exception as e: raise ValueError(f"remove_admin_status(): {e}") def grant_active_user_status(windows_username=TEST_WINDOWS_USERNAME): """Set user as active""" try: #TODO IMPLEMENT THIS WHEN NEW USER AND APP PERMISSION MANAGEMENT IS IN PLACE ...#TODO except Exception as e: raise ValueError(f"grant_active_user_status(): {e}") def remove_active_user_status(windows_username=TEST_WINDOWS_USERNAME): """Set user as inactive""" try: #TODO IMPLEMENT THIS WHEN NEW USER AND APP PERMISSION MANAGEMENT IS IN PLACE ...#TODO except Exception as e: raise ValueError(f"remove_active_user_status(): {e}") def set_up_permissions(windows_username=TEST_WINDOWS_USERNAME, work_units=[DEFAULT_WORK_UNIT]): """ set up permissions for a user. If user is admin, the permissions added will probably mean nothing. @windows_username is self explanatory, just one name @work_units should be a list of work units """ try: for work_unit in work_units: work_unit_obj = TblWorkUnits.objects.using('OrgChartWrite').get( wu__exact=work_unit ,active=True ) user_obj = get_or_create_user(windows_username=windows_username) permission = TblPermissionsWorkUnit.objects.using('OrgChartWrite').get_or_create( user_id=user_obj ,wu=work_unit_obj )[0] permission.save(using="OrgChartWrite") except Exception as e: raise ValueError(f"set_up_permissions(): {e}") def tear_down_permissions(windows_username=TEST_WINDOWS_USERNAME): """remove all permissions for an user. 
If user is admin, the permissions removed will probably mean nothing.""" try: permissions = TblPermissionsWorkUnit.objects.using('OrgChartWrite').filter( user_id__windows_username__exact=windows_username ) for each in permissions: each.delete(using='OrgChartWrite') except Exception as e: raise ValueError(f"tear_down_permissions_for_user(): {e}") def tear_down(windows_username=TEST_WINDOWS_USERNAME): """Removes admin status of @windows_username, and set all its permissions to inactive. Defaults to TEST_WINDOWS_USERNAME""" try: remove_admin_status(windows_username=windows_username) tear_down_permissions(windows_username=windows_username) except Exception as e: raise ValueError(f"tear_down(): {e}") def get_active_lv_list(): return ['B', 'C', 'K', 'M', 'N', 'Q', 'R', 'S'] def get_active_tblemployee_qryset(): """ Return a queryset filtered to contain only records with active lv status plus a subset of 'L' leave status Lv status 'L' is usually Inactive, but when it is due to 'B10' Leave Status Reason (Look up from payroll history), that employee is actually Active """ try: latest_pay_date = TblPayrollHistory.objects.using('HRReportingRead').aggregate(Max('paydate'))['paydate__max'] active_L_pms_qryset = TblPayrollHistory.objects.using('HRReportingRead').filter( lv__exact='L' ,lv_reason_code__exact='B10' ,paydate__exact=latest_pay_date ) active_L_pms_list = [each['pms'] for each in list(active_L_pms_qryset.values('pms', 'lname', 'fname'))] return TblEmployees.objects.using('OrgChartRead').filter( Q( lv__in=get_active_lv_list() ) | Q( pms__in=active_L_pms_list ) ) except Exception as e: raise ValueError(f"get_active_tblemployee_qryset(): {e}") class TestViewPagesResponse(HttpGetTestCase): @classmethod def setUpClass(self): tear_down() set_up_permissions() self.regular_views = [ 'orgchartportal_home_view', 'orgchartportal_about_view', 'orgchartportal_contact_view', 'orgchartportal_empgrid_view', 'orgchartportal_orgchart_view', 'orgchartportal_how_to_use_view', ] self.admin_views = [ 'orgchartportal_admin_panel_view', 'orgchartportal_manage_users_view', 'orgchartportal_manage_permissions_view', ] self.additional_context_requirements = [ { 'view' : 'orgchartportal_empgrid_view' ,'additional_context_keys' : [ 'emp_entry_columns_json' ,'emp_entries_json' ,'supervisor_dropdown_list_json' ,'site_dropdown_list_json' ,'site_floor_dropdown_list_json' ,'site_type_dropdown_list_json' ] ,'qa_fct' : self.__assert_additional_context_qa_empgrid } ## The below are admin views ,{ 'view' : 'orgchartportal_manage_users_view' ,'additional_context_keys' : [ 'ag_grid_col_def_json' ,'users_data_json' ] ,'qa_fct' : self.__assert_additional_context_qa_manage_users } ,{ 'view' : 'orgchartportal_manage_permissions_view' ,'additional_context_keys' : [ 'ag_grid_col_def_json' ,'permissions_json' ,'user_list' ,'division_list' ,'wu_desc_list' ] ,'qa_fct' : self.__assert_additional_context_qa_manage_permissions } ] @classmethod def tearDownClass(self): tear_down() def __assert_additional_context_qa_empgrid(self, response): ## Make sure the emp_entry_columns_json got all the required fields emp_entry_columns_dict = json.loads(response.context_data['emp_entry_columns_json']) from_api_fields = set(each['field'] for each in emp_entry_columns_dict) required_fields = set([ 'pms' ,'last_name' ,'first_name' ,'lv' ,'wu__wu' ,'civil_title' ,'office_title' ,'supervisor_pms__pms' ,'actual_site_id__site_id' ,'actual_floor_id__floor_id' ,'actual_site_type_id__site_type_id']) if len(from_api_fields) > len(required_fields): raise 
ValueError(f"orgchartportal_empgrid_view: context variable emp_entry_columns_json got back more fields than expected. These are the unexpected fields: {from_api_fields - required_fields}") self.assertTrue(from_api_fields == required_fields ,f'orgchartportal_empgrid_view: context variable emp_entry_columns_json is missing some fields: {required_fields - from_api_fields}') ## Make sure emp_entries_json has only WUs that client has permission to emp_entries_dict = json.loads(response.context_data['emp_entries_json']) distinct_wu = set(each['wu__wu'] for each in emp_entries_dict) user = get_or_create_user(windows_username=TEST_WINDOWS_USERNAME) if user.is_admin: permissions_wu = set(each.wu for each in TblWorkUnits.objects.using('OrgChartRead').all()) else: permissions_wu = set(each.wu.wu for each in TblPermissionsWorkUnit.objects.using('OrgChartRead').filter(user_id__windows_username__exact=TEST_WINDOWS_USERNAME, is_active=True)) if len(permissions_wu) > len(distinct_wu): missing_wus = permissions_wu - distinct_wu if get_active_tblemployee_qryset().filter(wu__wu__in=missing_wus).count() == 0: ## the missing_wus actually doesn't exists in the active list of employees, no error here, remove it from list and moving on. permissions_wu = permissions_wu - missing_wus else: raise ValueError(f"orgchartportal_empgrid_view: Did not get back any emp with these Work Units even though permission allows it: {missing_wus}") self.assertTrue(distinct_wu == permissions_wu ,f'orgchartportal_empgrid_view: Got back an entry with work unit that "{TEST_WINDOWS_USERNAME}" does not have permission to. Here are the Work Units that it got, but should not have {distinct_wu - permissions_wu}"') ## Make sure a list of all active employees is returned in supervisor dropdown supervisor_dropdown_dict = json.loads(response.context_data['supervisor_dropdown_list_json']) count_of_all_api = len([each for each in supervisor_dropdown_dict]) count_of_all_base = len([each for each in get_active_tblemployee_qryset()]) self.assertTrue(count_of_all_base == count_of_all_api ,f'orgchartportal_empgrid_view: Did not get back a list of ALL active employees in the supervisor_dropdown_list_json context variable. base {count_of_all_base} vs api {count_of_all_api}') ## Make sure a list of all sites is returned in site dropdown site_dropdown_dict = json.loads(response.context_data['site_dropdown_list_json']) count_of_all_api = len([each for each in site_dropdown_dict]) count_of_all_base = len([each for each in TblDOTSites.objects.using('OrgChartRead').all()]) self.assertTrue(count_of_all_base == count_of_all_api ,f'orgchartportal_empgrid_view: Did not get back a list of ALL sites in the site_dropdown_list_json context variable. base {count_of_all_base} vs api {count_of_all_api}') ## Make sure a list of all site floors is returned in site floor dropdown site_floor_dropdown_dict = json.loads(response.context_data['site_floor_dropdown_list_json']) count_of_all_api = len([each for each in site_floor_dropdown_dict]) count_of_all_base = len([each for each in TblDOTSiteFloors.objects.using('OrgChartRead').all()]) self.assertTrue(count_of_all_base == count_of_all_api ,f'orgchartportal_empgrid_view: Did not get back a list of ALL site floors in the site_floor_dropdown_list_json context variable. 
base {count_of_all_base} vs api {count_of_all_api}') ## Make sure a list of all site type site floors is returned in site type dropdown site_type_dropdown_dict = json.loads(response.context_data['site_type_dropdown_list_json']) count_of_all_api = len([each for each in site_type_dropdown_dict]) count_of_all_base = len([each for each in TblDOTSiteFloorSiteTypes.objects.using('OrgChartRead').values( 'site_type_id__site_type_id' ,'site_type_id__site_type' ,'floor_id__floor_id' ,'floor_id__site_id' ).all()]) self.assertTrue(count_of_all_base == count_of_all_api ,f'orgchartportal_empgrid_view: Did not get back a list of ALL site floor + site types in the site_type_dropdown_list_json context variable. base {count_of_all_base} vs api {count_of_all_api}') def __assert_additional_context_qa_manage_users(self, response): ## Make sure the ag_grid_col_def_json got all the required fields ag_grid_col_def_dict = json.loads(response.context_data['ag_grid_col_def_json']) from_api_fields = set(each['field'] for each in ag_grid_col_def_dict) required_fields = set([ 'pms' ,'windows_username' ,'is_admin' ,'active' ,None ]) if len(from_api_fields) > len(required_fields): raise ValueError(f"orgchartportal_manage_users_view: context variable ag_grid_col_def_json got back more fields than expected. These are the unexpected fields: {from_api_fields - required_fields}") self.assertTrue(from_api_fields == required_fields ,f'orgchartportal_manage_users_view: context variable ag_grid_col_def_json is missing some fields: {required_fields - from_api_fields}') ## Make sure users_data_json has ALL the user records, since this api is an admin api users_data_json = json.loads(response.context_data['users_data_json']) from_api_users_data = set(each['windows_username'] for each in users_data_json) required_users_data = set(each.windows_username for each in TblUsers.objects.using('OrgChartRead').all()) self.assertEqual(from_api_users_data, required_users_data ,f"orgchartportal_manage_users_view: context variable users_data_json either has more data than allowed ({from_api_users_data - required_users_data}) or has less data than allowed ({required_users_data - from_api_users_data})") def __assert_additional_context_qa_manage_permissions(self, response): ## Make sure the ag_grid_col_def_json got all the required fields ag_grid_col_def_dict = json.loads(response.context_data['ag_grid_col_def_json']) from_api_fields = set(each['field'] for each in ag_grid_col_def_dict) required_fields = set([ 'user_id__windows_username' ,'wu__wu' ,'wu__subdiv' ,'wu__wu_desc' ,None ]) if len(from_api_fields) > len(required_fields): raise ValueError(f"orgchartportal_manage_permissions_view: context variable ag_grid_col_def_json got back more fields than expected. 
These are the unexpected fields: {from_api_fields - required_fields}") self.assertTrue(from_api_fields == required_fields ,f'orgchartportal_manage_permissions_view: context variable ag_grid_col_def_json is missing some fields: {required_fields - from_api_fields}') ## Make sure permissions_json has ALL the permission records, since this api is an admin api permissions_json = json.loads(response.context_data['permissions_json']) from_api_permissions = set(f"{each['user_id__windows_username']}-{each['wu__wu']}" for each in permissions_json) required_permissions = set(f"{each.user_id.windows_username}-{each.wu.wu}" for each in TblPermissionsWorkUnit.objects.using('OrgChartRead').all()) self.assertEqual(from_api_permissions, required_permissions ,f"orgchartportal_manage_permissions_view: context variable permissions_json either has more data than allowed ({from_api_permissions - required_permissions}) or has less data than allowed ({required_permissions - from_api_permissions})") from_api_user_list = set(response.context_data['user_list']) from_api_division_list = set(response.context_data['division_list']) from_api_wu_desc_list = set(each['wu'] for each in response.context_data['wu_desc_list']) required_user_list = set(each.windows_username for each in TblUsers.objects.using('OrgChartRead').all()) required_division_list = set(each.subdiv for each in TblWorkUnits.objects.using('OrgChartRead').filter(subdiv__isnull=False).distinct()) ## subidv not null filters out the WU 9999 On-Loan required_wu_desc_list = set(each.wu for each in TblWorkUnits.objects.using('OrgChartRead').filter(subdiv__isnull=False)) ## subidv not null filters out the WU 9999 On-Loan self.assertEqual(from_api_user_list, required_user_list ,f"orgchartportal_manage_permissions_view: context variable user_list either has more data than allowed ({from_api_user_list - required_user_list}) or has less data than allowed ({required_user_list - from_api_user_list})") self.assertEqual(from_api_division_list, required_division_list
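The helper functions above (get_or_create_user, set_up_permissions, tear_down, ...) are meant to bracket each test class, as TestViewPagesResponse does in its setUpClass/tearDownClass. A hedged skeleton of that wiring follows; the class name, the test method, and its assertion are placeholders, not part of the original suite.

# Skeleton showing how the fixture helpers above are wired into a test class.
# The class name and the body of the test method are placeholders.
class TestEmpGridPermissions(HttpGetTestCase):
    @classmethod
    def setUpClass(cls):
        tear_down()                                        # start from a clean slate
        set_up_permissions(work_units=[DEFAULT_WORK_UNIT])

    @classmethod
    def tearDownClass(cls):
        tear_down()                                        # drop admin flag and permissions

    def test_test_user_is_active(self):
        # Placeholder assertion; the real suite inspects the views' context data.
        user = get_or_create_user()
        self.assertTrue(user.active)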
import discord import asyncio import uuid import ast from redbot.core import Config from redbot.core import commands from redbot.core import checks from redbot.core.utils.predicates import ReactionPredicate from redbot.core.utils.menus import start_adding_reactions, menu, DEFAULT_CONTROLS k_factor = 40 defaults = {"Players": {}, "Results": [], "SelfReportFlag": False} class PlayerRatings(commands.Cog): def __init__(self, bot): self.bot = bot self.config = Config.get_conf(self, identifier=1234567870, force_registration=True) self.config.register_guild(**defaults) self.players = [] self.team_manager = bot.get_cog("TeamManager") #region commmands @commands.command() @commands.guild_only() @checks.admin_or_permissions(manage_guild=True) async def addPlayers(self, ctx, *players_to_add): """Add the players provided to the player list. Arguments: players_to_add -- One or more players in the following format: ``` "['<player_id>','<wins>', '<losses>', '<elo_rating>']" ``` Each player should be separated by a space. Examples: ``` [p]addPlayers "['123456789','2', '1', '1000']" [p]addPlayers "['123456789','2', '1', '1000']" "['987654321','1', '2', '950']" ``` """ addedCount = 0 try: for playerStr in players_to_add: player = ast.literal_eval(playerStr) playerAdded = await self._add_player(ctx, *player) if playerAdded: addedCount += 1 else: await ctx.send("Error adding player: {0}".format(repr(player))) finally: await ctx.send("Added {0} players(s).".format(addedCount)) await ctx.send("Done.") @commands.command() @commands.guild_only() @checks.admin_or_permissions(manage_guild=True) async def addPlayer(self, ctx, member: discord.Member, wins: int, losses: int, elo_rating: int): """Add a single player and their info to the file system.""" playerAdded = await self._add_player(ctx, member, wins, losses, elo_rating) if(playerAdded): await ctx.send("Done.") else: await ctx.send("Error adding player: {0}".format(member.name)) @commands.command() @commands.guild_only() @checks.admin_or_permissions(manage_guild=True) async def removePlayer(self, ctx, member: discord.Member): """Removes player from the file system.""" playerRemoved = await self._remove_player(ctx, member) if playerRemoved: await ctx.send("Done.") else: await ctx.send("Error removing player: {0}".format(member.name)) @commands.command() @commands.guild_only() @checks.admin_or_permissions(manage_guild=True) async def clearPlayers(self, ctx): """Removes all players from the file system.""" await self.load_players(ctx) players = self.players players.clear() await self._save_players(ctx, players) await ctx.send("Done.") @commands.guild_only() @commands.command(aliases=["rr", "reportresult"]) async def reportResult(self, ctx, member_1: discord.Member, member_1_wins: int, member_2_wins: int, member_2: discord.Member): """Submits the result of the game between two players. Should be used in the score report channel for your tier. Both players need to agree on the result before it is finalized. This command may be disabled by admins to prevent erroneous reporting.""" if not self._self_report_flag(ctx): await ctx.send("Score reporting for this server is currently set to admin only.") return await self.load_players(ctx) player_1 = self.get_player_by_id(self.players, member_1.id) if not player_1: await ctx.send("There was a problem finding player info for {}. Please verify that you have the correct member in your command. 
If this persists message an admin.".format(member_1.name)) return player_2 = self.get_player_by_id(self.players, member_2.id) if not player_2: await ctx.send("There was a problem finding player info for {}. Please verify that you have the correct member in your command. If this persists message an admin.".format(member_2.name)) return opposing_member = member_2 if ctx.author == member_1 else member_1 if await self.verify_game_results(ctx, member_1, member_2, member_1_wins, member_2_wins, opposing_member): await self.finish_game(ctx, player_1, player_2, member_1_wins, member_2_wins) await ctx.send("Done.") @commands.guild_only() @commands.command(aliases=["arrs", "adminreportresults"]) @checks.admin_or_permissions(manage_guild=True) async def adminReportResults(self, ctx, *match_results): """Submits results for matches in bulk with no verification. Arguments: match_results -- One or more match results in the following format: ``` "['<member_1>','<member_1_wins>', '<member_2_wins>', '<member_2>']" ``` Each match should be separated by a space. Members can be either their id number, their name, their name + descriminator, or a mention of the user. Id number is preferred as it's guaranteed to be unique. Examples: ``` [p]adminReportResults "['123456789','2', '1', '987654321']" [p]adminReportResults "['123456789','2', '1', '987654321']" "['234567890','1', '2', '098765432']" ``` """ addedCount = 0 try: for matchStr in match_results: match = ast.literal_eval(matchStr) matchSubmitted = await self._admin_report_result(ctx, *match) if matchSubmitted: addedCount += 1 else: await ctx.send("Error submitting match: {0}".format(repr(match))) finally: await ctx.send("Submitted {0} match(es).".format(addedCount)) await ctx.send("Done.") @commands.guild_only() @commands.command(aliases=["arr", "adminreportresult"]) @checks.admin_or_permissions(manage_guild=True) async def adminReportResult(self, ctx, member_1: discord.Member, member_1_wins: int, member_2_wins: int, member_2: discord.Member): """Submits the result of the game between two players. There is no verification neccessary since this is an admin-only command.""" if await self._admin_report_result(ctx, member_1, member_1_wins, member_2_wins, member_2): await ctx.send("Done.") @commands.guild_only() @commands.command(aliases=["pi"]) async def playerInfo(self, ctx, member: discord.Member = None): """Gets all the info corresponding to a player. Shows the player's wins, losses, Elo rating, the team they play for.""" await self.load_players(ctx) if not member: member = ctx.author player = self.get_player_by_id(self.players, member.id) if not player: await ctx.send("{} has no player information at this time".format(member.name)) return team_name = await self.team_manager.get_current_team_name(ctx, member) franchise_role, tier_role = await self.team_manager._roles_for_team(ctx, team_name) await ctx.send(embed=self.embed_player_info(player, team_name, tier_role)) @commands.guild_only() @commands.command(aliases=["plb"]) async def playerLeaderboard(self, ctx, tier = None): """Shows the top ten players in terms of current Elo rating. 
If tier is specified it only looks at players in that tier.""" await self.load_players(ctx) players = self.players if not players: ctx.send("There are no players at this time") return tier_role = None #Filter list by tier if given if tier: tier_role = self.team_manager._get_tier_role(ctx, tier) if tier_role: tier_players = [] for player in players: if tier_role in player.member.roles: tier_players.append(player) players = tier_players players.sort(key=lambda player: player.elo_rating, reverse=True) await ctx.send(embed=self.embed_leaderboard(ctx, players, tier_role)) @commands.guild_only() @commands.command(aliases=["toggleReport", "toggleSelfReporting", "toggleSR", "toggleselfreport", "togglesr", "tsr"]) @checks.admin_or_permissions(manage_guild=True) async def toggleSelfReport(self, ctx): """ Toggles the status of the self report flag. (Default: False) If True, players can report their own results and results must be verified by the opposing player. If False, only admins can report results and no verification is needed. """ self_report_flag = await self._toggle_self_report_flag(ctx.guild) self_report_str = "on" if self_report_flag else "off" await ctx.send("Self reporting is now **{0}**.".format(self_report_str)) @commands.guild_only() @commands.command(aliases=["getallplayers", "gap", "getAllPlayerRatings", "listAllPlayers", "listAllPlayerRatings"]) @checks.admin_or_permissions(manage_guild=True) async def getAllPlayers(self, ctx): await self.load_players(ctx) players = self.players if not players: await ctx.send("There are no players at this time") return messages = [] message = "" for player in players: player_string = "{0.member.id}:{0.wins}:{0.losses}:{0.elo_rating}\n".format(player) if len(message + player_string) < 2000: message += player_string else: messages.append(message) message = player_string messages.append(message) for msg in messages: if msg: await ctx.send("{0}{1}{0}".format("```", msg)) #endregion #region helper methods async def _add_player(self, ctx, member, wins, losses, elo_rating): await self.load_players(ctx) players = self.players wins = int(wins) losses = int(losses) elo_rating = int(elo_rating) # Validation of input # There are other validations we could do, but don't # - that there aren't extra args for example errors = [] if not isinstance(member, discord.Member): try: member = await commands.MemberConverter().convert(ctx, member) except: errors.append("Member {} not found.".format(member)) if wins < 0: errors.append("Wins cannot be a negative number.") if losses < 0: errors.append("Losses cannot be a negative number.") if not elo_rating: errors.append("Elo rating not found.") if errors: await ctx.send(":x: Errors with input:\n\n " "* {0}\n".format("\n * ".join(errors))) return try: player = Player(member, wins, losses, elo_rating, -1) players.append(player) except: return False await self._save_players(ctx, players) return True async def _remove_player(self, ctx, member: discord.Member): await self.load_players(ctx) players = self.players try: player = await self.get_player_by_id(self.players, member.id) if not player: await ctx.send("{0} does not seem to be a current player.".format(member.name)) return False players.remove(player) except ValueError: await ctx.send("{0} does not seem to be a current player.".format(member.name)) return False await self._save_players(ctx, players) return True async def _admin_report_result(self, ctx, member_1: discord.Member, member_1_wins: int, member_2_wins: int, member_2: discord.Member): await self.load_players(ctx) 
player_1 = self.get_player_by_id(self.players, member_1.id) if not player_1: await ctx.send("There was a problem finding player info for {}. Please verify that you have the correct member in your command. If this persists message an admin.".format(member_1.name)) return False player_2 = self.get_player_by_id(self.players, member_2.id) if not player_2: await ctx.send("There was a problem finding player info for {}. Please verify that you have the correct member in your command. If this persists message an admin.".format(member_2.name)) return False await self.finish_game(ctx, player_1, player_2, member_1_wins, member_2_wins) return True async def verify_game_results(self, ctx, member_1: discord.Member, member_2: discord.Member, member_1_wins: int, member_2_wins: int, verifier: discord.Member): msg = await ctx.send("{0} Please verify the results:\n**{1}** {2} - {3} **{4}**".format(verifier.mention, member_1.name, member_1_wins, member_2_wins, member_2.name)) start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS) pred = ReactionPredicate.yes_or_no(msg, verifier) try: await ctx.bot.wait_for("reaction_add", check=pred) if pred.result is True: return True else: await ctx.send(":x: Results not verified. To report the result you will need to use the `{0}reportResult` command again.".format(ctx.prefix)) return False except asyncio.TimeoutError: await ctx.send(":x: Result not verified in time. To report the result you will need to use the `{0}reportResult` command again.".format(ctx.prefix)) return False async def finish_game(self, ctx, player_1, player_2, player_1_wins: int, player_2_wins: int): player_1_new_elo, player_2_new_elo = self.update_elo(player_1.elo_rating, player_2.elo_rating, player_1_wins / (player_1_wins + player_2_wins)) await ctx.send(embed=self.embed_game_results(player_1, player_2, player_1_wins, player_2_wins, player_1_new_elo, player_2_new_elo)) self.update_player_info(player_1, player_1_wins, player_2_wins, player_1_new_elo) self.update_player_info(player_2, player_2_wins, player_1_wins, player_2_new_elo) await self._save_players(ctx, self.players) def update_elo(self, player_1_elo: int, player_2_elo: int,
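# The body of update_elo is cut off above. The sketch below is the standard Elo
# update, written to match the call site in finish_game (two current ratings plus
# player 1's fraction of games won) and the module-level k_factor = 40. It is an
# illustration of the technique, not necessarily the cog's exact implementation.
def update_elo_sketch(player_1_elo: int, player_2_elo: int, player_1_score: float, k: int = 40):
    """Return the new (player_1, player_2) ratings after a series of games.

    player_1_score is player 1's fraction of games won (0.0 - 1.0).
    """
    # Expected score of player 1 under the logistic Elo model.
    expected_1 = 1.0 / (1.0 + 10 ** ((player_2_elo - player_1_elo) / 400.0))
    delta = k * (player_1_score - expected_1)
    # The update is zero-sum: whatever player 1 gains, player 2 loses.
    return round(player_1_elo + delta), round(player_2_elo - delta)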
<reponame>taodav/novelty-search-repr-space<filename>nsrl/experiment/exploration_helpers.py import json import os import copy import numpy as np import torch import json from deer.experiment.base_controllers import Controller from deer.helper.exploration import calculate_scores from deer.helper.knn import ranked_avg_knn_scores, batch_count_scaled_knn from deer.helper.pytorch import device, calculate_large_batch class ExplorationMetricController(Controller): def __init__(self, evaluate_on='action', periodicity=1, reset_every='none', env_name='default', experiment_dir=None, baseline_file=None, hyperparams=None, reload_dataset=None, **kwargs): """ Controller for PLOTTING exploration metric. Requires Visdom. :param evaluate_on: :param periodicity: :param reset_every: """ super(ExplorationMetricController, self).__init__(**kwargs) self._periodicity = periodicity self._baseline_file = baseline_file self._on_action = 'action' == evaluate_on self._on_episode = 'episode' == evaluate_on self._on_epoch = 'epoch' == evaluate_on self._env_name = env_name self._experiment_dir = experiment_dir self._exp_factor = [] self._ratio_visited = [] if not self._on_action and not self._on_episode and not self._on_epoch: self._on_action = True if reload_dataset is not None: self._reload_dataset(reload_dataset) self._reset_on_episode = 'episode' == reset_every self._reset_on_epoch = 'epoch' == reset_every self._ep_num = -1 self._hyperparams = hyperparams def _plot_baseline(self, agent): if self._baseline_file is not None: # If a baseline data is given, we overlay it. with open(self._baseline_file, 'r') as f: baseline = json.load(f) exp_factor = np.array([l for l in baseline['exploration_factors'] if l]) avg_exp_factor = np.average(exp_factor, axis=0) agent._plotter.plot("exploration_factor", np.arange(0, len(avg_exp_factor)), avg_exp_factor, "Exploration Factor", ymin=0, ymax=1, name='baseline') ratio_visited = np.array([l for l in baseline['ratios_visited'] if l]) avg_ratio_visited = np.average(ratio_visited, axis=0) agent._plotter.plot("states_visited", np.arange(0, len(avg_ratio_visited)), avg_ratio_visited, "Ratio of states visited", ymin=0, ymax=1, name='baseline') def onStart(self, agent): if (self._active == False): return self._reset(agent) def onEpisodeEnd(self, agent, terminal_reached, reward): if (self._active == False): return if self._reset_on_episode: self. 
_reset(agent) elif self._on_episode: self._update(agent) def onEpochEnd(self, agent): if (self._active == False): return if self._reset_on_epoch: self._reset(agent) elif self._on_epoch: self._update(agent) def onActionTaken(self, agent): if (self._active == False): return if self._on_action: self._update(agent) def _reset(self, agent): self._count = 0 self._exp_factor.append([]) self._ratio_visited.append([]) self._plot_baseline(agent) if agent._dataset.n_elems > len(self._exp_factor[self._ep_num]) and len(self._exp_factor[self._ep_num]) == 0: self._plot_dataset(agent) self._count = agent._dataset.n_elems self._ep_num += 1 def _plot_dataset(self, agent): all_observations = agent._dataset.observationsMatchingBatchDim()[0] all_positions = [(y.item(), x.item()) for y, x in zip(*np.where(all_observations == 0.5)[1:])] unique_counts = [] for i, pos in enumerate(all_positions): if i == 0: unique_counts.append(1) else: unique_counts.append(unique_counts[i - 1]) if pos not in all_positions[:i]: unique_counts[i] += 1 # self._exp_factor[self._ep_num] = [c / (i + 1) for i, c in enumerate(unique_counts)] y = np.array(self._exp_factor[self._ep_num]) x = np.arange(len(self._exp_factor[self._ep_num])) agent._plotter.plot("exploration_factor", x, y, "Exploration Factor", ymin=0, ymax=1) if hasattr(agent._environment, "_size_maze"): ys, xs = np.nonzero(agent._environment._map == 0.0) total_possible_states = len(ys) self._ratio_visited[self._ep_num] = [c / total_possible_states for c in unique_counts] y_tps = np.array(self._ratio_visited[self._ep_num]) agent._plotter.plot("states_visited", x, y_tps, "Ratio of states visited", ymin=0, ymax=1) def _update(self, agent): self._count += 1 if self._periodicity <= 1 or self._count % self._periodicity == 0: all_observations = agent._dataset.observations()[0] if all_observations.shape[0] < 1: return unique_observations = np.unique(all_observations, axis=0) exp_factor = unique_observations.shape[0] / all_observations.shape[0] self._exp_factor[self._ep_num].append(exp_factor) x = np.array([self._count]) y = np.array([exp_factor]) agent._plotter.plot("exploration_factor", x, y, "Exploration Factor", ymin=0, ymax=1) if hasattr(agent._environment, "_size_maze"): ys, xs = np.nonzero(agent._environment._map == 0.0) total_possible_states = len(ys) ratio = unique_observations.shape[0] / total_possible_states self._ratio_visited[self._ep_num].append(ratio) y_tps = np.array([ratio]) agent._plotter.plot("states_visited", x, y_tps, "Ratio of states visited", ymin=0, ymax=1) def onEnd(self, agent): exp_factor = np.array([l for l in self._exp_factor if l]) avg_exp_factor = np.average(exp_factor, axis=0) agent._plotter.plot("average exploration factor", np.arange(0, len(avg_exp_factor)), avg_exp_factor, "Average exploration factor over %d episodes" % exp_factor.shape[0], ymin=0, ymax=1) ratio_visited = np.array([l for l in self._ratio_visited if l]) avg_ratio_visited = np.average(ratio_visited, axis=0) agent._plotter.plot("average ratios visited", np.arange(0, len(avg_exp_factor)), avg_ratio_visited, "Average ratio of states visited over %d episodes" % ratio_visited.shape[0], ymin=0, ymax=1) record = { 'exploration_factors': self._exp_factor, 'ratios_visited': self._ratio_visited } if self._hyperparams is not None: record['hyperparameters'] = self._hyperparams filename = os.path.join(self._experiment_dir, 'results.json') with open(filename, 'w') as f: json.dump(record, f) class RewardController(Controller): def __init__(self, evaluate_on='train_loop', periodicity=1): 
super(RewardController, self).__init__() self._on_train_loop = 'train_loop' == evaluate_on self._on_train_step = 'train_step' == evaluate_on self._before_action = 'before_action' == evaluate_on self._on_action = 'action' == evaluate_on self._on_episode = 'episode' == evaluate_on self._on_epoch = 'epoch' == evaluate_on self._periodicity = periodicity self._count = 0 def onActionTaken(self, agent): if self._on_action: if self._count % self._periodicity == 0: self._update(agent) self._count += 1 def onActionChosen(self, agent, action): if self._before_action: if self._count % self._periodicity == 0: self._update(agent) self._count += 1 def onEpochEnd(self, agent): if self._on_epoch: if self._count % self._periodicity == 0: self._update(agent) self._count += 1 def onTrainLoopTaken(self, agent): if self._on_train_loop: if self._count % self._periodicity == 0: self._update(agent) self._count += 1 def repopulate_rewards(self, agent): self._update(agent) def _update(self, agent): raise NotImplementedError class NoveltyRewardController(RewardController): def __init__(self, evaluate_on='train_loop', periodicity=1, metric_func=calculate_scores, score_func=ranked_avg_knn_scores, k=10, knn=batch_count_scaled_knn, secondary=False): super(NoveltyRewardController, self).__init__(evaluate_on=evaluate_on, periodicity=periodicity) self._metric_func = metric_func self._k = k self._score_func = score_func self._knn = knn self._secondary = secondary def _update(self, agent): # Now we have to calculate intrinsic rewards for m in agent._learning_algo.all_models: m.eval() all_prev_state = agent._dataset.observationsMatchingBatchDim()[0] intr_rewards = self._metric_func(all_prev_state, all_prev_state, agent._learning_algo.encoder, dist_score=self._score_func, k=self._k, knn=self._knn) # UPDATE HTIS TO TAKE INTO ACCOUNT NON INTRINSIC REWARDS # reward clipping for preventing divergence # intr_rewards = np.clip(intr_rewards, -1, 1) # s_t, a_t, r_t (where r_t is intr_reward of s_{t+1}) agent._dataset.updateRewards(intr_rewards[1:], np.arange(0, agent._dataset.n_elems - 1), secondary=self._secondary) # we still need to calculate most recent reward. 
latest_state = np.array(agent._environment.observe()) if len(latest_state.shape) != len(all_prev_state.shape): latest_obs = latest_state obs_per_state = all_prev_state.shape[1] n_to_fill = obs_per_state - 1 n_prev_obs = agent._dataset.observations()[0][-n_to_fill:] latest_state = np.expand_dims(np.concatenate((n_prev_obs, latest_obs), axis=0), axis=0) latest_obs_intr_reward = self._metric_func(latest_state, all_prev_state, agent._learning_algo.encoder, dist_score=self._score_func, k=self._k, knn=self._knn) # latest_obs_intr_reward = np.clip(latest_obs_intr_reward, -1, 1) agent._dataset.updateRewards(latest_obs_intr_reward, agent._dataset.n_elems - 1, secondary=self._secondary) class HashStateCounterController(RewardController): def __init__(self, plotter, evaluate_on='action', periodicity=1, granularity=32, input_dims=(1, 64, 64), **kwargs): self._periodicity = periodicity self._count = 0 super(HashStateCounterController, self).__init__(evaluate_on=evaluate_on, periodicity=periodicity) self._granularity = granularity # size of binary code # Get input_dims from self._environment.inputDimensions()[0] self._A = np.random.normal(size=(self._granularity, np.prod(input_dims))) self.plotter = plotter self._unique_state_count = [0] self._count_table = {} def onStart(self, agent): all_obs = agent._dataset.observationsMatchingBatchDim()[0] indices_to_update = [0] for i, ob in enumerate(all_obs[1:], 1): hashed_ob = self.calc_hash(ob) to_add = self._unique_state_count[-1] if hashed_ob not in self._count_table: self._count_table[hashed_ob] = 0 to_add += 1 self._unique_state_count.append(to_add) self._count_table[hashed_ob] += 1 indices_to_update.append(i) self._count += 1 if indices_to_update: self.plotter.plot('hashed_unique_state_counts', indices_to_update, self._unique_state_count) def onEnd(self, agent): self.plotter.plot_text('ending', f'Environment completed after {self._count} steps') def calc_hash(self, obs): A_g = np.matmul(self._A, obs.flatten()) hash_seq = np.sign(A_g).astype(int) zero_mask = (hash_seq == 0).astype(int) hash_seq += zero_mask return str(hash_seq) def _update(self, agent): all_obs = agent._dataset.observationsMatchingBatchDim()[0] latest_obs = agent._environment.observe()[0] if len(all_obs.shape) == 4: second_to_last = all_obs[-1] to_attach = second_to_last[1:] latest_obs = np.concatenate((to_attach, latest_obs[None, :, :]), axis=0) hashed_obs = self.calc_hash(latest_obs) to_add = 0 if hashed_obs not in self._count_table: to_add = 1 self._count_table[hashed_obs] = 0 self._count_table[hashed_obs] += 1 self._unique_state_count.append(self._unique_state_count[-1] + to_add) self.plotter.plot('hashed_unique_state_counts', [self._count], [self._unique_state_count[-1]], 'Hashed Unique State Counts') class HashCountRewardController(RewardController): def __init__(self, evaluate_on='action', periodicity=1, granularity=32, input_dims=(1, 64, 64), secondary=False, discrete=False, **kwargs): super(HashCountRewardController, self).__init__(evaluate_on=evaluate_on, periodicity=periodicity) self._granularity = granularity # size of binary code self._bonus_coeff = 0.01 * (256 / self._granularity) if discrete: self._bonus_coeff = 1 # self._A = np.random.normal(size=(self._granularity, self._learning_algo.internal_dim)) # Get input_dims from self._environment.inputDimensions()[0] self._A = np.random.normal(size=(self._granularity, np.prod(input_dims))) self._count_table = {} self._secondary = secondary self._discrete = discrete # FOR DEBUGGING self._all_latest_obs = [] self._all_obs = [] def 
onStart(self, agent): all_obs = agent._dataset.observationsMatchingBatchDim()[0] hashed_obs = [] indices_to_update = [] for i, ob in enumerate(all_obs[1:]): hashed_ob = self.calc_hash(ob) hashed_obs.append(hashed_ob) if hashed_ob not in self._count_table: self._count_table[hashed_ob] = 0 self._count_table[hashed_ob] += 1 indices_to_update.append(i) rewards = [] for hob in hashed_obs: rewards.append(self._bonus_coeff / np.sqrt(self._count_table[hob])) agent._dataset.updateRewards(rewards, indices_to_update, secondary=self._secondary) def calc_hash(self, obs): if self._discrete: return np.array2string(obs.astype(np.half).flatten()) else: A_g = np.matmul(self._A, obs.flatten()) hash_seq = np.sign(A_g).astype(int) zero_mask = (hash_seq == 0).astype(int) hash_seq += zero_mask return str(hash_seq) def _update(self, agent): all_obs = agent._dataset.observationsMatchingBatchDim()[0] # hacky as shit if len(all_obs) == 1: # we need to first count the first state hashed_first_ob = self.calc_hash(all_obs[0]) self._count_table[hashed_first_ob] = 1 latest_obs = agent._environment.observe()[0] self._all_obs.append(copy.deepcopy(latest_obs)) if len(all_obs.shape) == 4: second_to_last = all_obs[-1] to_attach = second_to_last[1:] latest_obs = np.concatenate((to_attach, latest_obs[None, :, :]), axis=0) self._all_latest_obs.append(latest_obs) hashed_obs = self.calc_hash(latest_obs) # FIGURE THIS OUT FOR ACROBOT if hashed_obs not in self._count_table: self._count_table[hashed_obs] = 0 self._count_table[hashed_obs] += 1 next_states = np.concatenate((all_obs[1:],latest_obs[np.newaxis]), axis=0) # idx_to_update = [i for i, obs in enumerate(all_obs[1:]) if hashed_obs == self.calc_hash(obs)] idx_to_update = np.arange(len(all_obs)) # idx_to_update = np.array(idx_to_update) # THIS MIGHT NEED TO BE REFACTORED all_rewards = [] for i, s in enumerate(next_states): hob = self.calc_hash(s) all_rewards.append(self._bonus_coeff / np.sqrt(self._count_table[hob])) agent._dataset.updateRewards(all_rewards, idx_to_update, secondary=self._secondary) class CountBasedRewardController(RewardController): def __init__(self, evaluate_on='action', periodicity=1, bonus=1, hash_func=None, secondary=False, **kwargs): super(CountBasedRewardController, self).__init__(evaluate_on=evaluate_on, periodicity=periodicity) self._hash_func = hash_func self._bonus = bonus self._counts = {} self._secondary = secondary def _update(self, agent): """ increment counts and update rewards :param agent: :return: """ for m in agent._learning_algo.all_models: m.eval() all_obs = agent._dataset.observations()[0] latest_obs = agent._environment.observe()[0] hashed_obs = self._hash_func(latest_obs) if
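# The hash-based controllers above implement a SimHash-style count-based exploration
# bonus (in the spirit of the "#Exploration" count-based exploration work): observations
# are discretised with a random projection and the intrinsic reward shrinks with the
# visit count of the resulting hash bucket. The standalone sketch below mirrors
# HashCountRewardController's calc_hash and bonus coefficient, but it is an
# illustration of the technique rather than the original class.
import numpy as np

class SimHashCounter:
    def __init__(self, obs_size: int, granularity: int = 32, seed: int = 0):
        rng = np.random.default_rng(seed)
        # Random Gaussian projection used to discretise continuous observations.
        self._A = rng.normal(size=(granularity, obs_size))
        self._counts = {}
        self._bonus_coeff = 0.01 * (256 / granularity)

    def _hash(self, obs: np.ndarray) -> str:
        code = np.sign(self._A @ obs.flatten()).astype(int)
        code[code == 0] = 1  # map exact zeros to +1, as calc_hash does above
        return str(code)

    def bonus(self, obs: np.ndarray) -> float:
        key = self._hash(obs)
        self._counts[key] = self._counts.get(key, 0) + 1
        # Rarely visited hash buckets earn a larger intrinsic reward.
        return self._bonus_coeff / np.sqrt(self._counts[key])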
# Copyright (c) 2019-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import argparse import json import random from src.data.loader import check_data_params, load_data from src.evaluation.evaluator import SingleEvaluator, EncDecEvaluator from src.model import check_model_params, build_model, build_classifier from src.slurm import init_signal_handler, init_distributed_mode from src.trainer import SingleTrainer, EncDecTrainer from src.utils import bool_flag, initialize_exp, set_sampling_probs, shuf_order from src.utils import print_memory def get_parser(): """ Generate a parameters parser. """ # parse parameters parser = argparse.ArgumentParser(description="Language transfer") # main parameters parser.add_argument( "--dump_path", type=str, default="./dumped/", help="Experiment dump path" ) parser.add_argument("--exp_name", type=str, default="", help="Experiment name") parser.add_argument( "--save_periodic", type=int, default=0, help="Save the model periodically (0 to disable)", ) parser.add_argument("--exp_id", type=str, default="", help="Experiment ID") # float16 / AMP API parser.add_argument( "--fp16", type=bool_flag, default=False, help="Run model with float16" ) parser.add_argument( "--amp", type=int, default=-1, help="Use AMP wrapper for float16 / distributed / gradient accumulation. Level of optimization. -1 to disable.", ) # only use an encoder (use a specific decoder for machine translation) parser.add_argument( "--encoder_only", type=bool_flag, default=True, help="Only use an encoder" ) # model parameters parser.add_argument("--emb_dim", type=int, default=512, help="Embedding layer size") parser.add_argument( "--emb_dim_encoder", type=int, default=0, help="Embedding layer size" ) parser.add_argument( "--emb_dim_decoder", type=int, default=0, help="Embedding layer size" ) parser.add_argument( "--n_layers", type=int, default=4, help="Number of Transformer layers" ) parser.add_argument( "--n_layers_encoder", type=int, default=0, help="Number of Transformer layers for the encoder", ) parser.add_argument( "--n_layers_decoder", type=int, default=0, help="Number of Transformer layers for the decoder", ) parser.add_argument( "--n_heads", type=int, default=8, help="Number of Transformer heads" ) parser.add_argument("--dropout", type=float, default=0, help="Dropout") parser.add_argument( "--attention_dropout", type=float, default=0, help="Dropout in the attention layer", ) parser.add_argument( "--gelu_activation", type=bool_flag, default=False, help="Use a GELU activation instead of ReLU", ) parser.add_argument( "--share_inout_emb", type=bool_flag, default=True, help="Share input and output embeddings", ) parser.add_argument( "--sinusoidal_embeddings", type=bool_flag, default=False, help="Use sinusoidal embeddings", ) parser.add_argument( "--use_lang_emb", type=bool_flag, default=True, help="Use language embedding" ) # causal language modeling task parameters parser.add_argument( "--context_size", type=int, default=0, help="Context size (0 means that the first elements in sequences won't have any context)", ) # masked language modeling task parameters parser.add_argument( "--word_pred", type=float, default=0.15, help="Fraction of words for which we need to make a prediction", ) parser.add_argument( "--sample_alpha", type=float, default=0, help="Exponent for transforming word counts to probabilities (~word2vec sampling)", ) parser.add_argument( "--word_mask_keep_rand", 
type=str, default="0.8,0.1,0.1", help="Fraction of words to mask out / keep / randomize, among the words to predict", ) parser.add_argument( "--mask_length", type=str, default="", help="Length distribution of the masked spans. " "No span masking if kept empty. Constant if integer. Poisson if 'poisson'", ) parser.add_argument( "--poisson_lambda", type=float, default=3.0, help="Parameter of the poisson distribution for span length", ) # input sentence noise parser.add_argument( "--word_shuffle", type=float, default=0, help="Randomly shuffle input words (0 to disable)", ) parser.add_argument( "--word_dropout", type=float, default=0, help="Randomly dropout input words (0 to disable)", ) parser.add_argument( "--word_blank", type=float, default=0, help="Randomly blank input words (0 to disable)", ) # data parser.add_argument("--data_path", type=str, default="", help="Data path") parser.add_argument( "--lgs", type=str, default="", help="Languages (lg1-lg2-lg3 .. ex: en-fr-es-de)" ) parser.add_argument( "--lgs_mapping", type=str, default="", help="Map the lngs to pretrained lgs, java_sa:java_obfuscated" "then the emb of java_sa in this XP will be mapped to the emb of java_obfuscated in pretrained model", ) parser.add_argument( "--mt_lgs_id_mapping", type=str, default="", help="Map the in or out language id of some languages to others for mt_steps " "for instance 'java_np:java_buggy-java_resolved' means java_np gets the " "same language embeddings as java_buggy for input sentences and java_resolved " "for output sentences. Different mappings separated by commas", ) parser.add_argument( "--max_vocab", type=int, default=-1, help="Maximum vocabulary size (-1 to disable)", ) parser.add_argument( "--min_count", type=int, default=0, help="Minimum vocabulary count" ) parser.add_argument( "--lg_sampling_factor", type=float, default=-1, help="Language sampling factor" ) parser.add_argument( "--has_sentences_ids", type=bool_flag, default=False, help="Parallel sentences has an id or not in parallel datasets.", ) # batch parameters parser.add_argument("--bptt", type=int, default=256, help="Sequence length") parser.add_argument( "--max_len", type=int, default=100, help="Maximum length of sentences (after BPE)", ) parser.add_argument( "--group_by_size", type=bool_flag, default=True, help="Sort sentences by size during the training", ) parser.add_argument( "--batch_size", type=int, default=32, help="Number of sentences per batch" ) parser.add_argument( "--max_batch_size", type=int, default=0, help="Maximum number of sentences per batch (used in combination with tokens_per_batch, 0 to disable)", ) parser.add_argument( "--tokens_per_batch", type=int, default=-1, help="Number of tokens per batch" ) parser.add_argument( "--gen_tpb_multiplier", type=int, default=1, help="Multiplier of token per batch during generation when doing back translation. Typically 4", ) # training parameters parser.add_argument( "--split_data", type=bool_flag, default=False, help="Split data across workers of a same node", ) parser.add_argument( "--split_data_accross_gpu", type=str, default="local", help="Split data across GPU locally or globally. 
Set 'local' or 'global'", ) parser.add_argument( "--optimizer", type=str, default="adam,lr=0.0001", help="Optimizer (SGD / RMSprop / Adam, etc.)", ) parser.add_argument( "--clip_grad_norm", type=float, default=5, help="Clip gradients norm (0 to disable)", ) parser.add_argument( "--epoch_size", type=int, default=100000, help="Epoch size / evaluation frequency (-1 for parallel data size)", ) parser.add_argument( "--max_epoch", type=int, default=100000, help="Maximum epoch size" ) parser.add_argument( "--stopping_criterion", type=str, default="", help="Stopping criterion, and number of non-increase before stopping the experiment", ) parser.add_argument( "--validation_metrics", type=str, default="", help="Validation metrics" ) parser.add_argument( "--accumulate_gradients", type=int, default=1, help="Accumulate model gradients over N iterations (N times larger batch sizes)", ) parser.add_argument( "--add_eof_to_stream", type=bool_flag, default=False, help="Whether to add </s> at the beginning " "of every sentence in steam datasets." "It matters for MLM.", ) # training coefficients parser.add_argument( "--lambda_mlm", type=str, default="1", help="Prediction coefficient (MLM)" ) parser.add_argument( "--lambda_clm", type=str, default="1", help="Causal coefficient (LM)" ) parser.add_argument("--lambda_ae", type=str, default="1", help="AE coefficient") parser.add_argument("--lambda_mt", type=str, default="1", help="MT coefficient") parser.add_argument( "--lambda_do", type=str, default="1", help="Deobfuscation coefficient" ) parser.add_argument("--lambda_bt", type=str, default="1", help="BT coefficient") parser.add_argument( "--lambda_classif", type=str, default="1", help="Classificationlambda coefficient - can have one per pair of lang/label - format 'lang1-label1::lambda / lang2-label2::lambda / lambda' or 'lang1-label1::lambda / lang2-label2::lambda' or 'lambda'", ) # training steps parser.add_argument( "--clm_steps", type=str, default="", help="Causal prediction steps (CLM)" ) parser.add_argument( "--mlm_steps", type=str, default="", help="Masked prediction steps (MLM / TLM)" ) parser.add_argument( "--mt_steps", type=str, default="", help="Machine translation steps" ) parser.add_argument("--do_steps", type=str, default="", help="Deobfuscation steps") parser.add_argument( "--obf_proba", type=float, default=0.5, help="For Deobfuscation steps, probability of obsfuscation. If = 1 everything is obfuscated, 0 only one variable.", ) parser.add_argument( "--ae_steps", type=str, default="", help="Denoising auto-encoder steps" ) parser.add_argument( "--bt_steps", type=str, default="", help="Back-translation steps" ) parser.add_argument( "--mt_spans_steps", type=str, default="", help="Machine translation steps. Format for one step is lang1-lang2-span. 
Steps are separated by commas.", ) parser.add_argument( "--spans_emb_encoder", type=bool_flag, default=False, help="Whether to use span embeddings in the encoder", ) parser.add_argument( "--classif_steps", type=str, default="", help="Classification steps" ) # reload pretrained embeddings / pretrained model / checkpoint parser.add_argument( "--reload_emb", type=str, default="", help="Reload pretrained word embeddings" ) parser.add_argument( "--reload_model", type=str, default="", help="Reload a pretrained model" ) parser.add_argument( "--reload_encoder_attn_on_decoder", type=bool_flag, default=False, help="If true, reload encoder attention on decoder if there is no pre-trained decoder.", ) parser.add_argument( "--reload_encoder_for_decoder", type=bool_flag, default=False, help="Reload a the encoder of the pretrained model for the decoder.", ) parser.add_argument( "--roberta_mode", type=bool_flag, default=False, help="If we reload a pretrained roberta, need to put this params to True that positions idx are computed in the roberta way and use gelu.", ) parser.add_argument( "--reload_checkpoint", type=str, default="", help="Reload a checkpoint" ) # beam search (for MT only) parser.add_argument( "--beam_size", type=int, default=1, help="Beam size, default = 1 (greedy decoding)", ) parser.add_argument( "--length_penalty", type=float, default=1, help="Length penalty, values < 1.0 favor shorter sentences, while values > 1.0 favor longer ones.", ) parser.add_argument( "--early_stopping", type=bool_flag, default=False, help="Early stopping, stop as soon as we have `beam_size` hypotheses, although longer ones may have better scores.", ) # sampling at eval time parser.add_argument( "--number_samples", type=int, default=1, help="Number of examples to sample (default = 1)", ) parser.add_argument( "--eval_temperature", type=float, default=None, help="Evaluation temperature when using several samples", ) # BT parameters parser.add_argument( "--bt_sample_temperature", type=str, default="0", help="At BT training, sample temperature for generation", ) # Classification parameters parser.add_argument( "--n_classes_classif", type=int, default=0, help="Number of classes for classification steps.", ) parser.add_argument( "--reload_classifier", type=str, default="", help="Reload pretrained classifier.", ) # evaluation parser.add_argument( "--eval_bleu", type=bool_flag, default=False, help="Evaluate BLEU score during MT training", ) parser.add_argument( "--eval_denoising", type=bool_flag, default=False, help="Whether to evaluate the model for denoising", ) parser.add_argument( "--eval_subtoken_score", type=bool_flag, default=False, help="Evaluate subtoken score during MT training", ) parser.add_argument( "--eval_bleu_test_only", type=bool_flag, default=False, help="Evaluate BLEU score during MT training", ) parser.add_argument( "--eval_computation", type=bool_flag, default=False, help="Check if the generated function is compilable, and if it returns the same output as ground truth.",
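# --word_pred and --word_mask_keep_rand above describe the usual BERT-style MLM
# corruption: roughly 15% of tokens are selected for prediction, and of those 80% are
# replaced by the mask token, 10% keep their original id and 10% get a random id.
# The sketch below is a generic illustration of that scheme (the token/vocab names
# are hypothetical), not this project's own masking code.
import numpy as np

def mask_tokens(tokens, mask_id, vocab_size,
                word_pred=0.15, mask_keep_rand=(0.8, 0.1, 0.1), rng=None):
    """Return (corrupted_tokens, prediction_mask) for masked language modelling."""
    rng = rng or np.random.default_rng()
    tokens = np.asarray(tokens)
    pred_mask = rng.random(tokens.shape) < word_pred   # positions the model must predict
    corrupted = tokens.copy()
    roll = rng.random(tokens.shape)
    p_mask, p_keep, _ = mask_keep_rand
    # 80% of predicted positions: replace with the mask token.
    corrupted[pred_mask & (roll < p_mask)] = mask_id
    # 10%: keep the original token (nothing to do).
    # 10%: replace with a random vocabulary id.
    rand_sel = pred_mask & (roll >= p_mask + p_keep)
    corrupted[rand_sel] = rng.integers(0, vocab_size, size=rand_sel.sum())
    return corrupted, pred_mask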
<gh_stars>1-10 # -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'overview.ui' # # Created by: PyQt5 UI code generator 5.10.1 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(1339, 800) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName("centralwidget") self.label = QtWidgets.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(90, 10, 71, 21)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(11) self.label.setFont(font) self.label.setObjectName("label") self.lb_nomeuser = QtWidgets.QLabel(self.centralwidget) self.lb_nomeuser.setGeometry(QtCore.QRect(160, 10, 861, 21)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(11) self.lb_nomeuser.setFont(font) self.lb_nomeuser.setObjectName("lb_nomeuser") self.label_3 = QtWidgets.QLabel(self.centralwidget) self.label_3.setGeometry(QtCore.QRect(10, 10, 41, 21)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(11) self.label_3.setFont(font) self.label_3.setObjectName("label_3") self.lb_tipouser = QtWidgets.QLabel(self.centralwidget) self.lb_tipouser.setGeometry(QtCore.QRect(50, 10, 21, 21)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(11) self.lb_tipouser.setFont(font) self.lb_tipouser.setObjectName("lb_tipouser") self.widget_relatorio = QtWidgets.QWidget(self.centralwidget) self.widget_relatorio.setEnabled(True) self.widget_relatorio.setGeometry(QtCore.QRect(20, 30, 981, 491)) self.widget_relatorio.setObjectName("widget_relatorio") self.in_pesquisa = QtWidgets.QLineEdit(self.widget_relatorio) self.in_pesquisa.setGeometry(QtCore.QRect(20, 50, 181, 20)) self.in_pesquisa.setObjectName("in_pesquisa") self.pesquisar = QtWidgets.QPushButton(self.widget_relatorio) self.pesquisar.setGeometry(QtCore.QRect(210, 50, 75, 23)) self.pesquisar.setObjectName("pesquisar") self.lb_titulo = QtWidgets.QLabel(self.widget_relatorio) self.lb_titulo.setGeometry(QtCore.QRect(20, 20, 681, 20)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(11) self.lb_titulo.setFont(font) self.lb_titulo.setObjectName("lb_titulo") self.widget_relatorio_normal = QtWidgets.QWidget(self.widget_relatorio) self.widget_relatorio_normal.setGeometry(QtCore.QRect(10, 80, 811, 401)) self.widget_relatorio_normal.setObjectName("widget_relatorio_normal") self.tb_relatorio = QtWidgets.QTableView(self.widget_relatorio_normal) self.tb_relatorio.setGeometry(QtCore.QRect(10, 10, 791, 381)) self.tb_relatorio.setSortingEnabled(False) self.tb_relatorio.setObjectName("tb_relatorio") self.widget_relatorio_P3 = QtWidgets.QWidget(self.widget_relatorio) self.widget_relatorio_P3.setGeometry(QtCore.QRect(10, 70, 851, 411)) self.widget_relatorio_P3.setObjectName("widget_relatorio_P3") self.tb_relatorio_P3_1 = QtWidgets.QTableView(self.widget_relatorio_P3) self.tb_relatorio_P3_1.setGeometry(QtCore.QRect(10, 20, 261, 381)) self.tb_relatorio_P3_1.setSortingEnabled(False) self.tb_relatorio_P3_1.setObjectName("tb_relatorio_P3_1") self.tb_relatorio_P3_2 = QtWidgets.QTableView(self.widget_relatorio_P3) self.tb_relatorio_P3_2.setGeometry(QtCore.QRect(290, 20, 261, 381)) self.tb_relatorio_P3_2.setSortingEnabled(False) self.tb_relatorio_P3_2.setObjectName("tb_relatorio_P3_2") self.tb_relatorio_P3_3 = QtWidgets.QTableView(self.widget_relatorio_P3) 
self.tb_relatorio_P3_3.setGeometry(QtCore.QRect(570, 20, 261, 381)) self.tb_relatorio_P3_3.setSortingEnabled(False) self.tb_relatorio_P3_3.setObjectName("tb_relatorio_P3_3") self.lb_consulta = QtWidgets.QLabel(self.widget_relatorio) self.lb_consulta.setGeometry(QtCore.QRect(290, 50, 411, 20)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(8) self.lb_consulta.setFont(font) self.lb_consulta.setObjectName("lb_consulta") self.widget_S1 = QtWidgets.QWidget(self.centralwidget) self.widget_S1.setGeometry(QtCore.QRect(20, 30, 541, 241)) self.widget_S1.setObjectName("widget_S1") self.lb_titulo_1 = QtWidgets.QLabel(self.widget_S1) self.lb_titulo_1.setGeometry(QtCore.QRect(20, 20, 681, 20)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(11) self.lb_titulo_1.setFont(font) self.lb_titulo_1.setObjectName("lb_titulo_1") self.in_S1_idproduto = QtWidgets.QLineEdit(self.widget_S1) self.in_S1_idproduto.setGeometry(QtCore.QRect(30, 110, 181, 20)) self.in_S1_idproduto.setText("") self.in_S1_idproduto.setObjectName("in_S1_idproduto") self.lb_sim_a_1 = QtWidgets.QLabel(self.widget_S1) self.lb_sim_a_1.setGeometry(QtCore.QRect(30, 80, 151, 31)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(9) self.lb_sim_a_1.setFont(font) self.lb_sim_a_1.setObjectName("lb_sim_a_1") self.lb_sim_1 = QtWidgets.QLabel(self.widget_S1) self.lb_sim_1.setGeometry(QtCore.QRect(230, 80, 151, 31)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(9) self.lb_sim_1.setFont(font) self.lb_sim_1.setObjectName("lb_sim_1") self.in_S1_novovalor = QtWidgets.QLineEdit(self.widget_S1) self.in_S1_novovalor.setGeometry(QtCore.QRect(230, 110, 181, 20)) self.in_S1_novovalor.setText("") self.in_S1_novovalor.setObjectName("in_S1_novovalor") self.S1_alterar_valor = QtWidgets.QPushButton(self.widget_S1) self.S1_alterar_valor.setGeometry(QtCore.QRect(430, 110, 75, 23)) self.S1_alterar_valor.setObjectName("S1_alterar_valor") self.lb_sim_2 = QtWidgets.QLabel(self.widget_S1) self.lb_sim_2.setGeometry(QtCore.QRect(230, 160, 151, 31)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(9) self.lb_sim_2.setFont(font) self.lb_sim_2.setObjectName("lb_sim_2") self.in_S1_novaquantidade = QtWidgets.QLineEdit(self.widget_S1) self.in_S1_novaquantidade.setGeometry(QtCore.QRect(230, 190, 181, 20)) self.in_S1_novaquantidade.setText("") self.in_S1_novaquantidade.setObjectName("in_S1_novaquantidade") self.S1_alterar_quantidade = QtWidgets.QPushButton(self.widget_S1) self.S1_alterar_quantidade.setGeometry(QtCore.QRect(430, 190, 75, 23)) self.S1_alterar_quantidade.setObjectName("S1_alterar_quantidade") self.widget = QtWidgets.QWidget(self.centralwidget) self.widget.setGeometry(QtCore.QRect(30, 40, 1261, 641)) self.widget.setObjectName("widget") self.groupBox = QtWidgets.QGroupBox(self.widget) self.groupBox.setEnabled(True) self.groupBox.setGeometry(QtCore.QRect(10, 10, 381, 121)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(11) self.groupBox.setFont(font) self.groupBox.setAutoFillBackground(False) self.groupBox.setObjectName("groupBox") self.label_2 = QtWidgets.QLabel(self.groupBox) self.label_2.setGeometry(QtCore.QRect(60, 60, 47, 21)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) font.setBold(True) font.setWeight(75) self.label_2.setFont(font) self.label_2.setObjectName("label_2") self.label_4 = QtWidgets.QLabel(self.groupBox) self.label_4.setGeometry(QtCore.QRect(180, 60, 47, 21)) font = QtGui.QFont() 
font.setFamily("Century Gothic") font.setPointSize(10) font.setBold(True) font.setWeight(75) self.label_4.setFont(font) self.label_4.setObjectName("label_4") self.label_5 = QtWidgets.QLabel(self.groupBox) self.label_5.setGeometry(QtCore.QRect(290, 60, 41, 21)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) font.setBold(True) font.setWeight(75) self.label_5.setFont(font) self.label_5.setObjectName("label_5") self.in_1_data = QtWidgets.QDateEdit(self.groupBox) self.in_1_data.setGeometry(QtCore.QRect(259, 20, 101, 22)) font = QtGui.QFont() font.setPointSize(10) self.in_1_data.setFont(font) self.in_1_data.setDateTime(QtCore.QDateTime(QtCore.QDate(2018, 6, 21), QtCore.QTime(0, 0, 0))) self.in_1_data.setCalendarPopup(True) self.in_1_data.setObjectName("in_1_data") self.label_6 = QtWidgets.QLabel(self.groupBox) self.label_6.setGeometry(QtCore.QRect(140, 20, 111, 20)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(8) self.label_6.setFont(font) self.label_6.setObjectName("label_6") self.lb_1_dia = QtWidgets.QLabel(self.groupBox) self.lb_1_dia.setGeometry(QtCore.QRect(20, 90, 101, 21)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) self.lb_1_dia.setFont(font) self.lb_1_dia.setObjectName("lb_1_dia") self.lb_1_mes = QtWidgets.QLabel(self.groupBox) self.lb_1_mes.setGeometry(QtCore.QRect(140, 90, 101, 21)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) self.lb_1_mes.setFont(font) self.lb_1_mes.setObjectName("lb_1_mes") self.lb_1_ano = QtWidgets.QLabel(self.groupBox) self.lb_1_ano.setGeometry(QtCore.QRect(270, 90, 101, 21)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) self.lb_1_ano.setFont(font) self.lb_1_ano.setObjectName("lb_1_ano") self.line_2 = QtWidgets.QFrame(self.groupBox) self.line_2.setGeometry(QtCore.QRect(250, 80, 16, 31)) self.line_2.setFrameShape(QtWidgets.QFrame.VLine) self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_2.setObjectName("line_2") self.line_3 = QtWidgets.QFrame(self.groupBox) self.line_3.setGeometry(QtCore.QRect(120, 80, 16, 31)) self.line_3.setFrameShape(QtWidgets.QFrame.VLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_3.setObjectName("line_3") self.groupBox_2 = QtWidgets.QGroupBox(self.widget) self.groupBox_2.setGeometry(QtCore.QRect(10, 140, 381, 201)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(11) self.groupBox_2.setFont(font) self.groupBox_2.setObjectName("groupBox_2") self.label_11 = QtWidgets.QLabel(self.groupBox_2) self.label_11.setGeometry(QtCore.QRect(90, 60, 47, 21)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) font.setBold(True) font.setWeight(75) self.label_11.setFont(font) self.label_11.setObjectName("label_11") self.label_12 = QtWidgets.QLabel(self.groupBox_2) self.label_12.setGeometry(QtCore.QRect(260, 60, 41, 21)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) font.setBold(True) font.setWeight(75) self.label_12.setFont(font) self.label_12.setObjectName("label_12") self.in_2_data = QtWidgets.QDateEdit(self.groupBox_2) self.in_2_data.setGeometry(QtCore.QRect(279, 20, 81, 22)) font = QtGui.QFont() font.setPointSize(10) self.in_2_data.setFont(font) self.in_2_data.setDateTime(QtCore.QDateTime(QtCore.QDate(2018, 6, 21), QtCore.QTime(0, 0, 0))) self.in_2_data.setCalendarPopup(True) self.in_2_data.setObjectName("in_2_data") self.label_13 = QtWidgets.QLabel(self.groupBox_2) 
self.label_13.setGeometry(QtCore.QRect(160, 20, 111, 20)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(8) self.label_13.setFont(font) self.label_13.setObjectName("label_13") self.lb_2_m1 = QtWidgets.QLabel(self.groupBox_2) self.lb_2_m1.setGeometry(QtCore.QRect(20, 90, 161, 21)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) self.lb_2_m1.setFont(font) self.lb_2_m1.setObjectName("lb_2_m1") self.lb_2_a1 = QtWidgets.QLabel(self.groupBox_2) self.lb_2_a1.setGeometry(QtCore.QRect(200, 90, 161, 21)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) self.lb_2_a1.setFont(font) self.lb_2_a1.setObjectName("lb_2_a1") self.lb_2_m2 = QtWidgets.QLabel(self.groupBox_2) self.lb_2_m2.setGeometry(QtCore.QRect(20, 120, 161, 21)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) self.lb_2_m2.setFont(font) self.lb_2_m2.setObjectName("lb_2_m2") self.lb_2_m3 = QtWidgets.QLabel(self.groupBox_2) self.lb_2_m3.setGeometry(QtCore.QRect(20, 150, 161, 21)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) self.lb_2_m3.setFont(font) self.lb_2_m3.setObjectName("lb_2_m3") self.lb_2_a2 = QtWidgets.QLabel(self.groupBox_2) self.lb_2_a2.setGeometry(QtCore.QRect(200, 120, 161, 21)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) self.lb_2_a2.setFont(font) self.lb_2_a2.setObjectName("lb_2_a2") self.lb_2_a3 = QtWidgets.QLabel(self.groupBox_2) self.lb_2_a3.setGeometry(QtCore.QRect(200, 150, 161, 21)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) self.lb_2_a3.setFont(font) self.lb_2_a3.setObjectName("lb_2_a3") self.line = QtWidgets.QFrame(self.groupBox_2) self.line.setGeometry(QtCore.QRect(180, 60, 16, 131)) self.line.setFrameShape(QtWidgets.QFrame.VLine) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setObjectName("line") self.groupBox_4 = QtWidgets.QGroupBox(self.widget) self.groupBox_4.setGeometry(QtCore.QRect(10, 350, 381, 281)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(11) self.groupBox_4.setFont(font) self.groupBox_4.setObjectName("groupBox_4") self.tb_4 = QtWidgets.QTableView(self.groupBox_4) self.tb_4.setGeometry(QtCore.QRect(10, 50, 351, 221)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) self.tb_4.setFont(font) self.tb_4.setObjectName("tb_4") self.in_4_data = QtWidgets.QDateEdit(self.groupBox_4) self.in_4_data.setGeometry(QtCore.QRect(299, 20, 61, 22)) font = QtGui.QFont() font.setPointSize(10) self.in_4_data.setFont(font) self.in_4_data.setDateTime(QtCore.QDateTime(QtCore.QDate(2018, 6, 21), QtCore.QTime(0, 0, 0))) self.in_4_data.setCurrentSection(QtWidgets.QDateTimeEdit.YearSection) self.in_4_data.setCalendarPopup(True) self.in_4_data.setObjectName("in_4_data") self.label_14 = QtWidgets.QLabel(self.groupBox_4) self.label_14.setGeometry(QtCore.QRect(180, 20, 111, 20)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(8) self.label_14.setFont(font) self.label_14.setObjectName("label_14") self.groupBox_5 = QtWidgets.QGroupBox(self.widget) self.groupBox_5.setGeometry(QtCore.QRect(400, 350, 381, 281)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(11) self.groupBox_5.setFont(font) self.groupBox_5.setObjectName("groupBox_5") self.tb_5 = QtWidgets.QTableView(self.groupBox_5) self.tb_5.setGeometry(QtCore.QRect(10, 50, 361, 221)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) 
self.tb_5.setFont(font) self.tb_5.setObjectName("tb_5") self.groupBox_3 = QtWidgets.QGroupBox(self.widget) self.groupBox_3.setGeometry(QtCore.QRect(400, 10, 381, 331)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(11) self.groupBox_3.setFont(font) self.groupBox_3.setObjectName("groupBox_3") self.tb_3 = QtWidgets.QTableView(self.groupBox_3) self.tb_3.setGeometry(QtCore.QRect(10, 21, 361, 301)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) self.tb_3.setFont(font) self.tb_3.setObjectName("tb_3") self.groupBox_6 = QtWidgets.QGroupBox(self.widget) self.groupBox_6.setGeometry(QtCore.QRect(790, 10, 461, 161)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(11) self.groupBox_6.setFont(font) self.groupBox_6.setObjectName("groupBox_6") self.tb_6 = QtWidgets.QTableView(self.groupBox_6) self.tb_6.setGeometry(QtCore.QRect(10, 21, 441, 131)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) self.tb_6.setFont(font) self.tb_6.setObjectName("tb_6") self.groupBox_7 = QtWidgets.QGroupBox(self.widget) self.groupBox_7.setGeometry(QtCore.QRect(790, 180, 461, 221)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(11) self.groupBox_7.setFont(font) self.groupBox_7.setObjectName("groupBox_7") self.label_16 = QtWidgets.QLabel(self.groupBox_7) self.label_16.setGeometry(QtCore.QRect(270, 20, 111, 20)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(8) self.label_16.setFont(font) self.label_16.setObjectName("label_16") self.in_7_data = QtWidgets.QDateEdit(self.groupBox_7) self.in_7_data.setGeometry(QtCore.QRect(389, 20, 61, 22)) font = QtGui.QFont() font.setPointSize(10) self.in_7_data.setFont(font) self.in_7_data.setDateTime(QtCore.QDateTime(QtCore.QDate(2018, 6, 21), QtCore.QTime(0, 0, 0))) self.in_7_data.setCalendarPopup(True) self.in_7_data.setObjectName("in_7_data") self.tb_7 = QtWidgets.QTableView(self.groupBox_7) self.tb_7.setGeometry(QtCore.QRect(10, 50, 441, 161)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) self.tb_7.setFont(font) self.tb_7.setObjectName("tb_7") self.groupBox_8 = QtWidgets.QGroupBox(self.widget) self.groupBox_8.setGeometry(QtCore.QRect(790, 410, 461, 221)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(11) self.groupBox_8.setFont(font) self.groupBox_8.setObjectName("groupBox_8") self.in_8_data_1 = QtWidgets.QDateEdit(self.groupBox_8) self.in_8_data_1.setGeometry(QtCore.QRect(220, 20, 61, 22)) font = QtGui.QFont() font.setPointSize(10) self.in_8_data_1.setFont(font) self.in_8_data_1.setDateTime(QtCore.QDateTime(QtCore.QDate(2010, 6, 21), QtCore.QTime(0, 0, 0))) self.in_8_data_1.setCurrentSection(QtWidgets.QDateTimeEdit.YearSection) self.in_8_data_1.setCalendarPopup(True) self.in_8_data_1.setObjectName("in_8_data_1") self.label_15 = QtWidgets.QLabel(self.groupBox_8) self.label_15.setGeometry(QtCore.QRect(100, 20, 111, 20)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(8) self.label_15.setFont(font) self.label_15.setObjectName("label_15") self.tb_8 = QtWidgets.QTableView(self.groupBox_8) self.tb_8.setGeometry(QtCore.QRect(10, 50, 441, 161)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(10) self.tb_8.setFont(font) self.tb_8.setObjectName("tb_8") self.label_17 = QtWidgets.QLabel(self.groupBox_8) self.label_17.setGeometry(QtCore.QRect(290, 20, 31, 20)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(8) 
self.label_17.setFont(font) self.label_17.setObjectName("label_17") self.in_8_data_2 = QtWidgets.QDateEdit(self.groupBox_8) self.in_8_data_2.setGeometry(QtCore.QRect(330, 20, 61, 22)) font = QtGui.QFont() font.setPointSize(10) self.in_8_data_2.setFont(font) self.in_8_data_2.setDateTime(QtCore.QDateTime(QtCore.QDate(2018, 6, 21), QtCore.QTime(0, 0, 0))) self.in_8_data_2.setCurrentSection(QtWidgets.QDateTimeEdit.YearSection) self.in_8_data_2.setCalendarPopup(True) self.in_8_data_2.setObjectName("in_8_data_2") self.widget_S2 = QtWidgets.QWidget(self.centralwidget) self.widget_S2.setGeometry(QtCore.QRect(20, 30, 551, 241)) self.widget_S2.setObjectName("widget_S2") self.lb_titulo_2 = QtWidgets.QLabel(self.widget_S2) self.lb_titulo_2.setGeometry(QtCore.QRect(20, 20, 681, 20)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(11) self.lb_titulo_2.setFont(font) self.lb_titulo_2.setObjectName("lb_titulo_2") self.in_S2_idcategoria = QtWidgets.QLineEdit(self.widget_S2) self.in_S2_idcategoria.setGeometry(QtCore.QRect(30, 110, 181, 20)) self.in_S2_idcategoria.setText("") self.in_S2_idcategoria.setObjectName("in_S2_idcategoria") self.lb_sim_a_2 = QtWidgets.QLabel(self.widget_S2) self.lb_sim_a_2.setGeometry(QtCore.QRect(30, 80, 151, 31)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(9) self.lb_sim_a_2.setFont(font) self.lb_sim_a_2.setObjectName("lb_sim_a_2") self.lb_sim_3 = QtWidgets.QLabel(self.widget_S2) self.lb_sim_3.setGeometry(QtCore.QRect(230, 80, 151, 31)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(9) self.lb_sim_3.setFont(font) self.lb_sim_3.setObjectName("lb_sim_3") self.in_S2_idsub = QtWidgets.QLineEdit(self.widget_S2) self.in_S2_idsub.setGeometry(QtCore.QRect(230, 110, 181, 20)) self.in_S2_idsub.setText("") self.in_S2_idsub.setObjectName("in_S2_idsub") self.S2_alterar = QtWidgets.QPushButton(self.widget_S2) self.S2_alterar.setGeometry(QtCore.QRect(430, 110, 75, 23)) self.S2_alterar.setObjectName("S2_alterar") self.widget_S3 = QtWidgets.QWidget(self.centralwidget) self.widget_S3.setGeometry(QtCore.QRect(20, 30, 911, 711)) self.widget_S3.setObjectName("widget_S3") self.lb_titulo_3 = QtWidgets.QLabel(self.widget_S3) self.lb_titulo_3.setGeometry(QtCore.QRect(20, 20, 681, 20)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(11) self.lb_titulo_3.setFont(font) self.lb_titulo_3.setObjectName("lb_titulo_3") self.groupBox_9 = QtWidgets.QGroupBox(self.widget_S3) self.groupBox_9.setGeometry(QtCore.QRect(20, 50, 661, 121)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(11) self.groupBox_9.setFont(font) self.groupBox_9.setObjectName("groupBox_9") self.lb_51 = QtWidgets.QLabel(self.groupBox_9) self.lb_51.setGeometry(QtCore.QRect(10, 30, 151, 31)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(9) self.lb_51.setFont(font) self.lb_51.setObjectName("lb_51") self.in_S2_1 = QtWidgets.QLineEdit(self.groupBox_9) self.in_S2_1.setGeometry(QtCore.QRect(10, 60, 181, 20)) self.in_S2_1.setText("") self.in_S2_1.setObjectName("in_S2_1") self.S3_alterar_1 = QtWidgets.QPushButton(self.groupBox_9) self.S3_alterar_1.setGeometry(QtCore.QRect(210, 60, 75, 23)) font = QtGui.QFont() font.setFamily("MS Shell Dlg 2") font.setPointSize(8) self.S3_alterar_1.setFont(font) self.S3_alterar_1.setObjectName("S3_alterar_1") self.groupBox_10 = QtWidgets.QGroupBox(self.widget_S3) self.groupBox_10.setGeometry(QtCore.QRect(20, 180, 661, 121)) font = QtGui.QFont() font.setFamily("Century Gothic") 
font.setPointSize(11) self.groupBox_10.setFont(font) self.groupBox_10.setObjectName("groupBox_10") self.lb_53 = QtWidgets.QLabel(self.groupBox_10) self.lb_53.setGeometry(QtCore.QRect(10, 30, 151, 31)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(9) self.lb_53.setFont(font) self.lb_53.setObjectName("lb_53") self.S3_alterar_2 = QtWidgets.QPushButton(self.groupBox_10) self.S3_alterar_2.setGeometry(QtCore.QRect(370, 60, 75, 23)) font = QtGui.QFont() font.setFamily("MS Shell Dlg 2") font.setPointSize(8) self.S3_alterar_2.setFont(font) self.S3_alterar_2.setObjectName("S3_alterar_2") self.in_S3_datainicial = QtWidgets.QDateEdit(self.groupBox_10) self.in_S3_datainicial.setGeometry(QtCore.QRect(10, 60, 101, 22)) font = QtGui.QFont() font.setPointSize(10) self.in_S3_datainicial.setFont(font) self.in_S3_datainicial.setDateTime(QtCore.QDateTime(QtCore.QDate(2018, 6, 21), QtCore.QTime(0, 0, 0))) self.in_S3_datainicial.setCalendarPopup(True) self.in_S3_datainicial.setObjectName("in_S3_datainicial") self.lb_54 = QtWidgets.QLabel(self.groupBox_10) self.lb_54.setGeometry(QtCore.QRect(130, 30, 151, 31)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(9) self.lb_54.setFont(font) self.lb_54.setObjectName("lb_54") self.lb_55 = QtWidgets.QLabel(self.groupBox_10) self.lb_55.setGeometry(QtCore.QRect(250, 30, 151, 31)) font = QtGui.QFont() font.setFamily("Century Gothic") font.setPointSize(9) self.lb_55.setFont(font) self.lb_55.setObjectName("lb_55") self.in_S3_datafinal = QtWidgets.QDateEdit(self.groupBox_10) self.in_S3_datafinal.setGeometry(QtCore.QRect(130, 60, 101, 22)) font = QtGui.QFont() font.setPointSize(10) self.in_S3_datafinal.setFont(font) self.in_S3_datafinal.setDateTime(QtCore.QDateTime(QtCore.QDate(2018, 6, 21), QtCore.QTime(0, 0, 0)))
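# The Ui_MainWindow class above (cut off here) is pyuic5-generated boilerplate: it only
# builds widgets inside setupUi and, as its header warns, is not meant to be edited by
# hand. The usual pattern is to wrap it in a QMainWindow subclass and connect signals
# there, roughly as below (the window class and slot names are hypothetical).
import sys
from PyQt5 import QtWidgets

class OverviewWindow(QtWidgets.QMainWindow):
    def __init__(self):
        super().__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Wire the generated widgets to application logic here, e.g.:
        # self.ui.pesquisar.clicked.connect(self.on_search_clicked)

if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    window = OverviewWindow()
    window.show()
    sys.exit(app.exec_())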
nn.Conv2d(in_channels=self.refine_channel//2, out_channels=self.refine_channel//2, kernel_size=self.refine_kernel, padding = (self.refine_kernel-1)//2) self.convr3 = nn.Conv2d(in_channels=self.refine_channel//2, out_channels=1, kernel_size=self.refine_kernel, padding = (self.refine_kernel-1)//2) self.act_fn = nn.LeakyReLU(0.1) #bypass unet self.ds0 = PoolConv(1, self.vgg_channels[0]) self.vgg0 = VGGBlock(self.vgg_channels[0]) self.ds1 = PoolConv(self.vgg_channels[0], self.vgg_channels[1]) self.vgg1 = VGGBlock(self.vgg_channels[1]) self.ds2 = PoolConv(self.vgg_channels[1], self.vgg_channels[2]) self.vgg2 = VGGBlock(self.vgg_channels[2]) #250 self.us3 = UpConv(self.vgg_channels[2]+self.width*3, self.vgg_channels[3]) #merge with fno output self.vgg3 = VGGBlock(self.vgg_channels[3]) self.us4 = UpConv(self.vgg_channels[3]+self.vgg_channels[1], self.vgg_channels[4]) self.vgg4 = VGGBlock(self.vgg_channels[4]) self.us5 = UpConv(self.vgg_channels[4]+self.vgg_channels[0], self.vgg_channels[5]) self.vgg5 = VGGBlock(self.vgg_channels[5]) self.tanh = nn.Tanh() def forward(self, x): #fno pass x_fno = self.resize0(x) #print(x_fno.shape) n,c,h,w= x_fno.shape #fno1 tile_c_0 = h//self.subtile_size_0 x_fno_input_0 = torch.zeros((n,tile_c_0*tile_c_0,c,self.subtile_size_0,self.subtile_size_0)).type('torch.cuda.FloatTensor') x_fno_output_0 = torch.zeros((n,self.width,h,w)).type('torch.cuda.FloatTensor') for i in range(tile_c_0): for j in range(tile_c_0): x_fno_input_0[:,i*tile_c_0+j,:,:,:] = x_fno[:,:,i*self.subtile_size_0:(i+1)*self.subtile_size_0,j*self.subtile_size_0:(j+1)*self.subtile_size_0] x_fno_tmp = self.fno0(x_fno_input_0.view(n*tile_c_0*tile_c_0,c,self.subtile_size_0,self.subtile_size_0)) x_fno_tmp = self.act_fn(x_fno_tmp) #n*t*t, 16, 64, 64 x_fno_tmp = x_fno_tmp.view(n, tile_c_0*tile_c_0, self.width, self.subtile_size_0, self.subtile_size_0) for i in range(tile_c_0): for j in range(tile_c_0): x_fno_output_0[:,:,i*self.subtile_size_0:(i+1)*self.subtile_size_0,j*self.subtile_size_0:(j+1)*self.subtile_size_0] = x_fno_tmp[:,i*tile_c_0+j,:,:,:] #n c h w x_fno_0 = self.diconv0(x_fno_output_0) #print(x_fno_0.shape) #fno2 tile_c_1 = h//self.subtile_size_1 x_fno_input_1 = torch.zeros((n,tile_c_1*tile_c_1,c,self.subtile_size_1,self.subtile_size_1)).type('torch.cuda.FloatTensor') x_fno_output_1 = torch.zeros((n,self.width,h,w)).type('torch.cuda.FloatTensor') for i in range(tile_c_1): for j in range(tile_c_1): x_fno_input_1[:,i*tile_c_1+j,:,:,:] = x_fno[:,:,i*self.subtile_size_1:(i+1)*self.subtile_size_1,j*self.subtile_size_1:(j+1)*self.subtile_size_1] x_fno_tmp = self.fno1(x_fno_input_1.view(n*tile_c_1*tile_c_1,c,self.subtile_size_1,self.subtile_size_1)) x_fno_tmp = self.act_fn(x_fno_tmp) #n*t*t, 16, 64, 64 x_fno_tmp = x_fno_tmp.view(n, tile_c_1*tile_c_1, self.width, self.subtile_size_1, self.subtile_size_1) for i in range(tile_c_1): for j in range(tile_c_1): x_fno_output_1[:,:,i*self.subtile_size_1:(i+1)*self.subtile_size_1,j*self.subtile_size_1:(j+1)*self.subtile_size_1] = x_fno_tmp[:,i*tile_c_1+j,:,:,:] #n c h w x_fno_1 = self.diconv1(x_fno_output_1) #print(x_fno_1.shape) #fno3 tile_c_2 = h//self.subtile_size_2 x_fno_input_2 = torch.zeros((n,tile_c_2*tile_c_2,c,self.subtile_size_2,self.subtile_size_2)).type('torch.cuda.FloatTensor') x_fno_output_2 = torch.zeros((n,self.width,h,w)).type('torch.cuda.FloatTensor') for i in range(tile_c_2): for j in range(tile_c_2): x_fno_input_2[:,i*tile_c_2+j,:,:,:] = x_fno[:,:,i*self.subtile_size_2:(i+1)*self.subtile_size_2,j*self.subtile_size_2:(j+1)*self.subtile_size_2] 
x_fno_tmp = self.fno2(x_fno_input_2.view(n*tile_c_2*tile_c_2,c,self.subtile_size_2,self.subtile_size_2)) x_fno_tmp = self.act_fn(x_fno_tmp) #n*t*t, 16, 64, 64 x_fno_tmp = x_fno_tmp.view(n, tile_c_2*tile_c_2, self.width, self.subtile_size_2, self.subtile_size_2) for i in range(tile_c_2): for j in range(tile_c_2): x_fno_output_2[:,:,i*self.subtile_size_2:(i+1)*self.subtile_size_2,j*self.subtile_size_2:(j+1)*self.subtile_size_2] = x_fno_tmp[:,i*tile_c_2+j,:,:,:] #n c h w x_fno_2 = self.diconv2(x_fno_output_2) #print(x_fno_2.shape) x_unet = self.ds0(x) x_unet_1000 = self.vgg0(x_unet) x_unet = self.ds1(x_unet_1000) x_unet_500 = self.vgg1(x_unet) x_unet = self.ds2(x_unet_500) x_unet_250 = self.vgg2(x_unet) #merge fno and unet x = torch.cat((x_fno_0, x_fno_1, x_fno_2, x_unet_250), 1) #dconv x = self.us3(x) x = torch.cat((self.vgg3(x),x_unet_500), 1) x = self.us4(x) x = torch.cat((self.vgg4(x),x_unet_1000), 1) x = self.us5(x) x = self.vgg5(x) #refine x = self.convr0(x) x = self.act_fn(x) x = self.convr1(x) x = self.act_fn(x) x = self.convr2(x) x = self.act_fn(x) x = self.convr3(x) return self.tanh(x) class oinnopc_v001(nn.Module): def __init__(self, modes1, modes2, width, in_channel=1, refine_channel=32, refine_kernel = 3, smooth_kernel = 3): super(oinnopc_v001, self).__init__() # from design to mask, same as forward v2 as baseline. change output to sigmoid self.modes1 = modes1 self.modes2 = modes2 self.width = width self.refine_kernel = refine_kernel self.in_channel = in_channel self.refine_channel = refine_channel self.smooth_kernel = smooth_kernel #self.cemap = cemap self.vgg_channels = [4,8,16,16,8,4] #resize self.resize0 = nn.AvgPool2d(8) #fourier self.fno = SpectralConv2dLiftChannel(self.in_channel, self.width, self.width, self.modes1, self.modes2) #refine self.convr0 = nn.Conv2d(in_channels=self.vgg_channels[5], out_channels=self.refine_channel, kernel_size=self.refine_kernel, padding = (self.refine_kernel-1)//2) self.convr1 = nn.Conv2d(in_channels=self.refine_channel, out_channels=self.refine_channel//2, kernel_size=self.refine_kernel, padding = (self.refine_kernel-1)//2) self.convr2 = nn.Conv2d(in_channels=self.refine_channel//2, out_channels=self.refine_channel//2, kernel_size=self.refine_kernel, padding = (self.refine_kernel-1)//2) self.convr3 = nn.Conv2d(in_channels=self.refine_channel//2, out_channels=1, kernel_size=self.refine_kernel, padding = (self.refine_kernel-1)//2) self.act_fn = nn.LeakyReLU(0.1) #bypass unet self.ds0 = PoolConv(1, self.vgg_channels[0]) self.vgg0 = VGGBlock(self.vgg_channels[0]) self.ds1 = PoolConv(self.vgg_channels[0], self.vgg_channels[1]) self.vgg1 = VGGBlock(self.vgg_channels[1]) self.ds2 = PoolConv(self.vgg_channels[1], self.vgg_channels[2]) self.vgg2 = VGGBlock(self.vgg_channels[2]) #250 self.us3 = UpConv(self.vgg_channels[2]+self.width, self.vgg_channels[3]) #merge with fno output self.vgg3 = VGGBlock(self.vgg_channels[3]) self.us4 = UpConv(self.vgg_channels[3]+self.vgg_channels[1], self.vgg_channels[4]) self.vgg4 = VGGBlock(self.vgg_channels[4]) self.us5 = UpConv(self.vgg_channels[4]+self.vgg_channels[0], self.vgg_channels[5]) self.vgg5 = VGGBlock(self.vgg_channels[5]) self.final = nn.Sigmoid() def forward(self, x): #fno pass x_fno = self.resize0(x) x_fno = self.fno(x_fno) x_fno = self.act_fn(x_fno) #unet pass x_unet = self.ds0(x) x_unet_1000 = self.vgg0(x_unet) x_unet = self.ds1(x_unet_1000) x_unet_500 = self.vgg1(x_unet) x_unet = self.ds2(x_unet_500) x_unet_250 = self.vgg2(x_unet) #merge fno and unet x = torch.cat((x_fno, x_unet_250), 1) #dconv x = 
self.us3(x) x = torch.cat((self.vgg3(x),x_unet_500), 1) x = self.us4(x) x = torch.cat((self.vgg4(x),x_unet_1000), 1) x = self.us5(x) x = self.vgg5(x) #refine x = self.convr0(x) x = self.act_fn(x) x = self.convr1(x) x = self.act_fn(x) x = self.convr2(x) x = self.act_fn(x) x = self.convr3(x) return self.final(x) class oinnlitho(nn.Module): def __init__(self, modes1, modes2, width, in_channel=1, refine_channel=32, refine_kernel = 5, smooth_kernel = 3): super(oinnlitho, self).__init__() # from design to mask, same as forward v2 as baseline. self.modes1 = modes1 self.modes2 = modes2 self.width = width self.refine_kernel = refine_kernel self.in_channel = in_channel self.refine_channel = refine_channel self.smooth_kernel = smooth_kernel #self.cemap = cemap self.vgg_channels = [4,8,16,16,8,4] #resize self.resize0 = nn.AvgPool2d(8) #fourier self.fno = SpectralConv2dLiftChannel(self.in_channel, self.width, self.width, self.modes1, self.modes2) #refine self.convr0 = nn.Conv2d(in_channels=self.vgg_channels[5], out_channels=self.refine_channel, kernel_size=self.refine_kernel, padding = (self.refine_kernel-1)//2) self.convr1 = nn.Conv2d(in_channels=self.refine_channel, out_channels=self.refine_channel//2, kernel_size=self.refine_kernel, padding = (self.refine_kernel-1)//2) self.convr2 = nn.Conv2d(in_channels=self.refine_channel//2, out_channels=self.refine_channel//2, kernel_size=self.refine_kernel, padding = (self.refine_kernel-1)//2) self.convr3 = nn.Conv2d(in_channels=self.refine_channel//2, out_channels=1, kernel_size=self.refine_kernel, padding = (self.refine_kernel-1)//2) self.act_fn = nn.LeakyReLU(0.1) #bypass unet self.ds0 = PoolConv(1, self.vgg_channels[0]) self.vgg0 = VGGBlock(self.vgg_channels[0]) self.ds1 = PoolConv(self.vgg_channels[0], self.vgg_channels[1]) self.vgg1 = VGGBlock(self.vgg_channels[1]) self.ds2 = PoolConv(self.vgg_channels[1], self.vgg_channels[2]) self.vgg2 = VGGBlock(self.vgg_channels[2]) #250 self.us3 = UpConv(self.vgg_channels[2]+self.width, self.vgg_channels[3]) #merge with fno output self.vgg3 = VGGBlock(self.vgg_channels[3]) self.us4 = UpConv(self.vgg_channels[3]+self.vgg_channels[1], self.vgg_channels[4]) self.vgg4 = VGGBlock(self.vgg_channels[4]) self.us5 = UpConv(self.vgg_channels[4]+self.vgg_channels[0], self.vgg_channels[5]) self.vgg5 = VGGBlock(self.vgg_channels[5]) self.tanh = nn.Tanh() def forward(self, x): #fno pass x_fno = self.resize0(x) x_fno = self.fno(x_fno) x_fno = self.act_fn(x_fno) #unet pass x_unet = self.ds0(x) x_unet_1000 = self.vgg0(x_unet) x_unet = self.ds1(x_unet_1000) x_unet_500 = self.vgg1(x_unet) x_unet = self.ds2(x_unet_500) x_unet_250 = self.vgg2(x_unet) #merge fno and unet x = torch.cat((x_fno, x_unet_250), 1) #dconv x = self.us3(x) x = torch.cat((self.vgg3(x),x_unet_500), 1) x = self.us4(x) x = torch.cat((self.vgg4(x),x_unet_1000), 1) x = self.us5(x) x = self.vgg5(x) #refine x = self.convr0(x) x = self.act_fn(x) x = self.convr1(x) x = self.act_fn(x) x = self.convr2(x) x = self.act_fn(x) x = self.convr3(x) return self.tanh(x) class oinnopc_large(nn.Module): #one fno unit only def __init__(self, modes1, modes2, width, in_channel=1, refine_channel=32, refine_kernel = 3, smooth_kernel = 3): super(oinnopc_large, self).__init__() '''Support larger 8kX8k tile input''' self.modes1 = modes1 self.modes2 = modes2 self.width = width self.refine_kernel = refine_kernel self.in_channel = in_channel self.refine_channel = refine_channel self.smooth_kernel = smooth_kernel #self.cemap = cemap self.vgg_channels = [4,8,16,16,8,4] #resize self.resize0 = 
nn.AvgPool2d(8) #fourier self.fno = SpectralConv2dLiftChannel(self.in_channel, self.width, self.width, self.modes1, self.modes2) #refine self.convr0 = nn.Conv2d(in_channels=self.vgg_channels[5], out_channels=self.refine_channel, kernel_size=self.refine_kernel, padding = (self.refine_kernel-1)//2) self.convr1 = nn.Conv2d(in_channels=self.refine_channel, out_channels=self.refine_channel//2, kernel_size=self.refine_kernel, padding = (self.refine_kernel-1)//2) self.convr2 = nn.Conv2d(in_channels=self.refine_channel//2, out_channels=self.refine_channel//2, kernel_size=self.refine_kernel, padding = (self.refine_kernel-1)//2) self.convr3 = nn.Conv2d(in_channels=self.refine_channel//2, out_channels=1, kernel_size=self.refine_kernel, padding = (self.refine_kernel-1)//2) self.act_fn = nn.LeakyReLU(0.1) #bypass unet self.ds0 = PoolConv(1, self.vgg_channels[0]) self.vgg0 = VGGBlock(self.vgg_channels[0]) self.ds1 = PoolConv(self.vgg_channels[0], self.vgg_channels[1]) self.vgg1 = VGGBlock(self.vgg_channels[1]) self.ds2 = PoolConv(self.vgg_channels[1], self.vgg_channels[2]) self.vgg2 = VGGBlock(self.vgg_channels[2]) #250 self.us3 = UpConv(self.vgg_channels[2]+self.width, self.vgg_channels[3]) #merge with fno output self.vgg3 = VGGBlock(self.vgg_channels[3]) self.us4 = UpConv(self.vgg_channels[3]+self.vgg_channels[1], self.vgg_channels[4]) self.vgg4 = VGGBlock(self.vgg_channels[4]) self.us5 = UpConv(self.vgg_channels[4]+self.vgg_channels[0], self.vgg_channels[5]) self.vgg5 = VGGBlock(self.vgg_channels[5]) #S-G smoothing #self.conv_smooth = nn.Conv2d(in_channels=1,out_channels=1,kernel_size=self.smooth_kernel, padding = (self.smooth_kernel-1)//2, bias=False) #self.conv_smooth.weight = nn.Parameter(torch.tensor(data=np.expand_dims(np.expand_dims(sg.get_2D_filter(self.smooth_kernel,1,0),0),0), dtype=torch.float), requires_grad=False) ##self.bn0 = nn.BatchNorm2d(self.refine_channel) ##self.bn1 = nn.BatchNorm2d(self.refine_channel//2) ##self.bn2 = nn.BatchNorm2d(self.refine_channel//2) ##self.bn1 = nn.BatchNorm2d(self.refine_channel) ##self.fc1 = nn.Linear(self.width, 128) ##self.fc2 = nn.Linear(128, 1) self.tanh = nn.Tanh() #attentions #self.attention0 = attention_block(self.refine_channel) #self.attention1 = spatial_attention(self.refine_channel//2) #self.attention2 = spatial_attention(self.refine_channel//2) def forward(self, x): #fno pass x_fno = self.resize0(x) #print(x_fno.shape) #fno_size = x_fno.shape[-1] x_fnos=torch.zeros_like(x_fno).cuda().repeat(1, self.vgg_channels[2], 1, 1) #print(x_fno.shape) for i in range(5): for j in range(5): tmpx = x_fno[:,:,i*128:i*128+256, j*128:j*128+256] #print(tmpx.shape) tmpfno = self.fno(tmpx) tmpfno = self.act_fn(tmpfno) #print(tmpfno.shape) x_fnos[:,:,i*128+64:i*128+192,j*128+64:j*128+192] = tmpfno[:,:,64:192,64:192] #x_fno = self.fno(x_fno) #x_fno = self.act_fn(x_fno) #unet pass x_unet = self.ds0(x) x_unet_1000 = self.vgg0(x_unet) x_unet = self.ds1(x_unet_1000) x_unet_500 = self.vgg1(x_unet) x_unet = self.ds2(x_unet_500) x_unet_250 = self.vgg2(x_unet) #print(x_unet_250.shape) #merge fno and unet x = torch.cat((x_fnos, x_unet_250), 1) #print(x.shape) #dconv x = self.us3(x) x = torch.cat((self.vgg3(x),x_unet_500), 1) x = self.us4(x) x = torch.cat((self.vgg4(x),x_unet_1000), 1) x = self.us5(x) x = self.vgg5(x) #refine x = self.convr0(x) x = self.act_fn(x) #x = self.attention0(x) x = self.convr1(x) x = self.act_fn(x) #x = self.attention1(x) x = self.convr2(x) x = self.act_fn(x) #x = self.attention2(x) x = self.convr3(x) #x = self.conv_smooth(x) return self.tanh(x) def 
conv(in_planes, output_channels, kernel_size, stride, dropout_rate): return nn.Sequential( nn.Conv2d(in_planes, output_channels, kernel_size=kernel_size, stride=stride, padding=(kernel_size - 1) // 2, bias = False), nn.BatchNorm2d(output_channels), nn.LeakyReLU(0.1, inplace=True), nn.Dropout(dropout_rate) ) def deconv(input_channels, output_channels): return nn.Sequential( nn.ConvTranspose2d(input_channels, output_channels, kernel_size=4, stride=2, padding=1), nn.LeakyReLU(0.1, inplace=True) ) def output_layer(input_channels, output_channels, kernel_size, stride, dropout_rate): return nn.Conv2d(input_channels, output_channels, kernel_size=kernel_size, stride=stride, padding=(kernel_size - 1) // 2) #unet baseline class unet(nn.Module): def __init__(self, input_channels, output_channels, kernel_size, dropout_rate): super(unet, self).__init__() self.input_channels = input_channels self.conv1 = conv(input_channels, 64, kernel_size=kernel_size, stride=2, dropout_rate = dropout_rate) self.conv2 = conv(64, 128, kernel_size=kernel_size, stride=2, dropout_rate = dropout_rate) self.conv3 = conv(128, 256, kernel_size=kernel_size, stride=2, dropout_rate = dropout_rate) self.conv3_1 = conv(256, 256, kernel_size=kernel_size, stride=1, dropout_rate = dropout_rate) self.conv4 = conv(256, 512, kernel_size=kernel_size, stride=2, dropout_rate = dropout_rate) self.conv4_1 = conv(512,
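The tiled forward passes above all follow one pattern: pool the input, cut the pooled map into fixed-size subtiles, push every subtile through the spectral (FNO) block, and stitch the per-tile outputs back into a full feature map before concatenating with the UNet branch. A minimal sketch of that tile/process/reassemble step, assuming a generic process callable in place of the FNO block and an input whose height and width divide evenly by the tile size:

import torch

def tile_process_reassemble(x, tile, process, out_channels):
    # Split an (n, c, h, w) tensor into non-overlapping tile x tile patches,
    # apply `process` to each patch, and write the results back in place.
    # Hypothetical helper mirroring the subtile loops in the models above.
    n, c, h, w = x.shape
    assert h % tile == 0 and w % tile == 0, "input must divide evenly by the tile size"
    out = torch.zeros((n, out_channels, h, w), dtype=x.dtype, device=x.device)
    for i in range(h // tile):
        for j in range(w // tile):
            patch = x[:, :, i * tile:(i + 1) * tile, j * tile:(j + 1) * tile]
            out[:, :, i * tile:(i + 1) * tile, j * tile:(j + 1) * tile] = process(patch)
    return out

# Toy usage: "process" just repeats the channel dimension three times.
x = torch.randn(2, 1, 8, 8)
y = tile_process_reassemble(x, tile=4, process=lambda p: p.repeat(1, 3, 1, 1), out_channels=3)
assert y.shape == (2, 3, 8, 8)

The models above avoid the per-patch Python call by reshaping all patches into a single (n*t*t, c, tile, tile) batch before invoking the FNO, and oinnopc_large instead slides overlapping 256-pixel windows with a 128-pixel stride and keeps only the central 128x128 of each output, presumably to suppress tile-boundary artifacts.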
value in DevicePolicyTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevicePolicyTimestamp must be specified if op_DevicePolicyTimestamp is specified. :type val_c_DevicePolicyTimestamp: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_PolicyID: The operator to apply to the field PolicyID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyID: The internal NetMRI identifier for the policy whose status this record represents. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_PolicyID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_PolicyID: If op_PolicyID is specified, the field named in this input will be compared to the value in PolicyID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyID must be specified if op_PolicyID is specified. :type val_f_PolicyID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_PolicyID: If op_PolicyID is specified, this value will be compared to the value in PolicyID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyID must be specified if op_PolicyID is specified. :type val_c_PolicyID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_PolicyRulesChecked: The operator to apply to the field PolicyRulesChecked. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesChecked: The total number of rules that were checked against this device for this policy. Invalid rules and rules that are skipped due to the device not matching the rule filter are not counted as 'checked' rules. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_PolicyRulesChecked: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_PolicyRulesChecked: If op_PolicyRulesChecked is specified, the field named in this input will be compared to the value in PolicyRulesChecked using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesChecked must be specified if op_PolicyRulesChecked is specified. :type val_f_PolicyRulesChecked: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_PolicyRulesChecked: If op_PolicyRulesChecked is specified, this value will be compared to the value in PolicyRulesChecked using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesChecked must be specified if op_PolicyRulesChecked is specified. 
:type val_c_PolicyRulesChecked: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_PolicyRulesError: The operator to apply to the field PolicyRulesError. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesError: The total number of rules in this policy that the device failed with error status. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_PolicyRulesError: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_PolicyRulesError: If op_PolicyRulesError is specified, the field named in this input will be compared to the value in PolicyRulesError using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesError must be specified if op_PolicyRulesError is specified. :type val_f_PolicyRulesError: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_PolicyRulesError: If op_PolicyRulesError is specified, this value will be compared to the value in PolicyRulesError using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesError must be specified if op_PolicyRulesError is specified. :type val_c_PolicyRulesError: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_PolicyRulesFailed: The operator to apply to the field PolicyRulesFailed. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesFailed: The total number of rules in this policy that the device failed with info, warning, or error status. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_PolicyRulesFailed: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_PolicyRulesFailed: If op_PolicyRulesFailed is specified, the field named in this input will be compared to the value in PolicyRulesFailed using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesFailed must be specified if op_PolicyRulesFailed is specified. :type val_f_PolicyRulesFailed: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_PolicyRulesFailed: If op_PolicyRulesFailed is specified, this value will be compared to the value in PolicyRulesFailed using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesFailed must be specified if op_PolicyRulesFailed is specified. :type val_c_PolicyRulesFailed: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_PolicyRulesInfo: The operator to apply to the field PolicyRulesInfo. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesInfo: The total number of rules in this policy that the device failed with info status. 
For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_PolicyRulesInfo: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_PolicyRulesInfo: If op_PolicyRulesInfo is specified, the field named in this input will be compared to the value in PolicyRulesInfo using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesInfo must be specified if op_PolicyRulesInfo is specified. :type val_f_PolicyRulesInfo: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_PolicyRulesInfo: If op_PolicyRulesInfo is specified, this value will be compared to the value in PolicyRulesInfo using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesInfo must be specified if op_PolicyRulesInfo is specified. :type val_c_PolicyRulesInfo: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_PolicyRulesInvalid: The operator to apply to the field PolicyRulesInvalid. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesInvalid: The total number of invalid rules that were in this policy at the time the policy was executed against this device.
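Every filterable column documented above follows the same triplet convention: op_<Field> selects the comparison operator, and exactly one of val_c_<Field> (an explicit constant) or val_f_<Field> (the name of another field) supplies the right-hand side; the between operator expects an even number of comma-delimited values. A hedged illustration of how such filter parameters are assembled (the method that ultimately consumes them is not shown in this excerpt, and the field values are made up):

# Hypothetical filter parameters following the op_/val_c_/val_f_ convention above.
params = {
    # Constant comparison: PolicyID = 42
    "op_PolicyID": "=",
    "val_c_PolicyID": "42",
    # Range comparison: PolicyRulesFailed between 1 and 10
    # ("between" requires an even number of comma-delimited values)
    "op_PolicyRulesFailed": "between",
    "val_c_PolicyRulesFailed": "1,10",
    # Field-to-field comparison: PolicyRulesError >= PolicyRulesInfo
    "op_PolicyRulesError": ">=",
    "val_f_PolicyRulesError": "PolicyRulesInfo",
}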
'''
Copyright 2016 <NAME>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import string
import time
import json
import operator
import urllib.request
import math
import threading
import os

def add_tracks(tracksList, finalJson):
    Final = finalJson
    Tracks = tracksList
    artists = Final['artists']
    for track in Tracks['tracks']:
        # checks if a track is currently playing (according to lastfm)
        # if it is the track is skipped
        try:
            if track['@attr']['nowplaying'] == 'true':
                #print('Found now playing track')
                continue
        except KeyError:
            pass
        track_artist_info = track['artist']
        artist_mbid = track_artist_info['mbid']
        artist_name = track_artist_info['name']
        file_artist_mbid = next((item for item in artists if item["mbid"] == artist_mbid), None)
        file_artist_name = next((item for item in artists if item["name"] == artist_name), None)
        if file_artist_name == None:
            artists.append({'name': artist_name, 'mbid': artist_mbid, 'albums': []})
            currentArtist = next((item for item in artists if item["name"] == artist_name), None)
        elif file_artist_name != None:
            if file_artist_mbid == None:
                currentArtist = file_artist_name
            elif file_artist_mbid != None:
                currentArtist = file_artist_name
        #
        track_album_info = track['album']
        album_mbid = track_album_info['mbid']
        album_name = track_album_info['#text']
        file_album_mbid = next((item for item in currentArtist['albums'] if item["mbid"] == album_mbid), None)
        file_album_name = next((item for item in currentArtist['albums'] if item["name"] == album_name), None)
        if file_album_name == None:
            if file_album_mbid == None:
                if album_name == None:
                    currentArtist['albums'].append({'name': album_name, 'mbid': album_mbid, 'tracks': [], 'playcount': 0})
                    currentAlbum = next((item for item in currentArtist['albums'] if item["name"] == album_name and item["mbid"] == album_mbid), None)
                else:
                    currentArtist['albums'].append({'name': album_name, 'mbid': album_mbid, 'tracks': [], 'playcount': 0})
                    currentAlbum = next((item for item in currentArtist['albums'] if item["name"] == album_name), None)
            else:
                currentArtist['albums'].append({'name': album_name, 'mbid': album_mbid, 'tracks': [], 'playcount': 0})
                currentAlbum = next((item for item in currentArtist['albums'] if item["name"] == album_name), None)
        elif file_album_name != None:
            if file_album_mbid == None:
                currentAlbum = file_album_name
            elif file_album_mbid != None:
                currentAlbum = file_album_name
        #
        track_mbid = track['mbid']
        track_name = track['name']
        file_track_mbid = next((item for item in currentAlbum['tracks'] if item["mbid"] == track_mbid), None)
        file_track_name = next((item for item in currentAlbum['tracks'] if item["name"] == track_name), None)
        if file_track_name == None:
            currentAlbum['tracks'].append({'name': track_name, 'mbid': track_mbid, 'timestamps': [], 'playcount': 0})
            currentTrack = next((item for item in currentAlbum['tracks'] if item["name"] == track_name), None)
        elif file_track_name != None:
            if file_track_mbid == None:
                currentTrack = file_track_name
            elif file_track_mbid != None:
                currentTrack = file_track_name
        #
        track_timestamp =
track['date']['uts'] currentTrack['timestamps'].append(track_timestamp) # def get_link(tracksList, page, limit, lock, start, username): Tracks = tracksList page_time = time.clock() url_page = "http://ws.audioscrobbler.com/2.0/?method=user.getrecenttracks&extended=1" + "&user=" + username + "&page=" + str(page) + "&limit=" + str(limit) + "&from=" + str(start) + "&api_key=" + api_key + "&format=json" url_page_req = urllib.request.urlopen(url_page) tempJson = json.loads(url_page_req.read().decode('utf-8')) locks_acquired = 0 # checks if the list is locked, if it is locked the thread waits, if it isnt locked the thread locks it and write to the list while locks_acquired != 1: #print('Page {} trying to acquire lock' .format(page)) if lock.locked() == False: lock.acquire() tracks_time = time.clock() #print('Lock aquired, page {}' .format(page)) tracks = tempJson['recenttracks']['track'] Tracks['tracks'] += tracks locks_acquired = 1 print('Page {} finished' .format(page)) print('Time to get and add tracks: {}s' .format(time.clock() - tracks_time)) print('Time taken to finish page: {}s' .format(time.clock() - page_time)) lock.release() else: print('Page {} could not aquire lock' .format(page)) # def get_tracks(username, start, limit, filename): if filename[-5:] == '.json': pass else: filename += '.json' # gets the total amount of tracks and finds the timestamp of the last listened track tempLimit = 2 url = "http://ws.audioscrobbler.com/2.0/?method=user.getrecenttracks&extended=1" + "&user=" + username + "&page=1" + "&limit=" + str(tempLimit) + "&from=" + str(start) + "&api_key=" + api_key + "&format=json" url_req = urllib.request.urlopen(url) recentTrackInfo = json.loads(url_req.read().decode("utf-8")) #print(recentTrackInfo) total_tracks = int(recentTrackInfo['recenttracks']["@attr"]["total"]) total_pages = math.ceil(total_tracks/limit) print('Total number of pages: {}' .format(total_pages)) if int(start) == 0: Final = {} Final['user'] = username Final['artists'] = [] presentTracks = 0 else: Final = json.load(open(filename, 'r')) presentTracks = Final['total playcount'] # try: Final['last track'] = recentTrackInfo['recenttracks']['track'][0]['date']['uts'] except KeyError: try: Final['last track'] = recentTrackInfo['recenttracks']['track'][1]['date']['uts'] except IndexError: pass except IndexError: pass CurrentPage = 1 downloadThreads = [] max_num_Threads = 20 num_Threads = max_num_Threads pages_left = total_pages pagesAcquired = 0 pages_time = time.clock() while pages_left > 0: pageTracks = {} pageTracks['tracks'] = [] #print('CurrentPage {}' .format(CurrentPage)) # number of threads to open # for loop with a range from the current page to (currentpage + num_Threads) # depending on the value of num_Threads, threads will be open simultanouesly if pages_left < num_Threads: num_Threads = pages_left print('----------------------------------------------------------') print('Number of threads has been decreased to {}' .format(num_Threads)) print('----------------------------------------------------------') for page in range(CurrentPage, (CurrentPage + num_Threads)): downloadThread = threading.Thread(target=get_link, args=(pageTracks, page, limit, lock, start, username)) downloadThreads.append(downloadThread) downloadThread.start() pagesAcquired += 1 for downloadThread in downloadThreads: downloadThread.join() pages_left = total_pages - pagesAcquired print('Pages left {}' .format(pages_left)) CurrentPage += num_Threads add_tracks(pageTracks, Final) ### 
print('----------------------------------------------------------') #print('number of tracks obtained: {}' .format(len(pageTracks['tracks']))) print('Time to get all tracks: {}' .format(time.clock() - pages_time)) print('----------------------------------------------------------') #add_tracks(Tracks, Final) total_playcount = 0 # finds the number of playcounts for every track, album, artist and also the total playcount Final['total playcount'] = 0 for artist in Final['artists']: artist['playcount'] = 0 for album in artist['albums']: album['playcount'] = 0 for track in album['tracks']: track['playcount'] = 0 for timestamp in track['timestamps']: total_playcount += 1 track['playcount'] += 1 album['playcount'] += 1 artist['playcount'] += 1 Final['total playcount'] += 1 # # sorts artists, albums and tracks by number of playcounts Final['artists'] = sorted(Final['artists'], key=operator.itemgetter('playcount'), reverse=True) for artist in Final['artists']: artist['albums'] = sorted(artist['albums'], key=operator.itemgetter('playcount'), reverse=True) for album in artist['albums']: album['tracks'] = sorted(album['tracks'], key=operator.itemgetter('playcount'), reverse=True) for track in album['tracks']: track['timestamps'] = sorted(track['timestamps'], reverse=True) # json.dump(Final, open(filename, 'w')) print('Total number of tracks: {}' .format(total_playcount)) print('Number of tracks added: {}' .format(total_playcount - presentTracks)) # def update(filename): file = json.load(open(filename, 'r')) lastPlay = file['last track'] startingTime = int(lastPlay) + 1 username = file['user'] get_tracks(username, startingTime, 50, filename) # def settings(): settings_filename = 'lastfm data settings.json' username = '' filename = '' settings = {'username': username, 'filename':filename} if os.path.isfile(settings_filename): settings = json.load(open(settings_filename, 'r')) return settings #def __name__(__main__): lock = threading.Lock() valid_chars = "!£$^&()_+{}@~¬`-=[];'#,. %s%s" % (string.ascii_letters, string.digits) api_key = '<KEY>' ''' print("Enter the command 'help' to get list of avaliable commands.") print("Press enter to exit script") settings_filename = 'lastfm data settings.json' if os.path.isfile(settings_filename): settings = json.load(open(settings_filename, 'r')) username = settings['username'] filename = settings['filename'] try: api_key = settings['api_key'] except KeyError: pass else: username = input('Enter username: ') filename = input('Enter filename: ') if filename[-5:] == '.json': pass else: filename += '.json' # filename = ''.join(c for c in filename if c in valid_chars) settings = {'username': username, 'filename':filename} remember_settings = input('Remember settings? 
(yes/no)\n').lower() if remember_settings == 'yes': print('Settings file created') print('') json.dump(settings, open(settings_filename, 'w')) else: print('Settings will not be remembered') # while True: pageTracks = {} pageTracks['tracks'] = [] print('') command = input('Enter command: ').lower() print('') if command == 'filename': print('The current filename is: {}' .format(filename)) # elif command == 'username': print('The current username is: {}' .format(username)) # elif command == 'change settings': print('Press enter to leave field unchanged') print('The current username is: {}' .format(username)) username = input('Enter username: ') if username != '': settings['username'] = username print('Username changed to {}' .format(username)) else: username = settings["username"] print('The current filename is: {}' .format(filename)) filename = input('Enter filename: ') if filename != '': if filename[-5:] == '.json': pass else: filename += '.json' # filename = ''.join(c for c in filename if c in valid_chars) settings['filename'] = filename print('Filename changed to {}' .format(filename)) else: filename = settings['filename'] json.dump(settings, open(settings_filename, 'w')) # elif command == 'update': if os.path.isfile(filename): Tracks = {} Tracks['tracks'] = [] update(filename) else: print('The file with the given filename has not been
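The download worker in get_link above polls lock.locked() in a loop before calling lock.acquire(); the check is redundant because acquire() already blocks until the lock is free, and the loop prints and spins while waiting. The same intent, one thread at a time appending its page of tracks to the shared list, is usually written with the lock's context manager. A minimal sketch of that pattern, independent of the Last.fm API:

import threading

shared = {'tracks': []}
lock = threading.Lock()

def worker(page, items):
    # "with lock" blocks until the lock is free, so no locked()/acquire() polling loop.
    with lock:
        shared['tracks'].extend(items)
        print('Page {} finished'.format(page))

threads = [threading.Thread(target=worker, args=(p, [p] * 3)) for p in range(1, 4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert len(shared['tracks']) == 9

Note also that time.clock(), used for the timing printouts above, was removed in Python 3.8; time.perf_counter() is the usual drop-in replacement.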
there's already") with assertRaisesRegex(self, command.CommandException, "Bad request. Maybe there's already"): command.create_user('', '<EMAIL>', team='qux') def test_user_create_bogus_team(self): self._mock_error('users/create', status=400, team='qux', message="Please enter a valid email address.") with assertRaisesRegex(self, command.CommandException, "Please enter a valid email address."): command.create_user('bob', 'wrongemail', team='qux') def test_user_create_empty_team(self): self._mock_error('users/create', status=400, team='qux', message="Bad request. Maybe there's already") with assertRaisesRegex(self, command.CommandException, "Bad request. Maybe there's already"): command.create_user('', '<EMAIL>', team='qux') def test_user_create_nonexisting_team(self): self._mock_error('users/create', status=404, team='nonexisting') with self.assertRaises(command.CommandException): command.create_user('bob', '<EMAIL>', team='nonexisting') def test_user_disable_not_found(self): self._mock_error('users/disable', status=404, team='qux') with self.assertRaises(command.CommandException): command.disable_user('bob', 'qux') def test_user_disable_server_error(self): self._mock_error('users/disable', team='qux', status=500) with self.assertRaises(command.CommandException): command.disable_user('bob', 'qux') def test_user_disable_already(self): self._mock_error('users/disable', status=404, team='qux') with self.assertRaises(command.CommandException): command.disable_user('bob', team='qux') def test_user_disable_deleted(self): self._mock_error('users/disable', status=404, team='qux') with self.assertRaises(command.CommandException): command.disable_user('deleted', team='qux') def test_user_disable_non_existing_team(self): self._mock_error('users/disable', status=404, team='nonexisting') with self.assertRaises(command.CommandException): command.disable_user('bob', team='nonexisting') def test_user_disable_non_existing(self): self._mock_error('users/disable', status=404, team='qux') with self.assertRaises(command.CommandException): command.disable_user('nonexisting', team='qux') def test_user_disable_empty(self): self._mock_error('users/disable', status=400, team='qux', message="Username is not valid") with assertRaisesRegex(self, command.CommandException, "Username is not valid"): command.disable_user('', team='qux') def test_user_disable_no_auth(self): self._mock_error('users/disable', status=401, team='qux') with self.assertRaises(command.CommandException): command.disable_user('bob', team='qux') def test_user_disable_unknown(self): self._mock_error('users/disable', status=404, team='qux') with self.assertRaises(command.CommandException): command.disable_user('unknown', team='qux') def test_user_delete(self): self._mock_error('users/delete', status=201, team='qux') command.delete_user('bob', force=True, team='qux') def test_user_delete_not_found(self): self._mock_error('users/delete', team='qux', status=404) with self.assertRaises(command.CommandException): command.delete_user('bob', team='qux', force=True) def test_user_delete_server_error(self): self._mock_error('users/delete', status=404, team='qux') with self.assertRaises(command.CommandException): command.delete_user('bob', 'qux', force=True) def test_user_delete_empty(self): self._mock_error('users/delete', status=400, team='qux', message="Username is not valid") with assertRaisesRegex(self, command.CommandException, "Username is not valid"): command.delete_user('', force=True, team='qux') def test_user_delete_no_auth(self): 
self._mock_error('users/delete', status=401, team='qux') with self.assertRaises(command.CommandException): command.delete_user('bob', force=True, team='qux') def test_user_delete_unknown(self): self._mock_error('users/delete', status=404, team='qux') with self.assertRaises(command.CommandException): command.delete_user('unknown', force=True, team='qux') def test_user_delete_already(self): self._mock_error('users/delete', status=404, team='qux') with self.assertRaises(command.CommandException): command.delete_user('deleted', team='qux', force=True) def test_user_delete_nonexisting_team(self): self._mock_error('users/delete', status=404, team='nonexisting') with self.assertRaises(command.CommandException): command.delete_user('bob', force=True, team='nonexisting') @patch('quilt.tools.command._find_logged_in_team', lambda: "someteam") def test_audit_user(self): self.requests_mock.add( responses.GET, '%s/api/audit/bob/' % command.get_registry_url("someteam"), status=201, json={ 'events': [{ 'created': '', 'user': 'bob', 'type': 'user', 'package_owner': '', 'package_name': '', 'package_hash': '', 'extra': '' }] }) command.audit('bob') @patch('quilt.tools.command._find_logged_in_team', lambda: "someteam") def test_audit_package(self): self.requests_mock.add( responses.GET, '%s/api/audit/foo/bar/' % command.get_registry_url("someteam"), status=201, json={ 'events': [{ 'created': '', 'user': 'bob', 'type': 'package', 'package_owner': '', 'package_name': '', 'package_hash': '', 'extra': '' }] }) command.audit('foo/bar') @patch('quilt.tools.command._find_logged_in_team', lambda: "someteam") def test_audit_no_auth_user(self): self._mock_error('audit/bob/', status=401, team='someteam', method=responses.GET) with self.assertRaises(command.CommandException): command.audit('bob') @patch('quilt.tools.command._find_logged_in_team', lambda: "someteam") def test_audit_no_auth_package(self): self._mock_error('audit/foo/bar/', status=401, team='someteam', method=responses.GET) with self.assertRaises(command.CommandException): command.audit('foo/bar') @patch('quilt.tools.command._find_logged_in_team', lambda: None) def test_audit_no_team(self): with assertRaisesRegex(self, command.CommandException, "Not logged in as a team user"): command.audit('bob') command.audit('foo/bar') @patch('quilt.tools.command._find_logged_in_team', lambda: "someteam") def test_audit_not_admin_user(self): self._mock_error('audit/bob/', status=403, team='someteam', method=responses.GET) with self.assertRaises(command.CommandException): command.audit('bob') @patch('quilt.tools.command._find_logged_in_team', lambda: "someteam") def test_audit_not_admin_package(self): self._mock_error('audit/foo/bar/', status=403, team='someteam', method=responses.GET) with self.assertRaises(command.CommandException): command.audit('foo/bar') @patch('quilt.tools.command._find_logged_in_team', lambda: None) @patch('sys.stdout', new_callable=StringIO) def test_access_list(self, mock_stdout): self.requests_mock.add( responses.GET, '%s/api/access/foo/bar/' % command.get_registry_url(None), status=201, json={ 'users': ['foo', 'bob'] } ) command.access_list('foo/bar') assert mock_stdout.getvalue() == 'foo\nbob\n' @patch('quilt.tools.command._find_logged_in_team', lambda: None) def test_access_list_no_auth(self): self._mock_error('access/foo/bar/', status=401, method=responses.GET) with self.assertRaises(command.CommandException): command.access_list('foo/bar') @patch('quilt.tools.command._find_logged_in_team', lambda: None) @patch('sys.stdout', 
new_callable=StringIO) def test_access_remove(self, mock_stdout): self.requests_mock.add( responses.DELETE, '%s/api/access/foo/bar/bob' % command.get_registry_url(None), status=201 ) command.access_remove('foo/bar', 'bob') assert mock_stdout.getvalue() == u'Access removed for bob\n' @patch('quilt.tools.command._find_logged_in_team', lambda: None) def test_access_remove_no_auth(self): self._mock_error('access/foo/bar/bob', status=401, method=responses.DELETE) with self.assertRaises(command.CommandException): command.access_remove('foo/bar', 'bob') @patch('quilt.tools.command._find_logged_in_team', lambda: None) def test_access_remove_not_owner(self): self._mock_error('access/foo/bar/bob', status=403, method=responses.DELETE, message="Only the package owner can revoke access") with assertRaisesRegex(self, command.CommandException, "Only the package owner can revoke access"): command.access_remove('foo/bar', 'bob') @patch('quilt.tools.command._find_logged_in_team', lambda: None) def test_access_remove_owner(self): self._mock_error('access/foo/bar/foo', status=403, method=responses.DELETE, message="Cannot revoke the owner's access") with assertRaisesRegex(self, command.CommandException, "Cannot revoke the owner's access"): command.access_remove('foo/bar', 'foo') @patch('quilt.tools.command._find_logged_in_team', lambda: None) def test_access_remove_free_plan(self): self._mock_error('access/foo/bar/foo', status=402, method=responses.DELETE, message="Insufficient permissions.") with assertRaisesRegex(self, command.CommandException, "Insufficient permissions."): command.access_remove('foo/bar', 'foo') @patch('quilt.tools.command._find_logged_in_team', lambda: None) @patch('sys.stdout', new_callable=StringIO) def test_access_add(self, mock_stdout): self.requests_mock.add( responses.PUT, '%s/api/access/foo/bar/bob' % command.get_registry_url(None), status=201 ) command.access_add('foo/bar', 'bob') assert mock_stdout.getvalue() == u'Access added for bob\n' @patch('quilt.tools.command._find_logged_in_team', lambda: None) def test_access_add_no_auth(self): self._mock_error('access/foo/bar/bob', status=401, method=responses.PUT) with self.assertRaises(command.CommandException): command.access_add('foo/bar', 'bob') @patch('quilt.tools.command._find_logged_in_team', lambda: None) def test_access_add_not_owner(self): self._mock_error('access/foo/bar/bob', status=403, method=responses.PUT, message="Only the package owner can revoke access") with assertRaisesRegex(self, command.CommandException, "Only the package owner can revoke access"): command.access_add('foo/bar', 'bob') # TODO: work in progress # def test_find_node_by_name(self): # mydir = os.path.dirname(__file__) # build_path = os.path.join(mydir, './build.yml') # command.build('foo/bar', build_path) # # owner, pkg = store.parse_package('foo/bar') # pkgobj = store.PackageStore.find_package(owner, pkg) # assert pkgobj is not None # assert pkgobj.find_node_by_name('') is None # assert pkgobj.find_node_by_name('bar') is None # assert pkgobj.find_node_by_name('foo') is None # assert pkgobj.find_node_by_name('README.md') is None # assert pkgobj.find_node_by_name('data/README') is None # assert pkgobj.find_node_by_name('data/README.md') is None # assert pkgobj.find_node_by_name('README') is not None # tsvnode = pkgobj.find_node_by_name('dataframes/tsv') # assert tsvnode is not None # tsvdf = pkgobj.get_obj(tsvnode) # assert tsvdf is not None # diff = command.diff_vs_dataframe('foo/bar', 'dataframes/tsv', tsvdf) # assert diff is None # diff = 
command.diff_vs_dataframe('foo/bar', 'dataframes/csv', tsvdf) # assert diff is None # import random # tsvdf['UID1'] = tsvdf['UID1'].apply( # lambda v: v if random.random()>0.01 else ('val'+str(random.random()))) # diff = command.diff_vs_dataframe('foo/bar', 'dataframes/tsv', tsvdf) # assert diff is None def test_log(self): mydir = os.path.dirname(__file__) build_path = os.path.join(mydir, './build_simple.yml') owner = 'foo' package = 'bar' command.build('%s/%s' % (owner, package), build_path) pkgstore, pkgroot = store.PackageStore.find_package(None, owner, package) self._mock_logs_list(owner, package, hash_contents(pkgroot)) command.log("{owner}/{pkg}".format(owner=owner, pkg=package)) def _mock_logs_list(self, owner, package, pkg_hash): logs_url = "%s/api/log/%s/%s/" % (command.get_registry_url(None), owner, package) resp = dict(logs=[dict( hash=pkg_hash, created=time.time(), author=owner)]) print("MOCKING URL=%s" % logs_url) self.requests_mock.add(responses.GET, logs_url, json.dumps(resp)) def test_generate_buildfile_wo_building(self): mydir = os.path.dirname(__file__) path = os.path.join(mydir, 'data') buildfilename = 'build_test_generate_buildfile_wo_building.yml' buildfilepath = os.path.join(path, buildfilename) assert not os.path.exists(buildfilepath), "%s already exists" % buildfilepath try: command.generate(path, outfilename=buildfilename) assert os.path.exists(buildfilepath), "failed to create %s" % buildfilepath finally: os.remove(buildfilepath) @patch('quilt.tools.command.input') def test_delete_not_confirmed(self, mock_input): mock_input.return_value = 'blah' command.delete('user/test') @patch('quilt.tools.command.input') def test_delete_confirmed(self, mock_input): owner = 'foo' package = 'bar' mock_input.return_value = '%s/%s' % (owner, package) delete_url = "%s/api/package/%s/%s/" % (command.get_registry_url(None), owner, package) self.requests_mock.add(responses.DELETE, delete_url, json.dumps(dict())) command.delete('%s/%s' % (owner, package)) def test_build_from_git(self): git_url = 'https://github.com/quiltdata/testdata.git' def mock_git_clone(cmd): # test git command assert len(cmd) == 6 assert cmd[:5] == ['git', 'clone', '-q', '--depth=1', git_url] # fake git clone by copying test files into destpath srcfile = 'foo.csv' mydir = os.path.dirname(__file__) srcpath = os.path.join(mydir, 'data', srcfile) destpath = os.path.join(cmd[-1], srcfile) shutil.copyfile(srcpath, destpath) with patch('subprocess.check_call', mock_git_clone): command.build('user/test', git_url) from quilt.data.user import test assert hasattr(test, 'foo') assert isinstance(test.foo(), pd.DataFrame) def test_build_from_git_branch(self): branch = 'notmaster' git_url = 'https://github.com/quiltdata/testdata.git' def mock_git_clone(cmd): # test git command assert len(cmd) == 8 assert cmd[:7] == ['git', 'clone', '-q', '--depth=1', '-b', branch, git_url] # fake git clone by copying test files into destpath srcfile = 'foo.csv' mydir = os.path.dirname(__file__) srcpath = os.path.join(mydir, 'data', srcfile) destpath = os.path.join(cmd[-1], srcfile) shutil.copyfile(srcpath, destpath) with patch('subprocess.check_call', mock_git_clone): command.build('user/test', "{url}@{brch}".format(url=git_url, brch=branch)) from quilt.data.user import test assert hasattr(test, 'foo') assert isinstance(test.foo(), pd.DataFrame) def test_build_yaml_syntax_error(self): path = os.path.dirname(__file__) buildfilepath = os.path.join(path, 'build_bad_syntax.yml') with assertRaisesRegex(self, command.CommandException, r'Bad yaml 
syntax.*build_bad_syntax\.yml'): command.build('user/test', buildfilepath) def test_build_checks_yaml_syntax_error(self): # pylint: disable=C0103 path = os.path.abspath(os.path.dirname(__file__)) buildfilepath = os.path.join(path, 'build_checks_bad_syntax.yml') checksorigpath = os.path.join(path, 'checks_bad_syntax.yml') checksfilepath = os.path.join(path, 'checks.yml') try: origdir = os.curdir os.chdir(path) assert not os.path.exists(checksfilepath) shutil.copy(checksorigpath, checksfilepath) with assertRaisesRegex(self, command.CommandException, r'Bad yaml syntax.*checks\.yml'): command.build('user/test', buildfilepath) finally: os.remove(checksfilepath) os.chdir(origdir) def test_git_clone_fail(self): git_url = 'https://github.com/quiltdata/testdata.git' def mock_git_clone(cmd): # test git command assert len(cmd) == 6 assert cmd[:5] == ['git', 'clone', '-q', '--depth=1', git_url] # fake git clone fail raise Exception() with patch('subprocess.check_call', mock_git_clone): with self.assertRaises(command.CommandException): command.build('user/pkg__test_git_clone_fail', git_url) # TODO: running -n (pytest-xdist) there's leaky state and can throw # either ImportError: cannot import name or ModuleNotFoundError with assertRaisesRegex(self, Exception, r'cannot import|not found|No module named|Could not find'): from quilt.data.user import pkg__test_git_clone_fail def test_logging(self): mydir = os.path.dirname(__file__) build_path = os.path.join(mydir, './build_simple.yml') log_url = '%s/api/log' % (command.get_registry_url(None),) # Successful logging response. with patch('quilt.tools.command._load_config', return_value={}): def callback(request): data = json.loads(request.body) assert data == [dict( type='build', package=hashlib.md5(b'foo/bar').hexdigest(), dry_run=False, env='default', )] return (200, {}, '') self.requests_mock.add_callback(responses.POST, log_url, callback) command.build('foo/bar', build_path) # Failed logging response. with patch('quilt.tools.command._load_config', return_value={}): self.requests_mock.add(responses.POST, log_url, status=500) command.build('foo/bar', build_path) # ConnectionError with patch('quilt.tools.command._load_config', return_value={}): self.requests_mock.add(responses.POST, log_url, body=requests.exceptions.ConnectionError()) command.build('foo/bar', build_path) # Disabled logging. with patch('quilt.tools.command._load_config', return_value={'disable_analytics': True}): self.requests_mock.add(responses.POST, log_url, body=AssertionError('Unexpected logging!')) command.build('foo/bar', build_path) self.requests_mock.reset() # Prevent the "not all requests ..." assert. def test_rm(self): """ Test removing a package. """ mydir = os.path.dirname(__file__) build_path = os.path.join(mydir, './build_simple.yml') command.build('foo/bar', build_path) command.rm('foo/bar', force=True) teststore = store.PackageStore(self._store_dir) assert not os.path.isdir(teststore.package_path(None, 'foo', 'bar')) def test_rm_non_existent_package(self): """ Test removing a non-existent package. """ teststore = store.PackageStore(self._store_dir) assert not os.path.isdir(teststore.package_path(None, 'foo', 'bar')) command.rm('foo/bar', force=True) def test_rm_package_w_shared_obj(self): """ Test removing a package that shares an object with another. The other package should still remain. 
""" mydir = os.path.dirname(__file__) build_path = os.path.join(mydir, './build_simple.yml') command.build('foo/bar', build_path) command.build('foo/bar2', build_path) command.rm('foo/bar', force=True) teststore = store.PackageStore(self._store_dir) assert not os.path.isdir(teststore.package_path(None, 'foo', 'bar')) from quilt.data.foo import bar2 assert isinstance(bar2.foo(), pd.DataFrame) def test_rm_subpackage(self): """ Test removing a sub-package (not supported). """ with assertRaisesRegex(self, command.CommandException, "Specify package as"): command.rm('foo/bar/baz', force=True)
988, (None, None), # (flag_str, value, pre_delay_ms) (("start", 0, None), (None, 9, None), (None, 5, None), ("end", 13, None)), self.get_expected_result(27, 13, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) for t in threads: t.start() for t in threads: t.join() self.check_deferred_exception() self.check_status(model_name, (1,), 4 * min(2, _model_instances), 8) except InferenceServerException as ex: self.assertTrue(False, "unexpected error {}".format(ex)) def test_skip_batch(self): # Test model instances together are configured with # total-batch-size 4. Send four sequences in parallel where # two sequences have shorter length so that padding must be # applied correctly for the longer sequences. for trial in _trials: try: dtype = self.get_datatype(trial) model_name = tu.get_sequence_model_name(trial, dtype) protocol = "streaming" self.check_setup(model_name) # Need scheduler to wait for queue to contain all # inferences for both sequences. self.assertTrue("TRTSERVER_DELAY_SCHEDULER" in os.environ) self.assertEqual(int(os.environ["TRTSERVER_DELAY_SCHEDULER"]), 12) self.assertTrue("TRTSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ) self.assertEqual(int(os.environ["TRTSERVER_BACKLOG_DELAY_SCHEDULER"]), 0) threads = [] threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1001, (None, None), # (flag_str, value, pre_delay_ms) (("start", 1, None), ("end", 3, None)), self.get_expected_result(4, 3, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1002, (None, None), # (flag_str, value, pre_delay_ms) (("start", 11, None), (None, 12, None), (None, 13, None), ("end", 14, None)), self.get_expected_result(50, 14, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1003, (None, None), # (flag_str, value, pre_delay_ms) (("start", 111, None), ("end", 113, None)), self.get_expected_result(224, 113, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1004, (None, None), # (flag_str, value, pre_delay_ms) (("start", 1111, None), (None, 1112, None), (None, 1113, None), ("end", 1114, None)), self.get_expected_result(4450, 1114, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) threads[1].start() threads[3].start() time.sleep(1) threads[0].start() threads[2].start() for t in threads: t.join() self.check_deferred_exception() if _model_instances == 1: self.check_status(model_name, (1,), 4, 12) elif _model_instances == 2: self.check_status(model_name, (1,), 8, 12) elif _model_instances == 4: self.check_status(model_name, (1,), 12, 12) except InferenceServerException as ex: self.assertTrue(False, "unexpected error {}".format(ex)) def test_full_batch(self): # Test model instances together are configured with # total-batch-size 4. Send four equal-length sequences in # parallel and make sure they get completely batched into # batch-size 4 inferences. 
for trial in _trials: try: dtype = self.get_datatype(trial) model_name = tu.get_sequence_model_name(trial, dtype) protocol = "streaming" self.check_setup(model_name) # Need scheduler to wait for queue to contain all # inferences for both sequences. self.assertTrue("TRTSERVER_DELAY_SCHEDULER" in os.environ) self.assertEqual(int(os.environ["TRTSERVER_DELAY_SCHEDULER"]), 12) self.assertTrue("TRTSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ) self.assertEqual(int(os.environ["TRTSERVER_BACKLOG_DELAY_SCHEDULER"]), 0) threads = [] threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1001, (None, None), # (flag_str, value, pre_delay_ms) (("start", 1, None), (None, 2, None), ("end", 3, None)), self.get_expected_result(6, 3, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1002, (None, None), # (flag_str, value, pre_delay_ms) (("start", 11, None), (None, 12, None), ("end", 13, None)), self.get_expected_result(36, 13, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1003, (None, None), # (flag_str, value, pre_delay_ms) (("start", 111, None), (None, 112, None), ("end", 113, None)), self.get_expected_result(336, 113, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1004, (None, None), # (flag_str, value, pre_delay_ms) (("start", 1111, None), (None, 1112, None), ("end", 1113, None)), self.get_expected_result(3336, 1113, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) for t in threads: t.start() for t in threads: t.join() self.check_deferred_exception() self.check_status(model_name, (1,), 3 * _model_instances, 12) except InferenceServerException as ex: self.assertTrue(False, "unexpected error {}".format(ex)) def test_backlog(self): # Test model instances together are configured with # total-max-batch-size 4. Send 5 equal-length sequences in # parallel and make sure they get completely batched into # batch-size 4 inferences plus the 5th should go in the # backlog and then get handled once there is a free slot. for trial in _trials: try: protocol = "streaming" dtype = self.get_datatype(trial) model_name = tu.get_sequence_model_name(trial, dtype) self.check_setup(model_name) # Need scheduler to wait for queue to contain all # inferences for both sequences. 
self.assertTrue("TRTSERVER_DELAY_SCHEDULER" in os.environ) self.assertEqual(int(os.environ["TRTSERVER_DELAY_SCHEDULER"]), 12) self.assertTrue("TRTSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ) self.assertEqual(int(os.environ["TRTSERVER_BACKLOG_DELAY_SCHEDULER"]), 0) threads = [] threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1001, (None, None), # (flag_str, value, pre_delay_ms) (("start", 1, None), (None, 2, None), ("end", 3, None)), self.get_expected_result(6, 3, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1002, (None, None), # (flag_str, value, pre_delay_ms) (("start", 11, None), (None, 12, None), ("end", 13, None)), self.get_expected_result(36, 13, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1003, (None, None), # (flag_str, value, pre_delay_ms) (("start", 111, None), (None, 112, None), ("end", 113, None)), self.get_expected_result(336, 113, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1004, (None, None), # (flag_str, value, pre_delay_ms) (("start", 1111, None), (None, 1112, None), ("end", 1113, None)), self.get_expected_result(3336, 1113, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1005, (None, None), # (flag_str, value, pre_delay_ms) (("start", 11111, None), (None, 11112, None), ("end", 11113, None)), self.get_expected_result(33336, 11113, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) for t in threads: t.start() for t in threads: t.join() self.check_deferred_exception() self.check_status(model_name, (1,), (3 * _model_instances) + 3, 15) except InferenceServerException as ex: self.assertTrue(False, "unexpected error {}".format(ex)) def test_backlog_fill(self): # Test model instances together are configured with # total-max-batch-size 4. Send 4 sequences in parallel, two of # which are shorter. Send 2 additional sequences that should # go into backlog but should immediately fill into the short # sequences. # Only works with 1 model instance since otherwise an instance # can run ahead and handle more work than expected (leads to # intermittent failures) if _model_instances != 1: return for trial in _trials: try: protocol = "streaming" dtype = self.get_datatype(trial) model_name = tu.get_sequence_model_name(trial, dtype) self.check_setup(model_name) # Need scheduler to wait for queue to contain all # inferences for both sequences. 
self.assertTrue("TRTSERVER_DELAY_SCHEDULER" in os.environ) self.assertEqual(int(os.environ["TRTSERVER_DELAY_SCHEDULER"]), 10) self.assertTrue("TRTSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ) self.assertEqual(int(os.environ["TRTSERVER_BACKLOG_DELAY_SCHEDULER"]), 2) threads = [] threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1001, (None, None), # (flag_str, value, pre_delay_ms) (("start", 1, None), (None, 2, None), ("end", 3, None)), self.get_expected_result(6, 3, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1002, (None, None), # (flag_str, value, pre_delay_ms) (("start", 11, None), ("end", 13, None)), self.get_expected_result(24, 13, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1003, (None, None), # (flag_str, value, pre_delay_ms) (("start", 111, None), ("end", 113, None)), self.get_expected_result(224, 113, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1004, (None, None), # (flag_str, value, pre_delay_ms) (("start", 1111, None), (None, 1112, None), ("end", 1113, None)), self.get_expected_result(3336, 1113, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1005, (None, None), # (flag_str, value, pre_delay_ms) (("start,end", 11111, None),), self.get_expected_result(11111, 11111, trial, "start,end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1006, (None, None), # (flag_str, value, pre_delay_ms) (("start,end", 22222, None),), self.get_expected_result(22222, 22222, trial, "start,end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) threads[0].start() threads[1].start() threads[2].start() threads[3].start() time.sleep(2) threads[4].start() threads[5].start() for t in threads: t.join() self.check_deferred_exception() self.check_status(model_name, (1,), (3 * _model_instances), 12) except InferenceServerException as ex: self.assertTrue(False, "unexpected error {}".format(ex)) def test_backlog_fill_no_end(self): # Test model instances together are configured with # total-max-batch-size 4. Send 4 sequences in parallel, two of # which are shorter. Send 2 additional sequences that should # go into backlog but should immediately fill into the short # sequences. One of those sequences is filled before it gets # its end request. # Only works with 1 model instance since otherwise an instance # can run ahead and handle more work than expected (leads to # intermittent failures) if _model_instances != 1: return for trial in _trials: try: protocol = "streaming" dtype = self.get_datatype(trial) model_name = tu.get_sequence_model_name(trial, dtype) self.check_setup(model_name) # Need scheduler to wait for queue to contain all # inferences for both sequences. 
self.assertTrue("TRTSERVER_DELAY_SCHEDULER" in os.environ) self.assertEqual(int(os.environ["TRTSERVER_DELAY_SCHEDULER"]), 10) self.assertTrue("TRTSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ) self.assertEqual(int(os.environ["TRTSERVER_BACKLOG_DELAY_SCHEDULER"]), 3) threads = [] threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1001, (None, None), # (flag_str, value, pre_delay_ms) (("start", 1, None), (None, 2, None), ("end", 3, None)), self.get_expected_result(6, 3, trial, "end"), protocol), kwargs={'sequence_name' : "{}_{}".format(self._testMethodName, protocol)})) threads.append(threading.Thread( target=self.check_sequence_async, args=(trial, model_name, dtype, 1002, (None, None), # (flag_str, value, pre_delay_ms) (("start", 11, None), ("end", 13, None)), self.get_expected_result(24, 13, trial,
#!/usr/bin/env python3 # Copyright 2020 Canonical Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # For those usages not covered by the Apache License, Version 2.0 please # contact: <EMAIL> # # To get in touch with the maintainers, please contact: # <EMAIL> ## from typing import NoReturn import unittest import pod_spec class TestPodSpec(unittest.TestCase): """Pod spec unit tests.""" def test_make_pod_ports(self) -> NoReturn: """Testing make pod ports.""" port = 9999 expected_result = [ { "name": "nbi", "containerPort": port, "protocol": "TCP", } ] pod_ports = pod_spec._make_pod_ports(port) self.assertListEqual(expected_result, pod_ports) def test_make_pod_envconfig_without_keystone(self) -> NoReturn: """Teting make pod envconfig without Keystone.""" config = { "enable_test": False, "database_commonkey": "commonkey", "log_level": "DEBUG", "auth_backend": "internal", } relation_state = { "message_host": "kafka", "message_port": 9090, "database_uri": "mongodb://mongo", "prometheus_host": "prometheus", "prometheus_port": 9082, } expected_result = { "ALLOW_ANONYMOUS_LOGIN": "yes", "OSMNBI_SERVER_ENABLE_TEST": config["enable_test"], "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public", "OSMNBI_MESSAGE_HOST": relation_state["message_host"], "OSMNBI_MESSAGE_DRIVER": "kafka", "OSMNBI_MESSAGE_PORT": relation_state["message_port"], "OSMNBI_DATABASE_DRIVER": "mongo", "OSMNBI_DATABASE_URI": relation_state["database_uri"], "OSMNBI_DATABASE_COMMONKEY": config["database_commonkey"], "OSMNBI_STORAGE_DRIVER": "mongo", "OSMNBI_STORAGE_PATH": "/app/storage", "OSMNBI_STORAGE_COLLECTION": "files", "OSMNBI_STORAGE_URI": relation_state["database_uri"], "OSMNBI_PROMETHEUS_HOST": relation_state["prometheus_host"], "OSMNBI_PROMETHEUS_PORT": relation_state["prometheus_port"], "OSMNBI_LOG_LEVEL": config["log_level"], "OSMNBI_AUTHENTICATION_BACKEND": config["auth_backend"], } pod_envconfig = pod_spec._make_pod_envconfig(config, relation_state) self.assertDictEqual(expected_result, pod_envconfig) def test_make_pod_envconfig_with_keystone(self) -> NoReturn: """Teting make pod envconfig with Keystone.""" config = { "enable_test": False, "database_commonkey": "commonkey", "log_level": "DEBUG", "auth_backend": "keystone", } relation_state = { "message_host": "kafka", "message_port": 9090, "database_uri": "mongodb://mongo", "prometheus_host": "prometheus", "prometheus_port": 9082, "keystone_host": "keystone", "keystone_port": 5000, "keystone_user_domain_name": "user_domain", "keystone_project_domain_name": "project_domain", "keystone_username": "username", "keystone_password": "password", "keystone_service": "service", } expected_result = { "ALLOW_ANONYMOUS_LOGIN": "yes", "OSMNBI_SERVER_ENABLE_TEST": config["enable_test"], "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public", "OSMNBI_MESSAGE_HOST": relation_state["message_host"], "OSMNBI_MESSAGE_DRIVER": "kafka", "OSMNBI_MESSAGE_PORT": relation_state["message_port"], "OSMNBI_DATABASE_DRIVER": "mongo", "OSMNBI_DATABASE_URI": relation_state["database_uri"], "OSMNBI_DATABASE_COMMONKEY": 
config["database_commonkey"], "OSMNBI_STORAGE_DRIVER": "mongo", "OSMNBI_STORAGE_PATH": "/app/storage", "OSMNBI_STORAGE_COLLECTION": "files", "OSMNBI_STORAGE_URI": relation_state["database_uri"], "OSMNBI_PROMETHEUS_HOST": relation_state["prometheus_host"], "OSMNBI_PROMETHEUS_PORT": relation_state["prometheus_port"], "OSMNBI_LOG_LEVEL": config["log_level"], "OSMNBI_AUTHENTICATION_BACKEND": config["auth_backend"], "OSMNBI_AUTHENTICATION_AUTH_URL": relation_state["keystone_host"], "OSMNBI_AUTHENTICATION_AUTH_PORT": relation_state["keystone_port"], "OSMNBI_AUTHENTICATION_USER_DOMAIN_NAME": relation_state[ "keystone_user_domain_name" ], "OSMNBI_AUTHENTICATION_PROJECT_DOMAIN_NAME": relation_state[ "keystone_project_domain_name" ], "OSMNBI_AUTHENTICATION_SERVICE_USERNAME": relation_state[ "keystone_username" ], "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD": relation_state[ "keystone_password" ], "OSMNBI_AUTHENTICATION_SERVICE_PROJECT": relation_state["keystone_service"], } pod_envconfig = pod_spec._make_pod_envconfig(config, relation_state) self.assertDictEqual(expected_result, pod_envconfig) def test_make_pod_envconfig_wrong_auth_backend(self) -> NoReturn: """Teting make pod envconfig with wrong auth_backend.""" config = { "enable_test": False, "database_commonkey": "commonkey", "log_level": "DEBUG", "auth_backend": "kerberos", } relation_state = { "message_host": "kafka", "message_port": 9090, "database_uri": "mongodb://mongo", "prometheus_host": "prometheus", "prometheus_port": 9082, "keystone_host": "keystone", "keystone_port": 5000, "keystone_user_domain_name": "user_domain", "keystone_project_domain_name": "project_domain", "keystone_username": "username", "keystone_password": "password", "keystone_service": "service", } with self.assertRaises(ValueError) as exc: pod_spec._make_pod_envconfig(config, relation_state) self.assertTrue( "auth_backend needs to be either internal or keystone" in str(exc.exception) ) def test_make_pod_ingress_resources_without_site_url(self) -> NoReturn: """Testing make pod ingress resources without site_url.""" config = {"site_url": ""} app_name = "nbi" port = 9999 pod_ingress_resources = pod_spec._make_pod_ingress_resources( config, app_name, port ) self.assertIsNone(pod_ingress_resources) def test_make_pod_ingress_resources(self) -> NoReturn: """Testing make pod ingress resources.""" config = { "site_url": "http://nbi", "max_file_size": 0, "ingress_whitelist_source_range": "", } app_name = "nbi" port = 9999 expected_result = [ { "name": f"{app_name}-ingress", "annotations": { "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}", "nginx.ingress.kubernetes.io/backend-protocol": "HTTPS", "nginx.ingress.kubernetes.io/ssl-redirect": "false", }, "spec": { "rules": [ { "host": app_name, "http": { "paths": [ { "path": "/", "backend": { "serviceName": app_name, "servicePort": port, }, } ] }, } ] }, } ] pod_ingress_resources = pod_spec._make_pod_ingress_resources( config, app_name, port ) self.assertListEqual(expected_result, pod_ingress_resources) def test_make_pod_ingress_resources_with_whitelist_source_range(self) -> NoReturn: """Testing make pod ingress resources with whitelist_source_range.""" config = { "site_url": "http://nbi", "max_file_size": 0, "ingress_whitelist_source_range": "0.0.0.0/0", } app_name = "nbi" port = 9999 expected_result = [ { "name": f"{app_name}-ingress", "annotations": { "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}", "nginx.ingress.kubernetes.io/backend-protocol": "HTTPS", 
"nginx.ingress.kubernetes.io/ssl-redirect": "false", "nginx.ingress.kubernetes.io/whitelist-source-range": config[ "ingress_whitelist_source_range" ], }, "spec": { "rules": [ { "host": app_name, "http": { "paths": [ { "path": "/", "backend": { "serviceName": app_name, "servicePort": port, }, } ] }, } ] }, } ] pod_ingress_resources = pod_spec._make_pod_ingress_resources( config, app_name, port ) self.assertListEqual(expected_result, pod_ingress_resources) def test_make_pod_ingress_resources_with_https(self) -> NoReturn: """Testing make pod ingress resources with HTTPs.""" config = { "site_url": "https://nbi", "max_file_size": 0, "ingress_whitelist_source_range": "", "tls_secret_name": "", } app_name = "nbi" port = 9999 expected_result = [ { "name": f"{app_name}-ingress", "annotations": { "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}", "nginx.ingress.kubernetes.io/backend-protocol": "HTTPS", }, "spec": { "rules": [ { "host": app_name, "http": { "paths": [ { "path": "/", "backend": { "serviceName": app_name, "servicePort": port, }, } ] }, } ], "tls": [{"hosts": [app_name]}], }, } ] pod_ingress_resources = pod_spec._make_pod_ingress_resources( config, app_name, port ) self.assertListEqual(expected_result, pod_ingress_resources) def test_make_pod_ingress_resources_with_https_tls_secret_name(self) -> NoReturn: """Testing make pod ingress resources with HTTPs and TLS secret name.""" config = { "site_url": "https://nbi", "max_file_size": 0, "ingress_whitelist_source_range": "", "tls_secret_name": "secret_name", } app_name = "nbi" port = 9999 expected_result = [ { "name": f"{app_name}-ingress", "annotations": { "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}", "nginx.ingress.kubernetes.io/backend-protocol": "HTTPS", }, "spec": { "rules": [ { "host": app_name, "http": { "paths": [ { "path": "/", "backend": { "serviceName": app_name, "servicePort": port, }, } ] }, } ], "tls": [ {"hosts": [app_name], "secretName": config["tls_secret_name"]} ], }, } ] pod_ingress_resources = pod_spec._make_pod_ingress_resources( config, app_name, port ) self.assertListEqual(expected_result, pod_ingress_resources) def test_make_startup_probe(self) -> NoReturn: """Testing make startup probe.""" expected_result = { "exec": {"command": ["/usr/bin/pgrep python3"]}, "initialDelaySeconds": 60, "timeoutSeconds": 5, } startup_probe = pod_spec._make_startup_probe() self.assertDictEqual(expected_result, startup_probe) def test_make_readiness_probe(self) -> NoReturn: """Testing make readiness probe.""" port = 9999 expected_result = { "httpGet": { "path": "/osm/", "port": port, }, "initialDelaySeconds": 45, "timeoutSeconds": 5, } readiness_probe = pod_spec._make_readiness_probe(port) self.assertDictEqual(expected_result, readiness_probe) def test_make_liveness_probe(self) -> NoReturn: """Testing make liveness probe.""" port = 9999 expected_result = { "httpGet": { "path": "/osm/", "port": port, }, "initialDelaySeconds": 45, "timeoutSeconds": 5, } liveness_probe = pod_spec._make_liveness_probe(port) self.assertDictEqual(expected_result, liveness_probe) def test_make_pod_spec_without_image_info(self) -> NoReturn: """Testing make pod spec without image_info.""" image_info = None config = { "enable_test": False, "database_commonkey": "commonkey", "log_level": "DEBUG", "auth_backend": "internal", "site_url": "", } relation_state = { "message_host": "kafka", "message_port": 9090, "database_uri": "mongodb://mongo", "prometheus_host": "prometheus", "prometheus_port": 9082, } app_name = 
"nbi" port = 9999 spec = pod_spec.make_pod_spec( image_info, config, relation_state, app_name, port ) self.assertIsNone(spec) def test_make_pod_spec_without_config(self) -> NoReturn: """Testing make pod spec without config.""" image_info = {"upstream-source": "opensourcemano/nbi:8"} config = {} relation_state = { "message_host": "kafka", "message_port": 9090, "database_uri": "mongodb://mongo", "prometheus_host": "prometheus", "prometheus_port": 9082, } app_name = "nbi" port = 9999 with self.assertRaises(ValueError): pod_spec.make_pod_spec(image_info, config, relation_state, app_name, port) def test_make_pod_spec_without_relation_state(self) -> NoReturn: """Testing make pod spec without relation_state.""" image_info = {"upstream-source": "opensourcemano/nbi:8"} config = { "enable_test": False, "database_commonkey": "commonkey", "log_level": "DEBUG", "auth_backend": "internal", "site_url": "", } relation_state = {} app_name = "nbi" port = 9999 with self.assertRaises(ValueError): pod_spec.make_pod_spec(image_info, config, relation_state, app_name, port) def test_make_pod_spec(self) -> NoReturn: """Testing make pod spec.""" image_info = {"upstream-source": "opensourcemano/nbi:8"} config = { "enable_test": False, "database_commonkey": "commonkey", "log_level": "DEBUG", "auth_backend": "internal", "site_url": "", } relation_state = { "message_host": "kafka", "message_port": 9090, "database_uri": "mongodb://mongo", "prometheus_host": "prometheus", "prometheus_port": 9082, } app_name = "nbi" port = 9999 expected_result = { "version": 3, "containers": [ { "name": app_name, "imageDetails": image_info, "imagePullPolicy": "Always", "ports": [ { "name": "nbi", "containerPort": port, "protocol": "TCP", } ], "envConfig": { "ALLOW_ANONYMOUS_LOGIN": "yes", "OSMNBI_SERVER_ENABLE_TEST": config["enable_test"], "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public", "OSMNBI_MESSAGE_HOST": relation_state["message_host"], "OSMNBI_MESSAGE_DRIVER": "kafka", "OSMNBI_MESSAGE_PORT": relation_state["message_port"], "OSMNBI_DATABASE_DRIVER": "mongo", "OSMNBI_DATABASE_URI": relation_state["database_uri"], "OSMNBI_DATABASE_COMMONKEY": config["database_commonkey"], "OSMNBI_STORAGE_DRIVER": "mongo", "OSMNBI_STORAGE_PATH": "/app/storage", "OSMNBI_STORAGE_COLLECTION": "files", "OSMNBI_STORAGE_URI": relation_state["database_uri"], "OSMNBI_PROMETHEUS_HOST": relation_state["prometheus_host"], "OSMNBI_PROMETHEUS_PORT": relation_state["prometheus_port"], "OSMNBI_LOG_LEVEL": config["log_level"], "OSMNBI_AUTHENTICATION_BACKEND": config["auth_backend"], }, } ], "kubernetesResources": { "ingressResources": [], }, } spec = pod_spec.make_pod_spec( image_info, config, relation_state, app_name, port ) self.assertDictEqual(expected_result, spec) def test_make_pod_spec_with_keystone(self) -> NoReturn: """Testing make pod spec with keystone.""" image_info = {"upstream-source": "opensourcemano/nbi:8"} config = { "enable_test": False, "database_commonkey": "commonkey", "log_level": "DEBUG", "auth_backend": "keystone", "site_url": "", } relation_state = { "message_host": "kafka", "message_port": 9090, "database_uri": "mongodb://mongo", "prometheus_host": "prometheus", "prometheus_port": 9082, "keystone_host": "keystone", "keystone_port": 5000, "keystone_user_domain_name": "user_domain", "keystone_project_domain_name": "project_domain", "keystone_username": "username", "keystone_password": "password", "keystone_service": "service", } app_name = "nbi" port = 9999 expected_result = { "version": 3, "containers": [ { "name": app_name, "imageDetails": 
image_info, "imagePullPolicy": "Always", "ports": [ { "name": "nbi", "containerPort": port, "protocol": "TCP", } ], "envConfig": { "ALLOW_ANONYMOUS_LOGIN": "yes", "OSMNBI_SERVER_ENABLE_TEST": config["enable_test"], "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public", "OSMNBI_MESSAGE_HOST": relation_state["message_host"], "OSMNBI_MESSAGE_DRIVER": "kafka", "OSMNBI_MESSAGE_PORT": relation_state["message_port"], "OSMNBI_DATABASE_DRIVER": "mongo", "OSMNBI_DATABASE_URI": relation_state["database_uri"], "OSMNBI_DATABASE_COMMONKEY": config["database_commonkey"], "OSMNBI_STORAGE_DRIVER": "mongo", "OSMNBI_STORAGE_PATH": "/app/storage", "OSMNBI_STORAGE_COLLECTION": "files", "OSMNBI_STORAGE_URI": relation_state["database_uri"], "OSMNBI_PROMETHEUS_HOST": relation_state["prometheus_host"], "OSMNBI_PROMETHEUS_PORT": relation_state["prometheus_port"], "OSMNBI_LOG_LEVEL": config["log_level"], "OSMNBI_AUTHENTICATION_BACKEND": config["auth_backend"], "OSMNBI_AUTHENTICATION_AUTH_URL": relation_state[ "keystone_host" ], "OSMNBI_AUTHENTICATION_AUTH_PORT": relation_state[ "keystone_port" ], "OSMNBI_AUTHENTICATION_USER_DOMAIN_NAME": relation_state[ "keystone_user_domain_name" ], "OSMNBI_AUTHENTICATION_PROJECT_DOMAIN_NAME": relation_state[ "keystone_project_domain_name" ], "OSMNBI_AUTHENTICATION_SERVICE_USERNAME": relation_state[ "keystone_username" ], "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD": relation_state[
self.helper('git', 'checkout', '79b3762') self._apply_patches() @stage def configure(self): # The code is stored one folder down self.workdir = os.path.join(self.workdir, 'isis') super(isis, self).configure(other=['-Dpybindings=Off','-DJP2KFLAG=OFF','-DbuildTests=OFF']) #-DNinja class stereopipeline(GITPackage): src = 'https://github.com/NeoGeographyToolkit/StereoPipeline.git' def configure(self): # Skip config in fast mode if config file exists config_file = P.join(self.workdir, 'config.options') if self.fast and os.path.isfile(config_file): return self.helper('./autogen') use_env_flags = False # TODO: What is this? prefix = self.env['INSTALL_DIR'] installdir = prefix vw_build = prefix arch = self.arch write_asp_config(use_env_flags, prefix, installdir, vw_build, arch, geoid, config_file) super(stereopipeline, self).configure( other = ['docdir=%s/doc' % prefix], without = ['clapack', 'slapack', 'tcmalloc'], disable = ['pkg_paths_default', 'static', 'qt-qmake'], enable = ['debug=ignore', 'optimize=ignore'] ) @stage def compile(self, cwd=None): super(stereopipeline, self).compile(cwd) # Run unit tests. If the ISIS env vars are not set, # the ISIS-related tests will be skipped. # Make install must happen before 'make check', # otherwise the old installed library is linked. cmd = ('make', 'install') self.helper(*cmd) if self.fast or self.arch.os == 'osx' or int(self.env['SKIP_TESTS']) == 1: # The tests on the Mac do not compile, looks like a clang/gtest conflict. print("Skipping tests for OSX or in fast mode.") else: cmd = ('make', 'check') self.helper(*cmd) class visionworkbench(GITPackage): src = 'https://github.com/visionworkbench/visionworkbench.git' def __init__(self,env): super(visionworkbench,self).__init__(env) @stage def configure(self): # Skip config in fast mode if config file exists config_file = P.join(self.workdir, 'config.options') if self.fast and os.path.isfile(config_file): return self.helper('./autogen') arch = self.arch installdir = self.env['INSTALL_DIR'] prefix = installdir write_vw_config(prefix, installdir, arch, config_file) fix_install_paths(installdir, arch) # this is needed for Mac for libgeotiff super(visionworkbench, self).configure() @stage def compile(self, cwd=None): super(visionworkbench, self).compile(cwd) # Run unit tests # Make install must happen before 'make check', # otherwise the old installed library is linked. cmd = ('make', 'install') self.helper(*cmd) if self.fast or self.arch.os == 'osx' or int(self.env['SKIP_TESTS']) == 1: # The tests on the Mac do not even compile, looks like a clang/gtest conflict print("Skipping tests for OSX or in fast mode.") else: cmd = ('make', 'check') self.helper(*cmd) class lapack(CMakePackage): src = 'http://www.netlib.org/lapack/lapack-3.5.0.tgz' chksum = '5870081889bf5d15fd977993daab29cf3c5ea970' def configure(self): LDFLAGS_ORIG = self.env['LDFLAGS'] LDFLAGS_CURR = [] for i in self.env['LDFLAGS'].split(' '): if not i.startswith('-L'): LDFLAGS_CURR.append(i); self.env['LDFLAGS'] = ' '.join(LDFLAGS_CURR) super(lapack, self).configure( other=['-DBUILD_SHARED_LIBS=ON','-DBUILD_STATIC_LIBS=OFF','-DCMAKE_Fortran_FLAGS=-fPIC'] ) self.env['LDFLAGS'] = LDFLAGS_ORIG class boost(Package): version = '1_59' # variable is used in class liblas, libnabo, etc. 
src = 'http://downloads.sourceforge.net/boost/boost_' + version + '_0.tar.bz2' chksum = 'b94de47108b2cdb0f931833a7a9834c2dd3ca46e' patches = 'patches/boost' def __init__(self, env): super(boost, self).__init__(env) self.env['NO_BZIP2'] = '1' #self.env['NO_ZLIB'] = '1' if self.arch.os == 'osx': self.env['PATH'] = '/usr/bin:' + self.env['PATH'] # to use the right libtool @stage def configure(self): with file(P.join(self.workdir, 'user-config.jam'), 'w') as f: if self.arch.os == 'linux': toolkit = 'gcc' elif self.arch.os == 'osx': toolkit = 'darwin' # print('variant myrelease : release : <optimization>none <debug-symbols>none ;', file=f) # print('variant mydebug : debug : <optimization>none ;', file=f) args = [toolkit] + list(self.env.get(i, ' ') for i in ('CXX', 'CXXFLAGS', 'LDFLAGS')) print('using %s : : %s : <cxxflags>"%s" <linkflags>"%s -ldl" ;' % tuple(args), file=f) print('using zlib : 1.2.8 : <include>%s <search>%s ;' % (P.join(self.env['INSTALL_DIR'],'include'),P.join(self.env['INSTALL_DIR'],'lib')), file=f) print('option.set keep-going : false ;', file=f) # TODO: WRONG. There can be other things besides -j4 in MAKEOPTS @stage def compile(self): self.env['BOOST_ROOT'] = self.workdir self.helper('./bootstrap.sh') os.unlink(P.join(self.workdir, 'project-config.jam')) cmd = ['./bjam'] if 'MAKEOPTS' in self.env: cmd += (self.env['MAKEOPTS'],) self.args = [ '-q', '--user-config=%s/user-config.jam' % self.workdir, '--prefix=%(INSTALL_DIR)s' % self.env, '--layout=versioned', 'threading=multi', 'variant=release', 'link=shared', 'runtime-link=shared', '--without-mpi', '--without-python', '--without-wave', 'stage', '-d+2' # Show commands as they are executed ] cmd += self.args self.helper(*cmd) @stage def install(self): self.env['BOOST_ROOT'] = self.workdir cmd = ['./bjam'] + self.args + ['install'] self.helper(*cmd) class gsl(Package): src = 'ftp://ftp.gnu.org/gnu/gsl/gsl-1.15.tar.gz', chksum = 'd914f84b39a5274b0a589d9b83a66f44cd17ca8e', def configure(self): super(gsl, self).configure(disable=('static')) class geos(Package): src = 'http://download.osgeo.org/geos/geos-3.5.1.tar.bz2' chksum = '83373542335c2f20c22d5420ba01d99f645f0c61' def __init__(self, env): super(geos, self).__init__(env) if self.arch.os == 'linux': # Bugfix for SuSE, skip using ccache self.env['CXX']='g++' self.env['CC']='gcc' def configure(self): super(geos, self).configure(disable=('python', 'ruby', 'static')) class superlu(Package): src = ['http://sources.gentoo.org/cgi-bin/viewvc.cgi/gentoo-x86/sci-libs/superlu/files/superlu-4.3-autotools.patch','http://crd-legacy.lbl.gov/~xiaoye/SuperLU/superlu_4.3.tar.gz'] chksum = ['c9cc1c9a7aceef81530c73eab7f599d652c1fddd','d2863610d8c545d250ffd020b8e74dc667d7cbdd'] def __init__(self,env): super(superlu,self).__init__(env) self.patches = [P.join(env['DOWNLOAD_DIR'], 'superlu-4.3-autotools.patch'), P.join(self.pkgdir,'patches','superlu','finish_autotools.patch')] @stage def configure(self): self.helper('mkdir', 'm4') self.helper('autoreconf', '-fvi') blas = '' if self.arch.os == "osx": blas = '"-framework vecLib"' else: blas = glob(P.join(self.env['INSTALL_DIR'],'lib','libblas.so*'))[0] if self.arch.os == 'linux': # This is a bugfix, that took long to investigate. For some versions of Linux, # the FLIBS in configure contains the -R option, which confuses the compiler. # This value is determined dynamically. So we really have no choice but # to edit configure to modify this value before being used. 
line_in = 'FLIBS="$ac_cv_f77_libs"' line_out = 'FLIBS=$(echo "$ac_cv_f77_libs" | perl -pi -e "s/ -R/ -Wl,-R/g")' configure_file = P.join(self.workdir, 'configure') replace_line_in_file(configure_file, line_in, line_out) super(superlu,self).configure(with_=('blas=%s') % blas, disable=('static')) @stage def install(self): super(superlu, self).install() # Need to comment out a few lines in the include files to get ISIS to compile with clang!! file_list = ['slu_cdefs.h', 'slu_ddefs.h', 'slu_sdefs.h', 'slu_zdefs.h'] target_list = ['extern void countnz', 'extern void ilu_countnz', 'extern void fixupL', 'extern void PrintPerf', 'extern void check_tempv', 'double, double, double ', # Hit the lines following the PrintPerf line 'complex, complex, complex ', 'float, float, float ', 'doublecomplex, doublecomplex, doublecomplex ' ] # Use sed to add // before every instance of these targets in these files for f in file_list: full_path = P.join(self.env['INSTALL_DIR'],'include', 'superlu', f) for target in target_list: cmd = ['sed', '-i', '-e', "s#"+target+"#//"+target+"#g", full_path] self.helper(*cmd) class gmm(Package): src = 'http://download-mirror.savannah.gnu.org/releases/getfem/stable/gmm-4.2.tar.gz' chksum = '3555d5a5abdd525fe6b86db33428604d74f6747c' patches = 'patches/gmm' @stage def configure(self): self.helper('autoreconf', '-fvi') blas = '' if self.arch.os == "osx": blas = '"-framework vecLib"' else: blas = glob(P.join(self.env['INSTALL_DIR'],'lib','libblas.so*'))[0] super(gmm,self).configure(with_=('blas=%s') % blas) class xercesc(Package): src = 'http://archive.apache.org/dist/xerces/c/3/sources/xerces-c-3.1.3.tar.xz' chksum = '44aa39f8b9ccbfcaf58771634761cbea1084e8f1' @stage def configure(self): super(xercesc,self).configure(with_=['curl=%s' % glob(P.join(self.env['INSTALL_DIR'],'lib','libcurl.*'))[0], 'icu=no'], disable = ['static', 'msgloader-iconv', 'msgloader-icu', 'network']) class qt(Package): src = 'http://download.qt.io/official_releases/qt/5.6/5.6.3/single/qt-everywhere-opensource-src-5.6.3.tar.xz' chksum = 'ca7a752bff079337876ca6ab70b0dec17b47e70f' #SHA-1 Hash patches = 'patches/qt' #patch_level = '-p0' @stage def configure(self): # Modify the min OSX version config_path = self.workdir + '/qtbase/mkspecs/macx-clang/qmake.conf' self.helper('sed', '-ibak', '-e', 's/QMAKE_MACOSX_DEPLOYMENT_TARGET = 10.7/QMAKE_MACOSX_DEPLOYMENT_TARGET = 10.12/g', config_path) ## The default confs override our compiler choices. cmd = ("./configure -c++std c++11 -opensource -confirm-license -release -nomake tools -nomake examples " "-prefix %(INSTALL_DIR)s " "-no-openssl -no-libjpeg -no-libpng -no-cups -no-openvg -no-sql-psql -no-pulseaudio " "-skip qt3d " "-skip qtactiveqt " "-skip qtandroidextras " "-skip qtconnectivity " "-skip qtlocation " "-skip qtmacextras " "-skip qtquickcontrols " "-skip qtquickcontrols2 " "-skip qtsensors " "-skip qtserialbus " "-skip qtserialport " "-skip qtwayland " "-skip qtwebchannel " "-skip qtwebengine " "-skip qtwebview " "-skip qtwinextras " ) % self.env # TODO: Make sure static libraries are not built! Causes linker error in ASP in OSX. args = cmd.split() if self.arch.os == 'osx': args.append('-no-framework') args.append('-no-xcb') args.append('-no-pch') # Required to avoid weird redefinition errors, but slows down compilation. 
args.extend(['-skip', 'x11extras']) args.extend(['-platform', 'macx-clang']) else: args.append('-qt-xcb') # Not needed on OSX self.helper(*args) if self.arch.os == 'osx': # Create a script to do a mass edit of all .pro files # to make them compile. Add some flags, and the -lc++ library. # Then execute the script. script = self.workdir + '/edit_pro.sh' print("script is ", script) f = open(script, 'w') f.write('#!/bin/bash\n' + \ 'cd ' + self.workdir + '\n' + \ 'for f in $(find . -name \*pro); do\n' + \ ' echo Editing $f\n' + \ ' cat $f > tmp.txt\n' + \ ' echo "CONFIG += c++11" > $f\n' + \ ' echo "QMAKE_CXXFLAGS += -stdlib=libc++ -std=c++11" >> $f\n' + \ ' echo "QMAKE_LDLAGS += -stdlib=libc++ -std=c++11" >> $f\n' + \ ' cat tmp.txt >> $f\n' + \ ' perl -pi -e \'s#(QMAKE_LIBS\s+\+=\s)#$1 -lc++ #g\' $f\n' + \ 'done\n') f.close() cmd = ['chmod', 'u+x', script] self.helper(*cmd) cmd=[script] self.helper(*cmd) @stage def install(self): super(qt, self).install() # Wipe some odd things in the .la file which I could not # figure out where they are coming from if self.arch.os == 'osx': cmd=['perl', '-pi', '-e', 's#-framework\s*(Security|Foundation|ApplicationServices' + \ '|IOKit|DiskArbitration)##g'] + \ glob(P.join(self.env['INSTALL_DIR'], 'lib/', '*Qt*.la')) self.helper(*cmd) # Add a Prefix entry to INSTALL_DIR/bin/qt.conf so that qmake # finds the correct QT install location! config_path = os.path.join(self.env['INSTALL_DIR'], 'bin/qt.conf') print(config_path) with open(config_path, "w") as f: f.write('[Paths]\n') f.write('Plugins=../lib/plugins/\n') f.write('Prefix='+self.env['INSTALL_DIR']+'\n') class qwt(Package): src = 'http://downloads.sourceforge.net/qwt/qwt-6.1.3.tar.bz2', chksum = '90ec21bc42f7fae270482e1a0df3bc79cb10e5c7', patches = 'patches/qwt' def configure(self): installDir = self.env['INSTALL_DIR'] #
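# The superlu configure step above calls replace_line_in_file(), which is
# defined elsewhere in this build system. A minimal sketch of what such a
# helper could look like (illustrative only; the real implementation may
# differ):

def replace_line_in_file(path, line_in, line_out):
    """Rewrite `path`, swapping every exact occurrence of line_in for line_out."""
    with open(path, 'r') as f:
        contents = f.read()
    with open(path, 'w') as f:
        f.write(contents.replace(line_in, line_out))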
<filename>build/lib/WORC/classification/SearchCV.py #!/usr/bin/env python # Copyright 2016-2019 Biomedical Imaging Group Rotterdam, Departments of # Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from sklearn.base import BaseEstimator, is_classifier, clone from sklearn.base import MetaEstimatorMixin from sklearn.exceptions import NotFittedError from sklearn.utils.metaestimators import if_delegate_has_method from sklearn.utils.validation import indexable, check_is_fitted from WORC.classification.metrics import check_scoring from sklearn.model_selection._split import check_cv from scipy.stats import rankdata from sklearn.externals import six from sklearn.utils.fixes import MaskedArray from sklearn.model_selection._search import _CVScoreTuple, ParameterSampler from sklearn.model_selection._search import ParameterGrid, _check_param_grid from abc import ABCMeta, abstractmethod from collections import Sized, defaultdict import numpy as np from functools import partial import warnings import os import random import string import fastr from fastr.api import ResourceLimit from joblib import Parallel, delayed from WORC.classification.fitandscore import fit_and_score from WORC.classification.fitandscore import delete_nonestimator_parameters import WORC.addexceptions as WORCexceptions import pandas as pd import json import glob from itertools import islice import shutil from sklearn.metrics import f1_score, roc_auc_score, mean_squared_error from sklearn.metrics import accuracy_score from sklearn.multiclass import OneVsRestClassifier from WORC.classification.estimators import RankedSVM from WORC.classification import construct_classifier as cc def rms_score(truth, prediction): ''' Root-mean-square-error metric''' return np.sqrt(mean_squared_error(truth, prediction)) def sar_score(truth, prediction): ''' SAR metric from Caruana et al. 2004''' ROC = roc_auc_score(truth, prediction) # Convert score to binaries first for num in range(0, len(prediction)): if prediction[num] >= 0.5: prediction[num] = 1 else: prediction[num] = 0 ACC = accuracy_score(truth, prediction) RMS = rms_score(truth, prediction) SAR = (ACC + ROC + (1 - RMS))/3 return SAR def chunksdict(data, SIZE): '''Split a dictionary in equal parts of certain slice''' it = iter(data) for i in xrange(0, len(data), SIZE): yield {k: data[k] for k in islice(it, SIZE)} def chunks(l, n): """Yield successive n-sized chunks from l.""" for i in range(0, len(l), n): yield l[i:i + n] class Ensemble(six.with_metaclass(ABCMeta, BaseEstimator, MetaEstimatorMixin)): """Ensemble of BaseSearchCV Estimators.""" # @abstractmethod def __init__(self, estimators): self.estimators = estimators self.n_estimators = len(estimators) def predict(self, X): """Call predict on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. 
""" self.estimators[0]._check_is_fitted('predict') # NOTE: Check if we are dealing with multilabel if type(self.estimators[0].best_estimator_) == OneVsRestClassifier: # Multilabel nlabels = self.estimators[0].predict(X).shape[1] outcome = np.zeros((self.n_estimators, len(X), nlabels)) for num, est in enumerate(self.estimators): if hasattr(est, 'predict_proba'): # BUG: SVM kernel can be wrong type if hasattr(est.best_estimator_, 'kernel'): est.best_estimator_.kernel = str(est.best_estimator_.kernel) outcome[num, :, :] = est.predict_proba(X)[:, 1] else: outcome[num, :, :] = est.predict(X) outcome = np.squeeze(np.mean(outcome, axis=0)) # NOTE: Binarize specifically for multiclass for i in range(0, outcome.shape[0]): label = np.argmax(outcome[i, :]) outcome[i, :] = np.zeros(outcome.shape[1]) outcome[i, label] = 1 else: # Singlelabel outcome = np.zeros((self.n_estimators, len(X))) for num, est in enumerate(self.estimators): if hasattr(est, 'predict_proba'): # BUG: SVM kernel can be wrong type if hasattr(est.best_estimator_, 'kernel'): est.best_estimator_.kernel = str(est.best_estimator_.kernel) outcome[num, :] = est.predict_proba(X)[:, 1] else: outcome[num, :] = est.predict(X) outcome = np.squeeze(np.mean(outcome, axis=0)) # Binarize isclassifier = is_classifier(est.best_estimator_) if isclassifier: outcome[outcome >= 0.5] = 1 outcome[outcome < 0.5] = 0 return outcome def predict_proba(self, X): """Call predict_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_proba``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self.estimators[0]._check_is_fitted('predict_proba') # For probabilities, we get both a class0 and a class1 score outcome = np.zeros((len(X), 2)) outcome_class1 = np.zeros((self.n_estimators, len(X))) outcome_class2 = np.zeros((self.n_estimators, len(X))) for num, est in enumerate(self.estimators): # BUG: SVM kernel can be wrong type if hasattr(est.best_estimator_, 'kernel'): est.best_estimator_.kernel = str(est.best_estimator_.kernel) outcome_class1[num, :] = est.predict_proba(X)[:, 0] outcome_class2[num, :] = est.predict_proba(X)[:, 1] outcome[:, 0] = np.squeeze(np.mean(outcome_class1, axis=0)) outcome[:, 1] = np.squeeze(np.mean(outcome_class2, axis=0)) return outcome def predict_log_proba(self, X): """Call predict_log_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_log_proba``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self.estimators[0]._check_is_fitted('predict_log_proba') outcome = np.zeros((self.n_estimators, len(X))) for num, est in enumerate(self.estimators): outcome[num, :] = est.predict_log_proba(X) outcome = np.squeeze(np.mean(outcome, axis=0)) return outcome def decision_function(self, X): """Call decision_function on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``decision_function``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. 
""" self.estimators[0]._check_is_fitted('decision_function') # NOTE: Check if we are dealing with multilabel if type(self.estimators[0].best_estimator_) == OneVsRestClassifier: # Multilabel nlabels = self.estimators[0].decision_function(X).shape[1] outcome = np.zeros((self.n_estimators, len(X), nlabels)) for num, est in enumerate(self.estimators): outcome[num, :, :] = est.decision_function(X) outcome = np.squeeze(np.mean(outcome, axis=0)) else: # Singlelabel outcome = np.zeros((self.n_estimators, len(X))) for num, est in enumerate(self.estimators): outcome[num, :] = est.decision_function(X) outcome = np.squeeze(np.mean(outcome, axis=0)) return outcome def transform(self, X): """Call transform on the estimator with the best found parameters. Only available if the underlying estimator supports ``transform`` and ``refit=True``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self.estimators[0]._check_is_fitted('transform') outcome = np.zeros((self.n_estimators, len(X))) for num, est in enumerate(self.estimators): outcome[num, :] = est.transform(X) outcome = np.squeeze(np.mean(outcome, axis=0)) return outcome def inverse_transform(self, Xt): """Call inverse_transform on the estimator with the best found params. Only available if the underlying estimator implements ``inverse_transform`` and ``refit=True``. Parameters ----------- Xt : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self.estimators[0]._check_is_fitted('inverse_transform') outcome = np.zeros((self.n_estimators, len(Xt))) for num, est in enumerate(self.estimators): outcome[num, :] = est.transform(Xt) outcome = np.squeeze(np.mean(outcome, axis=0)) return outcome class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator, MetaEstimatorMixin)): """Base class for hyper parameter search with cross-validation.""" @abstractmethod def __init__(self, param_distributions={}, n_iter=10, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', random_state=None, error_score='raise', return_train_score=True, n_jobspercore=100, maxlen=100, fastr_plugin=None): # Added for fastr and joblib executions self.param_distributions = param_distributions self.n_iter = n_iter self.n_jobspercore = n_jobspercore self.random_state = random_state self.ensemble = list() self.fastr_plugin = fastr_plugin # Below are the defaults from sklearn self.scoring = scoring self.n_jobs = n_jobs self.fit_params = fit_params if fit_params is not None else {} self.iid = iid self.refit = refit self.cv = cv self.verbose = verbose self.pre_dispatch = pre_dispatch self.error_score = error_score self.return_train_score = return_train_score self.maxlen = maxlen @property def _estimator_type(self): return self.estimator._estimator_type def score(self, X, y=None): """Returns the score on the given data, if the estimator has been refit. This uses the score defined by ``scoring`` where provided, and the ``best_estimator_.score`` method otherwise. Parameters ---------- X : array-like, shape = [n_samples, n_features] Input data, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. 
Returns ------- score : float """ if self.scorer_ is None: raise ValueError("No score function explicitly defined, " "and the estimator doesn't provide one %s" % self.best_estimator_) X, y = self.preprocess(X, y) return self.scorer_(self.best_estimator_, X, y) def _check_is_fitted(self, method_name): if not self.refit: raise NotFittedError(('This GridSearchCV instance was initialized ' 'with refit=False. %s is ' 'available only after refitting on the best ' 'parameters. ') % method_name) else: check_is_fitted(self, 'best_estimator_') @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def predict(self, X): """Call predict on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self._check_is_fitted('predict') if self.ensemble: return self.ensemble.predict(X) else: X, _ = self.preprocess(X) return self.best_estimator_.predict(X) @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def predict_proba(self, X): """Call predict_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_proba``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self._check_is_fitted('predict_proba') # BUG: kernel sometimes saved as unicode # BUG: SVM kernel can be wrong type if hasattr(self.best_estimator_, 'kernel'): self.best_estimator_.kernel = str(self.best_estimator_.kernel) if self.ensemble: return self.ensemble.predict_proba(X) else: X, _ =
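# Note: chunksdict() defined earlier in this module iterates with xrange,
# which does not exist in Python 3 even though the file otherwise targets it.
# A Python 3 equivalent of the same slicing behaviour would be (sketch only,
# the function name is hypothetical):

from itertools import islice

def chunksdict_py3(data, size):
    """Split a dictionary into successive chunks of at most `size` items."""
    it = iter(data)
    for _ in range(0, len(data), size):
        yield {k: data[k] for k in islice(it, size)}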
dtype=np.int32) gt_actions[ix, :] = np.zeros((self.num_actions), dtype=np.int32) for aid in np.argwhere(tmp_action == 1): # loop 26 actions # import ipdb # ipdb.set_trace() for j, rid in enumerate(self.roles[aid[0]]): if rid == 'agent': continue else: # tmp_role_id[aid[0]] if np.all(tmp_role_id[aid[0]] == -1): continue for obj_idx in tmp_role_id[aid[0]]: if obj_idx == -1: continue else: action_name = self.actions[aid[0]] + '_' + rid if action_name not in label_map: continue if label_map[action_name] not in self.l_map: continue verb_id = self.l_map[label_map[action_name]] obj_cls = self.json_category_id_to_contiguous_id[valid_objs[obj_idx]['category_id']] # import ipdb # ipdb.set_trace() # if (verb_id, obj_cls) not in self.set_list and (verb_id in self.verb_trans and self.verb_trans[verb_id], obj_cls) not in self.set_list: if (verb_id, obj_cls) not in self.set_list and (verb_id not in self.verb_trans or ((self.verb_trans[verb_id], obj_cls) not in self.set_list)): unseen_labels.append((verb_id, obj_cls)) continue counts += 1 if (verb_id, obj_cls) in self.set_list: action_id = self.set_list.index((verb_id, obj_cls)) seen_labels.append((verb_id, obj_cls)) else: action_id = self.set_list.index((self.verb_trans[verb_id], obj_cls)) seen_labels.append((self.verb_trans[verb_id], obj_cls)) gt_actions[ix, action_id] = 1 role_id[action_id] = obj_idx gt_role_id[ix, :] = role_id entry['boxes'] = np.append(entry['boxes'], boxes, axis=0) entry['gt_classes'] = np.append(entry['gt_classes'], gt_classes) entry['is_crowd'] = np.append(entry['is_crowd'], is_crowd) entry['gt_actions'] = np.append(entry['gt_actions'], gt_actions, axis=0) entry['gt_role_id'] = np.append(entry['gt_role_id'], gt_role_id, axis=0) return unseen_labels, seen_labels, counts def _get_vsrl_data(self, ann_id, ann_ids, objs, num_actions): """ Get VSRL data for ann_id.""" action_id = -np.ones((num_actions), dtype=np.int32) role_id = -np.ones((num_actions, 2), dtype=np.int32) # check if ann_id in vcoco annotations in_vcoco = np.where(self.VCOCO[0]['ann_id'] == ann_id)[0] if in_vcoco.size > 0: action_id[:] = 0 role_id[:] = -1 else: return action_id, role_id for i, x in enumerate(self.VCOCO): assert x['action_name'] == self.actions[i] has_label = np.where(np.logical_and(x['ann_id'] == ann_id, x['label'] == 1))[0] if has_label.size > 0: action_id[i] = 1 assert has_label.size == 1 rids = x['role_object_id'][has_label] assert rids[0, 0] == ann_id for j in range(1, rids.shape[1]): if rids[0, j] == 0: # no role continue aid = np.where(ann_ids == rids[0, j])[0] assert aid.size > 0 role_id[i, j - 1] = aid return action_id, role_id def _collect_detections_for_image(self, dets, image_id): agents = np.empty((0, 4 + self.num_actions), dtype=np.float32) # 4 + 26 = 30 roles = np.empty((0, 5 * self.num_actions, 2), dtype=np.float32) # (5 * 26), 2 for det in dets: # loop all detection instance # print(det.keys()) if det['image_id'] == image_id:# might be several this_agent = np.zeros((1, 4 + self.num_actions), dtype=np.float32) this_role = np.zeros((1, 5 * self.num_actions, 2), dtype=np.float32) this_agent[0, :4] = det['person_box'] for aid in range(self.num_actions): # loop 26 actions for j, rid in enumerate(self.roles[aid]): if rid == 'agent': #if aid == 10: # this_agent[0, 4 + aid] = det['talk_' + rid] #if aid == 16: # this_agent[0, 4 + aid] = det['work_' + rid] #if (aid != 10) and (aid != 16): this_agent[0, 4 + aid] = det[self.actions[aid] + '_' + rid] else: this_role[0, 5 * aid: 5 * aid + 5, j-1] = det[self.actions[aid] + '_' + rid] agents = 
np.concatenate((agents, this_agent), axis=0) roles = np.concatenate((roles, this_role), axis=0) return agents, roles def _collect_detections_for_image1(self, dets, image_id): agents = np.empty((0, 4 + self.num_actions), dtype=np.float32) # 4 + 26 = 30 roles = np.empty((0, 5 * self.num_actions), dtype=np.float32) # (5 * 26), 2 for det in dets: # loop all detection instance # print(det.keys()) if det['image_id'] == image_id:# might be several this_agent = np.zeros((1, 4 + self.num_actions), dtype=np.float32) this_role = np.zeros((1, 5 * self.num_actions), dtype=np.float32) this_agent[0, :4] = det['person_box'] for aid in range(self.num_actions): # loop 222 actions # for j, rid in enumerate(self.roles[self.set_list[aid]]): # if rid == 'agent': # #if aid == 10: # # this_agent[0, 4 + aid] = det['talk_' + rid] # #if aid == 16: # # this_agent[0, 4 + aid] = det['work_' + rid] # #if (aid != 10) and (aid != 16): # # this_agent[0, 4 + aid] = 0 # else: this_role[0, 5 * aid: 5 * aid + 5] = det[aid] agents = np.concatenate((agents, this_agent), axis=0) roles = np.concatenate((roles, this_role), axis=0) return agents, roles def _do_eval(self, detections_file, ovr_thresh=0.5): # self._do_agent_eval(vcocodb, detections_file, ovr_thresh=ovr_thresh) self._do_role_eval(self.vcocodb, detections_file, ovr_thresh=ovr_thresh, eval_type='scenario_1') # self._do_role_eval(vcocodb, detections_file, ovr_thresh=ovr_thresh, eval_type='scenario_2') def _do_role_eval(self, vcocodb, detections_file, ovr_thresh=0.5, eval_type='scenario_1'): with open(detections_file, 'rb') as f: dets = pickle.load(f) tp = [[] for a in range(self.num_actions)] fp = [[] for a in range(self.num_actions)] sc = [[] for a in range(self.num_actions)] npos = np.zeros((self.num_actions), dtype=np.float32) for i in range(len(vcocodb)): image_id = vcocodb[i]['id'] gt_inds = np.where(vcocodb[i]['gt_classes'] == 1)[0] # person boxes gt_boxes = vcocodb[i]['boxes'][gt_inds] gt_actions = vcocodb[i]['gt_actions'][gt_inds] # some peorson instances don't have annotated actions # we ignore those instances ignore = np.any(gt_actions == -1, axis=1) assert np.all(gt_actions[np.where(ignore==True)[0]]==-1) for aid in range(self.num_actions): npos[aid] += np.sum(gt_actions[:, aid] == 1) pred_agents, pred_roles = self._collect_detections_for_image1(dets, image_id) for aid in range(self.num_actions): # if len(self.roles[aid])<2: # if action has no role, then no role AP computed # continue for rid in range(1): # keep track of detected instances for each action for each role covered = np.zeros((gt_boxes.shape[0]), dtype=np.bool) # get gt roles for action and role gt_role_inds = vcocodb[i]['gt_role_id'][gt_inds, aid] gt_roles = -np.ones_like(gt_boxes) for j in range(gt_boxes.shape[0]): if gt_role_inds[j] > -1: gt_roles[j] = vcocodb[i]['boxes'][gt_role_inds[j]] agent_boxes = pred_agents[:, :4] role_boxes = pred_roles[:, 5 * aid: 5 * aid + 4] agent_scores = pred_roles[:, 5 * aid + 4] valid = np.where(np.isnan(agent_scores) == False)[0] #valid = np.where(agent_scores != 0)[0] agent_scores = agent_scores[valid] agent_boxes = agent_boxes[valid, :] role_boxes = role_boxes[valid, :] idx = agent_scores.argsort()[::-1] for j in idx: pred_box = agent_boxes[j, :] overlaps = get_overlap(gt_boxes, pred_box) # matching happens based on the person jmax = overlaps.argmax() ovmax = overlaps.max() # if matched with an instance with no annotations # continue if ignore[jmax]: continue # overlap between predicted role and gt role if np.all(gt_roles[jmax, :] == -1): # if no gt role if 
eval_type == 'scenario_1': if np.all(role_boxes[j, :] == 0.0) or np.all(np.isnan(role_boxes[j, :])): # if no role is predicted, mark it as correct role overlap ov_role = 1.0 else: # if a role is predicted, mark it as false ov_role = 0.0 elif eval_type == 'scenario_2': # if no gt role, role prediction is always correct, irrespective of the actual predition ov_role = 1.0 else: raise ValueError('Unknown eval type') else: ov_role = get_overlap(gt_roles[jmax, :].reshape((1, 4)), role_boxes[j, :]) is_true_action = (gt_actions[jmax, aid] == 1) sc[aid].append(agent_scores[j]) # print(ovmax, ov_role, gt_roles[jmax]) # import ipdb # ipdb.set_trace() if is_true_action and (ovmax>=ovr_thresh) and (ov_role>=ovr_thresh): if covered[jmax]: fp[aid].append(1) tp[aid].append(0) else: fp[aid].append(0) tp[aid].append(1) covered[jmax] = True else: fp[aid].append(1) tp[aid].append(0) # compute ap for each action role_ap = np.zeros((self.num_actions), dtype=np.float32) role_ap[:] = np.nan for aid in range(self.num_actions): # if len(self.roles[aid])<2: # continue a_fp = np.array(fp[aid], dtype=np.float32) a_tp = np.array(tp[aid], dtype=np.float32) a_sc = np.array(sc[aid], dtype=np.float32) # sort in descending score order idx = a_sc.argsort()[::-1] a_fp = a_fp[idx] a_tp = a_tp[idx] a_sc = a_sc[idx] a_fp = np.cumsum(a_fp) a_tp = np.cumsum(a_tp) if npos[aid] == 0: rec = np.zeros(a_tp.shape, np.float32) else: rec = a_tp / float(npos[aid]) #check assert(np.amax(rec) <= 1), rec prec = a_tp / np.maximum(a_tp + a_fp, np.finfo(np.float64).eps) role_ap[aid] = voc_ap(rec, prec) print('---------Reporting Role AP (%)------------------') for aid in range(self.num_actions): # if len(self.roles[aid])<2: continue # for rid in range(len(self.roles[aid])-1): print('{: >23}: AP = {:0.2f} (#pos = {:d})'.format(aid, role_ap[aid]*100.0, int(npos[aid]))) nonrare = np.argwhere(self.label_nums > 10) # non rare rare = np.argwhere(self.label_nums <= 10) print('Average Role [%s] AP = %.2f'%(eval_type, np.nanmean(role_ap) * 100.00)) print('Average Role [%s] nonrare = %.2f' % (eval_type, np.nanmean(role_ap[nonrare]) * 100.00)) print('Average Role [%s] rare = %.2f' % (eval_type, np.nanmean(role_ap[rare]) * 100.00)) print('---------------------------------------------') # print('Average Role [%s] AP = %.2f, omitting the action "point"'%(eval_type, (np.nanmean(role_ap) * 25 - role_ap[-3][0]) / 24 * 100.00)) print('---------------------------------------------') model_name = detections_file[len(cfg.LOCAL_DATA + "/Results/"):] iter_str = model_name.split('_')[0] iter_str = iter_str.replace('/', '') model_name = model_name[len(iter_str)+2:] model_name = model_name.replace('.pkl', '') f = open(cfg.LOCAL_DATA + '/coco_csv/{}_{}.csv'.format(model_name, eval_type), 'a') f.write('%.2f %.2f %.2f %.2f\n'%(np.nanmean(role_ap) * 100.00, np.nanmean(role_ap) * 100.00, np.nanmean(role_ap[rare]) * 100.00, np.nanmean(role_ap[nonrare]) * 100.00)) f.flush() f.close() def _do_agent_eval(self, vcocodb, detections_file, ovr_thresh=0.5): with open(detections_file, 'rb') as f: dets = pickle.load(f) tp = [[] for a in range(self.num_actions)] fp = [[] for a in range(self.num_actions)] sc = [[] for a in range(self.num_actions)] npos = np.zeros((self.num_actions), dtype=np.float32) for i in range(len(vcocodb)): image_id = vcocodb[i]['id']#
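# role_ap above is computed with voc_ap(rec, prec), which is imported from
# elsewhere in this codebase. For context, a standard all-point-interpolated
# VOC AP sketch, assumed to match the usual V-COCO/PASCAL-style implementation
# rather than copied from this repository:

import numpy as np

def voc_ap(rec, prec):
    """Compute average precision from recall/precision arrays (all-point method)."""
    mrec = np.concatenate(([0.0], rec, [1.0]))
    mpre = np.concatenate(([0.0], prec, [0.0]))
    # Make precision monotonically non-increasing from right to left.
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # Sum the areas under the curve where recall changes.
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])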
# "time": 1640819389454, # "orderId": "a17e0874ecbdU0711043490bbtcpDU5X", # "seqNum": -1, # "orderType": "Limit", # "execInst": "NULL_VAL", # "side": "Buy", # "symbol": "BTC-PERP", # "price": "30000", # "orderQty": "0.002", # "stopPrice": "0", # "stopBy": "ref-px", # "status": "Ack", # "lastExecTime": 1640819389454, # "lastQty": "0", # "lastPx": "0", # "avgFilledPx": "0", # "cumFilledQty": "0", # "fee": "0", # "cumFee": "0", # "feeAsset": "", # "errorCode": "", # "posStopLossPrice": "0", # "posStopLossTrigger": "market", # "posTakeProfitPrice": "0", # "posTakeProfitTrigger": "market", # "liquidityInd": "n" # } # } # } # data = self.safe_value(response, 'data', {}) order = self.safe_value_2(data, 'order', 'info', {}) return self.parse_order(order, market) def create_reduce_only_order(self, symbol, type, side, amount, price=None, params={}): request = { 'execInst': 'reduceOnly', } return self.create_order(symbol, type, side, amount, price, self.extend(request, params)) def fetch_order(self, id, symbol=None, params={}): self.load_markets() self.load_accounts() market = None if symbol is not None: market = self.market(symbol) type, query = self.handle_market_type_and_params('fetchOrder', market, params) options = self.safe_value(self.options, 'fetchOrder', {}) accountsByType = self.safe_value(self.options, 'accountsByType', {}) accountCategory = self.safe_string(accountsByType, type, 'cash') account = self.safe_value(self.accounts, 0, {}) accountGroup = self.safe_value(account, 'id') request = { 'account-group': accountGroup, 'account-category': accountCategory, 'orderId': id, } defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryGetOrderStatus') method = self.get_supported_mapping(type, { 'spot': defaultMethod, 'margin': defaultMethod, 'swap': 'v2PrivateAccountGroupGetFuturesOrderStatus', }) if method == 'v1PrivateAccountCategoryGetOrderStatus': if accountCategory is not None: request['category'] = accountCategory else: request['account-category'] = accountCategory response = getattr(self, method)(self.extend(request, query)) # # AccountCategoryGetOrderStatus # # { # "code": 0, # "accountCategory": "CASH", # "accountId": "<KEY>", # "data": [ # { # "symbol": "BTC/USDT", # "price": "8131.22", # "orderQty": "0.00082", # "orderType": "Market", # "avgPx": "7392.02", # "cumFee": "0.005152238", # "cumFilledQty": "0.00082", # "errorCode": "", # "feeAsset": "USDT", # "lastExecTime": 1575953151764, # "orderId": "a16eee20b6750866943712zWEDdAjt3", # "seqNum": 2623469, # "side": "Buy", # "status": "Filled", # "stopPrice": "", # "execInst": "NULL_VAL" # } # ] # } # # AccountGroupGetFuturesOrderStatus # # { # "code": 0, # "accountId": "<KEY>", # "ac": "FUTURES", # "data": { # "ac": "FUTURES", # "accountId": "<KEY>", # "time": 1640247020217, # "orderId": "r17de65747aeU0711043490bbtcp0cmt", # "seqNum": 28796162908, # "orderType": "Limit", # "execInst": "NULL_VAL", # "side": "Buy", # "symbol": "BTC-PERP", # "price": "30000", # "orderQty": "0.0021", # "stopPrice": "0", # "stopBy": "market", # "status": "New", # "lastExecTime": 1640247020232, # "lastQty": "0", # "lastPx": "0", # "avgFilledPx": "0", # "cumFilledQty": "0", # "fee": "0", # "cumFee": "0", # "feeAsset": "USDT", # "errorCode": "", # "posStopLossPrice": "0", # "posStopLossTrigger": "market", # "posTakeProfitPrice": "0", # "posTakeProfitTrigger": "market", # "liquidityInd": "n" # } # } # data = self.safe_value(response, 'data', {}) return self.parse_order(data, market) def fetch_open_orders(self, symbol=None, since=None, 
limit=None, params={}): self.load_markets() self.load_accounts() market = None if symbol is not None: market = self.market(symbol) symbol = market['symbol'] account = self.safe_value(self.accounts, 0, {}) accountGroup = self.safe_value(account, 'id') type, query = self.handle_market_type_and_params('fetchOpenOrders', market, params) accountsByType = self.safe_value(self.options, 'accountsByType', {}) accountCategory = self.safe_string(accountsByType, type, 'cash') request = { 'account-group': accountGroup, 'account-category': accountCategory, } options = self.safe_value(self.options, 'fetchOpenOrders', {}) defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryGetOrderOpen') method = self.get_supported_mapping(type, { 'spot': defaultMethod, 'margin': defaultMethod, 'swap': 'v2PrivateAccountGroupGetFuturesOrderOpen', }) if method == 'v1PrivateAccountCategoryGetOrderOpen': if accountCategory is not None: request['category'] = accountCategory else: request['account-category'] = accountCategory response = getattr(self, method)(self.extend(request, query)) # # AccountCategoryGetOrderOpen # # { # "ac": "CASH", # "accountId": "<KEY>", # "code": 0, # "data": [ # { # "avgPx": "0", # Average filled price of the order # "cumFee": "0", # cumulative fee paid for self order # "cumFilledQty": "0", # cumulative filled quantity # "errorCode": "", # error code; could be empty # "feeAsset": "USDT", # fee asset # "lastExecTime": 1576019723550, # The last execution time of the order # "orderId": "s16ef21882ea0866943712034f36d83", # server provided orderId # "orderQty": "0.0083", # order quantity # "orderType": "Limit", # order type # "price": "7105", # order price # "seqNum": 8193258, # sequence number # "side": "Buy", # order side # "status": "New", # order status on matching engine # "stopPrice": "", # only available for stop market and stop limit orders; otherwise empty # "symbol": "BTC/USDT", # "execInst": "NULL_VAL" # execution instruction # }, # ] # } # # AccountGroupGetFuturesOrderOpen # # { # "code": 0, # "data": [ # { # "ac": "FUTURES", # "accountId": "<KEY>", # "time": 1640247020217, # "orderId": "r17de65747aeU0711043490bbtcp0cmt", # "seqNum": 28796162908, # "orderType": "Limit", # "execInst": "NULL_VAL", # "side": "Buy", # "symbol": "BTC-PERP", # "price": "30000", # "orderQty": "0.0021", # "stopPrice": "0", # "stopBy": "market", # "status": "New", # "lastExecTime": 1640247020232, # "lastQty": "0", # "lastPx": "0", # "avgFilledPx": "0", # "cumFilledQty": "0", # "fee": "0", # "cumFee": "0", # "feeAsset": "USDT", # "errorCode": "", # "posStopLossPrice": "0", # "posStopLossTrigger": "market", # "posTakeProfitPrice": "0", # "posTakeProfitTrigger": "market", # "liquidityInd": "n" # } # ] # } # data = self.safe_value(response, 'data', []) if accountCategory == 'futures': return self.parse_orders(data, market, since, limit) # a workaround for https://github.com/ccxt/ccxt/issues/7187 orders = [] for i in range(0, len(data)): order = self.parse_order(data[i], market) orders.append(order) return self.filter_by_symbol_since_limit(orders, symbol, since, limit) def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}): self.load_markets() self.load_accounts() account = self.safe_value(self.accounts, 0, {}) accountGroup = self.safe_value(account, 'id') request = { 'account-group': accountGroup, # 'category': accountCategory, # 'symbol': market['id'], # 'orderType': 'market', # optional, string # 'side': 'buy', # or 'sell', optional, case insensitive. 
# 'status': 'Filled', # "Filled", "Canceled", or "Rejected" # 'startTime': exchange.milliseconds(), # 'endTime': exchange.milliseconds(), # 'page': 1, # 'pageSize': 100, } market = None if symbol is not None: market = self.market(symbol) request['symbol'] = market['id'] type, query = self.handle_market_type_and_params('fetchCLosedOrders', market, params) options = self.safe_value(self.options, 'fetchClosedOrders', {}) defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountGroupGetOrderHist') method = self.get_supported_mapping(type, { 'spot': defaultMethod, 'margin': defaultMethod, 'swap': 'v2PrivateAccountGroupGetFuturesOrderHistCurrent', }) accountsByType = self.safe_value(self.options, 'accountsByType', {}) accountCategory = self.safe_string(accountsByType, type, 'cash') if method == 'v1PrivateAccountGroupGetOrderHist': if accountCategory is not None: request['category'] = accountCategory else: request['account-category'] = accountCategory if since is not None: request['startTime'] = since if limit is not None: request['pageSize'] = limit response = getattr(self, method)(self.extend(request, query)) # # accountCategoryGetOrderHistCurrent # # { # "code":0, # "accountId":"<KEY>", # "ac":"CASH", # "data":[ # { # "seqNum":15561826728, # "orderId":"a17294d305c0U6491137460bethu7kw9", # "symbol":"ETH/USDT", # "orderType":"Limit", # "lastExecTime":1591635618200, # "price":"200", # "orderQty":"0.1", # "side":"Buy", # "status":"Canceled", # "avgPx":"0", # "cumFilledQty":"0", # "stopPrice":"", # "errorCode":"", # "cumFee":"0", # "feeAsset":"USDT", # "execInst":"NULL_VAL" # } # ] # } # # accountGroupGetOrderHist # # { # "code": 0, # "data": { # "data": [ # { # "ac": "FUTURES", # "accountId": "testabcdefg", # "avgPx": "0", # "cumFee": "0", # "cumQty": "0", # "errorCode": "NULL_VAL", # "execInst": "NULL_VAL", # "feeAsset": "USDT", # "lastExecTime": 1584072844085, # "orderId": "r170d21956dd5450276356bbtcpKa74", # "orderQty": "1.1499", # "orderType": "Limit", # "price": "4000", # "sendingTime": 1584072841033, # "seqNum": 24105338, # "side": "Buy", # "status": "Canceled", # "stopPrice": "", # "symbol": "BTC-PERP" # }, # ], # "hasNext": False, # "limit": 500, # "page": 1, # "pageSize": 20 # } # } # # accountGroupGetFuturesOrderHistCurrent # # { # "code": 0, # "data": [ # { # "ac": "FUTURES", # "accountId": "<KEY>", # "time": 1640245777002, # "orderId": "r17de6444fa6U0711043490bbtcpJ2lI", # "seqNum": 28796124902, # "orderType": "Limit", # "execInst": "NULL_VAL", # "side": "Buy", # "symbol": "BTC-PERP", # "price": "30000", # "orderQty": "0.0021", # "stopPrice": "0", # "stopBy": "market", # "status": "Canceled", # "lastExecTime": 1640246574886, # "lastQty": "0", # "lastPx": "0", # "avgFilledPx": "0", # "cumFilledQty": "0", # "fee": "0", # "cumFee": "0", # "feeAsset": "USDT", # "errorCode": "", # "posStopLossPrice": "0", # "posStopLossTrigger": "market", # "posTakeProfitPrice": "0", # "posTakeProfitTrigger": "market", # "liquidityInd": "n" # } # ] # } # data = self.safe_value(response, 'data') isArray = isinstance(data, list) if not isArray: data = self.safe_value(data, 'data', []) return self.parse_orders(data, market, since, limit) def cancel_order(self, id, symbol=None, params={}): if symbol is None: raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument') self.load_markets() self.load_accounts() market = self.market(symbol) type, query = self.handle_market_type_and_params('cancelOrder', market, params) options = self.safe_value(self.options, 'cancelOrder', {}) 
        accountsByType = self.safe_value(self.options, 'accountsByType', {})
        accountCategory = self.safe_string(accountsByType, type, 'cash')
        account = self.safe_value(self.accounts, 0, {})
        accountGroup = self.safe_value(account, 'id')
        request = {
            'account-group': accountGroup,
            'account-category': accountCategory,
            'symbol':
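# ----------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file): the methods above
# follow ccxt's unified API, and the endpoint names suggest an ascendex-style
# exchange class. Assuming that, client code would look roughly like this;
# the exchange class, credentials and symbol are illustrative placeholders.
import ccxt

exchange = ccxt.ascendex({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
open_orders = exchange.fetch_open_orders('BTC/USDT')       # spot ("cash") account by default
if open_orders:
    order = exchange.fetch_order(open_orders[0]['id'], 'BTC/USDT')
    exchange.cancel_order(order['id'], 'BTC/USDT')          # cancelOrder requires a symbol here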
#!/usr/bin/env python3 """ FOP Filter Orderer and Preener Copyright (C) 2011 Michael This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.""" # FOP version number VERSION = 3.9 # Import the key modules import collections, filecmp, os, re, subprocess, sys import argparse ap = argparse.ArgumentParser() ap.add_argument("--dir", "-d", nargs="+", help="Set directories", default=None) ap.add_argument( "--commit", "-c", help="Enable commit mode", action="store_true", default=False ) arg = ap.parse_args() # Check the version of Python for language compatibility and subprocess.check_output() MAJORREQUIRED = 3 MINORREQUIRED = 2 if sys.version_info < (MAJORREQUIRED, MINORREQUIRED): raise RuntimeError( "FOP requires Python {reqmajor}.{reqminor} or greater, but Python {ismajor}.{isminor} is being used to run this program.".format( reqmajor=MAJORREQUIRED, reqminor=MINORREQUIRED, ismajor=sys.version_info.major, isminor=sys.version_info.minor, ) ) # Import a module only available in Python 3 from urllib.parse import urlparse # Compile regular expressions to match important filter parts (derived from Wladimir Palant's Adblock Plus source code) ELEMENTDOMAINPATTERN = re.compile(r"^([^\/\*\|\@\"\!]*?)#\@?#") FILTERDOMAINPATTERN = re.compile(r"(?:\$|\,)domain\=([^\,\s]+)$") ELEMENTPATTERN = re.compile(r"^([^\/\*\|\@\"\!]*?)(#\@?#?\+?)([^{}]+)$") OPTIONPATTERN = re.compile( r"^(.*)\$(~?[\w\-]+(?:=[^,\s]+)?(?:,~?[\w\-]+(?:=[^,\s]+)?)*)$" ) # Compile regular expressions that match element tags and pseudo classes and strings and tree selectors; "@" indicates either the beginning or the end of a selector SELECTORPATTERN = re.compile( r"(?<=[\s\[@])([a-zA-Z]*[A-Z][a-zA-Z0-9]*)((?=([\[\]\^\*\$=:@#\.]))|(?=(\s(?:[+>~]|\*|[a-zA-Z][a-zA-Z0-9]*[\[:@\s#\.]|[#\.][a-zA-Z][a-zA-Z0-9]*))))" ) PSEUDOPATTERN = re.compile(r"(\:[a-zA-Z\-]*[A-Z][a-zA-Z\-]*)(?=([\(\:\@\s]))") REMOVALPATTERN = re.compile(r"((?<=([>+~,]\s))|(?<=(@|\s|,)))(\*)(?=([#\.\[\:]))") ATTRIBUTEVALUEPATTERN = re.compile( r"^([^\'\"\\]|\\.)*(\"(?:[^\"\\]|\\.)*\"|\'(?:[^\'\\]|\\.)*\')|\*" ) TREESELECTOR = re.compile(r"(\\.|[^\+\>\~\\\ \t])\s*([\+\>\~\ \t])\s*(\D)") UNICODESELECTOR = re.compile(r"\\[0-9a-fA-F]{1,6}\s[a-zA-Z]*[A-Z]") # Compile a regular expression that describes a completely blank line BLANKPATTERN = re.compile(r"^\s*$") # Compile a regular expression that validates commit comments COMMITPATTERN = re.compile(r"^(A|M|P)\:\s(\((.+)\)\s)?(.*)$") # List the files that should not be sorted, either because they have a special sorting system or because they are not filter files IGNORE = ("output", "output_filters", "requirements.txt", "templates", "node_modules") # List all uBO options (excepting domain, which is handled separately), as of version 1.21.9b7 # https://github.com/gorhill/uBlock/wiki/Resources-Library # Support for AdGuard's empty and mp4 filter https://github.com/gorhill/uBlock/releases/tag/1.22.0 KNOWNOPTIONS = ( "collapse", "csp", "document", "elemhide", "font", "genericblock", "generichide", 
"specifichide", "image", "match-case", "object", "media", "object-subrequest", "other", "ping", "popup", "script", "stylesheet", "subdocument", "third-party", "first-party", "1p", "3p", "inline-script", "xhr", "websocket", "webrtc", "xmlhttprequest", "important", "redirect=googletagmanager_gtm.js", "redirect=google-analytics_ga.js", "redirect=google-analytics_analytics.js", "redirect=googletagservices_gpt.js", "redirect=google-analytics_cx_api.js", "redirect=googlesyndication_adsbygoogle.js", "redirect=doubleclick_instream_ad_status.js", "redirect=ampproject_v0.js", "redirect=noop.js", "redirect=noop.html", "redirect=noop.txt", "redirect=noop-0.1s.mp3", "redirect=noop-1s.mp4", "redirect=1x1.gif", "redirect=2x2.png", "redirect=3x2.png", "redirect=32x32.png", "redirect-rule=googletagmanager_gtm.js", "redirect-rule=google-analytics_ga.js", "redirect-rule=google-analytics_analytics.js", "redirect-rule=googletagservices_gpt.js", "redirect-rule=google-analytics_cx_api.js", "redirect-rule=googlesyndication_adsbygoogle.js", "redirect-rule=doubleclick_instream_ad_status.js", "redirect-rule=ampproject_v0.js", "redirect-rule=noop.js", "redirect-rule=noop.html", "redirect-rule=noop.txt", "redirect-rule=noop-0.1s.mp3", "redirect-rule=noop-1s.mp4", "redirect-rule=1x1.gif", "redirect-rule=2x2.png", "redirect-rule=3x2.png", "redirect-rule=32x32.png", "empty", "mp4", "ehide", "ghide", "shide", ) # List the supported revision control system commands REPODEF = collections.namedtuple( "repodef", "name, directory, locationoption, repodirectoryoption, checkchanges, difference, commit, pull, push", ) GIT = REPODEF( ["git"], "./.git/", "--work-tree=", "--git-dir=", ["status", "-s", "--untracked-files=no"], ["diff"], ["commit", "-a", "-m"], ["pull"], ["push"], ) HG = REPODEF( ["hg"], "./.hg/", "-R", None, ["stat", "-q"], ["diff"], ["commit", "-m"], ["pull"], ["push"], ) REPOTYPES = (GIT, HG) def start(): """ Print a greeting message and run FOP in the directories specified via the command line, or the current working directory if no arguments have been passed.""" greeting = "FOP (Filter Orderer and Preener) version {version}".format( version=VERSION ) characters = len(str(greeting)) print("=" * characters) print(greeting) print("=" * characters) # Convert the directory names to absolute references and visit each unique location places = arg.dir if places: places = [os.path.abspath(place) for place in places] for place in sorted(set(places)): main(place) print() else: main(os.getcwd()) def main(location): """ Find and sort all the files in a given directory, committing changes to a repository if one exists.""" # Check that the directory exists, otherwise return if not os.path.isdir(location): print("{location} does not exist or is not a folder.".format(location=location)) return # Set the repository type based on hidden directories repository = None for repotype in REPOTYPES: if os.path.isdir(os.path.join(location, repotype.directory)): repository = repotype break # If this is a repository, record the initial changes; if this fails, give up trying to use the repository if repository: try: basecommand = repository.name if repository.locationoption.endswith("="): basecommand.append( "{locationoption}{location}".format( locationoption=repository.locationoption, location=location ) ) else: basecommand.extend([repository.locationoption, location]) if repository.repodirectoryoption: if repository.repodirectoryoption.endswith("="): basecommand.append( "{repodirectoryoption}{location}".format( 
repodirectoryoption=repository.repodirectoryoption, location=os.path.normpath( os.path.join(location, repository.directory) ), ) ) else: basecommand.extend([repository.repodirectoryoption, location]) command = basecommand + repository.checkchanges originaldifference = True if subprocess.check_output(command) else False except (subprocess.CalledProcessError, OSError): print( 'The command "{command}" was unable to run; FOP will therefore not attempt to use the repository tools. On Windows, this may be an indication that you do not have sufficient privileges to run FOP - the exact reason why is unknown. Please also ensure that your revision control system is installed correctly and understood by FOP.'.format( command=" ".join(command) ) ) repository = None # Work through the directory and any subdirectories, ignoring hidden directories print( "\nPrimary location: {folder}".format( folder=os.path.join(os.path.abspath(location), "") ) ) for path, directories, files in os.walk(location): for direct in directories[:]: if direct.startswith(".") or direct in IGNORE: directories.remove(direct) print( "Current directory: {folder}".format( folder=os.path.join(os.path.abspath(path), "") ) ) directories.sort() for filename in sorted(files): address = os.path.join(path, filename) extension = os.path.splitext(filename)[1] # Sort all text files that are not blacklisted if extension == ".txt" and filename not in IGNORE: fopsort(address) # Delete unnecessary backups and temporary files if extension == ".orig" or extension == ".temp": try: os.remove(address) except (IOError, OSError): # Ignore errors resulting from deleting files, as they likely indicate that the file has already been deleted pass # If in a repository, offer to commit any changes if repository and arg.commit: commit(repository, basecommand, originaldifference) def fopsort(filename): """ Sort the sections of the file and save any modifications.""" temporaryfile = "{filename}.temp".format(filename=filename) CHECKLINES = 10 section = [] lineschecked = 1 filterlines = elementlines = 0 # Read in the input and output files concurrently to allow filters to be saved as soon as they are finished with with open(filename, "r", encoding="utf-8", newline="\n") as inputfile, open( temporaryfile, "w", encoding="utf-8", newline="\n" ) as outputfile: # Combines domains for (further) identical rules def combinefilters(uncombinedFilters, DOMAINPATTERN, domainseparator): combinedFilters = [] for i in range(len(uncombinedFilters)): domains1 = re.search(DOMAINPATTERN, uncombinedFilters[i]) if i + 1 < len(uncombinedFilters) and domains1: domains2 = re.search(DOMAINPATTERN, uncombinedFilters[i + 1]) domain1str = domains1.group(1) if ( not domains1 or i + 1 == len(uncombinedFilters) or not domains2 or len(domain1str) == 0 or len(domains2.group(1)) == 0 ): # last filter or filter didn't match regex or no domains combinedFilters.append(uncombinedFilters[i]) else: domain2str = domains2.group(1) if domains1.group(0).replace( domain1str, domain2str, 1 ) != domains2.group(0): # non-identical filters shouldn't be combined combinedFilters.append(uncombinedFilters[i]) elif re.sub(DOMAINPATTERN, "", uncombinedFilters[i]) == re.sub( DOMAINPATTERN, "", uncombinedFilters[i + 1] ): # identical filters. Try to combine them... 
newDomains = "{d1}{sep}{d2}".format( d1=domain1str, sep=domainseparator, d2=domain2str ) newDomains = domainseparator.join( sorted( set(newDomains.split(domainseparator)), key=lambda domain: domain.strip("~"), ) ) if ( domain1str.count("~") != domain1str.count(domainseparator) + 1 ) != ( domain2str.count("~") != domain2str.count(domainseparator) + 1 ): # do not combine rules containing included domains with rules containing only excluded domains combinedFilters.append(uncombinedFilters[i]) else: # either both contain one or more included domains, or both contain only excluded domains domainssubstitute = domains1.group(0).replace( domain1str, newDomains, 1 ) uncombinedFilters[i + 1] = re.sub( DOMAINPATTERN, domainssubstitute, uncombinedFilters[i] ) else: # non-identical filters shouldn't be combined combinedFilters.append(uncombinedFilters[i]) return combinedFilters # Writes the filter lines to the file def writefilters(): if elementlines > filterlines: uncombinedFilters = sorted( set(section), key=lambda rule: re.sub(ELEMENTDOMAINPATTERN, "", rule), ) outputfile.write( "{filters}\n".format( filters="\n".join( combinefilters(uncombinedFilters, ELEMENTDOMAINPATTERN, ",") ) ) ) else: uncombinedFilters = sorted(set(section), key=str.lower) outputfile.write( "{filters}\n".format( filters="\n".join( combinefilters(uncombinedFilters, FILTERDOMAINPATTERN, "|") ) ) ) for line in inputfile: line = line.strip() if not re.match(BLANKPATTERN, line): # Include comments verbatim and, if applicable, sort the preceding section of filters and save them in the new version of the file if ( line[0] == "!" or line[:8] == "%include" or line[0] == "[" and line[-1] == "]" ): if section: writefilters() section = [] lineschecked = 1 filterlines = elementlines = 0 outputfile.write("{line}\n".format(line=line)) else: # Convert inject:script to +js line = line.replace("##script:inject", "##+js") # Neaten up filters and, if necessary, check their type for the sorting algorithm elementparts = re.match(ELEMENTPATTERN, line) if elementparts: domains = elementparts.group(1).lower() if
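# ----------------------------------------------------------------------------
# Editor's illustration (not part of the original file): fopsort keys
# element-hiding rules on the selector that follows "##", so identical
# selectors with different domain prefixes become adjacent and can be merged.
# A tiny standalone demonstration of that keying, using the
# ELEMENTDOMAINPATTERN defined near the top of this file; the filter strings
# are made-up examples.
import re

ELEMENTDOMAINPATTERN = re.compile(r"^([^\/\*\|\@\"\!]*?)#\@?#")

rules = ["example.org##.banner", "example.com##.banner"]
ordered = sorted(rules, key=lambda rule: re.sub(ELEMENTDOMAINPATTERN, "", rule))
domains = [re.search(ELEMENTDOMAINPATTERN, rule).group(1) for rule in ordered]
print(ordered)   # both rules share the selector ".banner"
print(domains)   # ['example.org', 'example.com'] -> candidates for "example.com,example.org##.banner"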
<gh_stars>100-1000 """ By <NAME> <<EMAIL>> ecdsa implementation in python demonstrating several 'unconventional' calculations, like finding a public key from a signature, and finding a private key from 2 signatures with identical 'r' """ def GCD(a, b): """ (gcd,c,d)= GCD(a, b) ===> a*c+b*d!=gcd: """ if a == 0: return (b, 0, 1) d1, x1, y1 = GCD(b % a, a) return (d1, y1 - (b // a) * x1, x1) def modinv(x, m): (gcd, c, d) = GCD(x, m) return c def samefield(a, b): """ determine if a uses the same field """ if a.field != b.field: raise RuntimeError("field mismatch") return True class FiniteField: """ FiniteField implements a value modulus a number. """ class Value: """ represent a value in the FiniteField this class forwards all operations to the FiniteField class """ def __init__(self, field, value): self.field = field self.value = field.integer(value) # Value * int def __add__(self, rhs): return self.field.add(self, self.field.value(rhs)) def __sub__(self, rhs): return self.field.sub(self, self.field.value(rhs)) def __mul__(self, rhs): return self.field.mul(self, self.field.value(rhs)) def __truediv__(self, rhs): return self.field.div(self, self.field.value(rhs)) def __pow__(self, rhs): return self.field.pow(self, rhs) # int * Value def __radd__(self, rhs): return self.field.add(self.field.value(rhs), self) def __rsub__(self, rhs): return self.field.sub(self.field.value(rhs), self) def __rmul__(self, rhs): return self.field.mul(self.field.value(rhs), self) def __rtruediv__(self, rhs): return self.field.div(self.field.value(rhs), self) def __rpow__(self, rhs): return self.field.pow(self.field.value(rhs), self) def __eq__(self, rhs): return self.field.eq(self, self.field.value(rhs)) def __ne__(self, rhs): return not (self == rhs) def __neg__(self): return self.field.neg(self) def sqrt(self, flag): return self.field.sqrt(self, flag) def inverse(self): return self.field.inverse(self) def iszero(self): return self.value == 0 def __repr__(self): return "%d (mod %d)" % (self.value, self.field.p) def __str__(self): return "%d (mod %d)" % (self.value, self.field.p) def __init__(self, p, order=1): self.p = p ** order """ several basic operators """ def add(self, lhs, rhs): return samefield(lhs, rhs) and self.value((lhs.value + rhs.value) % self.p) def sub(self, lhs, rhs): return samefield(lhs, rhs) and self.value((lhs.value - rhs.value) % self.p) def mul(self, lhs, rhs): return samefield(lhs, rhs) and self.value((lhs.value * rhs.value) % self.p) def div(self, lhs, rhs): return samefield(lhs, rhs) and self.value((lhs.value * rhs.inverse()) % self.p) def pow(self, lhs, rhs): return self.value(pow(lhs.value, self.integer(rhs), self.p)) def eq(self, lhs, rhs): return (lhs.value - rhs.value) % self.p == 0 def neg(self, val): return self.value(self.p - val.value) def sqrt(self, val, flag): """ calculate the square root modulus p """ if val.iszero(): return val sw = self.p % 8 if sw == 3 or sw == 7: res = val ** ((self.p + 1) / 4) elif sw == 5: x = val ** ((self.p + 1) / 4) if x == 1: res = val ** ((self.p + 3) / 8) else: res = (4 * val) ** ((self.p - 5) / 8) * 2 * val else: raise Exception("modsqrt non supported for (p%8)==1") if res.value % 2 == flag: return res else: return -res def inverse(self, value): """ calculate the multiplicative inverse """ return modinv(value.value, self.p) def value(self, x): """ converts an integer or FinitField.Value to a value of this FiniteField. 
""" return ( x if isinstance(x, FiniteField.Value) and x.field == self else FiniteField.Value(self, x) ) def integer(self, x): """ returns a plain integer """ return x.value if isinstance(x, FiniteField.Value) else x def zero(self): """ returns the additive identity value meaning: a + 0 = a """ return FiniteField.Value(self, 0) def one(self): """ returns the multiplicative identity value meaning a * 1 = a """ return FiniteField.Value(self, 1) class EllipticCurve: """ EllipticCurve implements a point on a elliptic curve """ class Point: """ represent a value in the EllipticCurve this class forwards all operations to the EllipticCurve class """ def __init__(self, curve, x, y): self.curve = curve self.x = x self.y = y # Point + Point def __add__(self, rhs): return self.curve.add(self, rhs) def __sub__(self, rhs): return self.curve.sub(self, rhs) # Point * int or Point * Value def __mul__(self, rhs): return self.curve.mul(self, rhs) def __div__(self, rhs): return self.curve.div(self, rhs) def __eq__(self, rhs): return self.curve.eq(self, rhs) def __ne__(self, rhs): return not (self == rhs) def __str__(self): return "(%s,%s)" % (self.x, self.y) def __neg__(self): return self.curve.neg(self) def iszero(self): return self.x.iszero() and self.y.iszero() def isoncurve(self): return self.curve.isoncurve(self) def __init__(self, field, a, b): self.field = field self.a = field.value(a) self.b = field.value(b) def add(self, p, q): """ perform elliptic curve addition """ if p.iszero(): return q if q.iszero(): return p # calculate the slope of the intersection line if p == q: if p.y == 0: return self.zero() l = (3 * p.x ** 2 + self.a) / (2 * p.y) elif p.x == q.x: return self.zero() else: l = (p.y - q.y) / (p.x - q.x) # calculate the intersection point x = l ** 2 - (p.x + q.x) y = l * (p.x - x) - p.y return self.point(x, y) # subtraction is : a - b = a + -b def sub(self, lhs, rhs): return lhs + -rhs # scalar multiplication is implemented like repeated addition def mul(self, pt, scalar): scalar = self.field.integer(scalar) accumulator = self.zero() shifter = pt while scalar != 0: bit = scalar % 2 if bit: accumulator += shifter shifter += shifter scalar //= 2 return accumulator def div(self, pt, scalar): """ scalar division: P / a = P * (1/a) scalar is assumed to be of type FiniteField(grouporder) """ return pt * (1 // scalar) def eq(self, lhs, rhs): return lhs.x == rhs.x and lhs.y == rhs.y def neg(self, pt): return self.point(pt.x, -pt.y) def zero(self): """ Return the additive identity point ( aka '0' ) P + 0 = P """ return self.point(self.field.zero(), self.field.zero()) def point(self, x, y): """ construct a point from 2 values """ return EllipticCurve.Point(self, self.field.value(x), self.field.value(y)) def isoncurve(self, p): """ verifies if a point is on the curve """ return p.iszero() or p.y ** 2 == p.x ** 3 + self.a * p.x + self.b def decompress(self, x, flag): """ calculate the y coordinate given only the x value. there are 2 possible solutions, use 'flag' to select. 
""" x = self.field.value(x) ysquare = x ** 3 + self.a * x + self.b return self.point(x, ysquare.sqrt(flag)) class ECDSA: """ Digital Signature Algorithm using Elliptic Curves """ def __init__(self, ec, G, n): self.ec = ec self.G = G self.GFn = FiniteField(n) def calcpub(self, privkey): """ calculate the public key for private key x return G*x """ return self.G * self.GFn.value(privkey) def sign(self, message, privkey, secret): """ sign the message using private key and sign secret for signsecret k, message m, privatekey x return (G*k, (m+x*r)/k) """ m = self.GFn.value(message) x = self.GFn.value(privkey) k = self.GFn.value(secret) R = self.G * k r = self.GFn.value(R.x) s = (m + x * r) / k print("=== Signature Generated Successfully ===") return (r, s) def verify(self, message, pubkey, rnum, snum): """ Verify the signature for message m, pubkey Y, signature (r,s) r = xcoord(R) verify that : G*m+Y*r=R*s this is true because: { Y=G*x, and R=G*k, s=(m+x*r)/k } G*m+G*x*r = G*k*(m+x*r)/k -> G*(m+x*r) = G*(m+x*r) several ways to do the verification: r == xcoord[ G*(m/s) + Y*(r/s) ] <<< the standard way R * s == G*m + Y*r r == xcoord[ (G*m + Y*r)/s) ] """ m = self.GFn.value(message) r = self.GFn.value(rnum) s = self.GFn.value(snum) R = self.G * (m / s) + pubkey * (r / s) # alternative methods of verifying # RORG= self.ec.decompress(r, 0) # RR = self.G * m + pubkey * r # print "#1: %s .. %s" % (RR, RORG*s) # print "#2: %s .. %s" % (RR*(1/s), r) # print "#3: %s .. %s" % (R, r) if R.x == r: print("=== Signature is Valid ===") return R.x == r def crack2(self, r, s1, s2, m1, m2): """ find signsecret and privkey from duplicate 'r' signature (r,s1) for message m1 and signature (r,s2) for message m2 s1= (m1 + x*r)/k s2= (m2 + x*r)/k subtract -> (s1-s2) = (m1-m2)/k -> k = (m1-m2)/(s1-s2) -> privkey = (s1*k-m1)/r .. or (s2*k-m2)/r """ sdelta =
<filename>src/jobTreeSlave.py #!/usr/bin/env python #Copyright (C) 2011 by <NAME> (<EMAIL>) # #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal #in the Software without restriction, including without limitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is #furnished to do so, subject to the following conditions: # #The above copyright notice and this permission notice shall be included in #all copies or substantial portions of the Software. # #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN #THE SOFTWARE. import os import sys import time import subprocess import xml.etree.cElementTree as ET import cPickle import traceback import time import socket import logging def truncateFile(fileNameString, tooBig=50000): """Truncates a file that is bigger than tooBig bytes, leaving only the last tooBig bytes in the file. """ if os.path.getsize(fileNameString) > tooBig: fh = open(fileNameString, 'rb+') fh.seek(-tooBig, 2) data = fh.read() fh.seek(0) # rewind fh.write(data) fh.truncate() fh.close() def loadStack(command): commandTokens = command.split() assert commandTokens[0] == "scriptTree" for className in commandTokens[2:]: l = className.split(".") moduleName = ".".join(l[:-1]) className = l[-1] _temp = __import__(moduleName, globals(), locals(), [className], -1) exec "%s = 1" % className vars()[className] = _temp.__dict__[className] return loadPickleFile(commandTokens[1]) def loadPickleFile(pickleFile): """Loads the first object from a pickle file. """ fileHandle = open(pickleFile, 'r') i = cPickle.load(fileHandle) fileHandle.close() return i def nextOpenDescriptor(): """Gets the number of the next available file descriptor. """ descriptor = os.open("/dev/null", os.O_RDONLY) os.close(descriptor) return descriptor def main(): sys.path.append(sys.argv[1]) sys.argv.remove(sys.argv[1]) #Now we can import all the stuff.. from sonLib.bioio import getBasicOptionParser from sonLib.bioio import parseBasicOptions from sonLib.bioio import logger from sonLib.bioio import addLoggingFileHandler, redirectLoggerStreamHandlers from sonLib.bioio import setLogLevel from sonLib.bioio import getTotalCpuTime, getTotalCpuTimeAndMemoryUsage from sonLib.bioio import getTempDirectory from sonLib.bioio import makeSubDir from jobTree.src.job import Job from jobTree.src.master import getEnvironmentFileName, getConfigFileName, listChildDirs, getTempStatsFile, setupJobAfterFailure from sonLib.bioio import system ########################################## #Input args ########################################## jobTreePath = sys.argv[1] jobFile = sys.argv[2] ########################################## #Load the environment for the job ########################################## #First load the environment for the job. 
fileHandle = open(getEnvironmentFileName(jobTreePath), 'r') environment = cPickle.load(fileHandle) fileHandle.close() for i in environment: if i not in ("TMPDIR", "TMP", "HOSTNAME", "HOSTTYPE"): os.environ[i] = environment[i] # sys.path is used by __import__ to find modules if "PYTHONPATH" in environment: for e in environment["PYTHONPATH"].split(':'): if e != '': sys.path.append(e) #os.environ = environment #os.putenv(key, value) ########################################## #Setup the temporary directories. ########################################## #Dir to put all the temp files in. localSlaveTempDir = getTempDirectory() localTempDir = makeSubDir(os.path.join(localSlaveTempDir, "localTempDir")) ########################################## #Setup the logging ########################################## #Setup the logging. This is mildly tricky because we don't just want to #redirect stdout and stderr for this Python process; we want to redirect it #for this process and all children. Consequently, we can't just replace #sys.stdout and sys.stderr; we need to mess with the underlying OS-level #file descriptors. See <http://stackoverflow.com/a/11632982/402891> #When we start, standard input is file descriptor 0, standard output is #file descriptor 1, and standard error is file descriptor 2. #What file do we want to point FDs 1 and 2 to? tempSlaveLogFile = os.path.join(localSlaveTempDir, "slave_log.txt") #Save the original stdout and stderr (by opening new file descriptors to the #same files) origStdOut = os.dup(1) origStdErr = os.dup(2) #Open the file to send stdout/stderr to. logDescriptor = os.open(tempSlaveLogFile, os.O_WRONLY | os.O_CREAT | os.O_APPEND) #Replace standard output with a descriptor for the log file os.dup2(logDescriptor, 1) #Replace standard error with a descriptor for the log file os.dup2(logDescriptor, 2) #Since we only opened the file once, all the descriptors duped from the #original will share offset information, and won't clobber each others' #writes. See <http://stackoverflow.com/a/5284108/402891>. This shouldn't #matter, since O_APPEND seeks to the end of the file before every write, but #maybe there's something odd going on... #Close the descriptor we used to open the file os.close(logDescriptor) for handler in list(logger.handlers): #Remove old handlers logger.removeHandler(handler) #Add the new handler. The sys.stderr stream has been redirected by swapping #the file descriptor out from under it. logger.addHandler(logging.StreamHandler(sys.stderr)) #Put a message at the top of the log, just to make sure it's working. print "---JOBTREE SLAVE OUTPUT LOG---" sys.stdout.flush() #Log the number of open file descriptors so we can tell if we're leaking #them. logger.debug("Next available file descriptor: {}".format( nextOpenDescriptor())) ########################################## #Parse input files ########################################## config = ET.parse(getConfigFileName(jobTreePath)).getroot() setLogLevel(config.attrib["log_level"]) job = Job.read(jobFile) job.messages = [] #This is the only way to stop messages logging twice, as are read only in the master job.children = [] #Similarly, this is where old children are flushed out. job.write() #Update status, to avoid reissuing children after running a follow on below. 
if os.path.exists(job.getLogFileName()): #This cleans the old log file os.remove(job.getLogFileName()) logger.info("Parsed arguments and set up logging") #Try loop for slave logging ########################################## #Setup the stats, if requested ########################################## if config.attrib.has_key("stats"): startTime = time.time() startClock = getTotalCpuTime() stats = ET.Element("slave") else: stats = None ########################################## #The max time ########################################## maxTime = float(config.attrib["job_time"]) assert maxTime > 0.0 assert maxTime < sys.maxint ########################################## #Slave log file trapped from here on in ########################################## slaveFailed = False try: ########################################## #The next job ########################################## def globalTempDirName(job, depth): return job.getGlobalTempDirName() + str(depth) command, memoryAvailable, cpuAvailable, depth = job.followOnCommands[-1] defaultMemory = int(config.attrib["default_memory"]) defaultCpu = int(config.attrib["default_cpu"]) assert len(job.children) == 0 startTime = time.time() while True: job.followOnCommands.pop() ########################################## #Global temp dir ########################################## globalTempDir = makeSubDir(globalTempDirName(job, depth)) i = 1 while os.path.isdir(globalTempDirName(job, depth+i)): system("rm -rf %s" % globalTempDirName(job, depth+i)) i += 1 ########################################## #Old children, not yet deleted # #These may exist because of the lazy cleanup #we do ########################################## for childDir in listChildDirs(job.jobDir): logger.debug("Cleaning up old child %s" % childDir) system("rm -rf %s" % childDir) ########################################## #Run the job ########################################## if command != "": #Not a stub if command[:11] == "scriptTree ": ########################################## #Run the target ########################################## loadStack(command).execute(job=job, stats=stats, localTempDir=localTempDir, globalTempDir=globalTempDir, memoryAvailable=memoryAvailable, cpuAvailable=cpuAvailable, defaultMemory=defaultMemory, defaultCpu=defaultCpu, depth=depth) else: #Is another command system(command) ########################################## #Cleanup/reset a successful job/checkpoint ########################################## job.remainingRetryCount = int(config.attrib["try_count"]) system("rm -rf %s/*" % (localTempDir)) job.update(depth=depth, tryCount=job.remainingRetryCount) ########################################## #Establish if we can run another job ########################################## if time.time() - startTime > maxTime: logger.info("We are breaking because the maximum time the job should run for has been exceeded") break #Deal with children if len(job.children) >= 1: #We are going to have to return to the parent logger.info("No more jobs can run in series by this slave, its got %i children" % len(job.children)) break if len(job.followOnCommands) == 0: logger.info("No more jobs can run by this slave as we have exhausted the follow ons") break #Get the next job and see if we have enough cpu and memory to run it.. 
command, memory, cpu, depth = job.followOnCommands[-1] if memory > memoryAvailable: logger.info("We need more memory for the next job, so finishing") break if cpu > cpuAvailable: logger.info("We need more cpus for the next job, so finishing") break logger.info("Starting the next job") ########################################## #Finish up the stats ########################################## if stats != None: totalCpuTime, totalMemoryUsage = getTotalCpuTimeAndMemoryUsage() stats.attrib["time"] = str(time.time() - startTime) stats.attrib["clock"] = str(totalCpuTime - startClock) stats.attrib["memory"] = str(totalMemoryUsage) tempStatsFile = getTempStatsFile(jobTreePath) fileHandle = open(tempStatsFile + ".new", "w") ET.ElementTree(stats).write(fileHandle) fileHandle.close() os.rename(tempStatsFile + ".new", tempStatsFile) #This operation is atomic logger.info("Finished running the chain of jobs on this node, we ran for a total of %f seconds" % (time.time() - startTime)) ########################################## #Where slave goes wrong ########################################## except: #Case that something goes wrong in slave traceback.print_exc() logger.critical("Exiting
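# ----------------------------------------------------------------------------
# Editor's illustration (not part of the original file): the slave above
# captures stdout/stderr for itself *and* every child process by swapping the
# OS-level file descriptors instead of reassigning sys.stdout. A minimal
# standalone sketch of that pattern (the log path is a placeholder):
import os

log_fd = os.open("slave_log.txt", os.O_WRONLY | os.O_CREAT | os.O_APPEND)
orig_out, orig_err = os.dup(1), os.dup(2)   # keep handles on the real streams
os.dup2(log_fd, 1)                          # fd 1 (stdout) now points at the log
os.dup2(log_fd, 2)                          # fd 2 (stderr) does too
os.close(log_fd)                            # the dup'd descriptors keep the file open
os.system("echo child output also lands in the log")
os.dup2(orig_out, 1)                        # restore the original streams afterwards
os.dup2(orig_err, 2)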
# This class defines key analytical routines for performing a 'gap-analysis' # on EYA-estimated annual energy production (AEP) and that from operational data. # Categories considered are availability, electrical losses, and long-term # gross energy. The main output is a 'waterfall' plot linking the EYA- # estimated and operational-estiamted AEP values. import pandas as pd import numpy as np from tqdm import tqdm import random from operational_analysis.toolkits import met_data_processing from operational_analysis.toolkits import filters from operational_analysis.toolkits.power_curve import functions from operational_analysis.toolkits import imputing from operational_analysis.toolkits import timeseries from operational_analysis import logged_method_call from operational_analysis import logging logger = logging.getLogger(__name__) class TurbineLongTermGrossEnergy(object): """ A serial (Pandas-driven) implementation of calculating long-term gross energy for each turbine in a wind farm. This module collects standard processing and analysis methods for estimating this metric. The method proceeds as follows: 1. Filter turbine data for normal operation 2. Calculate daily means of wind speed, wind direction, and air density from reanalysis products 3. Calculate daily sums of energy from each turbine 4. Fit daily data (features are atmospheric variables, response is turbine power) using a generalized additive model (GAM) 5. Apply model results to long-term atmospheric varaibles to calculate long term gross energy for each turbine A Monte Carlo approach is implemented to repeat the procedure multiple times to get a distribution of results, from which deriving uncertainty quantification for the long-term gross energy estimate. The end result is a table of long-term gross energy values for each turbine in the wind farm. Note that this gross energy metric does not back out losses associated with waking or turbine performance. Rather, gross energy in this context is what turbine would have produced under normal operation (i.e. excluding downtime and underperformance). Required schema of PlantData: - _scada_freq - reanalysis products ['merra2', 'erai', 'ncep2'] with columns ['time', 'u_ms', 'v_ms', 'windspeed_ms', 'rho_kgm-3'] - scada with columns: ['time', 'id', 'wmet_wdspd_avg', 'wtur_W_avg', 'energy_kwh'] """ @logged_method_call def __init__(self, plant, UQ = False, num_sim = 2000): """ Initialize turbine long-term gross energy analysis with data and parameters. Args: plant(:obj:`PlantData object`): PlantData object from which TurbineLongTermGrossEnergy should draw data. UQ:(:obj:`bool`): choice whether to perform ('Y') or not ('N') uncertainty quantification num_sim:(:obj:`int`): number of Monte Carlo simulations. Please note that this script is somewhat computationally heavy so the default num_sim value has been adjusted accordingly. 
""" logger.info("Initializing TurbineLongTermGrossEnergy Object") # Check that selected UQ is allowed if UQ == True: logger.info("Note: uncertainty quantification will be performed in the calculation") self.num_sim = num_sim elif UQ == False: logger.info("Note: uncertainty quantification will NOT be performed in the calculation") self.num_sim = None else: raise ValueError("UQ has to either be True (uncertainty quantification performed, default) or False (uncertainty quantification NOT performed)") self.UQ = UQ self._plant = plant # Set plant as attribute of analysis object self._turbs = self._plant._scada.df['id'].unique() # Store turbine names # Get start and end of POR days in SCADA self._por_start = format(plant._scada.df.index.min(), '%Y-%m-%d') self._por_end = format(plant._scada.df.index.max(), '%Y-%m-%d') self._full_por = pd.date_range(self._por_start, self._por_end, freq = 'D') # Define several dictionaries and data frames to be populated within this method self._scada_dict = {} self._daily_reanal_dict = {} self._model_dict = {} self._model_results = {} self._turb_lt_gross = {} self._scada_daily_valid = pd.DataFrame() # Set number of 'valid' counts required when summing data to daily values self._num_valid_daily = 60. / (pd.to_timedelta(self._plant._scada_freq).seconds/60) * 24 # Initially sort the different turbine data into dictionary entries logger.info("Processing SCADA data into dictionaries by turbine (this can take a while)") self.sort_scada_by_turbine() @logged_method_call def run(self,reanal_subset = ['erai', 'ncep2', 'merra2'], uncertainty_scada=0.005, wind_bin_thresh=(1, 3), max_power_filter=(0.8, 0.9), correction_threshold=(0.85, 0.95), enable_plotting = False, plot_dir = None): """ Perform pre-processing of data into an internal representation for which the analysis can run more quickly. Args: reanal_subset(:obj:`list`): Which reanalysis products to use for long-term correction uncertainty_scada(:obj:`float`): uncertainty imposed to scada data (used in UQ = True case only) max_power_filter(:obj:`tuple`): Maximum power threshold (fraction) to which the bin filter should be applied (default 0.85). This should be a tuple in the UQ = True case, a single value when UQ = False. wind_bin_thresh(:obj:`tuple`): The filter threshold for each bin (default is 2 m/s). This should be a tuple in the UQ = True case, a single value when UQ = False. correction_threshold(:obj:`tuple`): The threshold (fraction) above which daily scada energy data hould be corrected (default is 0.90). This should be a tuple in the UQ = True case, a single value when UQ = False. 
enable_plotting(:obj:`boolean`): Indicate whether to output plots plot_dir(:obj:`string`): Location to save figures Returns: (None) """ # Assign parameters as object attributes self.enable_plotting = enable_plotting self.plot_dir = plot_dir self._reanal = reanal_subset # Reanalysis data to consider in fitting # Check uncertainty types vars = [wind_bin_thresh, max_power_filter, correction_threshold] expected_type = float if self.UQ == False else tuple for var in vars: assert type(var) == expected_type, f"wind_bin_thresh, max_power_filter, correction_threshold must all be {expected_type} for UQ={self.UQ}" # Define relevant uncertainties, to be applied in Monte Carlo sampling self.uncertainty_wind_bin_thresh = np.array(wind_bin_thresh, dtype=np.float64) self.uncertainty_max_power_filter = np.array(max_power_filter, dtype=np.float64) self.uncertainty_correction_threshold = np.array(correction_threshold, dtype=np.float64) if self.UQ == True: self.uncertainty_scada = uncertainty_scada self.setup_inputs() # Loop through number of simulations, store TIE results for n in tqdm(np.arange(self.num_sim)): self._run = self._inputs.loc[n] # MC-sampled parameter in this function! logger.info("Filtering turbine data") self.filter_turbine_data() # Filter turbine data if self.enable_plotting: logger.info("Plotting filtered power curves") self.plot_filtered_power_curves(self.plot_dir) # MC-sampled parameter in this function! logger.info("Processing reanalysis data to daily averages") self.setup_daily_reanalysis_data() # Setup daily reanalysis products # MC-sampled parameter in this function! logger.info("Processing scada data to daily sums") self.filter_sum_impute_scada() # Setup daily scada data logger.info("Setting up daily data for model fitting") self.setup_model_dict() # Setup daily data to be fit using the GAM # MC-sampled parameter in this function! logger.info("Fitting model data") self.fit_model() # Fit daily turbine energy to atmospheric data logger.info("Applying fitting results to calculate long-term gross energy") self.apply_model_to_lt(n) # Apply fitting result to long-term reanalysis data if self.enable_plotting: logger.info("Plotting daily fitted power curves") self.plot_daily_fitting_result(self.plot_dir) # Setup daily reanalysis products # Log the completion of the run logger.info("Run completed") def setup_inputs(self): """ Create and populate the data frame defining the simulation parameters. 
This data frame is stored as self._inputs Args: (None) Returns: (None) """ if self.UQ == True: reanal_list = list(np.repeat(self._reanal, self.num_sim)) # Create extra long list of renanalysis product names to sample from inputs = { "reanalysis_product": np.asarray(random.sample(reanal_list, self.num_sim)), "scada_data_fraction": np.random.normal(1, self.uncertainty_scada, self.num_sim), "wind_bin_thresh": np.random.randint(self.uncertainty_wind_bin_thresh[0]*100, self.uncertainty_wind_bin_thresh[1]*100, self.num_sim) / 100., "max_power_filter": np.random.randint(self.uncertainty_max_power_filter[0]*100, self.uncertainty_max_power_filter[1]*100, self.num_sim) / 100., "correction_threshold": np.random.randint(self.uncertainty_correction_threshold[0]*100, self.uncertainty_correction_threshold[1]*100, self.num_sim) / 100., } self._plant_gross = np.empty([self.num_sim,1]) if self.UQ == False: inputs = { "reanalysis_product": self._reanal, "scada_data_fraction": 1, "wind_bin_thresh": self.uncertainty_wind_bin_thresh, "max_power_filter": self.uncertainty_max_power_filter, "correction_threshold": self.uncertainty_correction_threshold, } self._plant_gross = np.empty([len(self._reanal),1]) self.num_sim = len(self._reanal) self._inputs = pd.DataFrame(inputs) def sort_scada_by_turbine(self): """ Take raw SCADA data in plant object and sort into a dictionary using turbine IDs. Args: (None) Returns: (None) """ df = self._plant._scada.df dic = self._scada_dict # Loop through turbine IDs for t in self._turbs: # Store relevant variables in dictionary dic[t] = df[df['id'] == t].reindex(columns = ['wmet_wdspd_avg', 'wtur_W_avg', 'energy_kwh']) dic[t].sort_index(inplace=True) def filter_turbine_data(self): """ Apply a set of filtering algorithms to the turbine wind speed vs power curve to flag data not representative of normal turbine operation Args: n(:obj:`int`): The Monte Carlo iteration number Returns: (None) """ dic = self._scada_dict # Loop through turbines for t in self._turbs: turb_capac = dic[t].wtur_W_avg.max() max_bin = self._run.max_power_filter * turb_capac # Set maximum range for using bin-filter dic[t].dropna(subset = ['wmet_wdspd_avg', 'energy_kwh'], inplace = True) # Drop any data where scada wind speed or energy is NaN # Flag turbine energy data less than zero dic[t].loc[:,'flag_neg'] = filters.range_flag(dic[t].loc[:, 'wtur_W_avg'], below = 0, above = turb_capac) # Apply range filter dic[t].loc[:,'flag_range'] = filters.range_flag(dic[t].loc[:, 'wmet_wdspd_avg'], below = 0, above = 40) #
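# ----------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file): typical use of the
# class above, assuming `plant` is an already-loaded PlantData object with
# SCADA and reanalysis data attached; the argument values are illustrative.
analysis = TurbineLongTermGrossEnergy(plant, UQ=True, num_sim=500)
analysis.run(reanal_subset=['merra2', 'erai'],
             uncertainty_scada=0.005,
             wind_bin_thresh=(1, 3),              # tuples are required when UQ=True
             max_power_filter=(0.8, 0.9),
             correction_threshold=(0.85, 0.95))
lt_gross = analysis._plant_gross                  # per-simulation long-term gross energy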
219: '마그카르고', 220: '꾸꾸리', 221: '메꾸리', 222: '코산호', 223: '총어', 224: '대포무노', 225: '딜리버드', 226: '만타인', 227: '무장조', 228: '델빌', 229: '헬가', 230: '킹드라', 231: '코코리', 232: '코리갑', 233: '폴리곤2', 234: '노라키', 235: '루브도', 236: '배루키', 237: '카포에라', 238: '뽀뽀라', 239: '에레키드', 240: '마그비', 241: '밀탱크', 242: '해피너스', 243: '라이코', 244: '앤테이', 245: '스이쿤', 246: '애버라스', 247: '데기라스', 248: '마기라스', 249: '루기아', 250: '칠색조', 251: '세레비' }) MOVES = defaultdict(lambda: '?', { 1: '전기쇼크', 2: '전광석화', 3: '할퀴기', 4: '불꽃세례', 5: '덩굴채찍', 6: '몸통박치기', 7: '잎날가르기', 8: '돌진', 9: '물대포', 10: '물기', 11: '막치기', 12: '연속뺨치기', 13: '김밥말이', 14: '파괴광선', 15: '핥기', 16: '악의파동', 17: '스모그', 18: '오물공격', 19: '메탈크로우', 20: '찝기', 21: '화염자동차', 22: '메가폰', 23: '날개치기', 24: '화염방사', 25: '기습', 26: '구멍파기', 27: '안다리걸기', 28: '크로스촙', 29: '사이코커터', 30: '환상빔', 31: '지진', 32: '스톤에지', 33: '냉동펀치', 34: '하트스탬프', 35: '방전', 36: '러스터캐논', 37: '쪼기', 38: '회전부리', 39: '냉동빔', 40: '눈보라', 41: '에어슬래시', 42: '열풍', 43: '더블니들', 44: '독찌르기', 45: '제비반환', 46: '드릴라이너', 47: '꽃보라', 48: '메가드레인', 49: '벌레의야단법석', 50: '독엄니', 51: '깜짝베기', 52: '베어가르기', 53: '거품광선', 54: '지옥의바퀴', 55: '태권당수', 56: '로킥', 57: '아쿠아제트', 58: '아쿠아테일', 59: '씨폭탄', 60: '사이코쇼크', 61: '돌떨구기', 62: '원시의힘', 63: '암석봉인', 64: '스톤샤워', 65: '파워젬', 66: '야습', 67: '섀도펀치', 68: '섀도크루', 69: '괴상한바람', 70: '섀도볼', 71: '불릿펀치', 72: '마그넷봄', 73: '강철날개', 74: '아이언헤드', 75: '파라볼라차지', 76: '스파크', 77: '번개펀치', 78: '번개', 79: '10만볼트', 80: '회오리', 81: '용의숨결', 82: '용의파동', 83: '드래곤크루', 84: '차밍보이스', 85: '드레인키스', 86: '매지컬샤인', 87: '문포스', 88: '치근거리기', 89: '크로스포이즌', 90: '오물폭탄', 91: '오물웨이브', 92: '더스트슈트', 93: '머드숏', 94: '뼈다귀치기', 95: '땅고르기', 96: '진흙폭탄', 97: '연속자르기', 98: '벌레먹음', 99: '시그널빔', 100: '시저크로스', 101: '니트로차지', 102: '불꽃튀기기', 103: '불대문자', 104: '소금물', 105: '물의파동', 106: '열탕', 107: '하이드로펌프', 108: '사이코키네시스', 109: '사이코브레이크', 110: '얼음뭉치', 111: '얼다바람', 112: '얼음숨결', 113: '흡수', 114: '기가드레인', 115: '불꽃펀치', 116: '솔라빔', 117: '리프블레이드', 118: '파워휩', 119: '튀어오르기', 120: '용해액', 121: '에어컷터', 122: '폭풍', 123: '깨트리다', 124: '풀베기', 125: '스피드스타', 126: '뿔찌르기', 127: '짓밟기', 128: '박치기', 129: '필살앞니', 130: '힘껏치기', 131: '누르기', 132: '잠자기', 133: '발버둥', 134: '열탕', 135: '하이드로펌프', 136: '김밥말이', 137: '김밥말이', 200: '연속자르기', 201: '벌레먹음', 202: '물기', 203: '기습', 204: '용의숨결', 205: '전기쇼크', 206: '스파크', 207: '안다리걸기', 208: '태권당수', 209: '불꽃세례', 210: '날개치기', 211: '쪼기', 212: '핥기', 213: '섀도크루', 214: '덩굴채찍', 215: '잎날가르기', 216: '머드숏', 217: '얼음뭉치', 218: '얼음숨결', 219: '전광석화', 220: '할퀴기', 221: '몸통박치기', 222: '막치기', 223: '풀베기', 224: '독찌르기', 225: '용해액', 226: '사이코커터', 227: '돌떨구기', 228: '메탈크로우', 229: '불릿펀치', 230: '물대포', 231: '튀어오르기', 232: '물대포', 233: '진흙뿌리기', 234: '사념의박치기', 235: '염동력', 236: '독침', 237: '거품', 238: '속여때리기', 239: '강철날개', 240: '불꽃엄니', 241: '바위깨기', 242: '변신', 243: '카운터', 244: '눈싸라기', 245: '인파이트', 246: '폭발펀치', 247: '기합구슬', 248: '오로라빔', 249: '차지빔', 250: '볼트체인지', 251: '와일드볼트', 252: '전자포', 253: '드래곤테일', 254: '눈사태', 255: '에어슬래시', 256: '브레이브버드', 257: '불새', 258: '모래지옥', 259: '락블레스트', 260: '엉겨붙기', 261: '벌레의저항', 262: '은빛바람', 263: '놀래키기', 264: '병상첨병', 265: '나이트헤드', 266: '아이언테일', 267: '자이로볼', 268: '헤비봄버', 269: '회오리불꽃', 270: '오버히트', 271: '기관총', 272: '풀묶기', 273: '에너지볼', 274: '신통력', 275: '미래예지', 276: '미러코트', 277: '역린', 278: '바크아웃', 279: '깨물어부수기', 280: '속임수', 281: '잠재파워' }) elif language == 'PT': POKEMON[205] = 'Forrestress' MOVES = defaultdict(lambda: '?', { 1: 'Trovoada de Choques', 2: 'Ataque Rápido', 3: 'Arranhão', 4: 'Brasa', 5: 'Chicote de Vinha', 6: 'Investida', 7: 'Folha Navalha', 8: 'Desmantelar', 9: "Revólver d'Água", 10: 'Mordida', 11: 'Pancada', 12: 'Tapa Duplo', 13: 'Embrulho', 14: 'Hiper-raio', 15: 'Lambida', 16: 'Pul<NAME>', 17: 'Nevoeiro 
de Fumaça', 18: 'Lodo', 19: 'Garra de Metal', 20: 'Agarramento Viciado', 21: 'Roda de Fogo', 22: 'Megachifre', 23: 'Ataque de Asa', 24: 'Lança-chamas', 25: 'Soco Enganador', 26: 'Cavar', 27: 'Rasteira', 28: '<NAME>', 29: 'Corte Psíquico', 30: 'Feixe Psíquico', 31: 'Terremoto', 32: '<NAME>', 33: 'Soco de Gelo', 34: 'Estampa de Coração', 35: 'Descarga', 36: 'Canhão de Flash', 37: 'Bicada', 38: '<NAME>', 39: 'Ra<NAME>', 40: 'Nevasca', 41: 'Golpe de Ar', 42: 'Onda de Calor', 43: 'Agulha Dupla', 44: 'Golpe Envenenado', 45: 'Ás dos Ares', 46: 'Furação', 47: 'Nevasca de Pétalas', 48: 'Megadreno', 49: 'Zumbido de Inseto', 50: 'Presa Venenosa', 51: 'Talho Noturno', 52: 'Talho', 53: 'Jato de Bolhas', 54: 'Submissão', 55: 'Golpe de Caratê', 56: 'Movimento Baixo', 57: 'Aqua Jato', 58: 'Aqua Cauda', 59: 'Bomba de Sementes', 60: 'Choque Psíquico', 61: 'Lançamento de Rocha', 62: 'Poder Ancestral', 63: 'Tumba de Rochas', 64: 'Deslize de Pedras', 65: 'Gema Poderosa', 66: 'Furtividade nas Sombras', 67: 'Soco Sombrio', 68: 'Garra Sombria', 69: 'Vento Ominoso', 70: 'Bola Sombria', 71: 'Soco Projétil', 72: 'Bomba Ímã', 73: 'Asa de Aço', 74: 'Cabeça de Ferro', 75: 'Ataque Parabólico', 76: 'Faísca', 77: 'Soco Trovoada', 78: 'Trovão', 79: 'Relâmpago', 80: 'Twister', 81: 'Sopro do Dragão', 82: 'Pulso do Dragão', 83: 'Garra de Dragão', 84: 'Voz Desarmante', 85: 'Beijo Drenante', 86: 'Clarão Deslumbrante', 87: 'Explosão Lunar', 88: 'Jogo Duro', 89: 'Corte-veneno', 90: 'Bomba de Lodo', 91: 'Onda de Lama', 92: 'Tiro de Sujeira', 93: 'Tiro de Lama', 94: 'Bastão de Osso', 95: 'Tremor', 96: 'Bomba de Lama', 97: 'Cortador de Fúria', 98: 'Picada', 99: 'Feixe Sinalizador', 100: 'Tesoura X', 101: 'Ataque de Chamas', 102: 'Rajada de Chamas', 103: 'Rajada de Fogo', 104: 'Salmoura', 105: "Pulso d'Água", 106: 'Escaldada', 107: "Jato d'Água", 108: 'Psíquico', 109: 'Ataque Psíquico', 110: 'Caco de Gelo', 111: 'Vento Congelante', 112: 'Respiração de Gelo', 113: 'Absorção', 114: 'Gigadreno', 115: 'Soco de Fogo', 116: 'Raio Solar', 117: 'Lâmina de Folha', 118: 'Chicote Poderoso', 119: 'Borrifada', 120: 'Ácido', 121: 'Cortador de Ar', 122: 'Furacão', 123: 'Quebra-telha', 124: 'Cortar', 125: 'Ataque Veloz', 126: 'Ataque de Chifre', 127: 'Pisotear', 128: 'Cabeçada', 129: 'Hiperpresa', 130: 'Pancada Brusca', 131: 'Pancada Corporal', 132: 'Descansar', 133: 'Insistência', 134: 'Escaldada', 135: "Jato d'Água", 136: 'Embrulho', 137: 'Embrulho', 200: 'Cortador de Fúria', 201: 'Picada', 202: 'Mordida', 203: 'Soco Enganador', 204: 'Sopro do Dragão', 205: 'Trovoada de Choques', 206: 'Faísca', 207: 'Rasteira', 208: 'Golpe de Caratê', 209: 'Brasa', 210: 'Ataque de Asa', 211: 'Bicada', 212: 'Lambida', 213: 'Garra Sombria', 214: 'Chicote de Vinha', 215: 'Folha Navalha', 216: 'Tiro de Lama', 217: 'Caco de Gelo', 218: 'Respiração de Gelo', 219: 'Ataque Rápido', 220: 'Arranhão', 221: 'Investida', 222: 'Pancada', 223: 'Cortar', 224: 'Golpe Envenenado', 225: 'Ácido', 226: 'Corte Psíquico', 227: 'Lançamento de Rocha', 228: 'Garra de Metal', 229: 'Soco Projétil', 230: "Revólver d'Água", 231: 'Borrifada', 232: "Revólver d'Água", 233: 'Tapa de Lama', 234: 'Cabeçada Zen', 235: 'Confusão', 236: 'Ferrão Venenoso', 237: 'Bolha', 238: 'Ataque Dissimulado', 239: 'Asa de Aço', 240: 'Presas de Fogo', 241: 'Esmagamento de Pedras', 242: 'Transformação', 243: 'Contra-atacar', 244: 'Neve em Pó', 245: 'Corpo-a-corpo', 246: 'Soco Dinâmico', 247: 'Explosão Focalizada', 248: 'Raio Aurora', 249: 'Carga de Raio', 250: 'Troca Elétrica', 251: 'Ataque Selvagem', 252: 'Canhão 
Zap', 253: 'Cauda do Dragão', 254: 'Avalanche', 255: 'Golpe de Ar', 256: 'Pássaro Bravo', 257: 'Ataque do Céu', 258: 'Fosso de Areia', 259: 'Explosão de Rocha', 260: 'Infestação', 261: 'Ira de Inseto', 262: 'Vento Prateado', 263: 'Abismar', 264: 'Feitiço', 265: 'Sombra Noturna', 266: 'Cauda de Ferro', 267: 'Girobola', 268: 'Golpe Pesado', 269: 'Chama Furacão', 270: 'Superaquecimento', 271: 'Projétil de Semente', 272: 'Nó de Grama', 273: 'Bola de Energia', 274: 'Extrassensorial', 275: 'Visão do Futuro', 276: 'Casaco Espelhado', 277: 'Ultraje', 278: 'Rosnado', 279: 'Mastigada', 280: 'Jogo Sujo', 281: 'Poder Oculto' }) else: raise ValueError('Language must be EN, DE, ES, FR, IT, JA, KO, PT, or ZH. You set {}.'.format(conf.LANGUAGE)) DAMAGE = defaultdict(lambda: '?', { 13: 60, 14: 150, 16: 80, 18: 50, 20: 35, 21: 60, 22: 90, 24: 70, 26: 100, 28: 50, 30: 70, 31: 120, 32: 100, 33: 50, 34: 40, 35: 65, 36: 100, 38: 60, 39: 90, 40: 130, 42: 95, 45: 55, 46: 80, 47: 110, 48: 25, 49: 90, 50: 35, 51: 50, 53: 45, 54: 60, 56: 40, 57: 45, 58: 50, 59: 55, 60: 65, 62: 70, 63: 70, 64: 80, 65: 80, 66: 50, 67: 40, 69: 50, 70: 100, 72: 70, 74:
<reponame>AutoCoinDCF/NEW_API<filename>api/graph/utility/graph_inquiry.py """ SQLGraphAPI utility level: 1.create a sql query 2.call executor to execute sql query and get raw response data 3.simply pre-process raw data (generate frontend-defined response code, extract useful data from raw data) """ import time # from .restful_executor import Executor from api.graph.utility.restful_executor import Executor from api.graph.utility.utility import create_response_dict from api.configs.MiddleEndConfig import CONFIG from api.graph.utility.utility import Match_list import api.configs.dynamic_config as dy_config from api.log.QBLog import QBLog logger = QBLog({ 'LOG_LEVEL': dy_config.LOG_LEVEL }) class UtilityBaseGraphInquiry(object): """ a base class of some basic method to get sql data and simply pre-process raw sqlgraph response data """ def __init__(self, user: str = None, password: str = None, ip: str = '10.60.1.142', port: int or str = 8123, database_name: str = 'default', graph_name: str = "relationGraph", event_graph_database_name: str = 'default', event_graph_graph_name: str = "relationGraph", ): self.database_name = database_name self.graph_name = graph_name self.event_graph_database_name = event_graph_database_name self.event_graph_graph_name = event_graph_graph_name self.executor = Executor(user=user, password=password, ip=ip, port=port, database_name=database_name) self.match_list = Match_list() def get_nodes(self, query: str) -> dict: """ execute sql query and return nodes from SQLgraph format JSON response :param query: sql string :return: a response dict with nodes """ response = self.executor.execute(query, format="JSON") if response.status_code != 200: return create_response_dict(code=-1, message=response.text) else: return create_response_dict(code=0, nodes=response.json()["data"]) def get_nodes_and_links(self, query: str) -> dict: """ execute sql query and return nodes and links from SQLgraph fromat Graph response :param query: sql string :return: a response dict with nodes and links """ response = self.executor.execute(query, format="Graph") if response.status_code != 200: return create_response_dict(code=-1, message=response.text) else: return create_response_dict(code=0, nodes=response.json()["nodes"], links=response.json()["links"]) def post_sqlGraph(self, query: str, insert=False): try: self.executor.execute(query, format="", insert=insert) except Exception as error: logger.error(f'error operating sqlgraph database: {error}') def post_related(self, query: str): try: response = self.executor.execute_related(query) return create_response_dict(code=0, nodes=response["nodes"], links=response["links"]) except Exception as error: logger.error(f'related search the failed error: {error}') return dict(code=-1) class GraphInquiry(UtilityBaseGraphInquiry): """ each utility method to get different data from sqlgraph """ def select_entity_detail_by_id(self, nodeIds: list) -> dict: """ a detailed inquire for entities :param nodeIds: the list of entity id :return: a list of entities id, name, type, image """ query = f""" select *, __v.1 as __v from vertex({self.database_name}.{self.graph_name}) where __v in {f"({str(nodeIds)[1:-1]})"} """ # print("%s %s" % ("select_entity_detail_by_id", query)) return self.get_nodes(query) def select_neighbor_by_id(self, nodeIds: list, className: list = None) -> dict: """ select all neighbors from nodeIds :param nodeIds: a list of nodeId :param className: if not None, neighbors' className should be in list className :return: all neighbors' nodes and links 
""" query = f""" select * from out({self.database_name}.{self.graph_name}, {nodeIds}, 1) {f"where vertexProperty(edge.2.2, 'className') in {className}" if className else ""} union all select {"*"} from in({self.database_name}.{self.graph_name}, {nodeIds}, 1) {f"where vertexProperty(edge.2, 'className') in {className}" if className else ""} """ # print("%s %s" % ("select_neighbor_by_id", query)) return self.get_nodes_and_links(query) def select_in_by_id(self, nodeIds: list, className: list = None) -> dict: """ select in neighbors from nodeIds :param nodeIds: a list of nodeId :param className: if not None, neighbors' className should be in list className :return: in neighbors' nodes and links """ query = f""" select * from in({self.database_name}.{self.graph_name}, {nodeIds}, 1) {f"where vertexProperty(edge.2, 'className') in {className}" if className else ""} """ # print("%s %s" % ("select_in_by_id", query)) return self.get_nodes_and_links(query) def select_out_by_id(self, nodeIds: list, className: list = None) -> dict: """ select out neighbors from nodeIds :param nodeIds: a list of nodeId :param className: if not None, neighbors' className should be in list className :return: out neighbors' nodes and links """ query = f""" select * from out({self.database_name}.{self.graph_name}, {nodeIds}, 1) {f"where vertexProperty(edge.2.2, 'className') in {className}" if className else ""} """ # print("%s %s" % ("select_out_by_id", query)) return self.get_nodes_and_links(query) def select_common_neighbor_by_id(self, nodeIds: list, className: list = None) -> dict: """ select common neighbor of given nodeIds (ignore the direction of edge). :param nodeIds: a list of nodeId :param className: if not None, neighbors' className should be in list className :return: common neighbors' nodes and links """ query = f""" select * from commonneighbors({self.database_name}.{self.graph_name},{nodeIds}) {f"where vertexProperty(edge.2.2, 'className') in {className}" if className else ""} """ # print("%s %s" % ("select_common_neighbor_by_id", query)) return self.get_nodes_and_links(query) def select_path(self, start_node: str, end_node: str, step_num=None) -> dict: """ select path , including from start_node to end_node, and from end_node to start_node :param start_node: one vertex id :param end_node: the other one vertex id :param step_num: the length of the path. 
if None, all paths (ignore length) could be selected :return: paths' nodes and links """ query = f""" select *, vertexProperty(e.1, 'meta_type') as from_meta_type, vertexProperty(e.2, 'meta_type') as to_meta_type from {f"path({self.event_graph_database_name}.{self.event_graph_graph_name}, v('{start_node}'), v('{end_node}'), {step_num})" if step_num else f"path({self.event_graph_database_name}.{self.event_graph_graph_name}, v('{start_node}'), v('{end_node}'))"} union all select *, vertexProperty(e.1, 'meta_type') as from_meta_type, vertexProperty(e.2, 'meta_type') as to_meta_type from {f"path({self.event_graph_database_name}.{self.event_graph_graph_name}, v('{end_node}'), v('{start_node}'), {step_num})" if step_num else f"path({self.event_graph_database_name}.{self.event_graph_graph_name}, v('{end_node}'), v('{start_node}'))"} """ # print("%s %s" % ("select_path", query)) return self.get_nodes_and_links(query) def select_edges_inside_nodes(self, nodeIds: list) -> dict: """ select edges between given nodes :param nodeIds: a list of nodeId :return: nodes and links between these nodes """ query = f""" select *, key(e) as __e, vertexProperty(e.1, 'meta_type') as from_meta_type, vertexProperty(e.2, 'meta_type') as to_meta_type, vertexProperty(e.1, 'Entity_type') as from_entity_type, vertexProperty(e.2, 'Entity_type') as to_entity_type from edge({self.database_name}.{self.graph_name}) where __e.1.1 in {f"({str(nodeIds)[1:-1]})"} and __e.2.1 in {f"({str(nodeIds)[1:-1]})"} """ # print("%s %s" % ("select_edges_inside_nodes", query)) return self.get_nodes_and_links(query) def select_edges_between_two_groups(self, nodeIds1: list, nodeIds2: list) -> dict: """ select edges between two groups of nodeIds :param nodeIds1: a list of nodeId :param nodeIds2: another list of nodeId :return: the input nodes, and links between these two groups """ query = f""" select * from edge({self.database_name}.{self.graph_name}) where __e.1.1 in {f"({str(nodeIds1)[1:-1]})"} and __e.2.1 in {f"({str(nodeIds2)[1:-1]})"} or __e.1.1 in {f"({str(nodeIds2)[1:-1]})"} and __e.2.1 in {f"({str(nodeIds1)[1:-1]})"} """ # print("%s %s" % ("select_edges_between_two_groups", query)) return self.get_nodes_and_links(query) def select_related_out(self, nodeIds: list, type_label: str = None): query = f""" select *, key(e) as edge, vertexProperty(e.1, 'meta_type') as from_meta_type, vertexProperty(e.2, 'meta_type') as to_meta_type, vertexProperty(e.1, 'Entity_type') as from_entity_type, vertexProperty(e.2, 'Entity_type') as to_entity_type from out({self.event_graph_database_name}.{self.event_graph_graph_name}, v({nodeIds}), 1) {f"where vertexProperty(e.2, 'meta_type') = '{type_label}'" if type_label else ""}""" print("%s %s" % ("select_related_out", query)) return self.post_related(query) def short_path_str(self, start_node: str, end_node: str, step: str): query = f""" select *, vertexProperty(e.1, 'meta_type') as from_meta_type, vertexProperty(e.2, 'meta_type') as to_meta_type, vertexProperty(e.1, 'Entity_type') as from_entity_type, vertexProperty(e.2, 'Entity_type') as to_entity_type from allshortestpath({self.event_graph_database_name}.{self.event_graph_graph_name},v('{start_node}'), v('{end_node}'), {step}) """ print("%s %s" % ("short_path_str", query)) return self.get_nodes_and_links(query) def short_path_list(self, start_node: list, end_node: list, step: str): query = f""" select *, vertexProperty(e.1, 'meta_type') as from_meta_type, vertexProperty(e.2, 'meta_type') as to_meta_type, vertexProperty(e.1, 'Entity_type') as from_entity_type, 
vertexProperty(e.2, 'Entity_type') as to_entity_type from allshortestpath({self.event_graph_database_name}.{self.event_graph_graph_name}, v({start_node}), v({end_node}), {step}) """ # print("%s %s" % ("short_path_list", query)) return self.get_nodes_and_links(query) def select_common(self, nodeIds: list, com_label: str = None) -> dict: query = f""" select *, vertexProperty(e.1, 'meta_type') as from_meta_type, vertexProperty(e.2, 'meta_type') as to_meta_type, vertexProperty(e.1, 'Entity_type') as from_entity_type, vertexProperty(e.2, 'Entity_type') as to_entity_type from commonneighbors({self.event_graph_database_name}.{self.event_graph_graph_name}, v({nodeIds}), 2) {f"where vertexProperty(e.2,'meta_type') = '{com_label}'" if com_label in ['entity', 'event', 'document'] else ""} """ # print("%s %s" % ("select_common", query)) return self.get_nodes_and_links(query) def entity_num(self, entity_id): query = f""" select * from out({self.event_graph_database_name}.{self.event_graph_graph_name}, '{entity_id}', 1) union all select * from in({self.event_graph_database_name}.{self.event_graph_graph_name}, '{entity_id}', 1) """ # print("%s %s" % ("entity_num", query)) return self.get_nodes_and_links(query) # 创建graph图集-------------------------------------------------start! def creat_vertex_table(self, vertex_table_name, field, databases_name=None): if databases_name: query_drop_database = f""" drop database if exists {databases_name} """ query_creat_database = f""" create database if not exists {databases_name} """ self.post_sqlGraph(query_drop_database) self.post_sqlGraph(query_creat_database) query_vertex_table = f""" create table if not exists {databases_name}.{vertex_table_name} ({field[0]} String Key) engine=V """ self.post_sqlGraph(query_vertex_table) def creat_edge_table(self, databases_name, edge_table_name, field, rely): query = f""" create table if not exists {databases_name}.{edge_table_name} ( {field[0]} VS({rely[0]}), {field[1]} VD({rely[0]}), {field[2]} UInt32 ) engine=E """ self.post_sqlGraph(query) def insert_vertex_table(self, vertex_table_name, idlist): val = ['(%s)' % id for id in idlist] query_s = f""" insert into {vertex_table_name} values {val} """ query = query_s.replace("[", "").replace("]", "").replace("'(", "('").replace(")'", "')") self.post_sqlGraph(query, insert=True) def insert_edge_table(self, edge_table_name, idlist): val = [(k[0], k[1], k[2]) for k in idlist] query_s = f""" insert into {edge_table_name} values {val} """ query = query_s.replace("[", "").replace("]", "") self.post_sqlGraph(query, insert=True) def creat_graph(self, graph_name, rely_edge_name): query = f""" create symmetric graph {graph_name} populate \ as edgeGroup({rely_edge_name[0]}); """ self.post_sqlGraph(query) self.post_sqlGraph(f'load property {graph_name}') def drop_databases(self, databases_name): self.post_sqlGraph(f'drop database {databases_name}') # --------------------------------------------------------------end! def sql_community(self,
'resource_id': 'resourceId', }, 'location_map': { 'project_id': 'path', 'location_id': 'path', 'agent_id': 'path', 'resource_id': 'path', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__provider_project_agent_resource_get ) def __provider_project_agent_resource_inspect( self, project_id, location_id, agent_id, resource_id, **kwargs ): """Inspect provider/agent.resource # noqa: E501 action inspect # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.provider_project_agent_resource_inspect(project_id, location_id, agent_id, resource_id, async_req=True) >>> result = thread.get() Args: project_id (str): Project Id location_id (str): Location Id agent_id (str): Agent Id resource_id (str): resourceId Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: bool, date, datetime, dict, float, int, list, str, none_type If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['project_id'] = \ project_id kwargs['location_id'] = \ location_id kwargs['agent_id'] = \ agent_id kwargs['resource_id'] = \ resource_id return self.call_with_http_info(**kwargs) self.provider_project_agent_resource_inspect = _Endpoint( settings={ 'response_type': (bool, date, datetime, dict, float, int, list, str, none_type,), 'auth': [ 'BearerAuth' ], 'endpoint_path': '/provider/{locationId}/project/{projectId}/agent/{agentId}/resource/{resourceId}/actions/inspect', 'operation_id': 'provider_project_agent_resource_inspect', 'http_method': 'POST', 'servers': None, }, params_map={ 'all': [ 'project_id', 'location_id', 'agent_id', 'resource_id', ], 'required': [ 'project_id', 'location_id', 'agent_id', 'resource_id', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'project_id': (str,), 'location_id': (str,), 'agent_id': (str,), 'resource_id': (str,), }, 'attribute_map': { 'project_id': 'projectId', 'location_id': 'locationId', 'agent_id': 'agentId', 'resource_id': 'resourceId', }, 'location_map': { 'project_id': 'path', 'location_id': 'path', 'agent_id': 'path', 'resource_id': 'path', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__provider_project_agent_resource_inspect ) def __provider_project_agent_resource_list( self, project_id, location_id, agent_id, **kwargs ): """List provider/agent.resource # noqa: E501 List provider/agent.resource # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.provider_project_agent_resource_list(project_id, location_id, agent_id, async_req=True) >>> result = thread.get() Args: project_id (str): Project Id location_id (str): Location Id agent_id (str): Agent Id Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: [ProviderAgentResource] If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['project_id'] = \ project_id kwargs['location_id'] = \ location_id kwargs['agent_id'] = \ agent_id return self.call_with_http_info(**kwargs) self.provider_project_agent_resource_list = _Endpoint( settings={ 'response_type': ([ProviderAgentResource],), 'auth': [ 'BearerAuth' ], 'endpoint_path': '/provider/{locationId}/project/{projectId}/agent/{agentId}/resource', 'operation_id': 'provider_project_agent_resource_list', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'project_id', 'location_id', 'agent_id', ], 'required': [ 'project_id', 'location_id', 'agent_id', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'project_id': (str,), 'location_id': (str,), 'agent_id': (str,), }, 'attribute_map': { 'project_id': 'projectId', 'location_id': 'locationId', 'agent_id': 'agentId', }, 'location_map': { 'project_id': 'path', 'location_id': 'path', 'agent_id': 'path', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__provider_project_agent_resource_list ) def __provider_project_agent_resource_recreate( self, project_id, location_id, agent_id, resource_id, **kwargs ): """Recreate provider/agent.resource # noqa: E501 action recreate # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.provider_project_agent_resource_recreate(project_id, location_id, agent_id, resource_id, async_req=True) >>> result = thread.get() Args: project_id (str): Project Id location_id (str): Location Id agent_id (str): Agent Id resource_id (str): resourceId Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: ProviderAgentResource If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['project_id'] = \ project_id kwargs['location_id'] = \ location_id kwargs['agent_id'] = \ agent_id kwargs['resource_id'] = \ resource_id return self.call_with_http_info(**kwargs) self.provider_project_agent_resource_recreate = _Endpoint( settings={ 'response_type': (ProviderAgentResource,), 'auth': [ 'BearerAuth' ], 'endpoint_path': '/provider/{locationId}/project/{projectId}/agent/{agentId}/resource/{resourceId}/actions/recreate', 'operation_id': 'provider_project_agent_resource_recreate', 'http_method': 'POST', 'servers': None, }, params_map={ 'all': [ 'project_id', 'location_id', 'agent_id', 'resource_id', ], 'required': [ 'project_id', 'location_id', 'agent_id', 'resource_id', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'project_id': (str,), 'location_id': (str,), 'agent_id': (str,), 'resource_id': (str,), }, 'attribute_map': { 'project_id': 'projectId', 'location_id': 'locationId', 'agent_id': 'agentId', 'resource_id': 'resourceId', }, 'location_map': { 'project_id': 'path', 'location_id': 'path', 'agent_id': 'path', 'resource_id': 'path', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__provider_project_agent_resource_recreate ) def __provider_project_agent_service_get( self, project_id, location_id, agent_id, service_id, **kwargs ): """Get provider/agent.service # noqa: E501 Get provider/agent.service # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.provider_project_agent_service_get(project_id, location_id, agent_id, service_id, async_req=True) >>> result = thread.get() Args: project_id (str): Project Id location_id (str): Location Id agent_id (str): Agent Id service_id (str): serviceId Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that
import logging import json import base64 from django.core.paginator import Paginator # Create your views here. from rest_framework import viewsets, status from rest_framework.decorators import action, detail_route from rest_framework.generics import get_object_or_404 from rest_framework.response import Response from django.http import HttpResponse from core.authors.models import Follow from core.authors.util import get_author_id, get_author_url from core.authors.friends_util import are_friends, get_friends from core.posts.constants import DEFAULT_POST_PAGE_SIZE from core.posts.create_posts_view import handle_posts from core.posts.models import Posts, Comments from core.posts.serializers import PostsSerializer, CommentsSerializer from core.posts.util import can_user_view, can_external_user_view, add_page_details_to_response, merge_posts_with_github_activity, merge_posts from core.github_util import get_github_activity from core.servers.SafeServerUtil import ServerUtil from core.hostUtil import is_external_host logger = logging.getLogger(__name__) COMMENT_NOT_ALLOWED = 'You are not allowed to comment on this post' COMMENT_ADDED = 'Your comment has been added' POST_NOT_VISIBLE = "This post is not visible to the current user" def create_comment(request, pk=None): post = get_object_or_404(Posts, pk=pk) is_server = ServerUtil.is_server(request.user) if (not is_server and not can_user_view(request.user, post)): return Response(status=status.HTTP_403_FORBIDDEN) if post: data = request.data comment = data.get("comment", None) if (isinstance(comment, str)): comment = json.loads(comment) author = comment.get('author', None) author_id = author['id'] try: su = ServerUtil(authorUrl=author_id) if (is_server and not (su.is_valid() and su.should_share_posts() and can_external_user_view(author_id, post))): return Response({ "query": "addComment", "success": False, "message": POST_NOT_VISIBLE, }, status=status.HTTP_403_FORBIDDEN) except Exception as e: print(e) return Response({ "query": "addComment", "success": False, "message": POST_NOT_VISIBLE, }, status=status.HTTP_403_FORBIDDEN) comment['author'] = author_id serializer = CommentsSerializer(data=comment) if serializer.is_valid(): post.comments.create(**serializer.validated_data) return Response({ "query": "addComment", "success": True, "message": COMMENT_ADDED }, status=status.HTTP_200_OK) else: return Response({ "query": "addComment", "success": False, "message": COMMENT_NOT_ALLOWED, }, status=status.HTTP_403_FORBIDDEN) else: return Response(status=status.HTTP_404_NOT_FOUND) def list_comments(request, pk=None): size = int(request.query_params.get("size", 5)) queryPage = int(request.query_params.get('page', 0)) if size < 1 or queryPage < 0 or size > 100: return Response({ "success": False, "message": "The query parameters were invalid", "query": "comments" }, status=status.HTTP_400_BAD_REQUEST) post = get_object_or_404(Posts, pk=pk) if not can_user_view(request.user, post): return Response({ "success": False, "message": "You are not authorized to view this post's comments.", "query": "comments" }, status=status.HTTP_403_FORBIDDEN) comments = Comments.objects.filter(post=post) try: paginator = Paginator(comments, size) page = paginator.page(queryPage + 1) serializer = CommentsSerializer(page, many=True, context={'request': request}) comments_to_return = serializer.data except: comments_to_return = [] data = { "comments": comments_to_return, "query": "comments", "count": len(comments), "size": size } if (len(comments_to_return) > 0): 
add_page_details_to_response(request, data, page, queryPage) return Response(data) class PostsViewSet(viewsets.ModelViewSet): queryset = Posts.objects.filter(visibility="PUBLIC").order_by('-published') serializer_class = PostsSerializer def retrieve(self, request, pk): print("PostsViewSet retrieve:", request, pk) if ServerUtil.is_server(request.user): xUser = request.META.get("HTTP_X_REQUEST_USER_ID") if not xUser: return Response("Foreign node failed to provide required X-Header.", status=400) data = { "author": { "url": xUser } } return self.__do_a_get_post(request.user, data, pk) try: post = Posts.objects.get(pk=pk) except: return Response({ "success": False, "message": "No post was found with that ID", "query": "post" }, status=404) if not can_user_view(request.user, post): return Response({ "success": False, "message": "You are not authorized to view this post.", "query": "post" }, status=status.HTTP_403_FORBIDDEN) serializer = PostsSerializer(post, context={'request': request}) return Response({ "query": "posts", "count": 1, "size": 1, "posts": [serializer.data] }) @action(detail=True, url_path='image', methods=["GET"]) def image(self, request, pk): try: post = Posts.objects.get(pk=pk) except: return Response({ "success": False, "message": "No post was found with that ID", "query": "getImage" }, status=404) if not can_user_view(request.user, post): return Response({ "success": False, "message": "You are not authorized to view this post.", "query": "post" }, status=status.HTTP_403_FORBIDDEN) if (post.visibility == "PUBLIC"): if ("," in post.content): data = post.content.split(",")[1] else: data = post.content data = data.encode() data = base64.b64decode(data) else: data = post.content return HttpResponse(data, content_type=post.contentType.split(";")[0]) def update(self, request, pk): try: post = Posts.objects.get(pk=pk) except: return Response({ "success": False, "message": "No post was found with that ID", "query": "updatePost" }, status=404) if ((not request.user.is_authenticated) or request.user.author != post.author): return Response({ "success": False, "message": "You are not authorized to edit this post.", "query": "updatePost" }, status=status.HTTP_403_FORBIDDEN) return super().update(request, pk) def list(self, request, *args, **kwargs): print("hit posts list endpoint:", request, args, kwargs) size = int(request.query_params.get("size", 5)) queryPage = int(request.query_params.get('page', 0)) if size < 1 or queryPage < 0 or size > 100: return Response({ "success": False, "message": "The query parameters were invalid", "query": "posts" }, status=status.HTTP_400_BAD_REQUEST) try: qs_posts = self.get_queryset().exclude(unlisted=True) paginator = Paginator(qs_posts, size) page = paginator.page(queryPage + 1) serializer = self.get_serializer(page, many=True) pages_to_return = serializer.data except Exception as e: print(e) pages_to_return = [] data = { "posts": pages_to_return, "query": "posts", "count": len(qs_posts), "size": size } if len(pages_to_return) > 0: add_page_details_to_response(request, data, page, queryPage) return Response(data, status=200) def __do_a_get_post(self, user, data, pk): try: post = Posts.objects.get(pk=pk) except: return Response({ "success": False, "message": "No post was found with that ID", "query": "getPost" }, status=404) visibility = post.visibility requestingAuthorUrl = data.get("author", {}).get("url", None) if not requestingAuthorUrl: return Response("You must specify the URL of the author who is requesting the post.", status=400) postAuthorUrl 
= get_author_url(str(post.author.pk)) sUtil = ServerUtil(authorUrl=requestingAuthorUrl) if not sUtil.is_valid(): return Response("Could not find a foreign node matching the reqesting author's url.", status=400) # TODO block pictures or posts based on content type if not sUtil.should_share_posts(): return Response("This node is currently not sharing posts with the requesting foreign node.", status=400) if visibility == "PUBLIC": serializer = PostsSerializer(post) return Response({ "query": "posts", "count": 1, "size": 1, "posts": [serializer.data] }) # If they are direct friends they can still see a FOAF post if visibility == "FRIENDS" or visibility == "FOAF": local, remote_follow = are_friends(postAuthorUrl, requestingAuthorUrl) success, remote = sUtil.check_direct_friendship(requestingAuthorUrl, postAuthorUrl) if not success: return Response("Failed to communicate with external server to check friendship.", status=500) if not remote: remote_follow.delete() elif local: # remote = true, local = true, can respond with post return Response({ "query": "posts", "count": 1, "size": 1, "posts": [serializer.data] }) # If we reach here, we know that they are not direct friends # We need to find all the friends of the post writer # and then ask the remote server if any of those friends are friends with the requesting author if visibility == "FOAF": postAuthorFriends = get_friends(postAuthorUrl) success, foafs = sUtil.check_at_least_one_friend(requestingAuthorUrl, postAuthorFriends) if not success: return Response("Failed to communicate with external server to check foaf-ship.", status=500) if foafs: return Response({ "query": "posts", "count": 1, "size": 1, "posts": [serializer.data] }) if visibility == "PRIVATE": print("UGHHH") return Response({ "query": "posts", "count": 0, "size": 1, "posts": [] }) # For FOAF, test you can actually hand out a post def post(self, request, pk): user = request.user data = request.data if not user.is_authenticated or not ServerUtil.is_server(user): return Response("You must be authenticated as a foreign node to access this endpoint.", status=401) query = request.data.get("query", False) if not query and not query == "getPost": return Response("This endpoint only accepts the 'getPost' query type.", status=400) if not data.get("postid", "") == pk or not data.get("url", "").endswith(pk): return Response("You must ensure that the post IDs and urls match.", status=400) return self.__do_a_get_post(user, data, pk) @action(detail=True, url_path='comments', methods=["GET", "POST"]) def comments(self, request, pk=None): print(request) if request.method == "GET": return list_comments(request, pk=pk) elif ServerUtil.is_server(request.user): print("This is a server") xUser = request.META.get("HTTP_X_REQUEST_USER_ID") postUrl = request.data.get("post", None) if not postUrl: return Response("You failed to specify the 'post' of the query.", 400) pk = postUrl.split("posts/")[1] post = get_object_or_404(Posts, pk=pk) commentData = request.data.get("comment", {}) authorData = commentData.get("author", {}) authorUrl = authorData.get("id", None) if not authorUrl: return Response("You failed to specify the author's id.", 400) serializer = CommentsSerializer(data=commentData) commentData['author'] = authorUrl if serializer.is_valid(): post.comments.create(**serializer.validated_data) return Response("It's created hopefully", 200) else: return Response("Some error who knows", 400) elif request.method == "POST": return create_comment(request, pk=pk) else: return 
Response(status=status.HTTP_405_METHOD_NOT_ALLOWED) @action(detail=True, url_path='update', methods=["POST"]) def update_post(self, request, pk=None): try: post = Posts.objects.get(pk=pk) if (post.author != request.user.author): return Response({ "query": "updatePost", "success": False, "message": "You must be authenticated to update a post" }, status=status.HTTP_403_FORBIDDEN) Posts.objects.filter(pk=pk).update(**json.loads(request.data["postData"])) return Response({ "query": "updatePost", "success": True, "message": "Your post has been updated." }) except Exception as e: return Response({ "query": "updatePost", "success": False, "message": str(e) }, status=400) @action(detail=False, url_path='createExternalComment', methods=["POST"]) def create_external_comment(self, request): if (not request.user.is_authenticated): return Response({ "query": "createExternalComment", "message": "You must be authenticated", "success": False }, status=403) try: postUrl = request.data["postUrl"] authorUrl = get_author_url(str(request.user.author.pk)) sUtil = ServerUtil(postUrl=postUrl) if not sUtil.is_valid(): return Response("No foreign node with the base url: "+postUrl, status=404) data = request.data comment = data.get("comment", None) if (isinstance(comment, str)): comment = json.loads(comment) success, res = sUtil.create_comment(postUrl.split("/posts/")[1], authorUrl, comment, postUrl) if not success: return Response("Failed to post foreign comment: "+postUrl, status=500) return Response(res) except Exception
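# --- request sketch (not part of the original views) ------------------------
# list_comments above reads "size" (1-100) and "page" (>= 0) from the query
# string and pages with Django's Paginator; create_comment expects a "comment"
# object whose author field carries the author's URL as its "id".  The URL
# prefix the router mounts PostsViewSet under and the exact CommentsSerializer
# fields are not shown in this fragment, so "/posts/" and the inner payload
# below are assumptions used only for illustration.
from rest_framework.test import APIClient

client = APIClient()
post_id = "00000000-0000-0000-0000-000000000000"   # placeholder post id
# page 0, five comments per page
resp = client.get(f"/posts/{post_id}/comments/", {"size": 5, "page": 0})
# adding a comment as an authenticated local user (field names assumed)
resp = client.post(
    f"/posts/{post_id}/comments/",
    {"comment": {"comment": "Nice post!",
                 "author": {"id": "https://example.invalid/author/abc"}}},
    format="json",
)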
import random import feedparser import time from DiscordCharacters import WoDCharacter import os.path from discord import message def splitstr(text,length): return [text[i:i+length] for i in range(0, len(text), length)] # list of feeds to pull down rss_feed_list = [ "https://www.reddit.com/r/WhiteWolfRPG/new.rss" , "http://theonyxpath.com/feed/" , "https://www.twitrss.me/twitter_user_to_rss/?user=theonyxpath" ] DiceLimit = 50 #Arbitrary UpdateFrequency = 1800 #rss update frequency, in seconds R20BNServer = '239041359384805377' Schrecknet = '272633168178446337' Default_Channel = '239041359384805377' Bot_Update_Channel = '277843135193677824' Sheets_Channel = '293805465647841280' # '276364607906643968' # Deprecated? Announce_Channel = '239041359384805377' #'239050929716985856' # Dammit badger! :P Gamelist_Channel = '270962496653885451' Application_Channel = '277689369488523264' Appquestion_Channel = '277689512992309250' Voting_Channel = '278873402209468416' Character_List = '293820268449628161' # This is a message ID! rss_chan = ['271771293739778058', '270382116322148353'] class Bot(): def __init__(self): self.last_updated = time.time()-UpdateFrequency self.sheets = {} for filename in os.listdir('../sheets'): if filename.endswith(".txt"): f = open('../sheets/'+filename) self.sheets[filename.replace('.txt','')] = WoDCharacter(f.read()) f.close() def log(self,message): logfile = open('log.txt','a+') logtext = str(message.timestamp) + ' ' + str(message.author) + ' [' + str(message.channel) + '] ' + message.content + '\n' logfile.write(logtext) logfile.close() def exploderoll(self, faces): results = [] roll = random.randrange(1,faces+1) results.append(roll) if roll==faces: results.append(self.exploderoll(faces)) return results def rolldice(self, amount, faces, diff = None, botch = None, explode = False, modifier = 0, doubler = False): response = "(" total = 0 roll_results = [] if explode: # Exploding die rolled recursively, like this: for _ in range(amount): for result in self.exploderoll(faces): roll_results.append(result) else: # Non-exploding dice rolled like this! 
for _ in range(amount): roll_results.append(random.randrange(1,faces+1)) if diff is not None: success = 0 for result in roll_results: if result>=diff: if doubler and result==faces: success += 2 response = response + "__" + str(result) + "__ " else: response = response + str(result) + " " success += 1 elif botch is not None and result <= botch: response = response + "**" + str(result) + "** " success -= 1 else: response = response + "~~" + str(result) + "~~ " response = response + ", " + str(success) + " successes)" total = success else: for result in roll_results: response = response + str(result) + " " total += result total += modifier if modifier == 0: modifier = "" return response + ") " + str(modifier) + " = " + str(total) def parse_dieroll(self,exp,target=None): try: if target!=None: target += ": `" else: target = "" comment = "" amount = 0 faces = 0 diff = None botch = None explode = False doubler = False modifier = 0 the_command = "" the_command = exp.replace('!roll ','').replace('!r ','') if the_command.find('#') != -1: both = the_command.partition('#') the_command = both[0] comment = both[2] the_command = the_command.replace(' ','') parsed = the_command # print("DEBUG: Full message is " + message.content) # print("DEBUG: " + parsed) if parsed.find('s') != -1: both = parsed.split('s') doubler = True parsed = both[0] if parsed.find('-') != -1: both = parsed.split('-') modifier -= int(both[1]) parsed = both[0] if parsed.find('+') != -1: both = parsed.split('+') modifier += int(both[1]) parsed = both[0] if parsed.find('f') != -1: both = parsed.split('f') botch = int(both[1]) parsed = both[0] if parsed.find('>=') != -1: both = parsed.split('>=') diff = int(both[1]) parsed = both[0] if parsed.find('>') != -1: both = parsed.split('>') diff = int(both[1])+1 parsed = both[0] if parsed.find('d') != -1: both = parsed.split('d') faces = int(both[1]) amount = int(both[0]) else: return "I don't see what I should roll." return target + the_command + '`' + comment + ' = ' + self.rolldice(amount, faces, diff, botch, explode, modifier, doubler) except Exception as e: print("DEBUG: Couldn't read roll [malformed] : " + str(e)) return "I didn't understand this roll request." 
pass def dice(self, message): self.log(message) the_command = message.content.replace('!roll ','').replace('!r ','') return self.parse_dieroll(the_command, message.author.mention) def check_role_sufficiency(self,member,role): roles = member.server.role_hierarchy roles.reverse() targetrole = None for r in roles: if r.name == str(role) : targetrole = r if targetrole == None: return None elif targetrole <= member.top_role: return True else: return False def find_role(self,server,role): roles = server.role_hierarchy roles.reverse() targetrole = None for r in roles: if r.name == str(role) : targetrole = r return targetrole def schrecknetpost(self, message): self.log(message) schmsg = message.content.partition(' ')[2] schname = "**" + schmsg.partition(' ')[0].replace('*','') + ":** " schmsg = schmsg.partition(' ')[2] return Schrecknet, schname+schmsg def give_role(self, message): self.log(message) if message.server == None: return "Please use this command on the server.", None try: target = message.mentions[0] except: return "I don't see who I should promote/demote.", None try: parts = message.content.split(' ') target_role = int(parts[2]) except: return "The role to be given seems invalid.", None roles = message.server.role_hierarchy roles.reverse() AST = None for role in roles: if role.name == "Assistant Storyteller" : AST = role if AST == None : return "It seems the Assistant Storyteller role no longer exists???", None if message.author.top_role<AST: return "Only staff can promote/demote.", None if message.author.top_role<=roles[target_role]: return "You cannot promote/demote to your top role or higher", None # Here we know the requester has the rights to promote the requestee return roles[target_role], target def create_character(self,message): self.log(message) parts = message.content.split(' ') if len(parts)<2: return "Name missing.", None name = parts[1].lower() if os.path.isfile("../sheets/"+name+".txt"): return "This name is already taken. 
Pick another!", None character = WoDCharacter(name,message.author.id) if len(parts)>2: character.template(parts[2].capitalize()) self.character_update(character) self.sheets[name] = character return "Character created successfully.", name.capitalize() def character_update(self,character): charfile = open('../sheets/'+character.name+'.txt','w') charfile.write(str(character)) charfile.close() def character_log(self,character,message,response): charlog = open('../sheets/'+character.name+'.log','a') charlog.write(str(message.timestamp) + ' ' + str(message.author) + ' [' + str(message.channel) + '] ' + message.content + '\n') charlog.write("[RESPONSE] " + response + "\n") charlog.close() def character_handling_st(self,message, R20server=None): response, privacy = self.character_handling(message,R20server) try: name = message.content.split(' ')[1] character = self.sheets[name] st = character.st return response, st except: pass return response, None def extract_label(self,sheetpiece): piece = sheetpiece.partition(' ')[0] labelmark = None if "\"" in piece: labelmark = "\"" elif "'" in piece: labelmark = "'" if labelmark is not None: pieces = sheetpiece.split(labelmark) label = pieces[1] del pieces[0] del pieces[0] value = labelmark.join(pieces) else: label = piece value = sheetpiece.partition(' ')[2] return label, value def character_handling(self,message, R20server=None): self.log(message) parts = message.content.split(' ') private = False del parts[0] if len(parts)<1: return "Name missing.", private name = parts[0].lower() del parts[0] if len(parts)<1: return "Command missing.", private command = parts[0].lower() del parts[0] character = None for sheet in self.sheets.keys(): if sheet == name: character = self.sheets[sheet] if character == None: return "There is no character with that name.", private silly = R20server.get_member(message.author.id) if message.author.id != character.owner and True != self.check_role_sufficiency(silly, "Assistant Storyteller"): return "You are neither the owner of this character, nor Staff.", private sheet_object = None if len(parts)>=1: sheet_object = " ".join(parts) response = "There was some error." if command=="set": try: thing, value = self.extract_label(sheet_object) thing = thing.capitalize() except: return "There is something wrong with your labels.", private if thing == "Name": return "Character renaming is not currently supported.", private # if os.path.isfile("../sheets/"+value+".txt"): # return "This name is already taken. Pick another!", private # character.name = value # os.rename("../sheets/"+name+".txt") # # Some action to change the character list? # Return "Character renamed." elif thing == "Owner": return "Character owner transfer is not currently supported.", private elif thing == "St" or thing == "Storyteller": try: character.st = message.mentions[0].id except: return "Please mention the ST you wish to set for this character.", private response = "Storyteller set!" else: response = character.set_property(thing,value) self.character_update(character) self.character_log(character, message, response) elif command=="max": try: thing, value = self.extract_label(sheet_object) thing = thing.capitalize() except: return "There is something wrong with your labels.", private try: character.set_resource_capacity(thing,value) self.character_update(character) response = "New maximum for " + sheet_object.split(' ')[0] + " set on " + character.name.capitalize() + "!" self.character_log(character, message, response) except: return "Could not set capacity. 
Check spelling or report an error!", private elif command=="get" or command=="show": if sheet_object.lower() == "sheet": private = True return character.display(), private if sheet_object.lower() == "health": return character.show_health(), private if sheet_object.lower() == "status": return character.status(), private return character.get_property(sheet_object.strip("'\"")), private elif command=="use": try: thing, value = self.extract_label(sheet_object) thing = thing.capitalize() except: return "There is something wrong with your labels.", private if len(value)>=1: try: response = character.consume_resource(thing, value) except: return "Error consuming this resource.",
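# --- roll syntax sketch (not part of the original bot) ----------------------
# parse_dieroll above accepts classic World of Darkness style expressions:
#   <amount>d<faces> [>=<difficulty> | ><difficulty>] [f<botch>] [+N|-N] [s] [#comment]
# e.g. "!roll 7d10>=6f1s#Dexterity + Brawl" rolls seven d10s, counts results of
# 6 or higher as successes (10s counted twice because of the trailing "s"),
# subtracts a success for each result at or below the botch threshold 1, and
# echoes the text after "#" back as a comment.
# Assuming "bot" is an already constructed Bot instance (its __init__ reads the
# ../sheets directory, which this fragment does not ship):
# print(bot.parse_dieroll("!roll 7d10>=6f1s#Dexterity + Brawl", target="@player"))
# print(bot.parse_dieroll("!r 3d6+2"))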
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals, print_function import sys import numpy as np import scipy.optimize import matplotlib.pyplot as plt import cv2 import ellipse DEBUG_IMAGES = [] def debug_show(name, src): global DEBUG_IMAGES filename = 'debug{:02d}_{}.png'.format(len(DEBUG_IMAGES), name) cv2.imwrite(filename, src) h, w = src.shape[:2] fx = w/1280.0 fy = h/700.0 f = 1.0/np.ceil(max(fx, fy)) if f < 1.0: img = cv2.resize(src, (0, 0), None, f, f, cv2.INTER_AREA) else: img = src.copy() DEBUG_IMAGES.append(img) def translation(x, y): return np.array([[1, 0, x], [0, 1, y], [0, 0, 1]], dtype=float) def rotation(theta): c = np.cos(theta) s = np.sin(theta) return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=float) def perspective_warp(a, b): return np.array([[1, 0, 0], [0, 1, 0], [a, b, 1]], dtype=float) def slant(sx): return np.array([[1, sx, 0], [0, 1, 0], [0, 0, 1]], dtype=float) def softmax(x, k=1.0): b = x.max() return np.log( np.exp(k*(x-b)).sum() ) / k + b def skewed_widths(contours, H): xvals = [] for c in contours: pts = cv2.perspectiveTransform(c, H) x = pts[:,:,0] xvals.append( x.max() - x.min() ) xvals = np.array(xvals) return softmax(xvals, 0.1) def centered_warp(u0, v0, a, b): return np.dot(translation(u0, v0), np.dot(perspective_warp(a, b), translation(-u0, -v0))) def warp_containing_points(img, pts, H, border=4, shape_only=False): ''' display = img.copy() for pt in pts.reshape((-1,2)).astype(int): cv2.circle(display, tuple(pt), 4, (255, 0, 0), -1, cv2.LINE_AA) debug_show('warp', display) ''' pts2 = cv2.perspectiveTransform(pts, H) x0, y0, w, h = cv2.boundingRect(pts2) print('got bounding rect', x0, y0, w, h) T = translation(-x0+border, -y0+border) TH = np.dot(T, H) if shape_only: return (h+2*border, w+2*border), TH else: dst = cv2.warpPerspective(img, TH, (w+2*border, h+2*border), borderMode=cv2.BORDER_REPLICATE) return dst, TH def conic_area_discrepancy(conics, x, H, opt_results=None): areas = [] for conic in conics: cx = ellipse.conic_transform(conic, H) k, ab = ellipse.conic_scale(cx) if np.isinf(ab): areas.append(1e20) else: areas.append(ab) areas = np.array(areas) areas /= areas.mean() # rescale so mean is 1.0 areas -= 1 # subtract off mean rval = 0.5*np.dot(areas, areas) if opt_results is not None: if not opt_results or rval < opt_results[-1][-1]: opt_results.append( (x, H, rval) ) return rval def threshold(img): if len(img.shape) > 2: img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) mean = img.mean() if mean < 100: img = 255-img return cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 101, 21) def get_contours(img): work = threshold(img) debug_show('threshold', work) contours, hierarchy = cv2.findContours(work, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) return contours, hierarchy def get_conics(img, contours, hierarchy, abs_area_cutoff=0.0001, mean_area_cutoff=0.15): hierarchy = hierarchy.reshape((-1, 4)) conics = [] used_contours = [] areas = [] okcontours = [] allchildren = [] pts = np.empty((0,1,2), dtype='float32') centroid_accum = np.zeros(2) total_area = 0.0 centroids = [] abs_area_cutoff *= img.shape[0] * img.shape[1] print('abs_area_cutoff = ',abs_area_cutoff) for i, (c, h) in enumerate(zip(contours, hierarchy.reshape((-1, 4)))): next_idx, prev_idx, child_idx, parent_idx = h if parent_idx >= 0: continue m = ellipse.moments_from_dict(cv2.moments(c)) if m[0] <= abs_area_cutoff: continue children = [] while child_idx >= 0: child_contour = contours[child_idx] cm = cv2.moments(child_contour) 
if cm['m00'] > abs_area_cutoff: children.append(child_contour) allchildren.append(child_contour) child_idx = hierarchy[child_idx][0] if children: work = np.zeros(img.shape[:2], dtype=np.uint8) cv2.drawContours(work, contours, i, (1,1,1), -1) cv2.drawContours(work, children, -1, (0,0,0), -1) m = ellipse.moments_from_dict(cv2.moments(work, True)) centroids.append(m[1:3]/m[0]) centroid_accum += m[1:3] total_area += m[0] pts = np.vstack((pts, c.astype('float32'))) conic = ellipse.conic_from_moments(m) okcontours.append(c) conics.append(conic) areas.append(m[0]) display = img.copy() cv2.drawContours(display, okcontours+allchildren, -1, (0, 255, 0), 6, cv2.LINE_AA) debug_show('contours_only', display) for c, a in zip(okcontours, areas): x, y, w, h = cv2.boundingRect(c) s = str('{:,d}'.format(int(a))) #ctr = (x + w/2 - 15*len(s), y+h/2+10) ctr = (x, y+h+20) cv2.putText(display, s, ctr, cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 0, 0), 12, cv2.LINE_AA) cv2.putText(display, s, ctr, cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 255, 0), 6, cv2.LINE_AA) debug_show('contours', display) areas = np.array(areas) amean = areas.mean() print('got {} contours with {} small.'.format( len(areas), (areas < mean_area_cutoff*amean).sum())) idx = np.where(areas > mean_area_cutoff*amean)[0] conics = np.array(conics) conics = conics[idx] centroid_accum /= total_area display = img.copy() for conic in conics: x0, y0, a, b, theta = ellipse.gparams_from_conic(conic) cv2.ellipse(display, (int(x0), int(y0)), (int(a), int(b)), theta*180/np.pi, 0, 360, (0,0,255), 6, cv2.LINE_AA) debug_show('conics', display) contours = [okcontours[i].astype('float32') for i in idx] if 0: centroids = np.array([centroids[i] for i in idx]) areas = areas[idx] def polyfit(x, y): coeffs = np.polyfit(x, y, deg=1) ypred = np.polyval(coeffs, x) ymean = np.mean(y) sstot = np.sum((y - ymean)**2) ssres = np.sum((y.flatten() - ypred.flatten())**2) r2 = 1 - ssres/sstot return coeffs, r2 xfit, xr2 = polyfit(centroids[:,0], areas) yfit, yr2 = polyfit(centroids[:,1], areas) xlabel = 'X coordinate (r²={:.2f})'.format(xr2) ylabel = 'Y coordinate (r²={:.2f})'.format(yr2) plt.plot(centroids[:,0], areas, 'b.', zorder=1) plt.plot(centroids[:,1], areas, 'r.', zorder=1) plt.gca().autoscale(False) plt.plot([0, 3000], np.polyval(xfit, [0,3000]), 'b--', zorder=0, label=xlabel) plt.plot([0, 3000], np.polyval(yfit, [0,3000]), 'r--', zorder=0, label=ylabel) plt.legend(loc='upper right') plt.xlabel('X/Y coordinate (px)') plt.ylabel('Contour area (px²)') plt.savefig('position-vs-area.pdf') return conics, contours, centroid_accum def optimize_conics(conics, p0): x0 = np.array([0.0, 0.0]) hfunc = lambda x: centered_warp(p0[0], p0[1], x[0], x[1]) opt_results = [] f = lambda x: conic_area_discrepancy(conics, x, hfunc(x), opt_results) res = scipy.optimize.minimize(f, x0, method='Powell') H = hfunc(res.x) rects = [] if 0: phi = np.linspace(0, 2*np.pi, 16, endpoint=False) width, height = 0, 0 for x, H, fval in opt_results: allxy = [] for conic in conics: Hconic = ellipse.conic_transform(conic, H) gparams = ellipse.gparams_from_conic(Hconic) x, y = ellipse.gparams_evaluate(gparams, phi) xy = np.dstack((x.reshape((-1, 1, 1)), y.reshape((-1, 1, 1)))) allxy.append(xy) allxy = np.vstack(tuple(allxy)).astype(np.float32) rect = cv2.boundingRect(allxy) rects.append(rect) x, y, w, h = rect width = max(width, w) height = max(height, h) border = int(0.05 * min(width, height)) width += border height += border aspect = float(width)/height if aspect < 2.0: width = 2*height else: height = width/2 for i, (rect, 
(x, H, fval)) in enumerate(zip(rects, opt_results)): display = np.zeros((height, width), dtype=np.uint8) x, y, w, h = rect xoffs = width/2 - (x+w/2) yoffs = height/2 - (y+h/2) for conic in conics: Hconic = ellipse.conic_transform(conic, H) x0, y0, a, b, theta = ellipse.gparams_from_conic(Hconic) cv2.ellipse(display, (int(x0+xoffs), int(y0+yoffs)), (int(a), int(b)), theta*180/np.pi, 0, 360, (255,255,255), 6, cv2.LINE_AA) cv2.putText(display, 'Area discrepancy: {:.3f}'.format(fval), (16, height-24), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (255,255,255), 6, cv2.LINE_AA) cv2.imwrite('frame{:04d}.png'.format(i), display) return H def orientation_detect(img, contours, H, rho=8.0, ntheta=512): # ignore this, just deal with edge-detected text pts = np.vstack(tuple(contours)) shape, TH = warp_containing_points(img, pts, H, shape_only=True) text_edges = np.zeros(shape, dtype=np.uint8) for contour in contours: contour = cv2.perspectiveTransform(contour.astype(np.float32), TH) cv2.drawContours(text_edges, [contour.astype(int)], 0, (255,255,255)) debug_show('edges', text_edges) # generate a linspace of thetas thetas = np.linspace(-0.5*np.pi, 0.5*np.pi, ntheta, endpoint=False) # rho is pixels per r bin in polar (theta, r) histogram # irho is bins per pixel irho = 1.0/rho # get height and width h, w = text_edges.shape # maximum bin index is given by hypotenuse of (w, h) divided by pixels per bin bin_max = int(np.ceil(np.hypot(w, h)*irho)) # initialize zeroed histogram height bin_max and width num theta hist = np.zeros((bin_max, ntheta)) # let u and v be x and y coordinates (respectively) of non-zero # pixels in edge map v, u = np.mgrid[0:h, 0:w] v = v[text_edges.view(bool)] u = u[text_edges.view(bool)] # get center coordinates u0 = w*0.5 v0 = h*0.5 # for each i and theta = thetas[i] for i, theta in enumerate(thetas): # for each nonzero edge pixel, compute bin in r direction from # pixel location and cos/sin of theta bin_idx = ( (-(u-u0)*np.sin(theta) # x term + (v-v0)*np.cos(theta))*irho # y term, both # divided by pixels # per bin + 0.5*bin_max ) # offset for center pixel assert( bin_idx.min() >= 0 and bin_idx.max() < bin_max ) # 0.5 is for correct rounding here # # e.g. np.bincount([1, 1, 0, 3]) = [1, 2, 0, 1] # returns count of each integer in the array bc = np.bincount((bin_idx + 0.5).astype(int)) # push this into the histogram hist[:len(bc),i] = bc # number of zero pixels in each column num_zero = (hist == 0).sum(axis=0) # find the maximum number of zero pixels best_theta_idx = num_zero.argmax() # actual detected theta - could just return this now theta = thetas[best_theta_idx] # compose with previous homography RH = np.dot(rotation(-theta), H) if 1: # just debug visualization debug_hist = (255*hist/hist.max()).astype('uint8') debug_hist = cv2.cvtColor(debug_hist, cv2.COLOR_GRAY2RGB) cv2.line(debug_hist, (best_theta_idx, 0), (best_theta_idx, bin_max), (255,0,0), 1, cv2.LINE_AA) debug_show('histogram', debug_hist) p0 = np.array((u0, v0)) t = np.array((np.cos(theta), np.sin(theta))) warped = cv2.warpPerspective(img, TH, (shape[1], shape[0]), borderMode=cv2.BORDER_REPLICATE) debug_show('prerotate_noline', warped) cv2.line(warped,
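# --- pipeline sketch (the original main() is not part of this fragment) -----
# The helpers above compose into a simple page-dewarping pipeline: threshold the
# image, find letter contours, fit a conic (ellipse) to each blob, solve for the
# two perspective parameters that make the blob areas as uniform as possible,
# then rotate so text lines run horizontally.  orientation_detect's return
# statement is cut off above; it is assumed here to return the composed
# homography RH.  An assumed composition, not the author's actual driver code:
def dewarp_sketch(filename):
    img = cv2.imread(filename)
    contours, hierarchy = get_contours(img)
    conics, text_contours, centroid = get_conics(img, contours, hierarchy)
    H = optimize_conics(conics, centroid)           # undo keystone/perspective
    RH = orientation_detect(img, text_contours, H)  # add the de-skewing rotation
    pts = np.vstack(tuple(text_contours))
    warped, _ = warp_containing_points(img, pts, RH)
    cv2.imwrite('dewarped_' + filename, warped)
    return warped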
# [A separate, truncated test file for find_shortest_sub_array2 begins here.
#  The original text was dominated by two assert calls over very long literal
#  arrays padded with hundreds of repeated 42s -- the first closing with
#  `]), 885`, the last cut off mid-array -- condensed to this note.
#  The three short asserts below are kept verbatim.]
assert find_shortest_sub_array2([1, 2, 2, 1, 2, 1, 1, 1, 1, 2, 2, 2]), 9  # 2
assert find_shortest_sub_array2([1, 3, 2, 2, 3, 1]), 2  # 2
assert find_shortest_sub_array2([1, 2, 2, 3, 1]), 2  # 2
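# Illustrative sketch: the asserts above exercise a "shortest subarray with the
# same degree" routine whose implementation is not part of this excerpt. Below
# is a minimal standalone version of the usual approach (first/last occurrence
# plus counts); the name matches the tests but the body is an assumption, not
# the original code. Note also that `assert f(x), n` treats n as the assertion
# *message*, so the original asserts only check that the result is truthy.
from collections import defaultdict

def find_shortest_sub_array2(nums):
    """Length of the shortest contiguous subarray with the same degree as nums."""
    first, last, count = {}, {}, defaultdict(int)
    for i, x in enumerate(nums):
        first.setdefault(x, i)
        last[x] = i
        count[x] += 1
    degree = max(count.values())
    return min(last[x] - first[x] + 1 for x in count if count[x] == degree)

assert find_shortest_sub_array2([1, 2, 2, 1, 2, 1, 1, 1, 1, 2, 2, 2]) == 9
assert find_shortest_sub_array2([1, 3, 2, 2, 3, 1]) == 2
assert find_shortest_sub_array2([1, 2, 2, 3, 1]) == 2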
/ (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N))) g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m g2 *= (M-1)*M*M g2 += 6.*n*N*(M-N)*m*(5.*M-6) g2 /= n * N * (M-N) * m * (M-2.) * (M-3.) return mu, var, g1, g2 def _entropy(self, M, n, N): k = np.r_[N - (M - n):min(n, N) + 1] vals = self.pmf(k, M, n, N) return np.sum(entr(vals), axis=0) def _sf(self, k, M, n, N): # This for loop is needed because `k` can be an array. If that's the # case, the sf() method makes M, n and N arrays of the same shape. We # therefore unpack all inputs args, so we can do the manual # integration. res = [] for quant, tot, good, draw in zip(k, M, n, N): # Manual integration over probability mass function. More accurate # than integrate.quad. k2 = np.arange(quant + 1, draw + 1) res.append(np.sum(self._pmf(k2, tot, good, draw))) return np.asarray(res) def _logsf(self, k, M, n, N): res = [] for quant, tot, good, draw in zip(k, M, n, N): if (quant + 0.5) * (tot + 0.5) < (good - 0.5) * (draw - 0.5): # Less terms to sum if we calculate log(1-cdf) res.append(log1p(-exp(self.logcdf(quant, tot, good, draw)))) else: # Integration over probability mass function using logsumexp k2 = np.arange(quant + 1, draw + 1) res.append(logsumexp(self._logpmf(k2, tot, good, draw))) return np.asarray(res) def _logcdf(self, k, M, n, N): res = [] for quant, tot, good, draw in zip(k, M, n, N): if (quant + 0.5) * (tot + 0.5) > (good - 0.5) * (draw - 0.5): # Less terms to sum if we calculate log(1-sf) res.append(log1p(-exp(self.logsf(quant, tot, good, draw)))) else: # Integration over probability mass function using logsumexp k2 = np.arange(0, quant + 1) res.append(logsumexp(self._logpmf(k2, tot, good, draw))) return np.asarray(res) hypergeom = hypergeom_gen(name='hypergeom') # FIXME: Fails _cdfvec class logser_gen(rv_discrete): r"""A Logarithmic (Log-Series, Series) discrete random variable. %(before_notes)s Notes ----- The probability mass function for `logser` is: .. math:: f(k) = - \frac{p^k}{k \log(1-p)} for :math:`k \ge 1`. `logser` takes :math:`p` as shape parameter. %(after_notes)s %(example)s """ def _rvs(self, p): # looks wrong for p>0.5, too few k=1 # trying to use generic is worse, no k=1 at all return self._random_state.logseries(p, size=self._size) def _argcheck(self, p): return (p > 0) & (p < 1) def _pmf(self, k, p): # logser.pmf(k) = - p**k / (k*log(1-p)) return -np.power(p, k) * 1.0 / k / special.log1p(-p) def _stats(self, p): r = special.log1p(-p) mu = p / (p - 1.0) / r mu2p = -p / r / (p - 1.0)**2 var = mu2p - mu*mu mu3p = -p / r * (1.0+p) / (1.0 - p)**3 mu3 = mu3p - 3*mu*mu2p + 2*mu**3 g1 = mu3 / np.power(var, 1.5) mu4p = -p / r * ( 1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4) mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4 g2 = mu4 / var**2 - 3.0 return mu, var, g1, g2 logser = logser_gen(a=1, name='logser', longname='A logarithmic') class poisson_gen(rv_discrete): r"""A Poisson discrete random variable. %(before_notes)s Notes ----- The probability mass function for `poisson` is: .. math:: f(k) = \exp(-\mu) \frac{\mu^k}{k!} for :math:`k \ge 0`. `poisson` takes :math:`\mu` as shape parameter. %(after_notes)s %(example)s """ # Override rv_discrete._argcheck to allow mu=0. def _argcheck(self, mu): return mu >= 0 def _rvs(self, mu): return self._random_state.poisson(mu, self._size) def _logpmf(self, k, mu): Pk = special.xlogy(k, mu) - gamln(k + 1) - mu return Pk def _pmf(self, k, mu): # poisson.pmf(k) = exp(-mu) * mu**k / k! 
return exp(self._logpmf(k, mu)) def _cdf(self, x, mu): k = floor(x) return special.pdtr(k, mu) def _sf(self, x, mu): k = floor(x) return special.pdtrc(k, mu) def _ppf(self, q, mu): vals = ceil(special.pdtrik(q, mu)) vals1 = np.maximum(vals - 1, 0) temp = special.pdtr(vals1, mu) return np.where(temp >= q, vals1, vals) def _stats(self, mu): var = mu tmp = np.asarray(mu) mu_nonzero = tmp > 0 g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf) g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf) return mu, var, g1, g2 poisson = poisson_gen(name="poisson", longname='A Poisson') class planck_gen(rv_discrete): r"""A Planck discrete exponential random variable. %(before_notes)s Notes ----- The probability mass function for `planck` is: .. math:: f(k) = (1-\exp(-\lambda)) \exp(-\lambda k) for :math:`k \ge 0` and :math:`\lambda > 0`. `planck` takes :math:`\lambda` as shape parameter. The Planck distribution can be written as a geometric distribution (`geom`) with :math:`p = 1 - \exp(-\lambda)` shifted by `loc = -1`. %(after_notes)s See Also -------- geom %(example)s """ def _argcheck(self, lambda_): return lambda_ > 0 def _pmf(self, k, lambda_): return -expm1(-lambda_)*exp(-lambda_*k) def _cdf(self, x, lambda_): k = floor(x) return -expm1(-lambda_*(k+1)) def _sf(self, x, lambda_): return exp(self._logsf(x, lambda_)) def _logsf(self, x, lambda_): k = floor(x) return -lambda_*(k+1) def _ppf(self, q, lambda_): vals = ceil(-1.0/lambda_ * log1p(-q)-1) vals1 = (vals-1).clip(*(self._get_support(lambda_))) temp = self._cdf(vals1, lambda_) return np.where(temp >= q, vals1, vals) def _rvs(self, lambda_): # use relation to geometric distribution for sampling p = -expm1(-lambda_) return self._random_state.geometric(p, size=self._size) - 1.0 def _stats(self, lambda_): mu = 1/expm1(lambda_) var = exp(-lambda_)/(expm1(-lambda_))**2 g1 = 2*cosh(lambda_/2.0) g2 = 4+2*cosh(lambda_) return mu, var, g1, g2 def _entropy(self, lambda_): C = -expm1(-lambda_) return lambda_*exp(-lambda_)/C - log(C) planck = planck_gen(a=0, name='planck', longname='A discrete exponential ') class boltzmann_gen(rv_discrete): r"""A Boltzmann (Truncated Discrete Exponential) random variable. %(before_notes)s Notes ----- The probability mass function for `boltzmann` is: .. math:: f(k) = (1-\exp(-\lambda)) \exp(-\lambda k) / (1-\exp(-\lambda N)) for :math:`k = 0,..., N-1`. `boltzmann` takes :math:`\lambda > 0` and :math:`N > 0` as shape parameters. 
%(after_notes)s %(example)s """ def _argcheck(self, lambda_, N): return (lambda_ > 0) & (N > 0) def _get_support(self, lambda_, N): return self.a, N - 1 def _pmf(self, k, lambda_, N): # boltzmann.pmf(k) = # (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N)) fact = (1-exp(-lambda_))/(1-exp(-lambda_*N)) return fact*exp(-lambda_*k) def _cdf(self, x, lambda_, N): k = floor(x) return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N)) def _ppf(self, q, lambda_, N): qnew = q*(1-exp(-lambda_*N)) vals = ceil(-1.0/lambda_ * log(1-qnew)-1) vals1 = (vals-1).clip(0.0, np.inf) temp = self._cdf(vals1, lambda_, N) return np.where(temp >= q, vals1, vals) def _stats(self, lambda_, N): z = exp(-lambda_) zN = exp(-lambda_*N) mu = z/(1.0-z)-N*zN/(1-zN) var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2 trm = (1-zN)/(1-z) trm2 = (z*trm**2 - N*N*zN) g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN) g1 = g1 / trm2**(1.5) g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN) g2 = g2 / trm2 / trm2 return mu, var, g1, g2 boltzmann = boltzmann_gen(name='boltzmann', a=0, longname='A truncated discrete exponential ') class randint_gen(rv_discrete): r"""A uniform discrete random variable. %(before_notes)s Notes ----- The probability mass function for `randint` is: .. math:: f(k) = \frac{1}{high - low} for ``k = low, ..., high - 1``. `randint` takes ``low`` and ``high`` as shape parameters. %(after_notes)s %(example)s """ def _argcheck(self, low, high): return (high > low) def _get_support(self, low, high): return low, high-1 def _pmf(self, k, low, high): # randint.pmf(k) = 1./(high - low) p = np.ones_like(k) / (high - low) return np.where((k >= low) & (k < high), p, 0.) def _cdf(self, x, low, high): k = floor(x) return (k - low + 1.) / (high - low) def _ppf(self, q, low, high): vals = ceil(q * (high - low) + low) - 1 vals1 = (vals - 1).clip(low, high) temp = self._cdf(vals1, low, high) return np.where(temp >= q, vals1, vals) def _stats(self, low, high): m2, m1 = np.asarray(high), np.asarray(low) mu = (m2 + m1 - 1.0) / 2 d = m2 - m1 var = (d*d - 1) / 12.0 g1 = 0.0 g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0) return mu, var, g1, g2 def _rvs(self, low, high): """An array of *size* random integers >= ``low`` and < ``high``.""" if self._size is not None: # NumPy's RandomState.randint() doesn't broadcast its arguments. # Use `broadcast_to()` to extend the shapes of low and high # up to self._size. Then we can use the numpy.vectorize'd # randint without needing to pass it a `size` argument. low = broadcast_to(low, self._size) high = broadcast_to(high, self._size) randint = np.vectorize(self._random_state.randint, otypes=[np.int_])
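# Illustrative sketch: two quick numeric checks of claims made above, using
# only the public scipy.stats API (a demonstration, not part of the module).
# First, `planck` really is a geometric distribution with p = 1 - exp(-lambda)
# shifted by loc = -1, as the planck docstring states. Second, the _ppf pattern
# used throughout these classes (ceil of the inverse, then step back one value
# whenever the cdf already reaches q) makes ppf a proper inverse of cdf on the
# support, shown here with `randint`.
import numpy as np
from scipy.stats import geom, planck, randint

lam = 0.7
k = np.arange(0, 20)
assert np.allclose(planck.pmf(k, lam), geom.pmf(k, 1 - np.exp(-lam), loc=-1))

low, high = 3, 11
k = np.arange(low, high)
assert np.array_equal(randint.ppf(randint.cdf(k, low, high), low, high), k)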
of 1.2 to 1.5. However # under these conditions there will be cases where two adjacent pixels will # both be marked as maxima. Accordingly there is a final morphological # thinning step to correct this. # This function is slow. It uses bilinear interpolation to estimate # intensity values at ideal, real-valued pixel locations on each side of # pixels to determine if they are local maxima. # Copyright (c) 1996-2013 <NAME> """ im = np.zeros(gradient.shape) if(radius<1): print 'ERROR: radius should be bigger than 1' return iradius = int(math.ceil(radius)) # Precalculate x and y offsets relative to centre pixel for each orientation angle angle = range(0,181,1) angle = (np.array(angle)*np.pi)/180 # Array of angles in 1 degree increments (but in radians). xoff = radius*np.cos(angle) # x and y offset of points at specified radius and angle yoff = radius*np.sin(angle) # from each reference position. hfrac = xoff - np.floor(xoff) # Fractional offset of xoff relative to integer location vfrac = yoff - np.floor(yoff) # Fractional offset of yoff relative to integer location orient = np.fix(orientation) # Orientations start at 0 degrees but arrays start # with index 1. orient = np.array(orient,dtype=np.int16) # Now run through the image interpolating grey values on each side # of the centre pixel to be used for the non-maximal suppression. [rows,cols] = gradient.shape nrow = range(iradius+1,rows - iradius) ncol = range(iradius+1,cols - iradius) for elr in nrow: for elc in ncol: ori = orient[elr,elc] # Index into precomputed arrays x = elc + xoff[ori] # x, y location on one side of the point in question y = elr - yoff[ori] fx = int(np.floor(x)) # Get integer pixel locations that surround location x,y cx = int(np.ceil(x)) fy = int(np.floor(y)) cy = int(np.ceil(y)) tl = gradient[fy,fx] # Value at top left integer pixel location. tr = gradient[fy,cx] # top right bl = gradient[cy,fx] # bottom left br = gradient[cy,cx] # bottom right upperavg = tl + hfrac[ori]*(tr - tl) # Now use bilinear interpolation to loweravg = bl + hfrac[ori]*(br - bl) # estimate value at x,y v1 = upperavg + vfrac[ori]*(loweravg - upperavg) if (gradient[elr, elc] > v1): # We need to check the value on the other side... x = elc - xoff[ori] # x, y location on the `other side' of the point in question y = elr + yoff[ori] fx = int(np.floor(x)) cx = int(np.ceil(x)) fy = int(np.floor(y)) cy = int(np.ceil(y)) tl = gradient[fy,fx] # % Value at top left integer pixel location. tr = gradient[fy,cx] # % top right bl = gradient[cy,fx] # % bottom left br = gradient[cy,cx] # % bottom right upperavg = tl + hfrac[ori]*(tr - tl) loweravg = bl + hfrac[ori]*(br - bl) v2 = upperavg + vfrac[ori]*(loweravg - upperavg) if (gradient[elr,elc] > v2): # This is a local maximum. im[elr, elc] = gradient[elr, elc] # Record value in the output # Finally thin the 'nonmaximally suppressed' image by pointwise # multiplying itself with a morphological skeletonization of itself. # I know it is oxymoronic to thin a nonmaximally supressed image but # fixes the multiple adjacent peaks that can arise from using a radius # value > 1. # # skel = bwmorph(im>0,'skel',Inf); # im2 = (im>0).astype(np.int8) skel= morphology.skeletonize(im2) im = np.multiply(im,skel) return im def hysthresh(self,image,T1,T2): if T1 < T2 : # T1 and T2 reversed - swap values tmp = T1 T1 = T2 T2 = tmp aboveT2 = image > T2; # Edge points above lower threshold. [aboveT1r,aboveT1c] = np.nonzero(image > T1); # Row and colum coords of points above upper threshold. 
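# Illustrative sketch: hysthresh()/floodfill() here keep every pixel above the
# lower threshold that is 8-connected to at least one pixel above the upper
# threshold. The same result can be obtained with scipy.ndimage.label -- label
# the weak mask, then keep only the labels that contain a strong pixel. This is
# an alternative formulation for comparison, not the class's implementation
# (note the class names its upper threshold T1 and its lower threshold T2).
import numpy as np
from scipy import ndimage

def hysteresis_threshold(image, high, low):
    weak = image > low
    strong = image > high
    labels, _ = ndimage.label(weak, structure=np.ones((3, 3), dtype=int))
    keep = np.unique(labels[strong])
    keep = keep[keep > 0]
    return np.isin(labels, keep)

demo = np.array([[0, 5, 0, 0],
                 [0, 4, 0, 2],
                 [0, 3, 0, 2]], dtype=float)
print(hysteresis_threshold(demo, high=4.5, low=1.5).astype(int))
# the column containing the 5 survives; the isolated 2s are discarded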
# Obtain all connected regions in aboveT2 that include a point that has a # value above T1 bw = self.floodfill(aboveT2, aboveT1r, aboveT1c, 8) return bw def floodfill(self,bw, r, c, N=8): filled = np.zeros(bw.shape) theStack = deque(zip(r,c)) while len(theStack) > 0: x, y = theStack.pop() if filled[x, y] == 1: continue if bw[x, y] == 0: continue filled[x, y] = 1 theStack.append((x + 1, y)) # right theStack.append((x - 1, y)) # left theStack.append((x, y + 1)) # down theStack.append((x, y - 1)) # up if (N == 8): theStack.append((x + 1, y + 1)) # d right theStack.append((x - 1, y - 1)) # d left theStack.append((x - 1, y + 1)) # down theStack.append((x + 1, y - 1)) # up return filled def borderEnhancer(self,img,filtersize): # Estimate the local mean of f. prod_fs = reduce(lambda x, y: x * y, filtersize, 1) localMean = convolve2d(img,np.ones(filtersize),'same') / prod_fs; # Estimate of the local variance of f. img_2 = np.multiply(img,img) localMean_2 = localMean*localMean localVar = convolve2d(img_2,np.ones(filtersize),'same') / prod_fs - localMean_2; localVar = localVar>0 return localVar def ridgeorient(self,im,gradientsigma,blocksigma,orientsmoothsigma): # Arguments: im - A normalised input image. # gradientsigma - Sigma of the derivative of Gaussian # used to compute image gradients. # blocksigma - Sigma of the Gaussian weighting used to # sum the gradient moments. # orientsmoothsigma - Sigma of the Gaussian used to smooth # the final orientation vector field. # Optional: if ommitted it defaults to 0 # Returns: orientim - The orientation image in radians. # Orientation values are +ve clockwise # and give the direction *along* the # ridges. # reliability - Measure of the reliability of the # orientation measure. This is a value # between 0 and 1. I think a value above # about 0.5 can be considered 'reliable'. # reliability = 1 - Imin./(Imax+.001); # coherence - A measure of the degree to which the local # area is oriented. # coherence = ((Imax-Imin)./(Imax+Imin)).^2; rows,cols = im.shape # Calculate image gradients. sze = int(np.fix(6*gradientsigma)) if(sze%2 == 0): sze = sze+1 h = self.fspecial_gauss2D((sze,sze),gradientsigma) fx,fy = np.gradient(h) # Gradient of Gausian. Gx = convolve2d(im, fx,'same') # Gradient of the image in x Gy = convolve2d(im, fy, 'same') # ... and y # Estimate the local ridge orientation at each point by finding the # principal axis of variation in the image gradients. Gxx = np.multiply(Gx,Gx) # Covariance data for the image gradients Gxy = np.multiply(Gx,Gy) Gyy = np.multiply(Gy,Gy) # Now smooth the covariance data to perform a weighted summation of the data. sze = int(np.fix(6*blocksigma)) if(sze%2 == 0): sze = sze+1 h = self.fspecial_gauss2D((sze,sze),blocksigma) Gxx = convolve2d(Gxx, h,'same'); Gxy = 2*convolve2d(Gxy,h,'same'); Gyy = convolve2d(Gyy,h,'same'); # Analytic solution of principal direction Gxy_2 = np.multiply(Gxy,Gxy) Gm = Gxx-Gyy Gm = np.multiply(Gm,Gm) denom = np.sqrt(Gxy_2 + Gm) + np.spacing(1) sin2theta = np.divide(Gxy,denom) # Sine and cosine of doubled angles cos2theta = np.divide(Gxx-Gyy,denom) sze = int(np.fix(6*orientsmoothsigma)) if(sze%2 == 0): sze = sze+1 h = self.fspecial_gauss2D((sze,sze),orientsmoothsigma) cos2theta = convolve2d(cos2theta,h,'same')# Smoothed sine and cosine of sin2theta = convolve2d(sin2theta,h,'same'); # doubled angles orientim = np.pi/2 + np.arctan2(sin2theta,cos2theta)/2; # Calculate 'reliability' of orientation data. 
Here we calculate the # area moment of inertia about the orientation axis found (this will # be the minimum inertia) and an axis perpendicular (which will be # the maximum inertia). The reliability measure is given by # 1.0-min_inertia/max_inertia. The reasoning being that if the ratio # of the minimum to maximum inertia is close to one we have little # orientation information. Imin = (Gyy+Gxx)/2 Imin = Imin - np.multiply((Gxx-Gyy),cos2theta)/2 - np.multiply(Gxy,sin2theta)/2 Imax = Gyy+Gxx - Imin reliability = 1 - np.divide(Imin,(Imax+.001)) # aux = Imax+Imin # aux = np.multiply(aux,aux) # coherence = np.divide((Imax-Imin),aux) # Finally mask reliability to exclude regions where the denominator # in the orientation calculation above was small. Here I have set # the value to 0.001, adjust this if you feel the need reliability = np.multiply(reliability,(denom>.001)) return orientim,reliability def SWT(self,i_img,edgeImage,orientim,stroke_width=20,angle=np.pi/6): im = self.gaussfilt(i_img,1) Ix,Iy = self.derivative5(im) Ix_2 = np.multiply(Ix,Ix) Iy_2 = np.multiply(Iy,Iy) g_mag = np.sqrt(Ix_2 + Iy_2) # Gradient magnitude. Ix = np.divide(Ix,g_mag) Iy = np.divide(Iy,g_mag) cres = 0 prec = 0.4 mSWT = -np.ones(i_img.shape) count = 1 h_stroke = stroke_width*0.5 rows,cols
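# Illustrative sketch: ridgeorient() above estimates the local ridge direction
# from the smoothed structure tensor of the image gradients -- the doubled
# angle is recovered from arctan2(2*Gx*Gy, Gxx - Gyy) and then halved. The same
# idea in a few lines with scipy.ndimage (a standalone illustration, not the
# class method; the sigma values are arbitrary):
import numpy as np
from scipy import ndimage

def ridge_orientation(im, grad_sigma=1.0, block_sigma=5.0):
    gy, gx = np.gradient(ndimage.gaussian_filter(im, grad_sigma))
    gxx = ndimage.gaussian_filter(gx * gx, block_sigma)
    gyy = ndimage.gaussian_filter(gy * gy, block_sigma)
    gxy = ndimage.gaussian_filter(gx * gy, block_sigma)
    return np.pi / 2 + 0.5 * np.arctan2(2 * gxy, gxx - gyy)

# Horizontal stripes: the direction *along* the ridges comes out horizontal
# (orientation is defined modulo pi, so roughly pi here).
y = np.arange(64)[:, None]
stripes = np.sin(2 * np.pi * y / 8.0) * np.ones((64, 64))
print(ridge_orientation(stripes)[32, 32])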
<filename>PROGRAMS/IGCSEPhysicsSorting.py #IMPORTING NECESSARY LIBRARIES import tkinter as tk from tkinter import * from tkinter import filedialog from tkinter import Tk from array import * import os import time import PyPDF2 from tkinter import ttk from ttkthemes import themed_tk as theme #DEFINING IMPORTANT FUNCTIONS WHICH THE BUTTONS WOULD PERFORM def browsefn(): global file_name window1.destroy() file_name = filedialog.askopenfilename(initialdir = "/", title = "SELECT A FILE", filetypes = (("PDF","*.pdf*"), ("ALL FILES", "*.*"))) print(file_name) global window2 window2 = theme.ThemedTk() window2.get_themes() window2.set_theme("plastik") window2.title("USER INPUTS NEEDED") window2.geometry("500x200") ttk.Label(window2, text='Enter the Type of Paper (1 for Core and 2 for Extended)').grid(row=0) ttk.Label(window2, text = 'Year of this Paper').grid(row = 1) ttk.Label(window2, text = "NUMBER OF QUESTIONS").grid(row = 2) ttk.Label(window2, text = "ENTER Session - 1 for June, 2 for Novermber and 3 for March").grid(row = 3) global entry1 global entry2 global entry3 global entry4 entry1 = ttk.Entry(window2) entry2 = ttk.Entry(window2) entry3 = ttk.Entry(window2) entry4 = ttk.Entry(window2) entry1.grid(row = 0, column = 1) entry2.grid(row = 1, column = 1) entry3.grid(row = 2, column = 1) entry4.grid(row = 3, column = 1) done = ttk.Button(window2, text = "SAVE", width = 20, command = savefn) done.grid(row = 4, column = 1) exitbutton = ttk.Button(window2, text = "EXIT", width = 20, command = window2.destroy) exitbutton.grid(row = 5, column = 1) window2.mainloop() #CHECKED ABOVE - BROWSEFN HAS NO ISSUES. def savefn(): global s1 global s2 global s3 global s4 s1 = entry1.get() s2 = entry2.get() s3 = entry3.get() s4 = entry4.get() global window3 window3 = theme.ThemedTk() window3.get_themes() window3.set_theme("plastik") window3.title("QUESTION DETAILS") window3.geometry("800x600") print(s1) print(s2) if s3 == '13': if s4 == '2': s2 = int(s2) + 50 s2 = str(s2) if s4 == '3': s2 = int(s2) + 100 s2 = str(s2) fn13() elif s3 == '9': if s4 == '2': s2 = int(s2) + 50 s2 = str(s2) if s4 == '3': s2 = int(s2) + 100 s2 = str(s2) fn9() elif s3== '14': if s4 == '2': s2 = int(s2) + 50 s2 = str(s2) if s4 == '3': s2 = int(s2) + 100 s2 = str(s2) fn14() elif s3 == '10': if s4 == '2': s2 = int(s2) + 50 s2 = str(s2) if s4 == '3': s2 = int(s2) + 100 s2 = str(s2) fn10() elif s3 == '11': if s4 == '2': s2 = int(s2) + 50 s2 = str(s2) if s4 == '3': s2 = int(s2) + 100 s2 = str(s2) fn11() elif s3 == '12': if s4 == '2': s2 = int(s2) + 50 s2 = str(s2) if s4 == '3': s2 = int(s2) + 100 s2 = str(s2) fn12() elif s3 == '15': if s4 == '2': s2 = int(s2) + 50 s2 = str(s2) if s4 == '3': s2 = int(s2) + 100 s2 = str(s2) fn15() else: print("ERROR IN PAPER TYPE") #CHECKED ABOVE - SAVEFN HAS NO ISSUES. 
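# Illustrative sketch: savefn() above repeats the same session-offset
# arithmetic (+50 for session "2", +100 for session "3") inside every
# paper-variant branch. The same behaviour can be expressed once, with the
# fn9..fn15 handlers looked up from a dict; the helpers below are suggestions
# layered on the script's existing globals, not part of the original program.
SESSION_OFFSET = {"1": 0, "2": 50, "3": 100}

def apply_session_offset(year_str, session):
    return str(int(year_str) + SESSION_OFFSET.get(session, 0))

def dispatch(paper_variant, handlers):
    # handlers would be {"9": fn9, "10": fn10, ..., "15": fn15}
    fn = handlers.get(paper_variant)
    if fn is None:
        print("ERROR IN PAPER TYPE")
    else:
        fn()

# usage: s2 = apply_session_offset(s2, s4); dispatch(s3, {"15": fn15, ...})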
def fn15(): j = 15 i = 1 while i<=j: ttk.Label(window3, text = "ENTER TOPIC OF Q" + str(i) + " ----->").grid(row = +i, column = 0) ttk.Label(window3, text = "ENTER PAGE OF THE BEGINNING OF THE QUESTION ----->").grid(row = +i, column = 2) i+=1 global e1 global e2 global e3 global e4 global e5 global e6 global e7 global e8 global e9 global e10 global e11 global e12 global e13 global e14 global e15 global p1 global p2 global p3 global p4 global p5 global p6 global p7 global p8 global p9 global p10 global p11 global p12 global p13 global p14 global p15 e1 = ttk.Entry(window3) e1.grid(row = 1, column = 1) e2 = ttk.Entry(window3) e2.grid(row = 2, column = 1) e3 = ttk.Entry(window3) e3.grid(row = 3, column = 1) e4 = ttk.Entry(window3) e4.grid(row = 4, column = 1) e5 = ttk.Entry(window3) e5.grid(row = 5, column = 1) e6 = ttk.Entry(window3) e6.grid(row = 6, column = 1) e7 = ttk.Entry(window3) e7.grid(row = 7, column = 1) e8 = ttk.Entry(window3) e8.grid(row = 8, column = 1) e9 = ttk.Entry(window3) e9.grid(row = 9, column = 1) e10 = ttk.Entry(window3) e10.grid(row = 10, column = 1) e11 = ttk.Entry(window3) e11.grid(row = 11, column = 1) e12 = ttk.Entry(window3) e12.grid(row = 12, column = 1) e13 = ttk.Entry(window3) e13.grid(row = 13, column = 1) e14 = ttk.Entry(window3) e14.grid(row = 14, column = 1) e15 = ttk.Entry(window3) e15.grid(row = 15, column = 1) p1 = ttk.Entry(window3) p1.grid(row = 1, column = 3) p2 = ttk.Entry(window3) p2.grid(row = 2, column = 3) p3 = ttk.Entry(window3) p3.grid(row = 3, column = 3) p4 = ttk.Entry(window3) p4.grid(row = 4, column = 3) p5 = ttk.Entry(window3) p5.grid(row = 5, column = 3) p6 = ttk.Entry(window3) p6.grid(row = 6, column = 3) p7 = ttk.Entry(window3) p7.grid(row = 7, column = 3) p8 = ttk.Entry(window3) p8.grid(row = 8, column = 3) p9 = ttk.Entry(window3) p9.grid(row = 9, column = 3) p10 = ttk.Entry(window3) p10.grid(row = 10, column = 3) p11 = ttk.Entry(window3) p11.grid(row = 11, column = 3) p12 = ttk.Entry(window3) p12.grid(row = 12, column = 3) p13 = ttk.Entry(window3) p13.grid(row = 13, column = 3) p14 = ttk.Entry(window3) p14.grid(row = 14, column = 3) p15 = ttk.Entry(window3) p15.grid(row = 15, column = 3) save = ttk.Button(window3, text = "SAVE + QUIT", width = 20, command = savedata15) save.grid(row = 16, column = 1) ttk.Label(window3, text = "Note: 1. When entering multiple topics").grid(row = 17, column = 0) ttk.Label(window3, text = "seperate both with a space").grid(row = 18, column = 0) ttk.Label(window3, text = "2. 
When entering the PDF baginning page").grid(row = 19, column = 0) ttk.Label(window3, text = "Enter the page at the top, but subtract 1 from it").grid(row = 20, column = 0) ttk.Label(window3, text = "So, if the page number at top is 2").grid(row = 21, column = 0) ttk.Label(window3, text = "enter the beginning page as 1").grid(row = 22, column = 0) #FN15 HAS NO ISSUES def fn14(): global window3 j = 14 i = 1 while i<=j: ttk.Label(window3, text = "ENTER TOPIC OF Q" + str(i) + " ----->").grid(row = +i, column = 0) ttk.Label(window3, text = "ENTER PAGE OF THE BEGINNING OF THE QUESTION ----->").grid(row = +i, column = 2) i+=1 global e1 global e2 global e3 global e4 global e5 global e6 global e7 global e8 global e9 global e10 global e11 global e12 global e13 global e14 global p1 global p2 global p3 global p4 global p5 global p6 global p7 global p8 global p9 global p10 global p11 global p12 global p13 global p14 e1 = ttk.Entry(window3) e1.grid(row = 1, column = 1) e2 = ttk.Entry(window3) e2.grid(row = 2, column = 1) e3 = ttk.Entry(window3) e3.grid(row = 3, column = 1) e4 = ttk.Entry(window3) e4.grid(row = 4, column = 1) e5 = ttk.Entry(window3) e5.grid(row = 5, column = 1) e6 = ttk.Entry(window3) e6.grid(row = 6, column = 1) e7 = ttk.Entry(window3) e7.grid(row = 7, column = 1) e8 = ttk.Entry(window3) e8.grid(row = 8, column = 1) e9 = ttk.Entry(window3) e9.grid(row = 9, column = 1) e10 = ttk.Entry(window3) e10.grid(row = 10, column = 1) e11 = ttk.Entry(window3) e11.grid(row = 11, column = 1) e12 = ttk.Entry(window3) e12.grid(row = 12, column = 1) e13 = ttk.Entry(window3) e13.grid(row = 13, column = 1) e14 = ttk.Entry(window3) e14.grid(row = 14, column = 1) p1 = ttk.Entry(window3) p1.grid(row = 1, column = 3) p2 = ttk.Entry(window3) p2.grid(row = 2, column = 3) p3 = ttk.Entry(window3) p3.grid(row = 3, column = 3) p4 = ttk.Entry(window3) p4.grid(row = 4, column = 3) p5 = ttk.Entry(window3) p5.grid(row = 5, column = 3) p6 = ttk.Entry(window3) p6.grid(row = 6, column = 3) p7 = ttk.Entry(window3) p7.grid(row = 7, column = 3) p8 = ttk.Entry(window3) p8.grid(row = 8, column = 3) p9 = ttk.Entry(window3) p9.grid(row = 9, column = 3) p10 = ttk.Entry(window3) p10.grid(row = 10, column = 3) p11 = ttk.Entry(window3) p11.grid(row = 11, column = 3) p12 = ttk.Entry(window3) p12.grid(row = 12, column = 3)
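# Illustrative sketch: fn15()/fn14() above create every Entry widget by hand
# (e1..e15, p1..p15). The same grid can be built with two lists and one loop;
# this helper is a suggestion that reuses the script's ttk import and its
# window3 toplevel, not part of the original program.
from tkinter import ttk

def build_question_entries(window, n_questions):
    topic_entries, page_entries = [], []
    for row in range(1, n_questions + 1):
        topic = ttk.Entry(window)
        topic.grid(row=row, column=1)
        page = ttk.Entry(window)
        page.grid(row=row, column=3)
        topic_entries.append(topic)
        page_entries.append(page)
    return topic_entries, page_entries
# usage: topics, pages = build_question_entries(window3, 15)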
<filename>reVX/least_cost_xmission/least_cost_xmission.py # -*- coding: utf-8 -*- """ Module to compute least cost xmission paths, distances, and costs one or more SC points """ from concurrent.futures import as_completed import geopandas as gpd import json import logging import numpy as np import os import pandas as pd from pyproj.crs import CRS import rasterio from scipy.spatial import cKDTree from shapely.geometry import Point import time from reV.handlers.exclusions import ExclusionLayers from reV.supply_curve.points import SupplyCurveExtent from rex.utilities.execution import SpawnProcessPool from rex.utilities.loggers import log_mem from reVX.least_cost_xmission.config import (TRANS_LINE_CAT, LOAD_CENTER_CAT, SINK_CAT, SUBSTATION_CAT) from reVX.least_cost_xmission.least_cost_paths import LeastCostPaths from reVX.least_cost_xmission.trans_cap_costs import TransCapCosts logger = logging.getLogger(__name__) class LeastCostXmission(LeastCostPaths): """ Compute Least Cost tie-line paths and full transmission cap cost for all possible connections to all supply curve points - """ REQUIRED_LAYRES = ['transmission_barrier', 'ISO_regions'] def __init__(self, cost_fpath, features_fpath, resolution=128, xmission_config=None): """ Parameters ---------- cost_fpath : str Path to h5 file with cost rasters and other required layers features_fpath : str Path to geopackage with transmission features resolution : int, optional SC point resolution, by default 128 xmission_config : str | dict | XmissionConfig, optional Path to Xmission config .json, dictionary of Xmission config .jsons, or preloaded XmissionConfig objects, by default None """ self._check_layers(cost_fpath) self._config = TransCapCosts._parse_config( xmission_config=xmission_config) (self._sc_points, self._features, self._sub_lines_mapping, self._shape) =\ self._map_to_costs(cost_fpath, features_fpath, resolution=resolution) self._cost_fpath = cost_fpath self._tree = None self._sink_coords = None self._min_line_len = (resolution * 0.09) / 2 logger.debug('{} initialized'.format(self)) def __repr__(self): msg = ("{} to be computed for {} sc_points and {} features" .format(self.__class__.__name__, len(self.sc_points), len(self.features))) return msg @property def sc_points(self): """ Table of supply curve points Returns ------- gpd.GeoDataFrame """ return self._sc_points @property def features(self): """ Table of features to compute paths for Returns ------- pandas.DataFrame """ return self._features @property def sub_lines_mapping(self): """ Series mapping substations to the transmission lines connected to each substation Returns ------- pandas.Series """ return self._sub_lines_mapping @property def sink_coords(self): """ Inf sink coordinates (row, col) Returns ------- ndarray """ if self._sink_coords is None: mask = self.features['category'] == SINK_CAT self._sink_coords = self.features.loc[mask, ['row', 'col']].values return self._sink_coords @property def sink_tree(self): """ cKDTree for infinite sinks Returns ------- cKDTree """ if self._tree is None: self._tree = cKDTree(self.sink_coords) return self._tree @staticmethod def _load_trans_feats(features_fpath): """ Load existing transmission features from disk. 
Substations will be loaded from cache file if it exists Parameters ---------- features_fpath : str Path to geopackage with trans features Returns ------- features : gpd.GeoDataFrame DataFrame of transmission features sub_line_map : pandas.Series Mapping of sub-station trans_gid to connected tranmission line trans_gids """ logger.debug('Loading transmission features') features = gpd.read_file(features_fpath) features = features.drop(columns=['bgid', 'egid', 'cap_left'], errors='ignore') mapping = {'gid': 'trans_gid', 'trans_gids': 'trans_line_gids'} features = features.rename(columns=mapping) features['min_volts'] = 0 features['max_volts'] = 0 # Transmission lines mask = features['category'] == TRANS_LINE_CAT voltage = features.loc[mask, 'voltage'].values features.loc[mask, 'min_volts'] = voltage features.loc[mask, 'max_volts'] = voltage # Load Center and Sinks mask = features['category'].isin([LOAD_CENTER_CAT, SINK_CAT]) features.loc[mask, 'min_volts'] = 1 features.loc[mask, 'max_volts'] = 9999 sub_lines_map = {} mask = features['category'] == SUBSTATION_CAT bad_subs = np.zeros(len(features), dtype=bool) for idx, row in features.loc[mask].iterrows(): gid = row['trans_gid'] lines = row['trans_line_gids'] if isinstance(lines, str): lines = json.loads(lines) sub_lines_map[gid] = lines lines_mask = features['trans_gid'].isin(lines) voltage = features.loc[lines_mask, 'voltage'].values if np.max(voltage) >= 69: features.loc[idx, 'min_volts'] = np.min(voltage) features.loc[idx, 'max_volts'] = np.max(voltage) else: bad_subs[idx] = True if any(bad_subs): msg = ("The following sub-stations do not have the minimum " "required voltage of 69 kV and will be dropped:\n{}" .format(features.loc[bad_subs, 'trans_gid'])) logger.warning(msg) features = features.loc[~bad_subs].reset_index(drop=True) return features, pd.Series(sub_lines_map) @staticmethod def _create_sc_points(cost_fpath, resolution=128): """ Load SC points, covert row/col to array wide, and determine x/y for reV projection Parameters ---------- cost_fpath : str Path to h5 file with cost rasters and other required layers resolution : int, optional SC point resolution, by default 128 Returns sc_points : gpd.GeoDataFrame SC points """ logger.debug('Loading Supply Curve Points') sce = SupplyCurveExtent(cost_fpath, resolution=resolution) sc_points = sce.points.rename(columns={'row_ind': 'sc_row_ind', 'col_ind': 'sc_col_ind'}) shape = sce.excl_shape sc_points['sc_point_gid'] = sc_points.index.values row = np.round(sc_points['sc_row_ind'] * resolution + resolution / 2) row = np.where(row >= shape[0], shape[0] - 1, row) sc_points['row'] = row.astype(int) col = np.round(sc_points['sc_col_ind'] * resolution + resolution / 2) col = np.where(col >= shape[1], shape[1] - 1, col) sc_points['col'] = col.astype(int) return sc_points @staticmethod def _get_feature_cost_indices(features, crs, transform, shape): """ Map features to cost row, col indicies using rasterio transform Parameters ---------- features : gpd.GeoDataFrame GeoDataFrame of features to map to cost raster crs : pyproj.crs.CRS CRS of cost raster transform : raster.Affine Transform of cost raster shape : tuple Cost raster shape Returns ------- row : ndarray Vector of row indicies for each feature col : ndarray Vector of col indicies for each features mask : ndarray Boolean mask of features with indicies outside of cost raster """ row, col, mask = super(LeastCostXmission, LeastCostXmission)._get_feature_cost_indices( features, crs, transform, shape) t_lines = features['category'] == TRANS_LINE_CAT 
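# Illustrative sketch: _create_sc_points() above places each supply-curve point
# at the centre pixel of its resolution x resolution block and clips the last
# row/column back inside the exclusion raster. The indexing rule in isolation
# (a standalone helper for illustration, not the reVX API):
import numpy as np

def sc_point_pixel(sc_ind, resolution, axis_len):
    """Centre pixel of SC block `sc_ind` along one axis, clipped to the raster."""
    px = np.round(np.asarray(sc_ind) * resolution + resolution / 2)
    return np.where(px >= axis_len, axis_len - 1, px).astype(int)

# Three SC blocks at resolution 128 on a 300-pixel axis: the last centre (320)
# falls off the raster and is clipped back to 299.
print(sc_point_pixel([0, 1, 2], 128, 300))  # [ 64 192 299]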
mask |= t_lines row[t_lines] = np.where(row[t_lines] >= 0, row[t_lines], 0) row[t_lines] = np.where(row[t_lines] < shape[0], row[t_lines], shape[0] - 1) col[t_lines] = np.where(col[t_lines] >= 0, col[t_lines], 0) col[t_lines] = np.where(col[t_lines] < shape[1], col[t_lines], shape[1] - 1) return row, col, mask @classmethod def _map_to_costs(cls, cost_fpath, features_fpath, resolution=128): """ Map supply curve points and transmission features to cost array pixel indices Parameters ---------- cost_fpath : str Path to h5 file with cost rasters and other required layers features_fpath : str Path to geopackage with transmission features resolution : int, optional SC point resolution, by default 128 Returns ------- sc_point : gpd.GeoDataFrame Table of supply curve points to connect to tranmission features : gpd.GeoDataFrame Table of transmission features sub_lines_map : pandas.Series Series mapping substations to the transmission lines connected to each substation """ with ExclusionLayers(cost_fpath) as f: crs = CRS.from_string(f.crs) transform = rasterio.Affine(*f.profile['transform']) shape = f.shape regions = f['ISO_regions'] features, sub_lines_map = cls._load_trans_feats(features_fpath) row, col, mask = cls._get_feature_cost_indices(features, crs, transform, shape) if any(~mask): msg = ("The following features are outside of the cost exclusion " "domain and will be dropped:\n{}" .format(features.loc[~mask, 'trans_gid'])) logger.warning(msg) row = row[mask] col = col[mask] features = features.loc[mask].reset_index(drop=True) features['row'] = row features['col'] = col features['region'] = regions[row, col] logger.debug('Converting SC points to GeoDataFrame') sc_points = cls._create_sc_points(cost_fpath, resolution=resolution) x, y = rasterio.transform.xy(transform, sc_points['row'].values, sc_points['col'].values) geo = [Point(xy) for xy in zip(x, y)] sc_points = gpd.GeoDataFrame(sc_points, crs=features.crs, geometry=geo) return sc_points, features, sub_lines_map, shape def _clip_to_sc_point(self, sc_point, tie_line_voltage, nn_sinks=2, clipping_buffer=1.05): """ Clip costs raster to AOI around SC point, and get substations, load centers, and sinks within the clipped region. 
Parameters ---------- sc_point : gpd.GeoSeries SC point to clip raster around nn_sinks : int, optional Number of nearest neighbor sinks to clip to clipping_buffer : float, optional Buffer to increase clipping radius by, by default 1.05 Returns ------- radius : int Clipping radius in cost raster pixels x_feats : pd.DataFrame Substatations, load centers, sinks, and nearest points on t-lines to SC point """ logger.debug('Clipping features to sc_point {}'.format(sc_point.name)) if len(self.sink_coords) > 2: row, col = sc_point[['row', 'col']].values _, pos = self.sink_tree.query([row, col], k=nn_sinks) radius = np.abs(self.sink_coords[pos] - np.array([row, col])).max() radius = int(np.ceil(radius * clipping_buffer)) logger.debug('Radius to {} nearest sink is: {}' .format(nn_sinks, radius)) row_min = max(row - radius, 0) row_max = min(row + radius, self._shape[0]) col_min = max(col - radius, 0) col_max = min(col + radius, self._shape[1]) logger.debug('Extracting all transmission features in the row ' 'slice {}:{} and column slice {}:{}' .format(row_min, row_max, col_min, col_max)) # Clip transmission features mask = self.features['row'] >= row_min mask &= self.features['row'] < row_max mask &= self.features['col'] >= col_min mask &= self.features['col'] < col_max sc_features = self.features.loc[mask].copy(deep=True) logger.debug('{} transmission features found in clipped area with ' 'radius {}' .format(len(sc_features), radius)) else: radius = None sc_features = self.features.copy(deep=True) mask = self.features['max_volts'] >= tie_line_voltage sc_features = sc_features.loc[mask].copy(deep=True) logger.debug('{} transmission features found in clipped area with ' 'minimum max voltage of {}' .format(len(sc_features), tie_line_voltage)) # Find t-lines connected to substations within clip logger.debug('Collecting transmission lines connected to substations') mask = sc_features['category'] == SUBSTATION_CAT if mask.any(): trans_gids = sc_features.loc[mask, 'trans_gid'].values trans_gids = \ np.concatenate(self.sub_lines_mapping.loc[trans_gids].values) trans_gids = np.unique(trans_gids) line_mask = self.features['trans_gid'].isin(trans_gids) trans_lines = self.features.loc[line_mask].copy(deep=True) line_mask = trans_lines['trans_gid'].isin(sc_features['trans_gid']) trans_lines = trans_lines.loc[~line_mask] logger.debug('Adding all {} transmission lines connected to ' 'substations with minimum max voltage of {}' .format(len(trans_lines), tie_line_voltage)) sc_features = sc_features.append(trans_lines) return sc_features, radius def process_sc_points(self,
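# Illustrative sketch: _clip_to_sc_point() above sizes its clipping window from
# the nearest infinite sinks -- query a cKDTree of sink (row, col) coordinates,
# take the largest per-axis offset to the k nearest sinks, and inflate it by a
# buffer. The radius computation in isolation, with made-up sink coordinates
# (an illustration, not the reVX API):
import numpy as np
from scipy.spatial import cKDTree

sink_coords = np.array([[10, 10], [400, 420], [900, 50], [60, 700]])
tree = cKDTree(sink_coords)

def clip_radius(point, nn_sinks=2, clipping_buffer=1.05):
    _, pos = tree.query(point, k=nn_sinks)
    radius = np.abs(sink_coords[pos] - np.asarray(point)).max()
    return int(np.ceil(radius * clipping_buffer))

print(clip_radius([100, 100]))  # a window that covers the two nearest sinks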
import logging import re from django.apps import apps from django.contrib import admin from django.contrib.auth.admin import UserAdmin from django.contrib.auth.models import User from django.contrib.admin import AdminSite from django.contrib.admin.models import LogEntry from django.contrib.admin.views.main import ChangeList from django.contrib.contenttypes.models import ContentType from django.core.exceptions import FieldError, FieldDoesNotExist from django.db import connection from django.db.models.functions import Lower from django.db.utils import OperationalError from django.forms import modelform_factory from django.utils.html import mark_safe, format_html from django.views.decorators.cache import never_cache from import_export.admin import ExportMixin from social_django.models import Association, Nonce, UserSocialAuth from taggit.models import Tag from taggit.apps import TaggitAppConfig from collaborative.export import collaborative_modelresource_factory from collaborative.filters import TagListFilter from django_models_from_csv.admin import AdminAutoRegistration from django_models_from_csv.forms import create_taggable_form from django_models_from_csv.models import DynamicModel, CredentialStore logger = logging.getLogger(__name__) class NewUserAdmin(UserAdmin): list_display = ("username", "email", "first_name", "last_name") add_form_template = 'admin/auth/user/add_form.html' def add_view(self, request, *args, **kwargs): if request.method != "POST": return super().add_view(request, *args, **kwargs) password1 = request.POST.get("password1") password2 = request.POST.get("password2") if not password1 and not password2: newpass = User.objects.make_random_password(length=32) request.POST._mutable = True request.POST["password1"] = <PASSWORD> request.POST["password2"] = <PASSWORD> request.POST._mutable = False return super().add_view(request, *args, **kwargs) def widget_for_object_field(obj, field_name): FieldForm = modelform_factory( obj.source_dynmodel().get_model(), fields=(field_name,) ) widget = FieldForm().fields[field_name].widget return widget def make_getter(rel_name, attr_name, getter_name, field=None): """ Build a reverse lookup getter, to be attached to the custom dynamic lookup admin class. """ def getter(self): if not hasattr(self, rel_name): return None rel = getattr(self, rel_name).first() if not rel: return None fieldname = "%s__%s" % (rel_name, attr_name) content_type_id = ContentType.objects.get_for_model(self).id # handle tagging separately if attr_name == "tags": all_tags = rel.tags.all() tags_html = [] for t in all_tags: name = t.name html = ( "<span class='tag-bubble'>" "<span class='remtag'>x</span>" "%s</span>" ) % (name) tags_html.append(html) return mark_safe(format_html( "".join(tags_html) )) # try to lookup choices for field choices = getattr( rel, "%s_CHOICES" % attr_name.upper(), [] ) value = getattr(rel, attr_name) for pk, txt in choices: if pk == value: widget = widget_for_object_field(rel, attr_name) html = widget.render(fieldname, value) return mark_safe(format_html( "<span content_type_id='{}' class='inline-editable'>{}</span>", content_type_id, html, )) # no choice found, return field value widget = widget_for_object_field(rel, attr_name) html = widget.render(fieldname, value) return mark_safe(format_html( "<span content_type_id='{}' class='inline-editable'>{}</span>", content_type_id, html, )) # the header in django admin is named after the function name. 
if # this line is removed, the header will be "GETTER" for all derived # reverse lookup columns getter.__name__ = getter_name return getter class ReimportMixin(ExportMixin): """ Mixin for displaying re-import button on admin list view, alongside the export button (from import_export module). """ change_list_template = 'django_models_from_csv/change_list_dynmodel.html' class CaseInsensitiveChangeList(ChangeList): """ Provides case-insensitive ordering for admin list view. """ def get_ordering(self, request, queryset): ordering = super().get_ordering(request, queryset) for i in range(len(ordering)): desc = False fieldname = ordering[i] if fieldname.startswith("-"): fieldname = fieldname[1:] desc = True try: field = queryset.model()._meta.get_field( "id" if fieldname == "pk" else fieldname ) except FieldDoesNotExist: continue f_type = field.db_type(connection) if f_type != "text": continue if desc: ordering[i] = Lower(fieldname).desc() else: ordering[i] = Lower(fieldname) return ordering class ReverseFKAdmin(admin.ModelAdmin): def __init__(self, *args, **kwargs): """ Build relations lookup methods, like metadata__status, but for the reverse foreignkey direction. """ super().__init__(*args, **kwargs) Model, site = args if "DynamicModel" == Model._meta.object_name: return # setup reverse related attr getters so we can do things like # metadata__status in the reverse direction for rel in Model._meta.related_objects: rel_name = rel.get_accessor_name() # "metadata", etc, related_name rel_model = rel.related_model if not rel_model: logger.warning("No related model found!") continue for rel_field in rel_model._meta.get_fields(): # build a getter for this relation attribute attr_name = rel_field.name # remove auto fields and other fields of that nature. we # only want the directly acessible fields of this method if attr_name != "tags": if rel_field.is_relation: continue if not hasattr(rel_field, "auto_created"): continue if rel_field.auto_created: continue getter_name = "%s_%s" % (rel_name, attr_name) short_desc = re.sub(r"[\-_]+", " ", attr_name).replace( "assignee", "assigned to" ) getter = make_getter( rel_name, attr_name, getter_name, field=rel_field ) setattr(self, getter_name, getter) getattr(self, getter_name).short_description = short_desc getattr( self, getter_name ).admin_order_field = "%s__%s" % (rel_name, attr_name) def get_view_label(self, obj): return "View" get_view_label.short_description = 'Records' def get_changelist(self, request, **kwargs): # This controls how the admin list view works. Override the # ChangeList to modify ordering, template, etc return CaseInsensitiveChangeList class DynamicModelAdmin(admin.ModelAdmin): def get_queryset(self, request): return DynamicModel.objects.exclude(name__icontains="metadata") def get_full_deletion_set(self, queryset, only_meta=False): """ This is called when a user selects some dynamic models to be deleted. Since the admin queryset only displays the main models, not the metadata models, each item in the queryset can be assumed to be a primary data source model. Here, we want to also add the corresponding meta models. 
""" pks = [] for model in queryset: name = model.name meta = "%smetadata" % (name) contact_meta = "%scontactmetadata" % (name) names = (meta, contact_meta) if not only_meta: names = (name, meta, contact_meta) for dynmodel in DynamicModel.objects.filter(name__in=names): pks.append(dynmodel.pk) # order this by descending id, since the original model gets # created first, and we need to delete the reverse fk attached # models first to avoid a cascade return DynamicModel.objects.filter( pk__in=pks ).order_by("-id") def get_deleted_objects(self, queryset, request): extended_queryset = self.get_full_deletion_set(queryset) return super().get_deleted_objects(extended_queryset, request) def delete_queryset(self, request, queryset): # for model in queryset: # for model in self.get_full_deletion_set(queryset): for model in queryset: Model = model.get_model() model_qs = DynamicModel.objects.filter(pk=model.pk) # wipe all relations, by truncating table for related in self.get_full_deletion_set(model_qs, only_meta=True): RelatedModel = related.get_model() for obj in RelatedModel.objects.all(): obj.delete() model.delete() # NOTE: we have to delete these *after* we wipe the original. # otherwise django throws all kinds of errors or will gracefuly # succeed but throw errors later during normal admin operation for metamodel in self.get_full_deletion_set(model_qs, only_meta=True): metamodel.delete() class AdminMetaAutoRegistration(AdminAutoRegistration): def should_register_admin(self, Model): # metadata models get admin created along with the base model name = Model._meta.object_name if name.endswith("metadata"): return False return super().should_register_admin(Model) def create_dynmodel_admin(self, Model): name = Model._meta.object_name inheritance = (DynamicModelAdmin,) return type("%sAdmin" % name, inheritance, {}) def create_admin(self, Model): name = Model._meta.object_name if "metadata" in name: return if name == "DynamicModel": return self.create_dynmodel_admin(Model) meta = [] # find the Metadata model corresponding to the # csv-backed model we're creating admin for. # this will end up as an inline admin for MetaModel in apps.get_models(): meta_name = MetaModel._meta.object_name # all our additonal related models are in this pattern: # [model-name][contact|]metadata if not meta_name.startswith(name) or \ not meta_name.endswith("metadata"): continue dynmodel_meta = MetaModel.source_dynmodel(MetaModel) # for contact log, always show a blank one for easy access extra = 0 if meta_name.endswith("contactmetadata"): extra = 1 meta_attrs = { "model": MetaModel, "extra": extra, } if not meta_name.endswith("contactmetadata"): fields_meta = self.get_fields(MetaModel, dynmodel=dynmodel_meta) try: form_meta = create_taggable_form(MetaModel, fields=fields_meta) meta_attrs["form"] = form_meta # no tags on this model except FieldError: pass MetaModelInline = type( "%sInlineAdmin" % meta_name, (admin.StackedInline,), meta_attrs) meta.append(MetaModelInline) # get searchable and filterable (from column attributes) # should we order by something? number of results? try: model_desc = DynamicModel.objects.get(name=name) except OperationalError: return None except DynamicModel.DoesNotExist: logger.warning("Model with name: %s doesn't exist. 
Skipping" % name) # return super().create_admin(Model) return None cols = list(reversed(model_desc.columns)) searchable = [c.get("name") for c in cols if c.get("searchable")] filterable = [c.get("name") for c in cols if c.get("filterable")] # Build our CSV-backed admin, attaching inline meta model dynmodel = Model.source_dynmodel(Model) fields = self.get_fields(Model, dynmodel=dynmodel) associated_fields = ["get_view_label"] if name != "DynamicModel": test_item = Model.objects.first() if test_item and hasattr(test_item, "metadata"): associated_fields.append("metadata_status") filterable.append("metadata__status") test_metadata = test_item.metadata.first() if hasattr(test_metadata, "assigned_to"): associated_fields.append("metadata_assigned_to") filterable.append("metadata__assigned_to") elif hasattr(test_metadata, "assignee"): associated_fields.append("metadata_assignee") filterable.append("metadata__assignee") if test_metadata and hasattr(test_metadata, "tags"): associated_fields.append("metadata_tags") filterable.append(TagListFilter) list_display = associated_fields + fields[:5] exporter = collaborative_modelresource_factory( model=Model, ) # Note that ExportMixin needs to be declared before ReverseFKAdmin inheritance = (ReimportMixin, ReverseFKAdmin,) return type("%sAdmin" % name, inheritance, { "inlines": meta, "readonly_fields": fields, "list_display": list_display, "search_fields": searchable, "list_filter": filterable, "resource_class": exporter, }) # Hide "taggit" name TaggitAppConfig.verbose_name = "Tagging" # Remove tagged item inline class TagAdmin(admin.ModelAdmin): list_display = ["name", "slug"] ordering = ["name", "slug"] search_fields = ["name"] prepopulated_fields = {"slug": ["name"]} class Meta: verbose_name = "Tags" verbose_name_plural = "Tags" app_label = "Tags" @never_cache def login(*args, **kwargs): """ Override login view to hide Google Sign In button if no OAuth credentials added. """ extra_context = kwargs.get("extra_context", {}) have_oauth_creds = CredentialStore.objects.filter( name="google_oauth_credentials" ).count() extra_context["google_oauth_credentials"] = have_oauth_creds > 0 if "first_login" in extra_context: extra_context["first_login"] = False kwargs["extra_context"] = extra_context return AdminSite().login(*args, **kwargs) admin.site.login = login admin.site.site_header = "Collaborate" admin.site.index_title = "Welcome" admin.site.site_title = "Collaborate" # Remove the "view site" link from the admin header admin.site.site_url =
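# Illustrative sketch: the admin classes above are not written by hand --
# create_admin() assembles each one at runtime with type("<Model>Admin", bases,
# attrs), and make_getter() renames its closures because the Django admin
# derives the column header from the callable's __name__ and reads
# short_description / admin_order_field off the function object. Both patterns
# in plain Python, outside Django (stub classes and names are hypothetical):
def make_column_getter(rel_name, attr_name):
    def getter(self):
        rel = getattr(self, rel_name, None)
        return getattr(rel, attr_name, None) if rel is not None else None
    getter.__name__ = "%s_%s" % (rel_name, attr_name)
    return getter

class ExportMixinStub:   # stands in for import_export's ExportMixin
    pass

class BaseAdminStub:     # stands in for ReverseFKAdmin
    pass

getter = make_column_getter("metadata", "status")
getter.short_description = "status"
getter.admin_order_field = "metadata__status"

# Mixin order matters: the mixin must precede the base class in the MRO,
# exactly as ReimportMixin/ExportMixin precede ReverseFKAdmin above.
IncidentAdmin = type("IncidentAdmin", (ExportMixinStub, BaseAdminStub), {
    getter.__name__: getter,
    "list_display": ("metadata_status",),
})

print(IncidentAdmin.__mro__[:2])                        # mixin before base
print(IncidentAdmin.metadata_status.admin_order_field)  # "metadata__status"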
# -*- coding: utf-8 -*- """Test suite for assets.""" import copy import pytest from axonius_api_client.constants import AGG_ADAPTER_ALTS, AGG_ADAPTER_NAME from axonius_api_client.exceptions import ApiError, NotFoundError from ...meta import FIELD_FORMATS, SCHEMA_FIELD_FORMATS, SCHEMA_TYPES from ...utils import get_schemas class FieldsPrivate: def test_private_get(self, apiobj): fields = apiobj.fields._get() self.val_raw_fields(fields=fields) def val_raw_fields(self, fields): fields = copy.deepcopy(fields) assert isinstance(fields, dict) schema = fields.pop("schema") assert isinstance(schema, dict) generic = fields.pop("generic") assert isinstance(generic, list) self.val_raw_adapter_fields(adapter="generic", adapter_fields=generic) generic_schema = schema.pop("generic") assert isinstance(generic_schema, dict) self.val_raw_schema(adapter="generic", schema=generic_schema) specific = fields.pop("specific") assert isinstance(specific, dict) specific_schema = schema.pop("specific") assert isinstance(specific_schema, dict) for adapter, adapter_fields in specific.items(): self.val_raw_adapter_fields(adapter=adapter, adapter_fields=adapter_fields) adapter_schema = specific_schema.pop(adapter) self.val_raw_schema(adapter=adapter, schema=adapter_schema) assert not fields assert not schema def val_raw_schema(self, adapter, schema): assert isinstance(schema, dict) items = schema.pop("items") assert isinstance(items, list) required = schema.pop("required") assert isinstance(required, list) stype = schema.pop("type") assert stype == "array" assert not schema, list(schema) for req in required: assert isinstance(req, str) for item in items: assert item self.val_raw_items(adapter=adapter, items=item) def val_raw_adapter_fields(self, adapter, adapter_fields): assert isinstance(adapter_fields, list) for field in adapter_fields: assert isinstance(field, dict) name = field.pop("name") assert isinstance(name, str) and name title = field.pop("title") assert isinstance(title, str) and title ftype = field.pop("type") assert isinstance(ftype, str) and ftype assert ftype in SCHEMA_TYPES description = field.pop("description", "") assert isinstance(description, str) sort = field.pop("sort", False) assert isinstance(sort, bool) unique = field.pop("unique", False) assert isinstance(unique, bool) branched = field.pop("branched", False) assert isinstance(branched, bool) dynamic = field.pop("dynamic", False) assert isinstance(dynamic, bool) fformat = field.pop("format", "") assert isinstance(fformat, str) assert fformat in FIELD_FORMATS or fformat == "" enums = field.pop("enum", []) assert isinstance(enums, list) for enum in enums: assert isinstance(enum, str) or isinstance(enum, int) items = field.pop("items", {}) assert isinstance(items, dict) self.val_raw_items(adapter=f"{adapter}:{name}", items=items) assert not field, list(field) def val_raw_items(self, adapter, items): assert isinstance(items, dict) if items: ftype = items.pop("type") assert isinstance(ftype, str) assert ftype in SCHEMA_TYPES description = items.pop("description", "") assert isinstance(description, str) title = items.pop("title", "") assert isinstance(title, str) name = items.pop("name", "") assert isinstance(name, str) sort = items.pop("sort", False) assert isinstance(sort, bool) unique = items.pop("unique", False) assert isinstance(unique, bool) branched = items.pop("branched", False) assert isinstance(branched, bool) dynamic = items.pop("dynamic", False) assert isinstance(dynamic, bool) hidden = items.pop("hidden", False) assert isinstance(hidden, 
bool) iformat = items.pop("format", "") assert isinstance(iformat, str) assert iformat in SCHEMA_FIELD_FORMATS or iformat == "" val_source(obj=items) enums = items.pop("enum", []) assert isinstance(enums, list) for enum in enums: assert isinstance(enum, str) or isinstance(enum, int) sub_items = items.pop("items", []) assert isinstance(sub_items, list) or isinstance(sub_items, dict) assert not items, list(items) if isinstance(sub_items, dict): self.val_raw_items(adapter=adapter, items=sub_items) else: for sub_item in sub_items: self.val_raw_items(adapter=adapter, items=sub_item) def test_private_prettify_schemas(self, apiobj): schemas = get_schemas(apiobj=apiobj) pretty = apiobj.fields._prettify_schemas(schemas=schemas) assert isinstance(pretty, list) for p in pretty: assert isinstance(p, str) assert "->" in p class FieldsPublic: def test_get(self, apiobj): fields = apiobj.fields.get() self.val_parsed_fields(fields=fields) def val_parsed_fields(self, fields): fields = copy.deepcopy(fields) assert isinstance(fields, dict) for adapter, schemas in fields.items(): assert not adapter.endswith("_adapter") assert isinstance(schemas, list) for schema in schemas: self.val_parsed_schema(schema=schema, adapter=adapter) def val_parsed_schema(self, schema, adapter): schema = copy.deepcopy(schema) assert isinstance(schema, dict) name = schema.pop("name") assert isinstance(name, str) and name ftype = schema.pop("type") assert isinstance(ftype, str) and ftype assert ftype in SCHEMA_TYPES fformat = schema.pop("format", "") assert isinstance(fformat, str) assert fformat in FIELD_FORMATS or fformat == "" adapter_name = schema.pop("adapter_name") assert isinstance(adapter_name, str) and adapter_name adapter_name_raw = schema.pop("adapter_name_raw") assert isinstance(adapter_name_raw, str) and adapter_name_raw adapter_prefix = schema.pop("adapter_prefix") assert isinstance(adapter_prefix, str) and adapter_prefix adapter_title = schema.pop("adapter_title") assert isinstance(adapter_title, str) and adapter_title column_name = schema.pop("column_name") assert isinstance(column_name, str) and column_name column_title = schema.pop("column_title") assert isinstance(column_title, str) and column_title name_base = schema.pop("name_base") assert isinstance(name_base, str) and name_base name_qual = schema.pop("name_qual") assert isinstance(name_qual, str) and name_qual title = schema.pop("title") assert isinstance(title, str) and title type_norm = schema.pop("type_norm") assert isinstance(type_norm, str) and type_norm parent = schema.pop("parent") assert isinstance(parent, str) and parent is_root = schema.pop("is_root") assert isinstance(is_root, bool) is_list = schema.pop("is_list") assert isinstance(is_list, bool) selectable = schema.pop("selectable", False) assert isinstance(selectable, bool) description = schema.pop("description", "") assert isinstance(description, str) sort = schema.pop("sort", False) assert isinstance(sort, bool) unique = schema.pop("unique", False) assert isinstance(unique, bool) branched = schema.pop("branched", False) assert isinstance(branched, bool) dynamic = schema.pop("dynamic", False) assert isinstance(dynamic, bool) is_complex = schema.pop("is_complex") assert isinstance(is_complex, bool) enums = schema.pop("enum", []) assert isinstance(enums, list) is_agg = schema.pop("is_agg") assert isinstance(is_agg, bool) is_all = schema.pop("is_all") assert isinstance(is_all, bool) is_details = schema.pop("is_details") assert isinstance(is_details, bool) expr_field_type = 
schema.pop("expr_field_type") assert isinstance(expr_field_type, str) for enum in enums: assert isinstance(enum, str) or isinstance(enum, int) sub_fields = schema.pop("sub_fields", []) assert isinstance(sub_fields, list) items = schema.pop("items", {}) assert isinstance(items, dict) if is_complex: if name != "all": assert sub_fields for sub_field in sub_fields: self.val_parsed_schema(adapter=f"{adapter}:{name}", schema=sub_field) else: dynamic = items.pop("dynamic", False) assert isinstance(dynamic, bool) iformat = items.pop("format", "") assert isinstance(iformat, str) assert iformat in SCHEMA_FIELD_FORMATS or iformat == "" itype = items.pop("type", "") assert isinstance(itype, str) assert itype in SCHEMA_TYPES or itype == "" val_source(obj=items) enums = items.pop("enum", []) assert isinstance(enums, list) for enum in enums: assert isinstance(enum, str) or isinstance(enum, int) assert not items assert not schema, list(schema) def test_get_adapter_name(self, apiobj): search = AGG_ADAPTER_ALTS[0] exp = AGG_ADAPTER_NAME adapter = apiobj.fields.get_adapter_name(value=search) assert adapter == exp def test_get_adapter_name_error(self, apiobj): search = "badwolf" with pytest.raises(NotFoundError): apiobj.fields.get_adapter_name(value=search) def test_get_adapter_names_single(self, apiobj): search = AGG_ADAPTER_ALTS[0] exp = [AGG_ADAPTER_NAME] adapters = apiobj.fields.get_adapter_names(value=search) assert adapters == exp def test_get_adapter_names_multi(self, apiobj): search = "a" adapters = apiobj.fields.get_adapter_names(value=search) assert AGG_ADAPTER_NAME in adapters assert len(adapters) > 1 def test_get_adapter_names_error(self, apiobj): search = "badwolf" with pytest.raises(NotFoundError): apiobj.fields.get_adapter_names(value=search) def test_get_field_schema(self, apiobj): search = "last_seen" schemas = get_schemas(apiobj=apiobj) exp = [x for x in schemas if x["name_base"] == search][0] result = apiobj.fields.get_field_schema(value=search, schemas=schemas) assert exp == result def test_get_field_names_re(self, apiobj): search = ["seen"] result = apiobj.fields.get_field_names_re(value=search) assert "specific_data.data.last_seen" in result def test_get_field_names_eq(self, apiobj): search = ["specific_data.data.id", "last_seen"] exp = [] schemas = get_schemas(apiobj=apiobj) for i in search: exp += [x["name_qual"] for x in schemas if x["name_base"] == i or x["name_qual"] == i] result = apiobj.fields.get_field_names_eq(value=search) assert exp == result def test_get_field_schemas(self, apiobj): # schemas = get_schemas(apiobj=apiobj) search = "l" result = [ x["name_qual"] for x in apiobj.fields.get_field_schemas( value=search, schemas=get_schemas(apiobj=apiobj) ) ] assert len(result) >= 1 def test_get_field_schema_error(self, apiobj): search = "badwolf" schemas = get_schemas(apiobj=apiobj) with pytest.raises(NotFoundError): apiobj.fields.get_field_schema(value=search, schemas=schemas) @pytest.mark.parametrize( "test_data", [ (f"{AGG_ADAPTER_NAME}:host", (AGG_ADAPTER_NAME, ["host"])), ( f"{AGG_ADAPTER_NAME}:host, ip, other", (AGG_ADAPTER_NAME, ["host", "ip", "other"]), ), ("host, ip, other", (AGG_ADAPTER_NAME, ["host", "ip", "other"])), ("adapter1:host, ip, other", ("adapter1", ["host", "ip", "other"])), (":host", (AGG_ADAPTER_NAME, ["host"])), ], scope="class", ) def test_split_search(self, apiobj, test_data): search, exp = test_data result = apiobj.fields.split_search(value=search) assert result == exp def test_split_search_adapter_specific(self, apiobj): exp = ("tanium_asset", 
["adapters_data.tanium_asset_adapter.installed_software"]) search = "adapters_data.tanium_asset_adapter.installed_software" result = apiobj.fields.split_search(value=search) assert result == exp @pytest.mark.parametrize( "test_data", [ (f"{AGG_ADAPTER_NAME}:host", [(AGG_ADAPTER_NAME, ["host"])]), ( f"{AGG_ADAPTER_NAME}:host, ip, other", [(AGG_ADAPTER_NAME, ["host", "ip", "other"])], ), ("host, ip, other", [(AGG_ADAPTER_NAME, ["host", "ip", "other"])]), ("adapter1:host, ip, other", [("adapter1", ["host", "ip", "other"])]), ( [f"{AGG_ADAPTER_NAME}:host", "adapter1:host, ip, other"], [(AGG_ADAPTER_NAME, ["host"]), ("adapter1", ["host", "ip", "other"])], ), ], scope="class", ) def test_split_searches(self, apiobj, test_data): searches, exp = test_data result = apiobj.fields.split_searches(value=searches) assert result == exp def test_split_search_error(self, apiobj): search = f"{AGG_ADAPTER_NAME}:" with pytest.raises(ApiError): apiobj.fields.split_search(value=search) def test_get_field_name_manual(self, apiobj): search = "test" result = apiobj.fields.get_field_name( value=search, field_manual=True, ) assert search == result def test_get_field_name_error(self, apiobj): search = "bad,wolf" with pytest.raises(ApiError): apiobj.fields.get_field_name(value=search) def test_get_field_name(self, apiobj): search = "last_seen" exp = "specific_data.data.last_seen" result = apiobj.fields.get_field_name(value=search) assert result == exp def test_validate(self, apiobj): exp = apiobj.fields_default + [ "specific_data.data", "specific_data.data.first_fetch_time", ] result = apiobj.fields.validate( fields=["last_seen"], fields_regex=["^first_fetch_time$"], fields_manual=["specific_data.data"], fields_default=True, ) assert result == exp def test_validate_root(self, apiobj): result = apiobj.fields.validate(fields_root="agg") assert len(result) > 1 def test_validate_defaults(self, apiobj): exp = apiobj.fields_default result = apiobj.fields.validate() assert exp == result def test_validate_fuzzy(self, apiobj): result = apiobj.fields.validate(fields_fuzzy="last seen", fields_default=False) assert "specific_data.data.last_seen" in result def test_validate_error(self, apiobj): with pytest.raises(ApiError): apiobj.fields.validate(fields_default=False) def test_fuzzy_filter_contains(self, apiobj): schemas = apiobj.fields.get()["agg"] matches = apiobj.fields.fuzzy_filter(search="last", schemas=schemas, names=True) assert isinstance(matches, list) and matches for x in matches: assert isinstance(x, str) assert len(matches) > 1 assert "specific_data.data.last_seen" in matches def test_fuzzy_filter_token(self, apiobj): schemas = apiobj.fields.get()["agg"] matches = apiobj.fields.fuzzy_filter(search="last seen", schemas=schemas, names=True) assert isinstance(matches, list) and matches for x in matches: assert isinstance(x, str) assert len(matches) > 1 assert "specific_data.data.last_seen" in matches def test_fuzzy_filter_partial(self, apiobj): schemas = apiobj.fields.get()["agg"] matches = apiobj.fields.fuzzy_filter(search="bd", schemas=schemas, names=True) assert isinstance(matches,
<reponame>FZJ-IEK3-VSA/HiSim # Generic/Built-in import datetime import math import os import numpy as np import matplotlib.pyplot as plt import pandas as pd import pvlib from dataclasses_json import dataclass_json from typing import Optional from dataclasses import dataclass from functools import lru_cache from hisim.simulationparameters import SimulationParameters # Owned from hisim import component as cp from hisim import loadtypes as lt from hisim import utils from hisim import log from hisim.components.weather import Weather __authors__ = "<NAME>" __copyright__ = "Copyright 2021, the House Infrastructure Project" __credits__ = ["<NAME>"] __license__ = "MIT" __version__ = "0.1" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" __status__ = "development" """ The functions cited in this module are at some degree based on the tsib project: [tsib-kotzur]: <NAME>, <NAME>, and <NAME>. Future grid load of the residential building sector. No. RWTH-2018-231872. Lehrstuhl für Brennstoffzellen (FZ Jülich), 2019. ID: http://hdl.handle.net/2128/21115 http://nbn-resolving.org/resolver?verb=redirect&identifier=urn:nbn:de:0001-2019020614 The implementation of the tsib project can be found under the following repository: https://github.com/FZJ-IEK3-VSA/tsib """ temp_model = pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS["sapm"]["open_rack_glass_glass"] @lru_cache(maxsize=16) def simPhotovoltaicFast( dni_extra=None, DNI=None, DHI=None, GHI=None, azimuth=None, apparent_zenith=None, temperature=None, wind_speed=None, surface_azimuth : float = 180, surface_tilt : float = 30 ): """ Simulates a defined PV array with the Sandia PV Array Performance Model. The implementation is done in accordance with following tutorial: https://github.com/pvlib/pvlib-python/blob/master/docs/tutorials/tmy_to_power.ipynb Parameters ---------- surface_tilt: int or float, optional (default:30) Tilt angle of of the array in degree. surface_azimuth: int or float, optional (default:180) Azimuth angle of of the array in degree. 180 degree means south, 90 degree east and 270 west. losses: float, optional (default: 0.1) Losses due to soiling, mismatch, diode connections, dc wiring etc. Returns -------- """ poa_irrad = pvlib.irradiance.get_total_irradiance( surface_tilt, surface_azimuth, apparent_zenith, azimuth, DNI, GHI, DHI, dni_extra ) pvtemps = pvlib.temperature.sapm_cell(poa_irrad["poa_global"], temperature, wind_speed, **temp_model) pv_dc = pvlib.pvsystem.pvwatts_dc( poa_irrad[ "poa_global" ], temp_cell = pvtemps, pdc0 = 1, gamma_pdc = -0.002, temp_ref = 25.0 ) if math.isnan(pv_dc): pv_dc = 0 return pv_dc def simPhotovoltaicSimple( dni_extra=None, DNI=None, DHI=None, GHI=None, azimuth=None, apparent_zenith=None, temperature=None, wind_speed=None, surface_tilt=30, surface_azimuth=180, albedo=0.2): """ Simulates a defined PV array with the Sandia PV Array Performance Model. The implementation is done in accordance with following tutorial: https://github.com/pvlib/pvlib-python/blob/master/docs/tutorials/tmy_to_power.ipynb Based on the tsib project @[tsib-kotzur] (Check header) Parameters ---------- tmy_data: pandas.DataFrame(), required Weatherfile in the format of a tmy file. surface_tilt: int or float, optional (default:30) Tilt angle of of the array in degree. surface_azimuth: int or float, optional (default:180) Azimuth angle of of the array in degree. 180 degree means south, 90 degree east and 270 west. albedo: float, optional (default: 0.2) Reflection coefficient of the surrounding area. 
losses: float, optional (default: 0.1) Losses due to soiling, mismatch, diode connections, dc wiring etc. load_module_data: Boolean, optional (default: False) If True the module data base is loaded from the Sandia Website. Otherwise it is loaded from this relative path '\\profiles\\PV-Modules\\sandia_modules.csv'. module_name: str, optional (default:'Hanwha_HSL60P6_PA_4_250T__2013_') Module name. The string must be existens in Sandia Module database. integrateInverter: bool, optional (default: True) If an inverter shall be added to the simulation, providing the photovoltaic output after the inverter. inverter_name: str, optional (default: 'ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_') Type of inverter. Returns -------- """ # automatic pd time series in future pvlib version # calculate airmass airmass = pvlib.atmosphere.get_relative_airmass(apparent_zenith) # use perez model to calculate the plane of array diffuse sky radiation poa_sky_diffuse = pvlib.irradiance.perez( surface_tilt, surface_azimuth, DHI, np.float64(DNI), dni_extra, apparent_zenith, azimuth, airmass, ) # calculate ground diffuse with specified albedo poa_ground_diffuse = pvlib.irradiance.get_ground_diffuse( surface_tilt, GHI, albedo=albedo ) # calculate angle of incidence aoi = pvlib.irradiance.aoi(surface_tilt, surface_azimuth, apparent_zenith, azimuth) # calculate plane of array irradiance poa_irrad = pvlib.irradiance.poa_components(aoi, np.float64(DNI), poa_sky_diffuse, poa_ground_diffuse) # calculate pv cell and module temperature temp_model = pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS["sapm"]["open_rack_glass_glass"] pvtemps = pvlib.temperature.sapm_cell(poa_irrad["poa_global"], temperature, wind_speed, **temp_model) pv_dc = pvlib.pvsystem.pvwatts_dc(poa_irrad["poa_global"], temp_cell=pvtemps, pdc0=1, gamma_pdc=-0.002, temp_ref=25.0) if math.isnan(pv_dc): pv_dc = 0 return pv_dc @dataclass_json @dataclass class PVSystemConfig: parameter_string: str time: int location: str module_name:str integrate_inverter: bool inverter_name:str power: float def __init__(self, my_simulation_parameters: SimulationParameters, time:int, location:str, power:float, module_name:str, integrate_inverter:bool, inverter_name:str ): self.parameter_string = my_simulation_parameters.get_unique_key() self.time = time self.location = location self.module_name = module_name self.integrate_inverter = integrate_inverter self.inverter_name = inverter_name self.power = power class PVSystem(cp.Component): """ Parameters: ----------------------------------------------------- time: simulation timeline location: Location object Location with temperature and solar data power: float Power in kWp to be provided by the PV System Returns: ----------------------------------------------------- pass """ # Inputs TemperatureOutside = "TemperatureOutside" DirectNormalIrradiance = "DirectNormalIrradiance" DirectNormalIrradianceExtra = "DirectNormalIrradianceExtra" DiffuseHorizontalIrradiance = "DiffuseHorizontalIrradiance" GlobalHorizontalIrradiance = "GlobalHorizontalIrradiance" Azimuth = "Azimuth" ApparentZenith = "ApparentZenith" WindSpeed = "WindSpeed" # Outputs ElectricityOutput = "ElectricityOutput" #Forecasts PV_Forecast_24h = "PV_Forecast_24h" # Similar components to connect to: # 1. 
Weather @utils.measure_execution_time def __init__(self, my_simulation_parameters: SimulationParameters, my_simulation_repository : Optional[ cp.SimRepository ] = None, time : int = 2019, location : str = "Aachen", power : float = 10E3, load_module_data : bool = False, module_name : str = "Hanwha_HSL60P6_PA_4_250T__2013_", integrateInverter : bool = True, inverter_name : str = "ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_" ): super().__init__( "PVSystem", my_simulation_parameters = my_simulation_parameters ) self.pvconfig = PVSystemConfig(my_simulation_parameters=my_simulation_parameters, location=location, power = power, module_name=module_name, integrate_inverter=integrateInverter, inverter_name=inverter_name, time=time) self.build( load_module_data, my_simulation_repository ) self.t_outC : cp.ComponentInput = self.add_input(self.ComponentName, self.TemperatureOutside, lt.LoadTypes.Temperature, lt.Units.Celsius, True) self.DNIC : cp.ComponentInput = self.add_input(self.ComponentName, self.DirectNormalIrradiance, lt.LoadTypes.Irradiance, lt.Units.Wm2, True) self.DNIextraC : cp.ComponentInput = self.add_input(self.ComponentName, self.DirectNormalIrradianceExtra, lt.LoadTypes.Irradiance, lt.Units.Wm2, True) self.DHIC: cp.ComponentInput = self.add_input(self.ComponentName, self.DiffuseHorizontalIrradiance, lt.LoadTypes.Irradiance, lt.Units.Wm2, True) self.GHIC: cp.ComponentInput = self.add_input(self.ComponentName, self.GlobalHorizontalIrradiance, lt.LoadTypes.Irradiance, lt.Units.Wm2, True) self.azimuthC : cp.ComponentInput = self.add_input(self.ComponentName, self.Azimuth, lt.LoadTypes.Any, lt.Units.Degrees, True) self.apparent_zenithC : cp.ComponentInput = self.add_input(self.ComponentName, self.ApparentZenith, lt.LoadTypes.Any, lt.Units.Degrees, True) self.wind_speedC: cp.ComponentInput = self.add_input(self.ComponentName, self.WindSpeed, lt.LoadTypes.Speed, lt.Units.MeterPerSecond, True) self.electricity_outputC : cp.ComponentOutput = self.add_output(self.ComponentName, PVSystem.ElectricityOutput, lt.LoadTypes.Electricity, lt.Units.Watt, False) self.add_default_connections(Weather, self.get_weather_default_connections()) def get_weather_default_connections(self): log.information("setting weather default connections") connections = [] weather_classname = Weather.get_classname() connections.append(cp.ComponentConnection(PVSystem.TemperatureOutside,weather_classname, Weather.TemperatureOutside)) connections.append(cp.ComponentConnection(PVSystem.DirectNormalIrradiance,weather_classname, Weather.DirectNormalIrradiance)) connections.append(cp.ComponentConnection(PVSystem.DirectNormalIrradianceExtra,weather_classname, Weather.DirectNormalIrradianceExtra)) connections.append(cp.ComponentConnection(PVSystem.DiffuseHorizontalIrradiance,weather_classname, Weather.DiffuseHorizontalIrradiance)) connections.append(cp.ComponentConnection(PVSystem.GlobalHorizontalIrradiance,weather_classname, Weather.GlobalHorizontalIrradiance)) connections.append(cp.ComponentConnection(PVSystem.Azimuth,weather_classname, Weather.Azimuth)) connections.append(cp.ComponentConnection(PVSystem.ApparentZenith,weather_classname, Weather.ApparentZenith)) connections.append(cp.ComponentConnection(PVSystem.WindSpeed,weather_classname, Weather.WindSpeed)) return connections def i_restore_state(self): pass def write_to_report(self): lines = [] lines.append("Name: {}".format(self.ComponentName)) lines.append("Power: {:3.0f} kWp".format(self.pvconfig.power*1E-3)) lines.append("Module: {}".format(self.pvconfig.module_name)) 
lines.append("Inverter: {}".format(self.pvconfig.inverter_name)) return lines def i_simulate(self, timestep: int, stsv: cp.SingleTimeStepValues, force_convergence: bool): if hasattr(self, "output"): #if(len(self.output) < timestep) # raise Exception("Somehow the precalculated list of values for the PV system seems to be incorrect. Please delete the cache.") stsv.set_output_value(self.electricity_outputC, self.output[timestep] * self.pvconfig.power) else: DNI = stsv.get_input_value(self.DNIC) dni_extra = stsv.get_input_value(self.DNIextraC) DHI = stsv.get_input_value(self.DHIC) GHI = stsv.get_input_value(self.GHIC) azimuth = stsv.get_input_value(self.azimuthC) temperature = stsv.get_input_value(self.t_outC) wind_speed = stsv.get_input_value(self.wind_speedC) apparent_zenith = stsv.get_input_value(self.apparent_zenithC) #ac_power = self.simPhotovoltaic2(dni_extra=dni_extra, # DNI=DNI, # DHI=DHI, # GHI=GHI, # azimuth=azimuth, # apparent_zenith=apparent_zenith, # temperature=temperature, # wind_speed=wind_speed) #ac_power = simPhotovoltaicSimple( # dni_extra=dni_extra, # DNI=DNI, # DHI=DHI, # GHI=GHI, # azimuth=azimuth, # apparent_zenith=apparent_zenith, # temperature=temperature, # wind_speed=wind_speed) ac_power = simPhotovoltaicFast( dni_extra=dni_extra, DNI=DNI, DHI=DHI, GHI=GHI, azimuth=azimuth, apparent_zenith=apparent_zenith, temperature=temperature, wind_speed=wind_speed) resultingvalue = ac_power * self.pvconfig.power # if you wanted to access the temperature forecast from the weather component: # val = self.simulation_repository.get_entry(Weather.Weather_Temperature_Forecast_24h) stsv.set_output_value(self.electricity_outputC, resultingvalue) self.data[timestep] = ac_power if timestep + 1 == self.data_length: database = pd.DataFrame(self.data, columns=["output"]) database.to_csv(self.cache_filepath, sep=",", decimal=".", index=False) if self.my_simulation_parameters.system_config.predictive == True: last_forecast_timestep = int( timestep + 24 * 3600 / self.my_simulation_parameters.seconds_per_timestep ) if ( last_forecast_timestep > len( self.output ) ): last_forecast_timestep = len( self.output ) pvforecast = [ self.output[ t ] * self.pvconfig.power for t in range( timestep, last_forecast_timestep ) ] self.simulation_repository.set_entry( self.PV_Forecast_24h, pvforecast ) def get_coordinates(self, location="Aachen", year=2019): """ Reads a test reference year file and gets the GHI, DHI and DNI from it. Based on the tsib project @[tsib-kotzur] (Check header) Parameters ------- try_num: int (default: 4) The region number of the test reference year. year: int (default: 2010) The year. 
Only data for 2010 and 2030 available """ # get the correct file path filepath = os.path.join(utils.HISIMPATH["weather"][location]) # get the geoposition with open(filepath + ".dat", encoding="utf-8") as fp: lines = fp.readlines() location_name = lines[0].split(maxsplit=2)[2].replace('\n', '') lat = float(lines[1][20:37]) lon = float(lines[2][15:30]) self.location = {"name": location_name, "latitude": lat, "longitude": lon} self.index = pd.date_range( "{}-01-01 00:00:00".format(year), periods=60*24*365, freq="T", tz="Europe/Berlin" ) def i_save_state(self): pass def i_doublecheck(self, timestep: int, stsv: cp.SingleTimeStepValues): pass def build( self, load_module_data : bool, my_simulation_repository : Optional[ cp.SimRepository ] ): log.information(self.pvconfig.to_json()) # type: ignore file_exists, self.cache_filepath = utils.get_cache_file("PVSystem", self.pvconfig) if file_exists: self.output = pd.read_csv(self.cache_filepath, sep=',', decimal='.')['output'].tolist() if len(self.output) != self.my_simulation_parameters.timesteps: raise Exception("Reading the cached PV values seems to have failed. Expected " + str(self.my_simulation_parameters.timesteps) + " values, but got " + str(len(self.output ))) else: self.get_coordinates(location = self.pvconfig.location, year = self.pvconfig.time) # Factor to guarantee peak power based on module with 250 Wh
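As a reading aid for simPhotovoltaicFast above, the snippet below runs the same pvlib chain once with made-up scalar weather values: plane-of-array irradiance, SAPM cell temperature, then PVWatts DC. It is only a sketch and not part of HiSim; pdc0=1 keeps the result normalized per watt-peak, which is why PVSystem.i_simulate later multiplies the output by self.pvconfig.power.

import pvlib

# open-rack glass/glass temperature parameters, as used by the component above
params = pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS["sapm"]["open_rack_glass_glass"]

# plane-of-array irradiance from made-up sun position and irradiance components
poa = pvlib.irradiance.get_total_irradiance(
    surface_tilt=30, surface_azimuth=180,
    solar_zenith=45.0, solar_azimuth=170.0,
    dni=600.0, ghi=500.0, dhi=100.0, dni_extra=1360.0,
)

# cell temperature from ambient temperature and wind speed
cell_temp = pvlib.temperature.sapm_cell(
    poa["poa_global"], temp_air=20.0, wind_speed=2.0, **params
)

# normalized DC power (pdc0=1), later scaled by the installed peak power in W
dc_normalized = pvlib.pvsystem.pvwatts_dc(
    poa["poa_global"], temp_cell=cell_temp, pdc0=1, gamma_pdc=-0.002, temp_ref=25.0
)
print(dc_normalized * 10e3)  # e.g. a 10 kWp system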
an ancestor - used to find the ancestor country location. """ country = current.gis.get_parent_country(id) s3db = current.s3db table = s3db.gis_hierarchy fieldname = "edit_%s" % level # Read the system default query = (table.uuid == "SITE_DEFAULT") if country: # Try the Location's Country, but ensure we have the fallback available in a single query query |= (table.location_id == country) limitby = (0, 2) else: limitby = (0, 1) rows = current.db(query).select(table[fieldname], table.uuid, limitby=limitby, cache=s3db.cache) if len(rows) > 1: # Remove the Site Default filter = lambda row: row.uuid == "SITE_DEFAULT" rows.exclude(filter) row = rows.first() editable = row[fieldname] return editable # ============================================================================= def gis_location_filter(r): """ Filter resources to those for a specified location @ToDo: Migrate to Context """ lfilter = current.session.s3.location_filter if not lfilter: return db = current.db s3db = current.s3db gtable = s3db.gis_location query = (gtable.id == lfilter) row = db(query).select(gtable.id, gtable.name, gtable.level, gtable.path, limitby=(0, 1)).first() if row and row.level: resource = r.resource if resource.name == "organisation": selector = "organisation.country" if row.level != "L0": code = current.gis.get_parent_country(row, key_type="code") else: ttable = s3db.gis_location_tag query = (ttable.tag == "ISO2") & \ (ttable.location_id == row.id) tag = db(query).select(ttable.value, limitby=(0, 1)).first() code = tag.value filter = (FS(selector) == code) elif resource.name == "project": # Go via project_location link table selector = "location.location_id$%s" % row.level filter = (FS(selector) == row.name) else: # Normal case: resource with location_id selector = "%s.location_id$%s" % (resource.name, row.level) filter = (FS(selector) == row.name) resource.add_filter(filter) # ============================================================================= class gis_LocationRepresent(S3Represent): """ Representation of Locations """ def __init__(self, show_link = False, multiple = False, address_only = False, sep = None, show_name = False, # Show name in location for level==None when sep is used ): settings = current.deployment_settings # Translation uses gis_location_name & not T() translate = settings.get_L10n_translate_gis_location() language = current.session.s3.language if language == settings.get_L10n_default_language(): translate = False # Iframe height(Link) self.iheight = settings.get_gis_map_selector_height() address_only = address_only or \ settings.get_gis_location_represent_address_only() show_marker_icon = True if address_only == "icon" else False self.address_only = address_only self.show_marker_icon = show_marker_icon self.sep = sep if sep: self.multi_country = len(settings.get_gis_countries()) != 1 self.show_name = show_name super(gis_LocationRepresent, self).__init__(lookup="gis_location", show_link=show_link, translate=translate, multiple=multiple) # ------------------------------------------------------------------------- @staticmethod def link(k, v, row=None): """ Represent a (key, value) as hypertext link. 
@param k: the key @param v: the representation of the key @param row: the row with this key (unused here) """ if k is None: return "-" settings = current.deployment_settings iheight = settings.get_gis_map_selector_height() popup = settings.get_gis_popup_location_link() return A(v, _style="cursor:pointer;cursor:hand", _onclick="s3_viewMap(%i,%i,'%s');return false" % (k, iheight, popup), ) # ------------------------------------------------------------------------- @staticmethod def lat_lon_format(coord): """ Represent a coordinate (latitude or longitude) according to a format provided from deployment_settings. """ degrees = abs(coord) minutes = (degrees - int(degrees)) * 60 seconds = (minutes - int(minutes)) * 60 # truncate (floor) degrees and minutes degrees, minutes = (int(coord), int(minutes)) format = current.deployment_settings.get_L10n_lat_lon_format() formatted = format.replace("%d", "%d" % degrees) \ .replace("%m", "%d" % minutes) \ .replace("%s", "%lf" % seconds) \ .replace("%f", "%lf" % coord) return formatted # ------------------------------------------------------------------------- def lat_lon_represent(self, row): lat = row.lat lon = row.lon if lat is not None and lon is not None: if lat > 0: lat_suffix = "N" else: lat_suffix = "S" lat = -lat if lon > 0: lon_suffix = "E" else: lon_suffix = "W" lon = -lon text = "%s %s, %s %s" % (self.lat_lon_format(lat), lat_suffix, self.lat_lon_format(lon), lon_suffix) return text # ------------------------------------------------------------------------- def lookup_rows(self, key, values, fields=None): """ Custom lookup method for Location(GIS) rows.Parameters key and fields are not used, but are kept for API compatiblity reasons. @param values: the gis_location IDs """ db = current.db s3db = current.s3db ltable = s3db.gis_location table = s3db.gis_location_name count = len(values) sep = self.sep translate = self.translate # Initialized here to keep the init lightweight fields = [ltable.id, ltable.name, ltable.level, ltable.path, ltable.L0, ltable.L1, ltable.L2, ltable.L3, ltable.L4, ltable.L5, ] if sep: # Separator to place between all elements in the hierarchy gis_fields = fields elif self.address_only and not self.show_marker_icon: gis_fields = fields + [ltable.parent, ltable.addr_street, ltable.addr_postcode, ] else: gis_fields = fields + [ltable.parent, ltable.addr_street, ltable.addr_postcode, ltable.inherited, ltable.lat, ltable.lon, ] if count == 1: query = (ltable.id == values[0]) else: query = (ltable.id.belongs(values)) rows = db(query).select(limitby = (0, count), *gis_fields) location_ids = [] paths = self.paths = {} if sep or translate: for row in rows: path = row.path if not path: path = current.gis.update_location_tree(row) split_path = path.split("/") paths[row.id] = split_path location_ids += split_path location_ids = set(location_ids) if translate: query = (table.deleted == False) & \ (table.language == current.session.s3.language) count = len(location_ids) if count == 1: query &= (table.location_id == row.id) else: query &= (table.location_id.belongs(location_ids)) self.l10n = db(query).select(table.location_id, table.name_l10n, limitby = (0, count), ).as_dict(key="location_id") return rows # ------------------------------------------------------------------------- def alt_represent_row(self, row): """ Different Entry point for S3LocationSelector(intends to use represent_row) - Lookup L10n, path - then call represent_row """ sep = self.sep translate = self.translate self.paths = {} if sep or translate: path = row.path if 
not path: path = current.gis.update_location_tree(row) split_path = path.split("/") self.paths[row.id] = split_path location_ids = set(split_path) # Lookup l10n if translate: table = current.s3db.gis_location_name query = (table.deleted == False) & \ (table.language == current.session.s3.language) count = len(location_ids) if count == 1: query &= (table.location_id == row.id) else: query &= (table.location_id.belongs(location_ids)) self.l10n = current.db(query).select(table.location_id, table.name_l10n, limitby = (0, count), ).as_dict(key="location_id") return self.represent_row(row) # ------------------------------------------------------------------------- def represent_row(self, row): """ Represent a single Row - assumes that Path & Lx have been populated correctly by gis.update_location_tree() @param row: the gis_location Row """ sep = self.sep translate = self.translate ids = self.paths.get(row.id) if translate: l10n = self.l10n loc = l10n.get(row.id) if loc: name = loc["name_l10n"] else: name = row.name or "" else: name = row.name or "" level = row.level if sep: if level == "L0": return name # Remove the last ID as this is 'name' ids.pop() if self.show_name or level is not None: locations = [name] else: locations = [] lappend = locations.append L5 = row.L5 if L5 and level != "L5": if translate: loc = l10n.get(int(ids.pop()), None) if loc: L5 = loc["name_l10n"] lappend(L5) L4 = row.L4 if L4 and level != "L4": if translate: loc = l10n.get(int(ids.pop()), None) if loc: L4 = loc["name_l10n"] lappend(L4) L3 = row.L3 if L3 and level != "L3": if translate: loc = l10n.get(int(ids.pop()), None) if loc: L3 = loc["name_l10n"] lappend(L3) L2 = row.L2 if L2 and level != "L2": if translate: loc = l10n.get(int(ids.pop()), None) if loc: L2 = loc["name_l10n"] lappend(L2) L1 = row.L1 if L1 and level != "L1": if translate: loc = l10n.get(int(ids.pop()), None) if loc: L1 = loc["name_l10n"] lappend(L1) if self.multi_country: L0 = row.L0 if L0: if translate: loc = l10n.get(int(ids.pop()), None) if loc: L0 = loc["name_l10n"] lappend(L0) if locations: represent = sep.join(locations) else: # Fallback to name even if show_name is False represent = name else: # @ToDo: Support translate=True if level == "L0": represent = "%s (%s)" % (name, current.messages.COUNTRY) elif level in ("L1", "L2", "L3", "L4", "L5"): # Lookup the hierarchy for labels s3db = current.s3db htable = s3db.gis_hierarchy L0_name = row.L0 if L0_name: path = row.path.split("/") L0_id = path[0] level_name = current.gis.get_location_hierarchy(level, L0_id) else: # Fallback to system default level_name = current.gis.get_location_hierarchy(level) represent = name if level_name: represent = "%s (%s)" % (represent, level_name) if row.parent: parent_level = "L%s" % (int(level[1]) - 1) parent_name = row[parent_level] if parent_name: represent = "%s, %s" % (represent, parent_name) else: # Specific location: # Don't duplicate the Resource Name # Street address or lat/lon as base has_lat_lon = row.get("lat") is not None and \ row.get("lon") is not None represent = "" if row.addr_street: # Get the 1st line of the street address. 
represent = row.addr_street.splitlines()[0] if row.addr_postcode: represent = "%s, %s" % (represent, row.addr_postcode) if (not represent) and \ (not self.address_only) and \ (row.inherited == False) and \ has_lat_lon: represent = self.lat_lon_represent(row) if row.parent: if row.path: path = row.path else: # Not yet been built, so do it now path = current.gis.update_location_tree(row) path = path.split("/") path_len = len(path) if path_len > 1:
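Within represent_row above, the separator branch builds the display string by starting from the row's own name and appending the parent levels from L5 down to L0 (L0 only when more than one country is configured), skipping the level of the row itself. The snippet below is a minimal sketch of just that joining step with hypothetical names; the real method additionally resolves L10n translations, honours show_name, and falls back to street address or lat/lon for specific locations.

def represent_location_sketch(name, level, lx, sep=", ", multi_country=True):
    # lx maps "L0".."L5" to the ancestor names already denormalized on the row
    order = ["L5", "L4", "L3", "L2", "L1"] + (["L0"] if multi_country else [])
    parts = [name] if name else []
    for lvl in order:
        if lvl != level and lx.get(lvl):
            parts.append(lx[lvl])
    return sep.join(parts) if parts else name

row_lx = {"L0": "Kenya", "L1": "Nairobi", "L2": "Westlands"}
print(represent_location_sketch("Westlands", "L2", row_lx))
# -> "Westlands, Nairobi, Kenya"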
<gh_stars>0 import argparse import numpy as np import pandas as pd import scipy.stats as stats from sklearn.ensemble import RandomForestClassifier #from sklearn.mixture import GaussianMixture from sklearn.mixture import GMM from statsmodels.sandbox.stats.multicomp import fdrcorrection0 from pyemd import emd import Features as eff def main(): parser = argparse.ArgumentParser(description="Calculate difference features between two fractionation experiments") parser.add_argument("--elution_files", action="store", nargs='+', dest="elution_files", required=True, help="Elution files (.elut)") parser.add_argument("--features", action="store", nargs='+', dest="features", required=False, default=['diffrac'], help="Features to calculate: diffrac (L1-norm of difference) diffrac_percent diffrac_normalized pearsonr poisson mean_abundance emd zscore sliding_zscore fdr_correct sliding_fdr_correct") parser.add_argument("--annotated_list", action="store", dest="annotated_list", required=False, default=None, help="Filename of annotated ids, used for calculating zscores from compliment of list, default=None") parser.add_argument("--contaminate_tag", action="store", dest="contaminate_tag", required=False, default='CONTAMINANT', help="Filters entries with tag, default=CONTAMINANT") parser.add_argument("--use_gmm", action="store_true", dest="use_gmm", required=False, default=False, help="Fit sliding window distributions to Gaussian Mixture Model and use largest gaussian for calculating zscore, default=False") parser.add_argument("--log_transform", action="store_true", dest="log_transform", required=False, default=False, help="Use the log transform of the diffrac score to calculate sliding zscore, default=False") parser.add_argument("--window_size", action="store", type=int, dest="window_size", required=False, default=100, help="Window size to use for calculating sliding zscore, default=100") parser.add_argument("--output_file", action="store", dest="out_filename", required=False, default=None, help="Filename of output file, default=None which prints to stdout") args = parser.parse_args() elutions = [] for efile in args.elution_files: elut = eff.Elut() elut.load(efile,format='tsv') elut.threshold(thresh=1) elutions.append(elut) feature_df = pd.DataFrame() if len(elutions) >= 2: if 'diffrac' in args.features: feature_series = calc_diffrac(elutions[0], elutions[1], normalize_totalCounts=False) feature_series.name = 'diffrac' feature_df = join_feature(feature_df,feature_series) if 'diffrac_percent' in args.features: feature_series = calc_diffrac(elutions[0], elutions[1], percent_totalCounts=True) feature_series.name = 'diffrac_percent' feature_df = join_feature(feature_df,feature_series) if 'diffrac_normalized' in args.features: feature_series = calc_diffrac(elutions[0], elutions[1], normalize_totalCounts=True) feature_series.name = 'diffrac_normalized' feature_df = join_feature(feature_df,feature_series) if 'emd' in args.features: feature_series = calc_emd(elutions[0], elutions[1]) feature_series.name = 'emd' feature_df = join_feature(feature_df,feature_series) if 'pearsonr' in args.features: feature_series = calc_correlation(elutions[0], elutions[1], correlation_func=lambda x,y: stats.pearsonr(x,y)[0]) feature_series.name = 'pearsonr' feature_df = join_feature(feature_df,feature_series) if 'poisson' in args.features: print("WARNING: poisson not implemented") #feature_series = calc_correlation(elutions[0], elutions[1]) #feature_series.name = 'poisson' #feature_df = join_feature(feature_df,feature_series) if 
'mean_abundance' in args.features: feature_series = calc_mean_abundance(elutions[0], elutions[1]) feature_series.name = 'mean_abundance' feature_df = join_feature(feature_df,feature_series) if args.annotated_list != None: #kdrew: add in training labels annotated_df = pd.read_table(args.annotated_list, header=None, names=['annotated']) annotated = [i in annotated_df['annotated'].values for i in feature_df.index] feature_df['annotated'] = annotated print len(feature_df) try: feature_df = feature_df[~feature_df.index.str.contains('CONTAMINANT')] except AttributeError: print "No contaminants" print len(feature_df) if 'zscore' in args.features: if 'diffrac_normalized' not in args.features: #kdrew: calculating diffrac_normalized feature_series = calc_diffrac(elutions[0], elutions[1], normalize_totalCounts=False) feature_series.name = 'diffrac' feature_df = join_feature(feature_df,feature_series) feature_series = calc_zscore(feature_df) feature_series.name = 'zscore' feature_df = join_feature(feature_df,feature_series) if 'sliding_zscore' in args.features: feature_series = calc_sliding_zscore(feature_df, window=args.window_size, use_gmm=args.use_gmm, log_transform=args.log_transform) feature_series.name = 'sliding_zscore' feature_df = join_feature(feature_df,feature_series) if 'fdr_correct' in args.features: fdr_df = calc_fdr_correct(feature_df) feature_df = join_feature(feature_df,fdr_df) if 'sliding_fdr_correct' in args.features: sliding_fdr_df = calc_sliding_fdr_correct(feature_df) feature_df = join_feature(feature_df, sliding_fdr_df) if args.out_filename != None: feature_df.sort_values(args.features[0], ascending=False).to_csv(args.out_filename) else: print feature_df.sort_values(args.features[0], ascending=False) def join_feature(df,feature): return df.join(feature, how='outer') def calc_diffrac(elut1, elut2, percent_totalCounts=False, normalize_totalCounts=False): #kdrew: set columns to be the same, do some error checking to ensure lengths match, also if any realignment is necessary this is the place to do it. assert(len(elut2.df.columns) == len(elut1.df.columns)) elut2.df.columns = elut1.df.columns #kdrew: add empty rows for the ids in elut1 that are not in elut2 and vice versa elut1_ids = set(elut1.df.index) elut2_ids = set(elut2.df.index) #kdrew: add rows in elut1 in elut2 as 0.0 elut1_not_elut2_ids = elut1_ids - elut2_ids elut1_not_elut2 = elut1.df.loc[list(elut1_not_elut2_ids)] elut1_not_elut2[:] = 0.0 elut2.df = elut2.df.append(elut1_not_elut2) #kdrew: add rows in elut1 in elut2 as 0.0 elut2_not_elut1_ids = elut2_ids - elut1_ids elut2_not_elut1 = elut2.df.loc[list(elut2_not_elut1_ids)] elut2_not_elut1[:] = 0.0 elut1.df = elut1.df.append(elut2_not_elut1) elut_diff = elut1.df.subtract(elut2.df) diffrac_sum = np.abs(elut_diff).sum(axis='columns') #kdrew: measures how much of the total counts shifted, 1.0 total shift -> 0.0 no shift if percent_totalCounts: diffrac_sum = diffrac_sum/(elut1.df.sum(axis='columns') + elut2.df.sum(axis='columns')) elif normalize_totalCounts: diffrac_sum = diffrac_sum * diffrac_sum/(elut1.df.sum(axis='columns') + elut2.df.sum(axis='columns')) return diffrac_sum def calc_emd(elut1, elut2): #kdrew: set columns to be the same, do some error checking to ensure lengths match, also if any realignment is necessary this is the place to do it. 
assert(len(elut2.df.columns) == len(elut1.df.columns)) elut2.df.columns = elut1.df.columns #kdrew: add empty rows for the ids in elut1 that are not in elut2 and vice versa elut1_ids = set(elut1.df.index) elut2_ids = set(elut2.df.index) #kdrew: add rows in elut1 in elut2 as 0.0 elut1_not_elut2_ids = elut1_ids - elut2_ids elut1_not_elut2 = elut1.df.loc[list(elut1_not_elut2_ids)] elut1_not_elut2[:] = 0.0 elut2.df = elut2.df.append(elut1_not_elut2) #kdrew: add rows in elut1 in elut2 as 0.0 elut2_not_elut1_ids = elut2_ids - elut1_ids elut2_not_elut1 = elut2.df.loc[list(elut2_not_elut1_ids)] elut2_not_elut1[:] = 0.0 elut1.df = elut1.df.append(elut2_not_elut1) #kdrew: setup distance matrix, every transition costs 1.0 dmat = np.ones((len(elut2.df.columns),len(elut2.df.columns))) #kdrew: make identity transitions cost 0.0 np.fill_diagonal(dmat,0.0) emd_results = [] for idx in elut1.df.index: x = np.ascontiguousarray(elut1.df.loc[idx]) #print(x) y = np.ascontiguousarray(elut2.df.loc[idx]) #print(y) emd_result = emd(x, y, dmat) emd_results.append(emd_result) #kdrew: annoying trick to compare the two dataframes using a function #emd_result = elut1.df.apply(lambda x: emd_func(x, elut2.df, dmat), axis=1) emd_results = pd.Series(emd_results) emd_results.index = elut1.df.index return emd_results def emd_func(x, df2, dmat): print(x.values.flags) y = df2.loc[x.name] print(y) emd_result = emd(x.values, y.values, dmat) print emd_result return emd_result def calc_correlation(elut1, elut2, correlation_func=stats.pearsonr, default=0.0): intersection_ids = set(elut1.df.index).intersection(set(elut2.df.index)) union_ids = set(elut1.df.index).union(set(elut2.df.index)) correlation_dict = {uid:default for uid in union_ids} for uid in intersection_ids: pcoeff = correlation_func(elut1.df.ix[uid],elut2.df.ix[uid]) correlation_dict[uid] = pcoeff df = pd.Series(correlation_dict.values(), index=correlation_dict.keys()) return df def calc_mean_abundance(elut1, elut2): elut1_sum = elut1.df.sum(axis=1) elut2_sum = elut2.df.sum(axis=1) df = (elut1_sum.add(elut2_sum, fill_value=0.0)) / 2.0 return df def calc_zscore(feat_df): if 'annotated' in feat_df.columns: mean = np.mean(feat_df.query("~annotated")['diffrac_normalized']) std = np.std(feat_df.query("~annotated")['diffrac_normalized']) else: print "WARNING: Couldn't find column 'annotated', using all rows for distribution" mean = np.mean(feat_df['diffrac_normalized']) std = np.std(feat_df['diffrac_normalized']) df = (feat_df['diffrac_normalized'] - mean)/std return df #kdrew: min_weight_threshold : mixture model weight has to be above threshold in order to use def calc_sliding_zscore(feat_df, window=100, use_gmm=False, min_weight_threshold=0.6, log_transform=False): sliding_zscore_dict = dict() for id1 in feat_df.sort_values("mean_abundance",ascending=False).query("mean_abundance == mean_abundance").index: i_abnd = feat_df.ix[id1]['mean_abundance'] #kdrew: entries greater than current id if 'annotated' in feat_df.columns: gt_entries = feat_df.query("~annotated and (mean_abundance >= %s)" % i_abnd).sort_values('mean_abundance')['mean_abundance'] lt_entries = feat_df.query("~annotated and (mean_abundance < %s)" % i_abnd).sort_values('mean_abundance', ascending=False)['mean_abundance'] else: print "WARNING: Couldn't find column 'annotated', using all rows for distribution" gt_entries = feat_df.query("(mean_abundance >= %s)" % i_abnd).sort_values('mean_abundance')['mean_abundance'] lt_entries = feat_df.query("(mean_abundance < %s)" % i_abnd).sort_values('mean_abundance', 
ascending=False)['mean_abundance'] print "gt_entries" print gt_entries print "lt_entries" print lt_entries h = window j = window #kdrew: if not enough entries, adjust the other index if len(gt_entries) < h: j = j + (h - len(gt_entries)); h = len(gt_entries) if len(lt_entries) < j: h = h + (j - len(lt_entries)); j = len(lt_entries) entries = list(gt_entries.index[:h]) + list(lt_entries.index[:j]) if log_transform: diffrac_normalized_list = (feat_df.ix[entries]['diffrac_normalized'].fillna(0.0)+0.1).apply(np.log10) else: diffrac_normalized_list = feat_df.ix[entries]['diffrac_normalized'].values if use_gmm: #kdrew: probably should be careful about using GMM's interface, originally was using GaussianMixture but that only exists in newer versions of sklearn #kdrew: create two models, one with a single gaussian and one with two gaussians gmm1 = GMM(n_components=1, covariance_type='spherical').fit(diffrac_normalized_list.reshape(-1,1)) gmm2 = GMM(n_components=2, covariance_type='spherical').fit(diffrac_normalized_list.reshape(-1,1)) #kdrew: Calculate their Baysian Information Criterion which penalizes additional parameters gmm1_bic = gmm1.bic(diffrac_normalized_list.reshape(-1,1)) gmm2_bic = gmm2.bic(diffrac_normalized_list.reshape(-1,1)) print "gmm1 BIC: %s" % gmm1_bic print "gmm2 BIC: %s" % gmm2_bic print "gmm2.means_ %s" % gmm2.means_ min_mean_model = np.argmin(gmm2.means_) print "gmm2.weights_ %s" % gmm2.weights_ max_weight_model = np.argmax(gmm2.weights_) print "[np.sqrt(x) for x in gmm2.covars_] %s" % [np.sqrt(x) for x in gmm2.covars_] #kdrew: use Baysian Information Criterion for model selection #kdrew: also tests to make sure the model with the lowest mean is the dominant peak, #kdrew: also checks that the dominant peak is above some threshold of dominance (might not be necessary anymore with BIC selection but nice to have option if gmm1_bic < gmm2_bic or min_mean_model != max_weight_model or np.max(gmm2.weights_) < min_weight_threshold: print "WARNING: Two-component GMM has higher BIC than one-component GMM or \n highest weighted model does not equal lowest mean model or \n min_weight_threshold not satisfied, *not* using gaussian mixture model" mean_tmp = np.mean(diffrac_normalized_list) std_tmp = np.std(diffrac_normalized_list) else: mean_tmp = gmm2.means_[min_mean_model][0] std_tmp = np.sqrt(gmm2.covars_[min_mean_model][0]) else: mean_tmp = np.mean(diffrac_normalized_list) std_tmp = np.std(diffrac_normalized_list) print "diffrac_normalized_list %s" % diffrac_normalized_list print "id: %s mean_tmp: %s std_tmp: %s" % (id1, mean_tmp, std_tmp) if log_transform: #kdrew: add pseudo-count of 0.1 i_diffrac_normalized = np.log10(feat_df.ix[id1]['diffrac_normalized']+0.1) else: i_diffrac_normalized = feat_df.ix[id1]['diffrac_normalized'] zscore = (i_diffrac_normalized - mean_tmp)/std_tmp sliding_zscore_dict[id1] = zscore df = pd.DataFrame(sliding_zscore_dict.items(), columns=['ACC', 'sliding_zscore']).set_index('ACC') print df return df def calc_fdr_correct(feat_df, unannotated_only=False): df = feat_df.fillna(0.0) if unannotated_only: df = df[~df.annotated] #df['pvalues'] =
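The heart of calc_diffrac above is a row-wise L1 norm of the difference between two elution matrices aligned on the same ids, optionally rescaled by the total counts so that 1.0 means a complete shift and 0.0 means no shift. The toy below reproduces that arithmetic on made-up pandas frames; the real script works on Elut objects loaded from .elut files and aligns ids by appending zero-filled rows.

import pandas as pd

elut1 = pd.DataFrame([[5, 0, 0], [2, 2, 2]], index=["protA", "protB"])
elut2 = pd.DataFrame([[0, 0, 5], [2, 2, 2], [1, 1, 1]], index=["protA", "protB", "protC"])

# align both matrices on the union of ids; missing rows count as zero
ids = elut1.index.union(elut2.index)
e1 = elut1.reindex(ids, fill_value=0)
e2 = elut2.reindex(ids, fill_value=0)

diffrac = e1.subtract(e2).abs().sum(axis=1)                     # plain L1 difference
diffrac_percent = diffrac / (e1.sum(axis=1) + e2.sum(axis=1))   # fraction of counts shifted

print(diffrac)          # protA: 10 (all counts moved), protB: 0 (unchanged), protC: 3
print(diffrac_percent)  # protA: 1.0, protB: 0.0, protC: 1.0 (present in only one experiment)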
<filename>tests/test_georaster.py import pytest import os from tempfile import TemporaryDirectory, NamedTemporaryFile from copy import deepcopy import numpy as np from affine import Affine from rasterio.enums import Resampling, MaskFlags from unittest.mock import Mock from PIL import Image from shapely.geometry import Point, Polygon from rasterio.crs import CRS from rasterio.rpc import RPC from rasterio.errors import NotGeoreferencedWarning from rasterio.windows import Window from telluric.constants import WGS84_CRS, WEB_MERCATOR_CRS from telluric.georaster import GeoRaster2, GeoRaster2Error, GeoRaster2Warning, join, MutableGeoRaster, \ GeoRaster2IOError from telluric.vectors import GeoVector from telluric.features import GeoFeature from telluric.collections import FeatureCollection from common_for_tests import make_test_raster some_array = np.array([[0, 1, 2], [3, 4, 5]], dtype=np.uint8) some_mask = np.array([[False, False, False], [False, False, True]], dtype=bool) some_image_2d = np.ma.array(some_array, mask=some_mask) some_image_2d_alt = np.ma.array(np.array([[0, 1, 2], [3, 4, 99]], dtype=np.uint8), mask=some_mask) some_image_3d = np.ma.array(some_array[np.newaxis, :, :], mask=some_mask[np.newaxis, :, :]) some_image_3d_multiband = np.ma.array( np.array([some_array, some_array, some_array]), mask=np.array([some_mask, some_mask, some_mask])) raster_origin = Point(2, 3) some_affine = Affine.translation(raster_origin.x, raster_origin.y) some_crs = CRS({'init': 'epsg:32620'}) some_rpcs = RPC(height_off=10.0, height_scale=10.0, lat_off=30.0, lat_scale=30.0, line_den_coeff=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, -1.0, -2.0, -3.0, 6.0, 1.0, 2.0, 4.0, 5.0, 6.0, 7.0], line_num_coeff=[2.0, 5.0, 3.0, 4.0, 5.0, 9.0, 7.0, 8.0, 9.0, 10.0, -1.0, -2.0, -3.0, 6.0, 9.0, 2.0, 3.0, 5.0, 6.0, 7.0], line_off=0.0, line_scale=0.0, long_off=0.0, long_scale=0.0, samp_den_coeff=[4.0, 7.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 1.0, -2.0, -3.0, 7.0, 1.0, 2.0, 7.0, 5.0, 6.0, 7.0], samp_num_coeff=[8.0, 9.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 8.0, -2.0, -3.0, 6.0, 1.0, 5.0, 4.0, 5.0, 6.0, 7.0], samp_off=50.0, samp_scale=50.0) some_raster = GeoRaster2(some_image_2d, affine=some_affine, crs=some_crs, band_names=['r']) some_raster_alt = GeoRaster2(some_image_2d_alt, affine=some_affine, crs=some_crs, band_names=['r']) some_raster_multiband = GeoRaster2( some_image_3d_multiband, band_names=['r', 'g', 'b'], affine=some_affine, crs=some_crs) default_factors = [2, 4, 8, 16] some_float32_array = np.array([[[0.0, 0.2], [0.4, 0.6]], [[0.7, 0.8], [0.9, 1.0]]], dtype=np.float32) some_float32_raster = GeoRaster2(some_float32_array, band_names=[1, 2], affine=some_affine, crs=some_crs, nodata=None) some_raster_shrunk_mask = some_raster_multiband.copy_with( image=np.ma.array(some_raster_multiband.image.data, mask=np.ma.nomask)) def test_construction(): # test image - different formats yield identical rasters: raster_masked_2d = GeoRaster2(some_image_2d, affine=some_affine, crs=some_crs, rpcs=some_rpcs) raster_masked_3d = GeoRaster2(some_image_3d, affine=some_affine, crs=some_crs, rpcs=some_rpcs) raster_masked_array = GeoRaster2(some_array, nodata=5, affine=some_affine, crs=some_crs, rpcs=some_rpcs) assert raster_masked_2d == raster_masked_3d assert raster_masked_2d == raster_masked_array assert np.array_equal(raster_masked_2d.image, some_image_3d) assert raster_masked_2d.affine == some_affine assert raster_masked_2d.crs == some_crs assert raster_masked_2d.rpcs == some_rpcs assert raster_masked_2d.dtype == 
some_image_2d.dtype # test bandnames: assert GeoRaster2(some_image_2d, affine=some_affine, crs=some_crs, band_names='gray').band_names == ['gray'] assert GeoRaster2(some_image_2d, affine=some_affine, crs=some_crs, band_names=['gray']).band_names == ['gray'] assert GeoRaster2(some_image_2d, affine=some_affine, crs=some_crs).band_names == [0] with pytest.raises(GeoRaster2Error): GeoRaster2(some_image_2d, affine=some_affine, crs=some_crs, band_names=['gray', 'red']) def test_rpcs_init(): # test rpcs initilization by passing rpcs in different forms rpcs_dict = some_rpcs.to_dict() rpcs_dict = {k.upper(): v for k, v in rpcs_dict.items()} georaster1 = GeoRaster2(some_image_2d, rpcs=some_rpcs) georaster2 = GeoRaster2(some_image_2d, rpcs=rpcs_dict) assert georaster1.rpcs == georaster2.rpcs # rpcs defined as dictionary of str new_rpcs_dict = {} for k, v in rpcs_dict.items(): if isinstance(v, float): new_rpcs_dict[k] = str(v) line_num_coeff = rpcs_dict['LINE_NUM_COEFF'] new_rpcs_dict['LINE_NUM_COEFF'] = ' '.join(str(e) for e in line_num_coeff) line_den_coeff = rpcs_dict['LINE_DEN_COEFF'] new_rpcs_dict['LINE_DEN_COEFF'] = ' '.join(str(e) for e in line_den_coeff) samp_num_coeff = rpcs_dict['SAMP_NUM_COEFF'] new_rpcs_dict['SAMP_NUM_COEFF'] = ' '.join(str(e) for e in samp_num_coeff) samp_den_coeff = rpcs_dict['SAMP_DEN_COEFF'] new_rpcs_dict['SAMP_DEN_COEFF'] = ' '.join(str(e) for e in samp_den_coeff) georaster3 = GeoRaster2(some_image_2d, rpcs=new_rpcs_dict) assert georaster2.rpcs == georaster3.rpcs def test_eq(): """ test ._eq_ """ assert some_raster == some_raster assert some_raster != some_raster.copy_with(image=some_raster.image + 1) assert some_raster != some_raster.copy_with(affine=Affine.translation(42, 42)) assert some_raster != some_raster.copy_with(crs={'init': 'epsg:32621'}) # np.ma.nomask assert ( some_raster.copy_with(image=np.ma.masked_array( some_raster.image.data, mask=np.ma.nomask)) == some_raster.copy_with(image=np.ma.masked_array( some_raster.image.data, mask=np.zeros_like(some_raster.image.data, dtype=bool)))) assert ( some_raster.copy_with(image=np.ma.masked_array( some_raster.image.data, mask=np.zeros_like(some_raster.image.data, dtype=bool))) == some_raster.copy_with(image=np.ma.masked_array( some_raster.image.data, mask=np.ma.nomask))) def test_copy(): assert some_raster == some_raster.copy() assert isinstance(some_raster.copy(mutable=True), MutableGeoRaster) raster = GeoRaster2.open('./tests/data/raster/overlap1.tif') assert raster == raster.copy() # assert raster.copy().not_loaded() raster.image assert raster == raster.copy() assert not raster.copy().not_loaded() def test_eq_ignores_masked_values(): assert some_raster == some_raster_alt def test_read_write(): for extension in ['tif', 'png']: with TemporaryDirectory() as folder: path = os.path.join(folder, 'test.%s' % extension) some_raster_multiband.save(path, factors=default_factors) read = GeoRaster2.open(path) assert read == some_raster_multiband def test_read_non_georeferenced(recwarn): crs = CRS(init='epsg:3857') affine = Affine(10.0, 0.0, -6425941.63996855, 0.0, -10.0, -3169315.69478084) raster = GeoRaster2.open('tests/data/raster/no_georef.png', crs=crs, affine=affine, lazy_load=False) assert raster.crs == crs assert raster.affine == affine def test_read_write_internal_external_mask(): with TemporaryDirectory() as folder: # internal mask (default) leaves no .msk file: internal_path = os.path.join(folder, 'internal.tif') some_raster_multiband.save(internal_path, factors=default_factors) assert not 
os.path.exists(internal_path + '.msk') # external mask leaves .msk file: external_path = os.path.join(folder, 'external.tif') some_raster_multiband.save(external_path, GDAL_TIFF_INTERNAL_MASK=False, factors=default_factors) assert os.path.exists(external_path + '.msk') # other than that, both rasters are identical: assert GeoRaster2.open(internal_path) == GeoRaster2.open(external_path) def test_read_respects_nodata(): expected_nodata = 100 path = '/vsimem/raster_for_test.tif' some_raster.save(path, nodata=expected_nodata) assert expected_nodata == GeoRaster2.open(path).nodata_value def test_save_preserves_nodata(): expected_nodata = 200 path = '/vsimem/raster_for_test.tif' raster_nodata = GeoRaster2(some_array, nodata=expected_nodata, affine=some_affine, crs=some_crs) raster_nodata.save(path) assert expected_nodata == GeoRaster2.open(path).nodata_value def test_tags(): with TemporaryDirectory() as folder: path = os.path.join(folder, 'test.tif') some_raster_multiband.save(path, tags={'foo': 'bar'}, factors=default_factors) assert GeoRaster2.tags(path) == {'AREA_OR_POINT': 'Area', 'foo': 'bar', 'telluric_band_names': '["r", "g", "b"]'} # namespace=default assert GeoRaster2.tags(path, 'IMAGE_STRUCTURE') == {'COMPRESSION': 'LZW', 'INTERLEAVE': 'PIXEL'} def test_deepcopy(): """ Tests .__copy__() and .__deepcopy__() """ a_raster = some_raster.deepcopy_with() deep_copy = deepcopy(a_raster) a_raster.image.setflags(write=1) a_raster.image.data[0, 0, 0] += 1 a_raster.image.setflags(write=0) assert deep_copy.image[0, 0, 0] == a_raster.image[0, 0, 0] - 1 def test_copy_with(): new_affine = Affine.translation(42, 42) assert some_raster.copy_with(affine=new_affine).affine == new_affine @pytest.mark.parametrize("raster", [some_raster, some_raster.save("/vsimem/test_resize.tif")]) def test_resize(raster): resampling_modes = [m.name for m in Resampling] for resampling_name in resampling_modes: resampling = Resampling[resampling_name] print('\nresampling name:', resampling_name) if resampling_name in ['min', 'max', 'med', 'q1', 'q3', 'sum']: print('\nskipping', resampling_name) continue if resampling_name not in ['bilinear', 'cubic', 'cubic_spline', 'lanczos', 'gauss']: resized_raster = raster.resize(2, resampling=resampling).resize(.5, resampling=resampling) assert (raster.image == resized_raster.image).all() assert raster == resized_raster # continue if resampling_name not in ['cubic_spline']: assert raster.resize(ratio=1, resampling=resampling) == some_raster assert raster.resize(ratio=2, resampling=resampling).width == 2 * some_raster.width assert raster.resize(ratio=2, resampling=resampling).shape == (1, 4, 6) assert raster.resize(dest_height=42, resampling=resampling).height == 42 assert raster.resize(dest_width=42, resampling=resampling).width == 42 assert raster.resize(dest_width=42, dest_height=42, resampling=resampling).width == 42 assert raster.resize(dest_width=42, dest_height=42, resampling=resampling).height == 42 assert raster.resize(dest_resolution=42, resampling=resampling).resolution() == 42 with pytest.raises(GeoRaster2Error): raster.resize(ratio=1, dest_width=2) with pytest.raises(GeoRaster2Error): raster.resize(ratio_x=2) def test_to_pillow_image(): # without mask: img = some_raster_multiband.to_pillow_image() assert img.height == some_raster_multiband.height assert img.width == some_raster_multiband.width assert len(img.getbands()) == some_raster_multiband.num_bands # with mask: img, mask = some_raster_multiband.to_pillow_image(return_mask=True) assert mask.height == 
some_raster_multiband.height assert mask.width == some_raster_multiband.width assert len(mask.getbands()) == 1 # with shrunk mask: img, mask = some_raster_shrunk_mask.to_pillow_image(return_mask=True) assert mask.height == some_raster_multiband.height assert mask.width == some_raster_multiband.width assert len(mask.getbands()) == 1 def test_num_pixels(): assert some_raster_multiband.num_pixels() == 6 assert some_raster_multiband.num_pixels_data() == 5 assert some_raster_multiband.num_pixels_nodata() == 1 def test_limit_to_bands(): with pytest.raises(GeoRaster2Error) as error: some_raster_multiband.limit_to_bands(['not-existing']) assert "requested bands {'not-existing'} that are not found in raster" in error.exconly() bands = ['g', 'b'] selected = some_raster_multiband.limit_to_bands(bands) assert selected.band_names == bands def test_limit_to_bands_off_memory(): r1 = GeoRaster2.open("tests/data/raster/rgb.tif", band_names=['r', 'g', 'b'], lazy_load=False) r2 = GeoRaster2.open("tests/data/raster/rgb.tif", band_names=['r', 'g', 'b']) assert r1.limit_to_bands(['b', 'r']) == r2.limit_to_bands(['b', 'r']) assert r1.limit_to_bands(['r', 'b']) != r2.limit_to_bands(['b', 'r']) assert r2._image is None def test_to_png_singleband(): pytest.importorskip("matplotlib") raster = some_raster png_bytes = raster.colorize("gray").to_png(transparent=True, thumbnail_size=512) img = Image.frombytes('RGBA', (raster.width, raster.height), png_bytes) assert img.size == raster.to_pillow_image().size def test_to_png_multiband(): raster = some_raster_multiband png_bytes = raster.to_png(transparent=True, thumbnail_size=512) img = Image.frombytes('RGBA', (raster.width, raster.height), png_bytes) assert img.size == raster.to_pillow_image().size def test_to_png_uint16(recwarn): raster = make_test_raster(257 * 42, band_names=[1, 2, 3], dtype=np.uint16) png_bytes = raster.to_png(transparent=True) w = recwarn.pop(GeoRaster2Warning) assert str(w.message) == "downscaling dtype to 'uint8' to convert to png" img = Image.frombytes('RGBA', (raster.width, raster.height), png_bytes) expected_image_size = raster.astype(np.uint8).to_pillow_image().size assert img.size == expected_image_size assert raster.astype(np.uint8) == GeoRaster2.from_bytes(png_bytes, affine=raster.affine, crs=raster.crs, band_names=raster.band_names) def test_to_png_int32(recwarn): raster = make_test_raster(257 * 42, band_names=[1, 2, 3], dtype=np.int32) png_bytes = raster.to_png(transparent=True) w = recwarn.pop(GeoRaster2Warning) assert str(w.message) == "downscaling dtype to 'uint8' to convert to png" img = Image.frombytes('RGBA', (raster.width, raster.height), png_bytes) expected_image_size = raster.astype(np.uint8).to_pillow_image().size assert img.size == expected_image_size assert raster.astype(np.uint8) == GeoRaster2.from_bytes(png_bytes, affine=raster.affine, crs=raster.crs, band_names=raster.band_names) def test_to_png_from_bytes(): arr = np.array([np.full((3, 5), 1), np.full((3, 5), 5), np.full((3, 5), 10)], dtype=np.uint8) raster = GeoRaster2(image=arr, affine=Affine.identity(), crs=WEB_MERCATOR_CRS, band_names=['r', 'g', 'b']) png_bytes = raster.to_png() assert raster == GeoRaster2.from_bytes(png_bytes, affine=raster.affine, crs=raster.crs, band_names=raster.band_names) def test_to_png_uses_the_first_band_for_a_two_bands_raster(recwarn): raster = make_test_raster(257 * 42, band_names=[1, 2], dtype=np.int32) png_bytes = raster.to_png(transparent=True, thumbnail_size=512) w = recwarn.pop(GeoRaster2Warning) assert str(w.message) == "Deprecation: 
to_png of less then three bands raster will be not be supported in next \ release, please use: .colorize('gray').to_png()" w = recwarn.pop(GeoRaster2Warning) assert str(w.message) == "Limiting two bands raster to use the first band to generate png" w = recwarn.pop(GeoRaster2Warning) assert str(w.message) == "downscaling dtype to 'uint8' to convert to png" img = Image.frombytes('RGBA', (raster.width, raster.height), png_bytes) expected_image_size = raster.limit_to_bands([1]).astype(np.uint8).to_pillow_image().size assert img.size == expected_image_size def test_to_png_uses_warns_on_single_bands_raster(recwarn): raster = make_test_raster(42, band_names=[1], dtype=np.uint8) png_bytes = raster.to_png(transparent=True, thumbnail_size=512) w = recwarn.pop(GeoRaster2Warning) assert str(w.message) == "Deprecation: to_png of less then three bands raster will be not be supported in next \ release, please use: .colorize('gray').to_png()" img = Image.frombytes('RGBA', (raster.width, raster.height), png_bytes) expected_image_size = raster.limit_to_bands([1]).astype(np.uint8).to_pillow_image().size assert img.size == expected_image_size def test_to_png_uses_the_first_three_bands_for_a_more_than_three_bands_raster(recwarn): raster = make_test_raster(257 * 42, band_names=[1, 2, 3,
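# The test_rpcs_init case above passes RPCs to GeoRaster2 in three equivalent forms: a
# rasterio RPC object, a dict of floats/coefficient lists, and a dict of strings in which
# coefficient lists are space-joined. A minimal sketch of that string conversion, factored
# into a hypothetical helper (rpcs_dict_to_strings is not part of telluric):
def rpcs_dict_to_strings(rpcs_dict):
    """Convert RPC values to the all-string form exercised by test_rpcs_init."""
    out = {}
    for key, value in rpcs_dict.items():
        if isinstance(value, (list, tuple)):
            # coefficient arrays such as LINE_NUM_COEFF become space-separated strings
            out[key] = ' '.join(str(coeff) for coeff in value)
        else:
            # scalar offsets and scales become their plain string representation
            out[key] = str(value)
    return out

# usage sketch, assuming some_rpcs and some_image_2d as defined earlier in this module:
# GeoRaster2(some_image_2d, rpcs=rpcs_dict_to_strings(some_rpcs.to_dict()))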
import pendulum as pdl import sys sys.path.append(".") # the memoization-related library import loguru import itertools import portion import klepto.keymaps import CacheIntervals as ci from CacheIntervals.utils import flatten from CacheIntervals.utils import pdl2pd, pd2pdl from CacheIntervals.utils import Timer from CacheIntervals.Intervals import pd2po, po2pd from CacheIntervals.RecordInterval import RecordIntervals, RecordIntervalsPandas class QueryRecorder: ''' A helper class ''' pass class MemoizationWithIntervals(object): ''' The purpose of this class is to optimise the number of call to a function retrieving possibly disjoint intervals: - do standard caching for a given function - additively call for a date posterior to one already cached is supposed to yield a pandas Frame which can be obtained by concatenating the cached result and a -- hopefully much -- smaller query Maintains a list of intervals that have been called. With a new interval: - ''' keymapper = klepto.keymaps.stringmap(typed=False, flat=False) def __init__(self, pos_args=None, names_kwarg=None, classrecorder=RecordIntervalsPandas, aggregation=lambda listdfs: pd.concat(listdfs, axis=0), debug=False, # memoization=klepto.lru_cache( # cache=klepto.archives.hdf_archive( # f'{pdl.today().to_date_string()}_memoization.hdf5'), # keymap=keymapper), memoization=klepto.lru_cache( cache=klepto.archives.dict_archive(), keymap=keymapper), **kwargs): ''' :param pos_args: the indices of the positional arguments that will be handled as intervals :param names_kwarg: the name of the named parameters that will be handled as intervals :param classrecorder: the interval recorder type we want to use :param memoization: a memoization algorithm ''' # A dictionary of positional arguments indices # that are intervals self.argsi = {} self.kwargsi = {} # if pos_args is not None: # for posarg in pos_args: # self.argsi[posarg] = classrecorder(**kwargs) self.pos_args_itvl = pos_args if pos_args is not None else [] #print(self.args) # if names_kwarg is not None: # for namedarg in names_kwarg: # self.kwargsi[namedarg] = classrecorder(**kwargs) self.names_kwargs_itvl = names_kwarg if names_kwarg is not None else {} #print(self.kwargs) self.memoization = memoization self.aggregation = aggregation self.debugQ = debug self.argsdflt = None self.kwargsdflt = None self.time_last_call = pdl.today() self.classrecorder = classrecorder self.kwargsrecorder = kwargs self.argssolver = None self.query_recorder = QueryRecorder() def __call__(self, f): ''' The interval memoization leads to several calls to the standard memoised function and generates a list of return values. The aggregation is needed for the doubly lazy function to have the same signature as the To access, the underlying memoized function pass get_function_cachedQ=True to the kwargs of the overloaded call (not of this function :param f: the function to memoize :return: the wrapper to the memoized function ''' if self.argssolver is None: self.argssolver = ci.Functions.ArgsSolver(f, split_args_kwargsQ=True) @self.memoization def f_cached(*args, **kwargs): ''' The cached function is used for a double purpose: 1. for standard calls, will act as the memoised function in a traditional way 2. Additively when pass parameters of type QueryRecorder, it will create or retrieve the interval recorders associated with the values of non-interval parameters. In this context, we use the cached function as we would a dictionary. 
''' QueryRecorderQ = False args_new = [] kwargs_new = {} ''' check whether this is a standard call to the user function or a request for the interval recorders ''' for i,arg in enumerate(args): if isinstance(arg, QueryRecorder): args_new.append(self.classrecorder(**self.kwargsrecorder)) QueryRecorderQ = True else: args_new.append(args[i]) for name in kwargs: if isinstance(kwargs[name], QueryRecorder): kwargs_new[name] = self.classrecorder(**self.kwargsrecorder) QueryRecorderQ = True else: kwargs_new[name] = kwargs[name] if QueryRecorderQ: return args_new, kwargs_new return f(*args, **kwargs) def wrapper(*args, **kwargs): if kwargs.get('get_function_cachedQ', False): return f_cached #loguru.logger.debug(f'function passed: {f_cached}') loguru.logger.debug(f'args passed: {args}') loguru.logger.debug(f'kwargs passed: {kwargs}') # First pass: resolve the recorders dargs_exp, kwargs_exp = self.argssolver(*args, **kwargs) # Intervals are identified by position and keyword name # 1. First get the interval recorders args_exp = list(dargs_exp.values()) args_exp_copy = args_exp.copy() kwargs_exp_copy = kwargs_exp.copy() for i in self.pos_args_itvl: args_exp_copy[i] = self.query_recorder for name in self.names_kwargs_itvl: kwargs_exp_copy[name] = self.query_recorder args_with_ri, kwargs_with_ri = f_cached(*args_exp_copy, **kwargs_exp_copy) # 2. Now get the the actual list of intervals for i in self.pos_args_itvl: # reuse args_exp_copy to store the list args_exp_copy[i] = args_with_ri[i](args_exp[i]) for name in self.names_kwargs_itvl: # reuse kwargs_exp_copy to store the list kwargs_exp_copy[name] = kwargs_with_ri[name](kwargs_exp[name]) '''3. Then generate all combination of parameters 3.a - args''' ns_args = range(len(args_exp)) lists_possible_args = [[args_exp[i]] if i not in self.pos_args_itvl else args_exp_copy[i] for i in ns_args] # Take the cartesian product of these calls_args = list( map(list,itertools.product(*lists_possible_args))) '''3.b kwargs''' #kwargs_exp_vals = kwargs_exp_copy.values() names_kwargs = list(kwargs_exp_copy.keys()) lists_possible_kwargs = [[kwargs_exp[name]] if name not in self.names_kwargs_itvl else kwargs_exp_copy[name] for name in names_kwargs] calls_kwargs = list(map(lambda l: dict(zip(names_kwargs,l)), itertools.product(*lists_possible_kwargs))) calls = list(itertools.product(calls_args, calls_kwargs)) if self.debugQ: results = [] for call in calls: with Timer() as timer: results.append(f_cached(*call[0], **call[1]) ) print('Timer to demonstrate caching:') timer.display(printQ=True) else: results = [f_cached(*call[0], **call[1]) for call in calls] result = self.aggregation(results) return result return wrapper if __name__ == "__main__": import logging import daiquiri import pandas as pd import time daiquiri.setup(logging.DEBUG) logging.getLogger('OneTick64').setLevel(logging.WARNING) logging.getLogger('databnpp.ODCB').setLevel(logging.WARNING) logging.getLogger('requests_kerberos').setLevel(logging.WARNING) pd.set_option('display.max_rows', 200) pd.set_option('display.width', 600) pd.set_option('display.max_columns', 200) tssixdaysago = pdl2pd(pdl.yesterday('UTC').add(days=-5)) tsfivedaysago = pdl2pd(pdl.yesterday('UTC').add(days=-4)) tsfourdaysago = pdl2pd(pdl.yesterday('UTC').add(days=-3)) tsthreedaysago = pdl2pd(pdl.yesterday('UTC').add(days=-2)) tstwodaysago = pdl2pd(pdl.yesterday('UTC').add(days=-1)) tsyesterday = pdl2pd(pdl.yesterday('UTC')) tstoday = pdl2pd(pdl.today('UTC')) tstomorrow = pdl2pd(pdl.tomorrow('UTC')) tsintwodays = 
pdl2pd(pdl.tomorrow('UTC').add(days=1)) tsinthreedays = pdl2pd(pdl.tomorrow('UTC').add(days=2)) def print_calls(calls): print( list( map( lambda i: (i.left, i.right), calls))) def print_calls_dates(calls): print( list( map( lambda i: (pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()), calls))) def display_calls(calls): loguru.logger.info( list( map( lambda i: (pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()), calls))) # Testing record intervals -> ok if True: itvals = RecordIntervals() calls = itvals(portion.closed(pdl.yesterday(), pdl.today())) print(list(map( lambda i: (i.lower.to_date_string(), i.upper.to_date_string()), calls))) print(list(map(lambda i: type(i), calls))) calls = itvals( portion.closed(pdl.yesterday().add(days=-1), pdl.today().add(days=1))) #print(calls) print( list( map( lambda i: (i.lower.to_date_string(), i.upper.to_date_string()), calls))) # Testing record intervals pandas -> ok if True: itvals = RecordIntervalsPandas() # yesterday -> today calls = itvals(pd.Interval(pdl2pd(pdl.yesterday()), pdl2pd(pdl.today()), closed='left')) print( list( map( lambda i: (pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()), calls))) # day before yesterday -> tomorrow: should yield 3 intervals calls = itvals(pd.Interval(pdl2pd(pdl.yesterday().add(days=-1)), pdl2pd(pdl.today().add(days=1)))) print( list( map( lambda i: (pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()), calls))) # day before yesterday -> day after tomorrow: should yield 4 intervals calls = itvals( pd.Interval(pdl2pd(pdl.yesterday().add(days=-1)), pdl2pd(pdl.tomorrow().add(days=1)))) print( list( map( lambda i: (pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()), calls))) # 2 days before yesterday -> 2day after tomorrow: should yield 6 intervals calls = itvals( pd.Interval(pdl2pd(pdl.yesterday().add(days=-2)), pdl2pd(pdl.tomorrow().add(days=2)))) print(list(map( lambda i: (pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()), calls))) # Further tests on record intervals pandas if False: itvals = RecordIntervalsPandas() calls = itvals(pd.Interval(tstwodaysago, tstomorrow, closed='left')) display_calls(calls) calls = itvals( pd.Interval(tstwodaysago, tsyesterday)) display_calls(calls) calls = itvals( pd.Interval(tstwodaysago, tsintwodays)) display_calls(calls) calls = itvals( pd.Interval(pdl2pd(pdl.yesterday().add(days=-2)), pdl2pd(pdl.tomorrow().add(days=2)))) display_calls(calls) # proof-of_concept of decorator to modify function parameters if False: class dector_arg: # a toy model def __init__(self, pos_arg=None, f_arg=None, name_kwarg=None, f_kwarg=None): ''' :param pos_arg: the positional argument :param f_arg: the function to apply to the positional argument :param name_kwarg: the keyword argument :param f_kwarg: the function to apply to the keyword argument ''' self.args = {} self.kwargs = {} if pos_arg: self.args[pos_arg] = f_arg print(self.args) if name_kwarg: self.kwargs[name_kwarg] = f_kwarg print(self.kwargs) def __call__(self, f): ''' the decorator action :param f: the function to decorate :return: a function whose arguments have the function f_args and f_kwargs pre-applied. 
''' self.f = f def inner_func(*args, **kwargs): print(f'function passed: {self.f}') print(f'args passed: {args}') print(f'kwargs passed: {kwargs}') largs = list(args) for i, f in self.args.items(): print(i) print(args[i]) largs[i] = f(args[i]) for name, f in self.kwargs.items(): kwargs[name] = f(kwargs[name]) return self.f(*largs, **kwargs) return inner_func dec = dector_arg(pos_arg=0, f_arg=lambda x: x + 1, name_kwarg='z', f_kwarg=lambda x: x + 1) @dector_arg(1, lambda x: x + 1, 'z', lambda x: x + 1) def g(x, y, z=3): ''' The decorated function should add one to the second positional argument and :param x: :param y: :param z: :return: ''' print(f'x->{x}') print(f'y->{y}') print(f'z->{z}') g(1, 10, z=100) if False: memo = MemoizationWithIntervals() # testing MemoizationWithIntervals # typical mechanism if False: @MemoizationWithIntervals( None, ['interval'], aggregation=list, debug=True, memoization=klepto.lru_cache( maxsize=200, cache=klepto.archives.hdf_archive( f'{pdl.today().to_date_string()}_memoisation.hdf5'), keymap=klepto.keymaps.stringmap(typed=False, flat=False))) def function_with_interval_param(dummy1,dummy2, kdummy=1, interval=pd.Interval(tstwodaysago, tstomorrow)): time.sleep(1) print('****') print(f'dummy1: {dummy1}, dummy2: {dummy2}') print(f'kdummy: {kdummy}') print(f'interval: {interval}') return [dummy1, dummy2, kdummy, interval] print('=*=*=*=* MECHANISM DEMONSTRATION =*=*=*=*') print('==== First pass ===') print("initialisation with an interval from yesterday to today") # function_with_interval_params(pd.Interval(pdl.yesterday(), pdl.today(),closed='left'), # interval1 = pd.Interval(pdl.yesterday().add(days=0), # pdl.today(), closed='both') # ) print( f'Final result:\n{function_with_interval_param(0, 1, interval=pd.Interval(tsyesterday, tstoday))}') print('==== Second pass ===') print("request for data from the day before yesterday to today") print("expected split in two intervals with results from yesterday to today being cached") print( f'Final result:
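# Typical use of the MemoizationWithIntervals decorator defined above, mirroring the
# function_with_interval_param demonstration in __main__: the keyword argument 'interval'
# is declared as the interval-typed parameter, per-sub-interval results are aggregated
# with `list`, and the underlying memoisation is a klepto LRU cache. (Sketch only;
# fetch_data and its arguments are illustrative, not part of the module.)
@MemoizationWithIntervals(
    None, ['interval'],                       # no positional interval args, one named one
    aggregation=list,                         # keep the per-sub-interval results as a list
    memoization=klepto.lru_cache(
        maxsize=200,
        cache=klepto.archives.dict_archive(),
        keymap=klepto.keymaps.stringmap(typed=False, flat=False)))
def fetch_data(source, interval=pd.Interval(tsyesterday, tstoday)):
    # stand-in for an expensive query over [interval.left, interval.right)
    return [source, interval]

# A first call caches yesterday -> today; a later, wider request only triggers a real
# query for the missing sub-interval, and the cached piece is reused and concatenated:
# fetch_data('db', interval=pd.Interval(tsyesterday, tstoday))
# fetch_data('db', interval=pd.Interval(tstwodaysago, tstoday))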
y) assert len(mock_estimator_fit.call_args[0][0]) == len( mock_estimator_fit.call_args[0][1] ) assert len(mock_estimator_fit.call_args[0][0]) == int(1.25 * 90) def test_component_graph_equality(example_graph): different_graph = { "Target Imputer": [TargetImputer, "X", "y"], "OneHot": [OneHotEncoder, "Target Imputer.x", "Target Imputer.y"], "Random Forest": [RandomForestClassifier, "OneHot.x", "Target Imputer.y"], "Elastic Net": [ElasticNetClassifier, "OneHot.x", "Target Imputer.y"], "Logistic Regression Classifier": [ LogisticRegressionClassifier, "Random Forest.x", "Elastic Net.x", "Target Imputer.y", ], } same_graph_different_order = { "Imputer": [Imputer, "X", "y"], "OneHot_ElasticNet": [OneHotEncoder, "Imputer.x", "y"], "OneHot_RandomForest": [OneHotEncoder, "Imputer.x", "y"], "Random Forest": [RandomForestClassifier, "OneHot_RandomForest.x", "y"], "Elastic Net": [ElasticNetClassifier, "OneHot_ElasticNet.x", "y"], "Logistic Regression Classifier": [ LogisticRegressionClassifier, "Random Forest.x", "Elastic Net.x", "y", ], } component_graph = ComponentGraph(example_graph, random_seed=0) component_graph_eq = ComponentGraph(example_graph, random_seed=0) component_graph_different_seed = ComponentGraph(example_graph, random_seed=5) component_graph_not_eq = ComponentGraph(different_graph, random_seed=0) component_graph_different_order = ComponentGraph( same_graph_different_order, random_seed=0 ) component_graph.instantiate() component_graph_eq.instantiate() component_graph_different_seed.instantiate() component_graph_not_eq.instantiate() component_graph_different_order.instantiate() assert component_graph == component_graph assert component_graph == component_graph_eq assert component_graph != "not a component graph" assert component_graph != component_graph_different_seed assert component_graph != component_graph_not_eq assert component_graph != component_graph_different_order def test_component_graph_equality_same_graph(): # Same component nodes and edges, just specified in a different order in the input dictionary component_graph = ComponentGraph( { "Component B": [OneHotEncoder, "X", "y"], "Component A": [DateTimeFeaturizer, "Component B.x", "y"], "Random Forest": [ RandomForestClassifier, "Component A.x", "Component B.x", "y", ], } ) equal_component_graph = ComponentGraph( { "Component B": [OneHotEncoder, "X", "y"], "Component A": [DateTimeFeaturizer, "Component B.x", "y"], "Random Forest": [ RandomForestClassifier, "Component B.x", "Component A.x", "y", ], } ) component_graph == equal_component_graph @pytest.mark.parametrize("return_dict", [True, False]) def test_describe_component_graph(return_dict, example_graph, caplog): component_graph = ComponentGraph(example_graph, random_seed=0) component_graph.instantiate() expected_component_graph_dict = { "Imputer": { "name": "Imputer", "parameters": { "categorical_impute_strategy": "most_frequent", "numeric_impute_strategy": "mean", "categorical_fill_value": None, "numeric_fill_value": None, }, }, "One Hot Encoder": { "name": "One Hot Encoder", "parameters": { "top_n": 10, "features_to_encode": None, "categories": None, "drop": "if_binary", "handle_unknown": "ignore", "handle_missing": "error", }, }, "Random Forest Classifier": { "name": "Random Forest Classifier", "parameters": {"n_estimators": 100, "max_depth": 6, "n_jobs": -1}, }, "Elastic Net Classifier": { "name": "Elastic Net Classifier", "parameters": { "C": 1, "l1_ratio": 0.15, "n_jobs": -1, "solver": "saga", "penalty": "elasticnet", "multi_class": "auto", }, }, "Logistic Regression 
Classifier": { "name": "Logistic Regression Classifier", "parameters": { "penalty": "l2", "C": 1.0, "n_jobs": -1, "multi_class": "auto", "solver": "lbfgs", }, }, } component_graph_dict = component_graph.describe(return_dict=return_dict) if return_dict: assert component_graph_dict == expected_component_graph_dict else: assert component_graph_dict is None out = caplog.text for component in component_graph.component_instances.values(): if component.hyperparameter_ranges: for parameter in component.hyperparameter_ranges: assert parameter in out assert component.name in out class LogTransform(Transformer): name = "Log Transform" modifies_features = False modifies_target = True def __init__(self, parameters=None, random_seed=0): super().__init__(parameters={}, component_obj=None, random_seed=random_seed) def fit(self, X, y): return self def transform(self, X, y=None): if y is None: return X, y y = infer_feature_types(y) return X, infer_feature_types(np.log(y)) def inverse_transform(self, y): y = infer_feature_types(y) return infer_feature_types(np.exp(y)) class DoubleTransform(Transformer): name = "Double Transform" modifies_features = False modifies_target = True def __init__(self, parameters=None, random_seed=0): super().__init__(parameters={}, component_obj=None, random_seed=random_seed) def fit(self, X, y): return self def transform(self, X, y=None): if y is None: return X, y y = infer_feature_types(y) return X, infer_feature_types(y * 2) def inverse_transform(self, y): y = infer_feature_types(y) return infer_feature_types(y / 2) class SubsetData(Transformer): """To simulate a transformer that modifies the target but is not a target transformer, e.g. a sampler.""" name = "Subset Data" modifies_target = True def __init__(self, parameters=None, random_seed=0): super().__init__(parameters={}, component_obj=None, random_seed=random_seed) def fit(self, X, y=None): return self def transform(self, X, y=None): X_new = X.iloc[:50] y_new = None if y is not None: y_new = y.iloc[:50] return X_new, y_new @pytest.mark.parametrize( "component_graph,answer_func", [ ( ComponentGraph( { "Imputer": [Imputer, "X", "y"], "Log": [LogTransform, "X", "y"], "Random Forest": ["Random Forest Regressor", "Imputer.x", "Log.y"], } ), lambda y: infer_feature_types(np.exp(y)), ), ( ComponentGraph( { "Imputer": [Imputer, "X", "y"], "Log": [LogTransform, "X", "y"], "Double": [DoubleTransform, "X", "Log.y"], "Random Forest": [ "Random Forest Regressor", "Imputer.x", "Double.y", ], } ), lambda y: infer_feature_types(np.exp(y / 2)), ), ( ComponentGraph( { "Imputer": [Imputer, "X", "y"], "Log": [LogTransform, "Imputer.x", "y"], "Double": [DoubleTransform, "Log.x", "Log.y"], "Random Forest": [ "Random Forest Regressor", "Double.x", "Double.y", ], } ), lambda y: infer_feature_types(np.exp(y / 2)), ), ( ComponentGraph( { "Imputer": [Imputer, "X", "y"], "OneHot": [OneHotEncoder, "Imputer.x", "y"], "DateTime": [DateTimeFeaturizer, "OneHot.x", "y"], "Log": [LogTransform, "X", "y"], "Double": [DoubleTransform, "DateTime.x", "Log.y"], "Random Forest": [ "Random Forest Regressor", "DateTime.x", "Double.y", ], } ), lambda y: infer_feature_types(np.exp(y / 2)), ), ( ComponentGraph( { "Imputer": [Imputer, "X", "y"], "OneHot": [OneHotEncoder, "Imputer.x", "y"], "DateTime": [DateTimeFeaturizer, "OneHot.x", "y"], "Log": [LogTransform, "X", "y"], "Double": [DoubleTransform, "X", "Log.y"], "Double2": [DoubleTransform, "X", "Double.y"], "Random Forest": [ "Random Forest Regressor", "DateTime.x", "Double2.y", ], } ), lambda y: 
infer_feature_types(np.exp(y / 4)), ), ( ComponentGraph( { "Imputer": ["Imputer", "X", "y"], "Double": [DoubleTransform, "X", "y"], "DateTime 1": [ "DateTime Featurizer", "Imputer.x", "y", ], "ET": ["Extra Trees Regressor", "DateTime 1.x", "Double.y"], "Double 2": [DoubleTransform, "X", "y"], "DateTime 2": [ "DateTime Featurizer", "Imputer.x", "y", ], "Double 3": [DoubleTransform, "X", "Double 2.y"], "RandomForest": [ "Random Forest Regressor", "DateTime 2.x", "Double 3.y", ], "DateTime 3": [ "DateTime Featurizer", "Imputer.x", "y", ], "Double 4": [DoubleTransform, "X", "y"], "Catboost": [ "Random Forest Regressor", "DateTime 3.x", "Double 4.y", ], "Logistic Regression Classifier": [ "Linear Regressor", "Catboost.x", "RandomForest.x", "ET.x", "Double 3.y", ], } ), lambda y: infer_feature_types(y / 4), ), ( ComponentGraph( { "Imputer": [Imputer, "X", "y"], "OneHot": [OneHotEncoder, "Imputer.x", "y"], "DateTime": [DateTimeFeaturizer, "OneHot.x", "y"], "Log": [LogTransform, "X", "y"], "Double": [DoubleTransform, "X", "Log.y"], "Double2": [DoubleTransform, "X", "Double.y"], "Subset": [SubsetData, "DateTime.x", "Double2.y"], "Random Forest": [ "Random Forest Regressor", "Subset.x", "Subset.y", ], } ), lambda y: infer_feature_types(np.exp(y / 4)), ), ( ComponentGraph( { "Imputer": [Imputer, "X", "y"], "Random Forest": ["Random Forest Regressor", "Imputer.x", "y"], } ), lambda y: y, ), ( ComponentGraph( { "Imputer": [Imputer, "X", "y"], "DateTime": [DateTimeFeaturizer, "Imputer.x", "y"], "OneHot": [OneHotEncoder, "DateTime.x", "y"], "Random Forest": ["Random Forest Regressor", "OneHot.x", "y"], } ), lambda y: y, ), ( ComponentGraph({"Random Forest": ["Random Forest Regressor", "X", "y"]}), lambda y: y, ), ( ComponentGraph( { "Imputer": ["Imputer", "X", "y"], "Double": [DoubleTransform, "X", "y"], "DateTime 1": [ "DateTime Featurizer", "Imputer.x", "y", ], "ET": ["Extra Trees Regressor", "DateTime 1.x", "Double.y"], "Double 2": [DoubleTransform, "X", "y"], "DateTime 2": [ "DateTime Featurizer", "Imputer.x", "y", ], "Double 3": [DoubleTransform, "X", "Double 2.y"], "RandomForest": [ "Random Forest Regressor", "DateTime 2.x", "Double 3.y", ], "DateTime 3": [ "DateTime Featurizer", "Imputer.x", "y", ], "Double 4": [DoubleTransform, "X", "y"], "Linear": ["Linear Regressor", "DateTime 3.x", "Double 4.y"], "Logistic Regression Classifier": [ "Linear Regressor", "Linear.x", "RandomForest.x", "ET.x", "y", ], } ), lambda y: y, ), ], ) def test_component_graph_inverse_transform( component_graph, answer_func, X_y_regression ): X, y = X_y_regression y = pd.Series(np.abs(y)) X = pd.DataFrame(X) component_graph.instantiate() component_graph.fit(X, y) predictions = component_graph.predict(X) answer = component_graph.inverse_transform(predictions) expected = answer_func(predictions) pd.testing.assert_series_equal(answer, expected) def test_final_component_features_does_not_have_target(): X = pd.DataFrame( { "column_1": ["a", "b", "c", "d", "a", "a", "b", "c", "b"], "column_2": [1, 2, 3, 4, 5, 6, 5, 4, 3], } ) y = pd.Series([1, 0, 1, 0, 1, 1, 0, 0, 0]) X.ww.init(logical_types={"column_1": "categorical"}) cg = ComponentGraph( { "Imputer": ["Imputer", "X", "y"], "OneHot": ["One Hot Encoder", "Imputer.x", "y"], "TargetImputer": ["Target Imputer", "OneHot.x", "y"], "Logistic Regression Classifier": [ "Logistic Regression Classifier", "TargetImputer.x", "TargetImputer.y", ], } ) cg.instantiate() cg.fit(X, y) final_features = cg.transform_all_but_final(X, y) assert "TargetImputer.y" not in final_features.columns 
@patch("rayml.pipelines.components.Imputer.fit_transform") def test_component_graph_with_X_y_inputs_X(mock_fit): class DummyColumnNameTransformer(Transformer): name = "Dummy Column Name Transform" def __init__(self, parameters=None, random_seed=0): super().__init__(parameters={}, component_obj=None, random_seed=random_seed) def fit(self, X, y): return self def transform(self, X, y=None): return X.rename(columns=lambda x: x + "_new", inplace=False) X = pd.DataFrame( { "column_1": [0, 2, 3, 1, 5, 6, 5, 4, 3], "column_2": [1, 2, 3, 4, 5, 6, 5, 4, 3], } ) y = pd.Series([1, 0, 1, 0, 1, 1, 0, 0, 0]) graph = { "DummyColumnNameTransformer": [DummyColumnNameTransformer, "X", "y"], "Imputer": ["Imputer", "DummyColumnNameTransformer.x", "X", "y"], "Random Forest": ["Random Forest Classifier", "Imputer.x", "y"], } component_graph = ComponentGraph(graph) component_graph.instantiate() mock_fit.return_value = X assert component_graph.get_inputs("DummyColumnNameTransformer") == ["X", "y"] assert component_graph.get_inputs("Imputer") == [ "DummyColumnNameTransformer.x", "X", "y", ] component_graph.fit(X, y) # Check that we have columns from both the output of DummyColumnNameTransformer as well as the original columns since "X" was specified assert list(mock_fit.call_args[0][0].columns) == [ "column_1_new", "column_2_new", "column_1", "column_2", ] @patch("rayml.pipelines.components.Imputer.fit_transform") @patch("rayml.pipelines.components.Estimator.fit") def test_component_graph_with_X_y_inputs_y(mock_fit, mock_fit_transform): X = pd.DataFrame( { "column_1": [0, 2, 3, 1, 5, 6, 5, 4, 3], "column_2": [1, 2, 3, 4, 5, 6,
t.start() else: # value 2+: the 2nd thread in queue will include changes for this sync_storage_databases request logger.info("%s::%s: PASS THREAD, sync_queue full: nb=%s" % (__class__.__name__, __name__, len(sync_queue))) @staticmethod def thread_sync_storage_databases(): """ One sync at a time is possible. Only 2 threads are in sync_queue: #0 in current sync operation, #1 that will wait for its turn to sync :return: """ try: # be in queue while len(sync_queue) == 2 and sync_in_progress[0] == 1: sleep(1) logger.info("%s::%s: WAIT THREAD, current sync in progress, thread is waiting in queue" % (__class__.__name__, __name__)) except Exception as e: logger.error("%s::%s: ERROR THREAD, error raised by the thread in queue. Error: %s" % (__class__.__name__, __name__, e)) sync_queue.pop(0) return # Start sync logger.info("%s::%s: START THREAD, thread chose to start" % (__class__.__name__, __name__)) sync_in_progress[0] = 1 try: # sync logger.info("%s::%s: SYNC THREAD, thread start to sync all databases" % (__class__.__name__, __name__)) for storage_engine_type in storage_engine_list.values(): for storage_engine in storage_engine_type: storage_engine.sync() except Exception as e: logger.error("%s::%s: ERROR THREAD, error raised by the thread during sync. Error: %s" % (__class__.__name__, __name__, e)) sync_queue.pop(0) sync_in_progress[0] = 0 else: # Ending normaly logger.info("%s::%s: STOP THREAD, thread ended to sync all databases" % (__class__.__name__, __name__)) # End sync sync_queue.pop(0) sync_in_progress[0] = 0 @staticmethod def sync_f5_storage_databases(): logger.info("%s::%s: synchronize F5 databases" % (__class__.__name__, __name__)) Generic.sync_storage_databases() # TODO change to not sync to PAN """ for storage_engine_type in storage_engine_list.values(): for storage_engine in storage_engine_type: storage_engine.sync() """ class ApiNuagePolicyGroupTemplateCreate(Resource): @staticmethod def put(): args = parser_generic.parse_args() pgt_vsd_id = args['ID'] pgt_name = args['name'] ent_vsd_id = args['sourceEnterpriseID'] dt_vsd_id = args['parentID'] # Sanity check on enterprise if not Generic.sanity_check_enterprise(ent_vsd_id): return "no database update needed", 200 # load database db_pgt = nuage_db.get_policy_group_template(vsd_id=pgt_vsd_id) if db_pgt is None: # unknown policy group template db_dt = nuage_db.get_domain_template(vsd_id=dt_vsd_id) if db_dt is None: # unknown domain template Generic.reset_nuage_storage_database(dt_vsd_id) return "database updated", 201 else: # Domain in db # new PolicyGroupTemplate db_pgt = storage_engine_nuage.NuagePolicyGroupTemplate(vsd_id=pgt_vsd_id, logger=logger) db_pgt.name = pgt_name db_dt.create_child(db_pgt) return "nuage database updated", 201 else: # policy group template already exist Generic.log_nuage_storage_engine_already_synchronized(name=pgt_name, vsd_id=pgt_vsd_id) return "database already synchronized", 200 class ApiNuagePolicyGroupTemplateUpdate(Resource): @staticmethod def put(): args = parser_generic.parse_args() pgt_vsd_id = args['ID'] pgt_name = args['name'] dt_vsd_id = args['parentID'] ent_vsd_id = args['sourceEnterpriseID'] # Sanity check on enterprise if not Generic.sanity_check_enterprise(ent_vsd_id): return "no database update needed", 200 # load domain in database db_pgt = nuage_db.get_policy_group_template(vsd_id=pgt_vsd_id) if db_pgt is None: # unknown policy group template db_dt = nuage_db.get_domain_template(vsd_id=dt_vsd_id) if db_dt is None: # unknown domain template Generic.reset_nuage_storage_database(dt_vsd_id) 
return "database updated", 201 else: # domain Template in db logger.info("%s: Unexpected state for policy group template '%s %s', fetch domain template '%s'" % (__class__.__name__, pgt_vsd_id, pgt_name, dt_vsd_id)) # update db from current config db_dt.fetch() # load policy_group from Nuage storage database db_pgt = storage_engine_nuage.NuagePolicyGroupTemplate(vsd_id=pgt_vsd_id, logger=logger) if db_pgt is None: Generic.log_object_not_found_in_nuage(pgt_name, pgt_vsd_id) return "no database update needed", 200 else: return "database updated", 201 else: # check for name update if db_pgt.name != pgt_name: # Update Nuage storage database logger.info("%s: update name: pg_id=%s; old_pg_name=%s; new_pg_name=%s" % (__class__.__name__, pgt_vsd_id, db_pgt.name, pgt_name)) db_pgt.name = pgt_name return "database updated", 201 else: return "no database update needed", 200 class ApiNuagePolicyGroupTemplateDelete(Resource): @staticmethod def put(): args = parser_generic.parse_args() pgt_vsd_id = args['ID'] pgt_name = args['name'] ent_vsd_id = args['sourceEnterpriseID'] # Sanity check on enterprise if not Generic.sanity_check_enterprise(ent_vsd_id): return "no database update needed", 200 # load policy group template in database db_pgt = nuage_db.get_policy_group_template(vsd_id=pgt_vsd_id) if db_pgt is None: # unknown policy group template Generic.log_nuage_storage_engine_already_synchronized(name=pgt_name, vsd_id=pgt_vsd_id) return "database already synchronized", 201 else: # existing policy group template db_pgt.delete() logger.info("%s::%s: database updated: name=%s; id=%s" % (__class__.__name__, __name__, pgt_name, pgt_vsd_id)) return "database updated", 201 class ApiNuagePolicyGroupCreate(Resource): @staticmethod def put(): # get parameter in payload args = parser_policygroup.parse_args() name = str(args['name']) policy_group_id = str(args['policyGroupID']) pg_vsd_id = args['ID'] domain_vsd_id = args['parentID'] pgt_vsd_id = args['templateID'] ent_vsd_id = args['sourceEnterpriseID'] # Sanity check on enterprise if not Generic.sanity_check_enterprise(ent_vsd_id): return "no database update needed", 200 # load policy_group from Nuage storage database db_pg = nuage_db.get_policy_group(vsd_id=pg_vsd_id) if db_pg is None: # unknown policy group db_domain = nuage_db.get_domain(vsd_id=domain_vsd_id) if db_domain is None: # unknown domain if not Generic.sanity_check_domain(domain_vsd_id): return "no database update needed", 200 else: Generic.reset_nuage_storage_database(domain_vsd_id) Generic.sync_f5_storage_databases() return "database updated", 201 else: # create policy group and fetch logger.info("%s::%s: create and fetch policy group: pg_id=%s; pg_name=%s; domain_id=%s" % (__class__.__name__, __name__, policy_group_id, name, domain_vsd_id)) cur_pg = storage_engine_nuage.NuagePolicyGroup(vsd_id=pg_vsd_id, logger=logger ) cur_pg.name = name db_domain.create_child(cur_pg) # Associate policy_group_template if pgt_vsd_id != "null": for domain_template in nuage_db.domain_templates: if pgt_vsd_id in domain_template.children['policy_group_template'].keys() and \ pgt_vsd_id not in cur_pg.associated_objects['policy_group_template'].keys(): # known policy_group_template # Create a relation with policy_group_template cur_pg.assign(domain_template.children['policy_group_template'][pgt_vsd_id]) else: # Policy Group Template not found # Fetch domain_template nuage_db.fetch() # Sync Generic.sync_f5_storage_databases() return "database updated", 201 else: Generic.log_nuage_storage_engine_already_synchronized(name, 
pg_vsd_id) return "database already synchronized", 200 class ApiNuagePolicyGroupUpdate(Resource): @staticmethod def put(): # get parameter in payload args = parser_policygroup.parse_args() name = str(args['name']) vsd_id = args['ID'] domain_vsd_id = args['parentID'] ent_vsd_id = args['sourceEnterpriseID'] # Sanity check on enterprise if not Generic.sanity_check_enterprise(ent_vsd_id): return "no database update needed", 200 # load policy_group from Nuage storage database pg_db = nuage_db.get_policy_group(vsd_id=vsd_id) if pg_db is None: # unknown pg domain_db = nuage_db.get_domain(vsd_id=domain_vsd_id) if domain_db is None: # unknown domain if not Generic.sanity_check_domain(vsd_id): return "no database update needed", 200 else: # fetch database nuage_db.flush() nuage_db.fetch() # load policy_group from Nuage storage database pg_db = nuage_db.get_policy_group(vsd_id=vsd_id) if pg_db is None: Generic.log_object_not_found_in_nuage(name, vsd_id) return "no database update needed", 200 else: # pg in db # update db from current config pg_db.fetch() # Sync Generic.sync_storage_databases() return "database updated", 201 # check for name update if pg_db.name != name: # Update Nuage storage database logger.info("%s: update name: pg_id=%s; old_pg_name=%s; new_pg_name=%s" % (__class__.__name__, vsd_id, pg_db.name, name)) pg_db.name = name Generic.sync_storage_databases() return "database updated", 201 else: # check for associated ip_address update # compare ip_address list in current config and database # load old ip_address list from database old_ip_address_list = set(pg_db.get_ip_address_list()) # clear associated vPorts for vport in list(pg_db.vports): pg_db.detach(vport) # fetch from current configuration logger.info("%s: fetch policy group: pg_id=%s; pg_name=%s" % (__class__.__name__, vsd_id, name)) pg_db.fetch() # load current ip_address list from database cur_ip_address_list = set(pg_db.get_ip_address_list()) # compare new and current ip_address list if cur_ip_address_list == old_ip_address_list: Generic.log_nuage_storage_engine_already_synchronized(name, vsd_id) return "database already synchronized", 200 else: # log new ip address ip_address_list_to_attach = list(cur_ip_address_list - old_ip_address_list) if len(ip_address_list_to_attach) > 0: logger.info("%s: pg_id=%s ; pg_name=%s ; added ip_address=%s" % (__class__.__name__, vsd_id, name, ip_address_list_to_attach)) # log deleted ip address ip_address_list_to_detach = list(old_ip_address_list - cur_ip_address_list) if len(ip_address_list_to_detach) > 0: logger.info("%s: pg_id=%s ; pg_name=%s ; deleted ip_address=%s" % (__class__.__name__, vsd_id, name, ip_address_list_to_detach)) # Sync Generic.sync_storage_databases() return "database updated", 201 class ApiNuagePolicyGroupUpdateDirectAttach(Resource): @staticmethod def put(): """ Used for unit tests only Same as ApiNuagePolicyGroupUpdate() but the associated vPort is already in the 'vport_vsd_id' parameter :return: """ # ToDo error unknown policy group # get parameter in payload args = parser_policygroup_direct_attach.parse_args() name = str(args['name']) vsd_id = args['ID'] domain_vsd_id = args['parentID'] ent_vsd_id = args['sourceEnterpriseID'] vport_vsd_id = args['vportID'] # Sanity check on enterprise if not Generic.sanity_check_enterprise(ent_vsd_id): return "no database update needed", 200 # load policy_group from Nuage storage database pg_db = nuage_db.get_policy_group(vsd_id=vsd_id) if pg_db is None: # unknown pg domain_db = nuage_db.get_domain(vsd_id=domain_vsd_id) if domain_db 
is None: # unknown domain return "error, unknown policy group and unknown domain", 404 else: return "error, unknown policy group", 404 else: # pg in db if vport_vsd_id in pg_db.associated_objects['vport'].keys(): # already attached vport pass elif vport_vsd_id in pg_db.parent.children['vport'].keys(): # existing vport in db and attached to the domain vport_db = pg_db.parent.children['vport'][vport_vsd_id] # attach vPort to policy group pg_db.assign(vport_db) else: # unknown vport in db return "error, unknown vport", 404 # Sync Generic.sync_storage_databases() return "database updated", 201 class ApiNuagePolicyGroupDelete(Resource): @staticmethod def put(): # get parameter in payload args = parser_policygroup.parse_args() name = str(args['name']) vsd_id = args['ID'] # load policy_group from Nuage storage database db_pg = nuage_db.get_policy_group(vsd_id=vsd_id) if db_pg is None: # Database and current Nuage configuration already synchronized Generic.log_nuage_storage_engine_already_synchronized(name='unknown', vsd_id=vsd_id) return "database already synchronized", 200 else: # existing policy group # delete policy group logger.info("%s: delete policy group: pg_id=%s; pg_name=%s" % (__class__.__name__, vsd_id, name)) db_pg.delete() # Sync Generic.sync_f5_storage_databases() return "database updated", 201 class ApiNuageVminterfaceCreate(Resource):
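# The thread_sync_storage_databases docstring above describes a coalescing scheme: at most
# one sync runs at a time, at most one further request waits in sync_queue, and anything
# beyond that is dropped because the single waiter will already pick up its changes. A
# minimal, generic sketch of that pattern (illustrative only -- it is not this module's
# implementation and replaces the sync_queue/sync_in_progress globals with a lock):
import threading

class CoalescingSyncer:
    def __init__(self, do_sync):
        self._do_sync = do_sync          # the actual (slow) synchronisation callable
        self._lock = threading.Lock()    # held while a sync is running
        self._pending = False            # True once a request is already waiting

    def request_sync(self):
        if self._pending:
            return                       # a waiter exists: this request is coalesced into it
        self._pending = True
        threading.Thread(target=self._run, daemon=True).start()

    def _run(self):
        with self._lock:                 # wait for any in-progress sync to finish
            self._pending = False        # later requests must enqueue a new waiter
            self._do_sync()              # one sync covers all coalesced requests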
""" Unit tests for the QVM simulator device. """ import logging import re import networkx as nx import pytest import re import pennylane as qml from pennylane import numpy as np from pennylane.operation import Tensor from pennylane.circuit_graph import CircuitGraph from pennylane.wires import Wires from pyquil.quil import Pragma, Program from pyquil.api._quantum_computer import QuantumComputer from conftest import BaseTest from conftest import I, Z, H, U, U2, test_operation_map, QVM_SHOTS import pennylane_forest as plf import pyquil from flaky import flaky log = logging.getLogger(__name__) TEST_QPU_LATTICES = ["4q-qvm"] compiled_program = ( "DECLARE ro BIT[2]\n" 'PRAGMA INITIAL_REWIRING "PARTIAL"\n' "CZ 1 0\n" "RZ(0.432) 1\n" "MEASURE 1 ro[0]\n" "MEASURE 0 ro[1]\n" "HALT\n" ) class TestQVMBasic(BaseTest): """Unit tests for the QVM simulator.""" # pylint: disable=protected-access def test_identity_expectation(self, shots, qvm, compiler): """Test that identity expectation value (i.e. the trace) is 1""" theta = 0.432 phi = 0.123 dev = plf.QVMDevice(device="2q-qvm", shots=shots) with qml.tape.QuantumTape() as tape: qml.RX(theta, wires=[0]) qml.RX(phi, wires=[1]) qml.CNOT(wires=[0, 1]) O1 = qml.expval(qml.Identity(wires=[0])) O2 = qml.expval(qml.Identity(wires=[1])) dev.apply(tape.operations, rotations=tape.diagonalizing_gates) dev._samples = dev.generate_samples() res = np.array([dev.expval(O1.obs), dev.expval(O2.obs)]) # below are the analytic expectation values for this circuit (trace should always be 1) self.assertAllAlmostEqual(res, np.array([1, 1]), delta=3 / np.sqrt(shots)) def test_pauliz_expectation(self, shots, qvm, compiler): """Test that PauliZ expectation value is correct""" theta = 0.432 phi = 0.123 dev = plf.QVMDevice(device="2q-qvm", shots=shots) with qml.tape.QuantumTape() as tape: qml.RX(theta, wires=[0]) qml.RX(phi, wires=[1]) qml.CNOT(wires=[0, 1]) O1 = qml.expval(qml.PauliZ(wires=[0])) O2 = qml.expval(qml.PauliZ(wires=[1])) dev.apply(tape.operations, rotations=tape.diagonalizing_gates) dev._samples = dev.generate_samples() res = np.array([dev.expval(O1.obs), dev.expval(O2.obs)]) # below are the analytic expectation values for this circuit self.assertAllAlmostEqual( res, np.array([np.cos(theta), np.cos(theta) * np.cos(phi)]), delta=3 / np.sqrt(shots) ) def test_paulix_expectation(self, shots, qvm, compiler): """Test that PauliX expectation value is correct""" theta = 0.432 phi = 0.123 dev = plf.QVMDevice(device="2q-qvm", shots=shots) with qml.tape.QuantumTape() as tape: qml.RY(theta, wires=[0]) qml.RY(phi, wires=[1]) qml.CNOT(wires=[0, 1]) O1 = qml.expval(qml.PauliX(wires=[0])) O2 = qml.expval(qml.PauliX(wires=[1])) dev.apply(tape.operations, rotations=tape.diagonalizing_gates) dev._samples = dev.generate_samples() res = np.array([dev.expval(O1.obs), dev.expval(O2.obs)]) # below are the analytic expectation values for this circuit self.assertAllAlmostEqual( res, np.array([np.sin(theta) * np.sin(phi), np.sin(phi)]), delta=3 / np.sqrt(shots) ) def test_pauliy_expectation(self, shots, qvm, compiler): """Test that PauliY expectation value is correct""" theta = 0.432 phi = 0.123 dev = plf.QVMDevice(device="2q-qvm", shots=shots) with qml.tape.QuantumTape() as tape: qml.RX(theta, wires=[0]) qml.RX(phi, wires=[1]) qml.CNOT(wires=[0, 1]) O1 = qml.expval(qml.PauliY(wires=[0])) O2 = qml.expval(qml.PauliY(wires=[1])) dev.apply(tape.operations, rotations=tape.diagonalizing_gates) dev._samples = dev.generate_samples() res = np.array([dev.expval(O1.obs), dev.expval(O2.obs)]) # below are 
the analytic expectation values for this circuit self.assertAllAlmostEqual( res, np.array([0, -np.cos(theta) * np.sin(phi)]), delta=3 / np.sqrt(shots) ) def test_hadamard_expectation(self, shots, qvm, compiler): """Test that Hadamard expectation value is correct""" theta = 0.432 phi = 0.123 dev = plf.QVMDevice(device="2q-qvm", shots=shots) with qml.tape.QuantumTape() as tape: qml.RY(theta, wires=[0]) qml.RY(phi, wires=[1]) qml.CNOT(wires=[0, 1]) O1 = qml.expval(qml.Hadamard(wires=[0])) O2 = qml.expval(qml.Hadamard(wires=[1])) dev.apply(tape.operations, rotations=tape.diagonalizing_gates) dev._samples = dev.generate_samples() res = np.array([dev.expval(O1.obs), dev.expval(O2.obs)]) # below are the analytic expectation values for this circuit expected = np.array( [np.sin(theta) * np.sin(phi) + np.cos(theta), np.cos(theta) * np.cos(phi) + np.sin(phi)] ) / np.sqrt(2) self.assertAllAlmostEqual(res, expected, delta=3 / np.sqrt(shots)) @flaky(max_runs=10, min_passes=3) def test_hermitian_expectation(self, shots, qvm, compiler): """Test that arbitrary Hermitian expectation values are correct. As the results coming from the qvm are stochastic, a constraint of 3 out of 5 runs was added. """ theta = 0.432 phi = 0.123 dev = plf.QVMDevice(device="2q-qvm", shots=shots) with qml.tape.QuantumTape() as tape: qml.RY(theta, wires=[0]) qml.RY(phi, wires=[1]) qml.CNOT(wires=[0, 1]) O1 = qml.expval(qml.Hermitian(H, wires=[0])) O2 = qml.expval(qml.Hermitian(H, wires=[1])) dev.apply(tape.operations, rotations=tape.diagonalizing_gates) dev._samples = dev.generate_samples() res = np.array([dev.expval(O1.obs), dev.expval(O2.obs)]) # below are the analytic expectation values for this circuit with arbitrary # Hermitian observable H a = H[0, 0] re_b = H[0, 1].real d = H[1, 1] ev1 = ((a - d) * np.cos(theta) + 2 * re_b * np.sin(theta) * np.sin(phi) + a + d) / 2 ev2 = ((a - d) * np.cos(theta) * np.cos(phi) + 2 * re_b * np.sin(phi) + a + d) / 2 expected = np.array([ev1, ev2]) self.assertAllAlmostEqual(res, expected, delta=4 / np.sqrt(shots)) def test_multi_qubit_hermitian_expectation(self, shots, qvm, compiler): """Test that arbitrary multi-qubit Hermitian expectation values are correct""" theta = 0.432 phi = 0.123 A = np.array( [ [-6, 2 + 1j, -3, -5 + 2j], [2 - 1j, 0, 2 - 1j, -5 + 4j], [-3, 2 + 1j, 0, -4 + 3j], [-5 - 2j, -5 - 4j, -4 - 3j, -6], ] ) dev = plf.QVMDevice(device="2q-qvm", shots=10 * shots) with qml.tape.QuantumTape() as tape: qml.RY(theta, wires=[0]) qml.RY(phi, wires=[1]) qml.CNOT(wires=[0, 1]) O1 = qml.expval(qml.Hermitian(A, wires=[0, 1])) dev.apply(tape.operations, rotations=tape.diagonalizing_gates) dev._samples = dev.generate_samples() res = np.array([dev.expval(O1.obs)]) # below is the analytic expectation value for this circuit with arbitrary # Hermitian observable A expected = 0.5 * ( 6 * np.cos(theta) * np.sin(phi) - np.sin(theta) * (8 * np.sin(phi) + 7 * np.cos(phi) + 3) - 2 * np.sin(phi) - 6 * np.cos(phi) - 6 ) self.assertAllAlmostEqual(res, expected, delta=5 / np.sqrt(shots)) def test_var(self, shots, qvm, compiler): """Tests for variance calculation""" dev = plf.QVMDevice(device="2q-qvm", shots=shots) phi = 0.543 theta = 0.6543 with qml.tape.QuantumTape() as tape: qml.RX(phi, wires=[0]) qml.RY(theta, wires=[0]) O1 = qml.var(qml.PauliZ(wires=[0])) dev.apply(tape.operations, rotations=tape.diagonalizing_gates) dev._samples = dev.generate_samples() var = np.array([dev.var(O1.obs)]) expected = 0.25 * (3 - np.cos(2 * theta) - 2 * np.cos(theta) ** 2 * np.cos(2 * phi)) self.assertAlmostEqual(var, 
expected, delta=3 / np.sqrt(shots)) def test_var_hermitian(self, shots, qvm, compiler): """Tests for variance calculation using an arbitrary Hermitian observable""" dev = plf.QVMDevice(device="2q-qvm", shots=100 * shots) phi = 0.543 theta = 0.6543 A = np.array([[4, -1 + 6j], [-1 - 6j, 2]]) with qml.tape.QuantumTape() as tape: qml.RX(phi, wires=[0]) qml.RY(theta, wires=[0]) O1 = qml.var(qml.Hermitian(A, wires=[0])) # test correct variance for <A> of a rotated state dev.apply(tape.operations, rotations=tape.diagonalizing_gates) dev._samples = dev.generate_samples() var = np.array([dev.var(O1.obs)]) expected = 0.5 * ( 2 * np.sin(2 * theta) * np.cos(phi) ** 2 + 24 * np.sin(phi) * np.cos(phi) * (np.sin(theta) - np.cos(theta)) + 35 * np.cos(2 * phi) + 39 ) self.assertAlmostEqual(var, expected, delta=0.3) @pytest.mark.parametrize( "op", [ qml.QubitUnitary(np.array(U), wires=0), qml.BasisState(np.array([1, 1, 1]), wires=list(range(3))), qml.PauliX(wires=0), qml.PauliY(wires=0), qml.PauliZ(wires=0), qml.S(wires=0), qml.T(wires=0), qml.RX(0.432, wires=0), qml.RY(0.432, wires=0), qml.RZ(0.432, wires=0), qml.Hadamard(wires=0), qml.Rot(0.432, 2, 0.324, wires=0), qml.Toffoli(wires=[0, 1, 2]), qml.SWAP(wires=[0, 1]), qml.CSWAP(wires=[0, 1, 2]), qml.CZ(wires=[0, 1]), qml.CNOT(wires=[0, 1]), qml.PhaseShift(0.432, wires=0), qml.CSWAP(wires=[0, 1, 2]), plf.CPHASE(0.432, 2, wires=[0, 1]), plf.ISWAP(wires=[0, 1]), plf.PSWAP(0.432, wires=[0, 1]), ], ) def test_apply(self, op, apply_unitary, shots, qvm, compiler): """Test the application of gates to a state""" dev = plf.QVMDevice(device="3q-qvm", shots=shots, parametric_compilation=False) obs = qml.expval(qml.PauliZ(0)) if op.name == "QubitUnitary": state = apply_unitary(U, 3) elif op.name == "BasisState": state = np.array([0, 0, 0, 0, 0, 0, 0, 1]) elif op.name == "CPHASE": state = apply_unitary(test_operation_map["CPHASE"](0.432, 2), 3) elif op.name == "ISWAP": state = apply_unitary(test_operation_map["ISWAP"], 3) elif op.name == "PSWAP": state = apply_unitary(test_operation_map["PSWAP"](0.432), 3) else: state = apply_unitary(op.matrix, 3) with qml.tape.QuantumTape() as tape: qml.apply(op) obs dev.apply(tape.operations, rotations=tape.diagonalizing_gates) dev._samples = dev.generate_samples() res = dev.expval(obs.obs) expected = np.vdot(state, np.kron(np.kron(Z, I), I) @ state) # verify the device is now in the expected state # Note we have increased the tolerance here, since we are only # performing 1024 shots. 
self.assertAllAlmostEqual(res, expected, delta=3 / np.sqrt(shots)) def test_sample_values(self, qvm, tol): """Tests if the samples returned by sample have the correct values """ dev = plf.QVMDevice(device="1q-qvm", shots=10) with qml.tape.QuantumTape() as tape: qml.RX(1.5708, wires=[0]) O1 = qml.expval(qml.PauliZ(wires=[0])) dev.apply(tape.operations, rotations=tape.diagonalizing_gates) dev._samples = dev.generate_samples() s1 = dev.sample(O1.obs) # s1 should only contain 1 and -1 self.assertAllAlmostEqual(s1 ** 2, 1, delta=tol) self.assertAllAlmostEqual(s1, 1 - 2 * dev._samples[:, 0], delta=tol) def test_sample_values_hermitian(self, qvm, tol): """Tests if the samples of a Hermitian observable returned by sample have the correct values """ theta = 0.543 shots = 1_000_000 A = np.array([[1, 2j], [-2j, 0]]) dev = plf.QVMDevice(device="1q-qvm", shots=shots) with qml.tape.QuantumTape() as tape: qml.RX(theta, wires=[0]) O1 = qml.sample(qml.Hermitian(A, wires=[0])) dev.apply(tape.operations, rotations=tape.diagonalizing_gates) dev._samples = dev.generate_samples() s1 = dev.sample(O1.obs) # s1 should only contain the eigenvalues of # the hermitian matrix eigvals = np.linalg.eigvalsh(A) assert np.allclose(sorted(list(set(s1))), sorted(eigvals), atol=tol, rtol=0) # the analytic mean is 2*sin(theta)+0.5*cos(theta)+0.5 assert np.allclose( np.mean(s1), 2 * np.sin(theta) + 0.5 * np.cos(theta) + 0.5, atol=0.1, rtol=0 ) # the analytic variance is 0.25*(sin(theta)-4*cos(theta))^2 assert np.allclose( np.var(s1), 0.25 * (np.sin(theta) - 4 * np.cos(theta)) ** 2, atol=0.1, rtol=0 ) def test_sample_values_hermitian_multi_qubit(self, qvm, tol): """Tests if the samples of a multi-qubit Hermitian observable returned by sample have the correct values """ theta = 0.543
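# The assertions above compare sampled expectation values against analytic ones with
# delta = 3 / sqrt(shots). For observables with eigenvalues +/-1 the single-shot variance
# is at most 1, so the standard error of the sample mean is at most 1/sqrt(shots) and the
# chosen delta is roughly a three-sigma tolerance. A small sketch of that reasoning
# (the helper name is illustrative, not part of this test suite):
def shot_noise_delta(shots, n_sigma=3, max_eigval=1.0):
    """Upper bound on the n-sigma statistical error of a sampled expectation value."""
    return n_sigma * max_eigval / np.sqrt(shots)

# With the 1024 shots mentioned in test_apply above this tolerates deviations of ~0.094:
# shot_noise_delta(1024)  # == 3 / 32 == 0.09375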
<filename>hw3/asp_planner_core.py """ An algorithm to solve sequential planning problems (specified in a format unique to this assignment) with Clingo Author: <NAME> Student ID: 12547190 References: - plasp 3: Towards Effective ASP Planning (Dimopoulos et al. 2018) https://arxiv.org/pdf/1812.04491.pdf - Potassco plasp https://github.com/potassco/plasp/ Solution Notes: -------------- "A scholar knows not to waste time rediscovering information already known" - <NAME>, The Way of Kings I'm not re-inventing the wheel, just putting pieces together. The goal is to convert a pseudo-PDDL description to ASP facts and then solve the planning problem. A solution is found with a meta-encoding. "plasp 3: Towards Effective ASP Planning" (Dimopoulos et al. 2018 - https://arxiv.org/pdf/1812.04491.pdf) details sequential and parallel meta-encodings, the source code for which is available in the Plasp repository (https://github.com/potassco/plasp). The paper specifically refers to the strips-incremental.lp encoding, but the authors also include a simplified sequential meta-encoding in sequential-horizon.lp. This simplified encoding yields a plan as a sequence of time-stamped actions if and only if the plan's length matches the externally defined horizon constant. At a high-level, my solution: 1. Adds, explains, and slightly alters a minimal version of plasp's sequential-horizon meta-encoding 2. Converts the pseudo-PDDL description to ASP facts in a manner similar to the 'translate' feature of plasp 3. Finds the optimal (shortest) plan by progressively incrementing the horizon from t = [1, t_max] (inclusive) 4. Returns the optimal plan or None if no solution is found in the range [1, t_max] Example Input: initial: (At(C1, SFO) & At(C2, JFK) & At(P1, SFO) & At(P2, JFK) & Cargo(C1) & Cargo(C2) & Plane(P1) & Plane(P2) & Airport(SFO) & Airport(JFK)) goals: (At(C1, JFK) & At(C2, SFO)) action: Load(c, p, a); (At(c, a) & At(p, a) & Cargo(c) & Plane(p) & Airport(a)); (Contains(c, p) & ~At(c, a)) action: Unload(c, p, a); (Contains(c, p) & At(p, a) & Cargo(c) & Plane(p) & Airport(a)); (At(c, a) & ~Contains(c, p)) action: Fly(p, f, to); (At(p, f) & Plane(p) & Airport(f) & Airport(to)); (At(p, to) & ~At(p, f)) t_max: 10 Example Output: ['Load(C2,P2,JFK)', 'Fly(P2,JFK,SFO)', 'Load(C1,P1,SFO)', 'Unload(C2,P2,SFO)', 'Fly(P1,SFO,JFK)', 'Unload(C1,P1,JFK)'] """ # Standard library import collections import itertools import re from typing import Dict, List, Tuple, Union # External imports import clingo # Local imports from planning import PlanningProblem, Action, Expr ### ### Helper functions ### def init_sequential_planning_program() -> str: """Initialize an ASP sequential planning program with a minimized version of the sequential horizon meta-encoding as detailed in "plasp 3: Towards Effective ASP Planning" (Dimopoulos et al. 2018 - https://arxiv.org/pdf/1812.04491.pdf) and explicitly declared in the plasp repository: https://github.com/potassco/plasp/blob/master/encodings/sequential-horizon.lp All credit goes to the plasp authors. 
My only contribution is minor simplification and explanation of each group of statements in detail :return: Initialized ASP sequential planning program, ready to be extended with facts from a planning problem :rtype: str """ # We reason about the state of the world at particular time steps: [0, t_max] seq_encoding = 'time(0..horizon).\n' # Predicates evaluate to True or False seq_encoding += 'boolean(true).\n' seq_encoding += 'boolean(false).\n' # The contains/2 atom captures this relationship seq_encoding += 'contains(X, value(X, B)) :- predicate(X), boolean(B).\n' # The initial state is at time t=0 # The holds/3 atom captures the value of a predicate at a particular timestep t >= 0 seq_encoding += 'holds(Predicate, Value, 0) :- initialState(Predicate, Value).\n' # Closed World Assumption (CWA): Any ground atoms in the initial state which are not explicitly declared True # are set to False seq_encoding += 'initialState(X, value(X, false)) :- predicate(X), not initialState(X, value(X, true)).\n' # The solution to the planning problem is extracted from occurs/2 atoms # This is a sequential encoding: only one action may occur at a particular timestep # Also, actions may only occur AFTER the initial state. seq_encoding += '1 {occurs(Action, T) : action(Action)} 1 :- time(T), T > 0.\n' # An action may not occur unless its preconditions are met (i.e., for an action to occur at time t, # all applicable predicates must hold the values specified in the precondition at time t-1) seq_encoding += ( ':- occurs(Action, T), precondition(Action, Predicate, Value), ' 'not holds(Predicate, Value, T - 1).\n' ) # Capture the effects of an action: at time t, the value of a predicate is changed to the one specified in the # action's effect as long as the action was valid (see previous statement). seq_encoding += ( 'caused(Predicate, Value, T) :- ' 'occurs(Action, T), ' 'effect(Action, Predicate, Value), ' 'holds(PredicatePre, ValuePre, T - 1) : precondition(Action, PredicatePre, ValuePre).\n' ) # A predicate is considered modified if its value was changed by an action seq_encoding += 'modified(Predicate, T) :- caused(Predicate, Value, T).\n' # The so-called 'inertia' statements. At a particular timestep, the value of a predicate was either: # 1) Modified and therefore holds a new value seq_encoding += 'holds(Predicate, Value, T) :- caused(Predicate, Value, T).\n' # 2) Was not modified and therefore continues to hold its previous value seq_encoding += ( 'holds(predicate(V), Value, T) :- holds(predicate(V), Value, T - 1), ' 'not modified(predicate(V), T), time(T).\n' ) # The goal is not met unless the appropriate predicates hold their goal values at the final timestep seq_encoding += ':- goal(Predicate, Value), not holds(Predicate, Value, horizon).\n' return seq_encoding def make_positive(expression: Expr) -> Expr: """Make any expression positive by removing the ~ if needed :param expression: A potentially negative expression :type expression: Expr :return: A guaranteed positive expression :rtype: Expr """ if expression.op == '~': new_expression = Expr(expression.args[0].op, *expression.args[0].args) return new_expression return expression def is_variable(arg: Expr) -> bool: """Check if an expression argument is a variable. 
In the pseudo-PDDL description used in this assignment, variables are lower case and constants/predicates are upper case :param arg: An expression argument :type arg: Expr :return: True if the argument is a variable, False otherwise :rtype: bool """ return str(arg)[0].islower() def extract_constants_and_predicates(planning_problem: PlanningProblem) -> Tuple[List[Expr], List[Tuple[Expr, int]], Dict[str, Expr]]: """Extract all unique constants and predicates from a planning problem :param planning_problem: A description of the initial state, action(s), and goal(s) of a planning problem :type planning_problem: PlanningProblem :return: Constants as a list of expressions, predicates as a list of (name, number of arguments) tuples, a map of constants per predicate. :rtype: Tuple[List[Expr], List[Tuple[Expr, int]], Dict[str, Expr]] """ seen_predicates = set() seen_constants = set() constants_per_predicate = collections.defaultdict(list) initial_predicates = planning_problem.initial # Make all predicates positive so we can extract the name via predicate.op goal_predicates = list(map(make_positive, planning_problem.goals)) precondition_predicates = list(map(make_positive, [p for a in planning_problem.actions for p in a.precond])) postcondition_predicates = list(map(make_positive, [e for a in planning_problem.actions for e in a.effect])) all_predicates = initial_predicates + goal_predicates + precondition_predicates + postcondition_predicates for predicate in all_predicates: if predicate.op not in seen_predicates and not is_variable(predicate.op): seen_predicates.add((predicate.op, len(predicate.args))) for arg in predicate.args: if arg not in seen_constants and not is_variable(arg): seen_constants.add(arg) constants_per_predicate[predicate.op].append(arg) return list(seen_constants), list(seen_predicates), constants_per_predicate def action_to_asp_facts(action: Action) -> str: """Convert a planning problem Action into a collection of ASP facts :param action: An action schema with preconditions and effects :type action: Action :return: A collection of ASP facts for the action, preconditions, and effects :rtype: str """ fact_string = '' # Keep track of arguments in order to correctly label constants arg_map = {} variable_counter = 1 for arg in action.args: if not is_variable(arg): arg_map[str(arg)] = f'constant("{str(arg)}")' else: arg_map[str(arg)] = f'X{variable_counter}' variable_counter += 1 # First declare the action as: # action(action(("?", X1, ..., Xn))) :- constant(X1), ..., constant(Xn). if arg_map.values(): action_signature = f'action(("{action.name}", {", ".join(arg_map.values())}))' action_constants = ', '.join([f'constant({k})' for k in arg_map.values()]) action_constants = ' :- ' + action_constants else: action_signature = f'action(("{action.name}"))' action_constants = '' fact_string += f'action({action_signature}){action_constants}.\n' # Declare the preconditions as: # precondition(action_signature, predicate((...)), value(...(predicate(()), true or false)) # :- action(action((...))). # And Effects as: # effect(action_signature, predicate((...)), value(...(predicate(()), true or false)) :- action(action((...))). preconditions = [('precondition', p) for p in action.precond] effects = [('effect', e) for e in action.effect] for name, expression in preconditions + effects: positive_expression = make_positive(expression) # map variables to X1,...,Xn and map constants to constant("...") action_arg_map = {}
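# ---------------------------------------------------------------------------
# NOTE: the original asp_planner_core.py excerpt is truncated above, midway
# through action_to_asp_facts. The module docstring describes step 3 of the
# solution -- finding the shortest plan by re-solving the sequential-horizon
# encoding for horizon = 1, 2, ..., t_max -- but that loop is not visible in
# this excerpt. The helper below is a minimal sketch of how such a loop could
# look with the clingo Python API; the name `find_shortest_plan` and its
# `asp_facts` parameter are illustrative assumptions, not the author's actual
# implementation.
# ---------------------------------------------------------------------------
def find_shortest_plan(asp_facts: str, t_max: int):
    """Sketch: return the occurs/2 atoms of the shortest plan, or None if no plan of length <= t_max exists."""
    for horizon in range(1, t_max + 1):
        control = clingo.Control()
        # Fix the horizon constant for this attempt, then add the meta-encoding and the problem facts
        program = f'#const horizon={horizon}.\n' + init_sequential_planning_program() + asp_facts
        control.add('base', [], program)
        control.ground([('base', [])])

        plan = []

        def on_model(model):
            # Collect occurs(Action, T) atoms and order them by their timestep T
            steps = [s for s in model.symbols(atoms=True) if s.name == 'occurs']
            plan.extend(sorted(steps, key=lambda s: s.arguments[1].number))

        result = control.solve(on_model=on_model)
        if result.satisfiable:
            # The first horizon that admits a model yields the shortest plan
            return plan
    return None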
2D+F E+2F 3F+G 5G C | C D+E C+E+F D+3F 2E+2F+G 4F+3G 10G D | D 2D+F D+3F 2D+4F+G 6F+2G 6F+7G 20G E | E E+2F 2E+2F+G 6F+2G 2E+4F+5G 6F+12G 30G F | F 3F+G 4F+3G 6F+7G 6F+12G 6F+27G 60G G | G 5G 10G 20G 30G 60G 120G """ # parabolics of S_5 tables["D_4"] = open("D_4.table").read() tables["PD_4"] = """ | A B C D E F G H I J K --+--------------------------------------------------------------------------------- A | A B C D E F G H I J K B | B 2B+G 2F 2F 2G+J 2F+2J 4G+K 4J 4J 4J+2K 8K C | C 2F 2C+H 2F 2H+J 2F+2J 4J 4H+K 4J 4J+2K 8K D | D 2F 2F 2D+I 2I+J 2F+2J 4J 4J 4I+K 4J+2K 8K E | E 2G+J 2H+J 2I+J 2E+G+H+I+2K 6J+K 4G+2J+4K 4H+2J+4K 4I+2J+4K 6J+9K 24K F | F 2F+2J 2F+2J 2F+2J 6J+K 2F+6J+2K 8J+4K 8J+4K 8J+4K 8J+12K 32K G | G 4G+K 4J 4J 4G+2J+4K 8J+4K 8G+10K 8J+8K 8J+8K 8J+20K 48K H | H 4J 4H+K 4J 4H+2J+4K 8J+4K 8J+8K 8H+10K 8J+8K 8J+20K 48K I | I 4J 4J 4I+K 4I+2J+4K 8J+4K 8J+8K 8J+8K 8I+10K 8J+20K 48K J | J 4J+2K 4J+2K 4J+2K 6J+9K 8J+12K 8J+20K 8J+20K 8J+20K 8J+44K 96K K | K 8K 8K 8K 24K 32K 48K 48K 48K 96K 192K """ # parabolics of Weyl D_4 tables["A_5"] = """ | A B C D E F G H I --+------------------------------------------------- A | A B C D E F G H I B | B B+G H G+H I F+I 2G+I H+2I 5I C | C H C+H 2H E+I 3H 2I 2H+2I 6I D | D G+H 2H D+H+I 2I 3H+I G+3I 2H+4I 10I E | E I E+I 2I 2E+2I 3I 4I 6I 12I F | F F+I 3H 3H+I 3I 3F+3I 5I 3H+6I 15I G | G 2G+I 2I G+3I 4I 5I 2G+6I 10I 20I H | H H+2I 2H+2I 2H+4I 6I 3H+6I 10I 2H+14I 30I I | I 5I 6I 10I 12I 15I 20I 30I 60I """ # alternating n=5 tables["GL32"] = """ | A B C D E F G H I J K L M N P --+-------------------------------------------------------------------------------------------------------- A | A B C D E F G H I J K L M N P B | B B+J G+I M J+M F+N G+J+N P I+2N 3J+P K+N+P L+3N M+2P 3N+2P 7P C | C G+I C+L M E+N L+M G+L+N P I+2N J+3N K+N+P 3L+P M+2P 3N+2P 7P D | D M M D+M 2M 2M P H+P M+P 2P 2P 2P 2M+2P 4P 8P E | E J+M E+N 2M 2E+P 2M+N J+N+P 2P M+2N+P 2J+3P N+3P 3N+2P 2M+4P 2N+6P 14P F | F F+N L+M 2M 2M+N 2F+P L+N+P 2P M+2N+P 3N+2P N+3P 2L+3P 2M+4P 2N+6P 14P G | G G+J+N G+L+N P J+N+P L+N+P G+J+L+2N+P 3P 5N+P 3J+3N+3P K+2N+4P 3L+3N+3P 7P 5N+8P 21P H | H P P H+P 2P 2P 3P 3H+3P 4P 6P 6P 6P 8P 12P 24P I | I I+2N I+2N M+P M+2N+P M+2N+P 5N+P 4P I+3N+3P 6N+4P 2N+6P 6N+4P M+9P 4N+12P 28P J | J 3J+P J+3N 2P 2J+3P 3N+2P 3J+3N+3P 6P 6N+4P 6J+9P 3N+9P 9N+6P 14P 6N+18P 42P K | K K+N+P K+N+P 2P N+3P N+3P K+2N+4P 6P 2N+6P 3N+9P 2K+10P 3N+9P 14P 2N+20P 42P L | L L+3N 3L+P 2P 3N+2P 2L+3P 3L+3N+3P 6P 6N+4P 9N+6P 3N+9P 6L+9P 14P 6N+18P 42P M | M M+2P M+2P 2M+2P 2M+4P 2M+4P 7P 8P M+9P 14P 14P 14P 2M+18P 28P 56P N | N 3N+2P 3N+2P 4P 2N+6P 2N+6P 5N+8P 12P 4N+12P 6N+18P 2N+20P 6N+18P 28P 4N+40P 84P P | P 7P 7P 8P 14P 14P 21P 24P 28P 42P 42P 42P 56P 84P 168P """ tables["B_2"] = """ | A B C D E F G H --+---------------------------- A | A B C D E F G H B | B 2B G G 2E H 2G 2H C | C G 2C G H 2F 2G 2H D | D G G 2D H H 2G 2H E | E 2E H H 2E+H 2H 2H 4H F | F H 2F H 2H 2F+H 2H 4H G | G 2G 2G 2G 2H 2H 4G 4H H | H 2H 2H 2H 4H 4H 4H 8H """ tables["PB_2"] = """ | A B C D --+---------------- A | A B C D B | B 2B+D 2D 4D C | C 2D 2C+D 4D D | D 4D 4D 8D """ tables["B_3"] = """ | A B C D E F G H I J K L M N P Q R S T U V W X Y Z a b c d e f g h --+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- A | A B C D E F G H I J K L M N P Q R S T U V W X Y Z a b c d e f g h B | B 2B G G H R 2G 2H S V V W S W b b 2R 2S e f 2V 2W e e e c 2b 2c h 2e 2f h 2h C | C G 2C G N Q 2G W Z Z T 
W T 2N b 2Q b e 2T g e 2W e e 2Z g 2b h h 2e h 2g 2h D | D G G 2D L P 2G W X Y X 2L Y W 2P b b e e d e 2W 2X 2Y e d 2b h 2d 2e h h 2h E | E H N L E+H U W 3H I+S J+V K+V L+W M+S N+W d g f 3S T+e U+f 3V 3W X+e Y+e Z+e a+c h 3c d+h 3e 3f g+h 3h F | F R Q P U F+U b f f d g d 2U g P+d Q+g R+f 2f 2g 2U+f h h h 2d h d+g b+h 2h 2d+h 2h 4f 2g+h 4h G | G 2G 2G 2G W b 4G 2W e e e 2W e 2W 2b 2b 2b 2e 2e h 2e 4W 2e 2e 2e h 4b 2h 2h 4e 2h 2h 4h H | H 2H W W 3H f 2W 6H 3S 3V 3V 3W 3S 3W h h 2f 6S 3e 3f 6V 6W 3e 3e 3e 3c 2h 6c 3h 6e 6f 3h 6h I | I S Z X I+S f e 3S 2I+f Z+c X+c X+e S+f Z+e h h 2f 2S+2f e+h 3f 2c+e 3e 2X+h e+h 2Z+h c+h 2h 2c+2h 3h 2e+2h 6f 3h 6h J | J V Z Y J+V d e 3V Z+c 2J+c V+c Y+e Y+c Z+e 2d h h 2c+e e+h d+h 2V+2c 3e e+h 2Y+h 2Z+h 2c+d 2h 4c+h 2d+2h 2e+2h 3h 3h 6h K | K V T X K+V g e 3V X+c V+c 2K+c X+e T+c T+e h 2g h 2c+e 2T+h g+h 2V+2c 3e 2X+h e+h e+h 2c+g 2h 4c+h 3h 2e+2h 3h 2g+2h 6h L | L W W 2L L+W d 2W 3W X+e Y+e X+e 2L+2W Y+e 3W 2d h h 3e 3e d+h 3e 6W 2X+2e 2Y+2e 3e d+h 2h 3h 2d+2h 6e 3h 3h 6h M | M S T Y M+S 2U e 3S S+f Y+c T+c Y+e 2M+f T+e 2d 2g 2f 2S+2f 2T+h 2U+2f 2c+e 3e e+h 2Y+h e+h 2a+h 2h 2c+2h 2d+2h 2e+2h 6f 2g+2h 6h N | N W 2N W N+W g 2W 3W Z+e Z+e T+e 3W T+e 2N+2W h 2g h 3e 2T+2e g+h 3e 6W 3e 3e 2Z+2e g+h 2h 3h 3h 6e 3h 2g+2h 6h P | P b b 2P d P+d 2b h h 2d h 2d 2d h 2P+2d b+h b+h 2h 2h 2d+h