diff --git a/.gitattributes b/.gitattributes
index 6858c216d49237b106cbd916ff50301dad817010..2f597ad2037bad28b42907515c630f4c39aecfaa 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1700,3 +1700,52 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 3416.jsonl filter=lfs diff=lfs merge=lfs -text
 3436.jsonl filter=lfs diff=lfs merge=lfs -text
 3431.jsonl filter=lfs diff=lfs merge=lfs -text
+3464.jsonl filter=lfs diff=lfs merge=lfs -text
+3468.jsonl filter=lfs diff=lfs merge=lfs -text
+3425.jsonl filter=lfs diff=lfs merge=lfs -text
+3478.jsonl filter=lfs diff=lfs merge=lfs -text
+3472.jsonl filter=lfs diff=lfs merge=lfs -text
+3483.jsonl filter=lfs diff=lfs merge=lfs -text
+3430.jsonl filter=lfs diff=lfs merge=lfs -text
+3480.jsonl filter=lfs diff=lfs merge=lfs -text
+3475.jsonl filter=lfs diff=lfs merge=lfs -text
+3487.jsonl filter=lfs diff=lfs merge=lfs -text
+3467.jsonl filter=lfs diff=lfs merge=lfs -text
+3484.jsonl filter=lfs diff=lfs merge=lfs -text
+3477.jsonl filter=lfs diff=lfs merge=lfs -text
+3481.jsonl filter=lfs diff=lfs merge=lfs -text
+3496.jsonl filter=lfs diff=lfs merge=lfs -text
+349.jsonl filter=lfs diff=lfs merge=lfs -text
+3489.jsonl filter=lfs diff=lfs merge=lfs -text
+3491.jsonl filter=lfs diff=lfs merge=lfs -text
+3476.jsonl filter=lfs diff=lfs merge=lfs -text
+3490.jsonl filter=lfs diff=lfs merge=lfs -text
+3499.jsonl filter=lfs diff=lfs merge=lfs -text
+3479.jsonl filter=lfs diff=lfs merge=lfs -text
+3497.jsonl filter=lfs diff=lfs merge=lfs -text
+3428.jsonl filter=lfs diff=lfs merge=lfs -text
+3502.jsonl filter=lfs diff=lfs merge=lfs -text
+3501.jsonl filter=lfs diff=lfs merge=lfs -text
+3488.jsonl filter=lfs diff=lfs merge=lfs -text
+3442.jsonl filter=lfs diff=lfs merge=lfs -text
+350.jsonl filter=lfs diff=lfs merge=lfs -text
+3495.jsonl filter=lfs diff=lfs merge=lfs -text
+3504.jsonl filter=lfs diff=lfs merge=lfs -text
+3505.jsonl filter=lfs diff=lfs merge=lfs -text
+3503.jsonl filter=lfs diff=lfs merge=lfs -text
+3392.jsonl filter=lfs diff=lfs merge=lfs -text
+3509.jsonl filter=lfs diff=lfs merge=lfs -text
+3492.jsonl filter=lfs diff=lfs merge=lfs -text
+351.jsonl filter=lfs diff=lfs merge=lfs -text
+3511.jsonl filter=lfs diff=lfs merge=lfs -text
+3519.jsonl filter=lfs diff=lfs merge=lfs -text
+3452.jsonl filter=lfs diff=lfs merge=lfs -text
+3517.jsonl filter=lfs diff=lfs merge=lfs -text
+3518.jsonl filter=lfs diff=lfs merge=lfs -text
+3508.jsonl filter=lfs diff=lfs merge=lfs -text
+3507.jsonl filter=lfs diff=lfs merge=lfs -text
+3520.jsonl filter=lfs diff=lfs merge=lfs -text
+3524.jsonl filter=lfs diff=lfs merge=lfs -text
+352.jsonl filter=lfs diff=lfs merge=lfs -text
+3528.jsonl filter=lfs diff=lfs merge=lfs -text
+3531.jsonl filter=lfs diff=lfs merge=lfs -text
diff --git a/3392.jsonl b/3392.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..f7ccd62c2e8aeb91b4e20558508961c519604b4b
--- /dev/null
+++ b/3392.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35208113934a0f6f5ba2f0af3bbf549c6c72f23753ca56036bc1dcae1f99304d
+size 526647276
diff --git a/3425.jsonl b/3425.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..52b22536dcdb4e46c2bcc5de781ffa45f1064e71
--- /dev/null
+++ b/3425.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc2768ace3dd7e27290620e451c8e14fc0b33b82f17229bbae51562dd627ea30
+size 283467129
diff --git a/3428.jsonl b/3428.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..cf05a25cf076fc0c2cc5da6d1bd7bf7b18ccd2e9
--- /dev/null
+++ b/3428.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f10a604531a27877e5b7d62d9026372be01cde3203666e0c2c351ff1be8bf84
+size 535050272
diff --git a/3430.jsonl b/3430.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..a9c667571cae73e21e3b32bbaea55c26bcf0e4c9
--- /dev/null
+++ b/3430.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fbed45307ebe356107b397c294dddd360f24b826b3b21c6ff199ff7373bb31ff
+size 51897877
diff --git a/3442.jsonl b/3442.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..983b3950cdce67e85b2dcfccb1c20bdd1fe44824
--- /dev/null
+++ b/3442.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0fdd1a0e5661dc9b9f4e19b810ae16709c57a761ec4b0d67f729e28b95e6b545
+size 52554854
diff --git a/3452.jsonl b/3452.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..f441cd726a9faec5e3c02cbda746d7d7271181f9
--- /dev/null
+++ b/3452.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bea6ad642a07c299ac78d4629213c0964ca6975c498768812927b287663fb8a
+size 531514609
diff --git a/3464.jsonl b/3464.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..1231b4b0593e72724bc181f974af19190254b751
--- /dev/null
+++ b/3464.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c54b791e30cebc29ea68eb02346ae1d0ff07f6efd33cfdbc64c50eaf285ceb54
+size 56508302
diff --git a/3467.jsonl b/3467.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..a6d7f6ea1e91d31103ae0fe8749a202c1791aa87
--- /dev/null
+++ b/3467.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f313e574abd6c17d3418711402398763f48294a38a78ff9ad6a94a093e145493
+size 246076898
diff --git a/3468.jsonl b/3468.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..31b42f2ba48aea145f25685a3d55340568fae191
--- /dev/null
+++ b/3468.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e8639634964e94c3d70f66da55c996e14388ac40ee671a9524648eb6e010f64
+size 59993678
diff --git a/3472.jsonl b/3472.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..5a5b98517f1e3ea98ac40236608d3ce9f9580abb
--- /dev/null
+++ b/3472.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be8f753c2764a7b07f6bbaca1b55e797cfcc9eb7dddaec9748de06f1069284cf
+size 19646776
diff --git a/3475.jsonl b/3475.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..d0c709382f9702487b5be4132bcb0444d2938736
--- /dev/null
+++ b/3475.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2efe9a616c4176c555ed5d67e70e133c5da337bc31e77ad4d7c28af975f4636
+size 55510898
diff --git a/3476.jsonl b/3476.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e7414a47d763fcf0e96fe9bfeccaa740cf2a412e
--- /dev/null
+++ b/3476.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a1efae6359a30f33ff8b879df9f2137514a8663faf38fe8e5aa4f6aca775d2b
+size 57566940
diff --git a/3477.jsonl b/3477.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..b3c61705578a85b2b35cee3fda4e45a44a5c0896
--- /dev/null
+++ b/3477.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:542a7aee54d2aa9fb463cfd4ff19c4e0df405c644823b93aa8fd38b0cd0c89a4
+size 56901233
diff --git a/3478.jsonl b/3478.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..5e85b6c977786b035a3c1f2c568991e5f420838e
--- /dev/null
+++ b/3478.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc870f5610d7ef98fb9b5cdc3740b2547d9923b6d0404e201fbd5880e4b7d791
+size 59954502
diff --git a/3479.jsonl b/3479.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..249c4fb40811a795aeae91735f2ee4c62ff99590
--- /dev/null
+++ b/3479.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82dbd8b6c831215e0c2a911c11c3cd4a114ff55da161b6a6431155a12353cce5
+size 48549989
diff --git a/3480.jsonl b/3480.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..5c873d5681d746f4541c0bdba6a979bb30b4c505
--- /dev/null
+++ b/3480.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fa3b81e903ec1e0daa0b8bda7faf1095408df85a5d2559df1225dccecfc2ebf
+size 60710238
diff --git a/3481.jsonl b/3481.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..f25cbf9fc60aa8ddb89c97ea689c0d56004aadd5
--- /dev/null
+++ b/3481.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc676975dd3b437ec951dc8841f8c39f0f431c42d8f20816fcd0d5ae3c392caa
+size 18969502
diff --git a/3483.jsonl b/3483.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..fdff544c0d605648d563f3b24b3f867e14b743e2
--- /dev/null
+++ b/3483.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f7f5ca48b5868d3db1986a5ffc5c02084549cd688e08858199e54dab4b02539
+size 12938083
diff --git a/3484.jsonl b/3484.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..23748e8c231a63d5811323cf80727c728b119ce1
--- /dev/null
+++ b/3484.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41088d2c1e1052c72d2aaa0adaffa536ad20c94f46ee09b6497653641c3790b2
+size 14025830
diff --git a/3487.jsonl b/3487.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e5a008b7429c2a77945728b2fc30fddc9a437ea9
--- /dev/null
+++ b/3487.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5be697107b45f714650220df570cc5518ac31b91b5a8749b1fda018ce6e82b2b
+size 57389458
diff --git a/3488.jsonl b/3488.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..2be052da257947b2813b3d24ccb201ee60f1c20e
--- /dev/null
+++ b/3488.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e092e69d75a9c167cef7b534a5b518b0bda28c344029459b14170c97a05f0c09
+size 59049727
diff --git a/3489.jsonl b/3489.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..bcef285273107f82b9cc2bf48e63d504d03a9273
--- /dev/null
+++ b/3489.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8b29ffb36b7920c2e449f49a0dc567cfa77b05d366a07b9154c75d343dea0ba
+size 56210152
diff --git a/349.jsonl b/349.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..0a2611ee77391a2c6d64c06b6340dc14443fe016
--- /dev/null
+++ b/349.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4108001f2ccb9eab7c626532de323157248fb043558c91e6ecd2a681cf24c438
+size 69077315
diff --git a/3490.jsonl b/3490.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..2ca8e99ae5db2913e8d1c4d28d6e788334bac24a
--- /dev/null
+++ b/3490.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e5b9e3dc2c4a46beb3e4c9309a61b32540b01035d4ca9e89ea9c8e39daa4d45
+size 53208610
diff --git a/3491.jsonl b/3491.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..4906fc1f9a44b13470981f47b432131a447c699b
--- /dev/null
+++ b/3491.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81eee2cbc0973a174c3f4ea2e98551b0dd4ba5ab8fbdd2293b60e48ad69ce014
+size 52117188
diff --git a/3492.jsonl b/3492.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..d6c2cbf612305711f7066b71501aa5990d82e2e6
--- /dev/null
+++ b/3492.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80b814a0192bffa5f94fac42ac783bff3440a1d2dc6ef22785d2ad3fa777a7c5
+size 54554236
diff --git a/3495.jsonl b/3495.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..1b2577e4b3dde275ff9e320c49f8ccd0f7c96535
--- /dev/null
+++ b/3495.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a900a728b70db8baff4fc4e6b1d3bf7f96758cdf4e068c66618a6060a57e56ae
+size 66033221
diff --git a/3496.jsonl b/3496.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..3e8c4aaa05e38eea739a813328cb977a3cc9ad7d
--- /dev/null
+++ b/3496.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b69e34aacc93da7e9c355cb4bef169e072f0c657bd2247146186f79c2cd5ba6b
+size 23647522
diff --git a/3497.jsonl b/3497.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..a981f09e0a58eb26575cea02c31d28619c15c30d
--- /dev/null
+++ b/3497.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15233cc95f0e4ca8893c4953dd4ee38e0b084fbc26070c351063c7e068e13a77
+size 60300705
diff --git a/3499.jsonl b/3499.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..46799cffddac9bba5653ba178392dc71f4b76850
--- /dev/null
+++ b/3499.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8cd64f4cc86eae00e18821d6896a15f08db4ee67fafca4418331d81707d2911
+size 59406000
diff --git a/350.jsonl b/350.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..66481e11e26ee07c1089e2ee43fd351a48f57b7c
--- /dev/null
+++ b/350.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf216851aa3859f09bf968bf4750013a9cd8ad69c2784d74a61165af0fa6307e
+size 18827084
diff --git a/3501.jsonl b/3501.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..06add65c598b4ce8920efdf74e5b1c697603c75f
--- /dev/null
+++ b/3501.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39efafc0f40d9309e420d52508d52c752fae89dcd18877abee02d657615a5a4d
+size 58512427
diff --git a/3502.jsonl b/3502.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..022e19401e3bd4aa6c50613ac5b8663df582d3f3
--- /dev/null
+++ b/3502.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3cf5f7519502748584eb25f5b799543558ace8ef86448d192d9c671f56f313c6
+size 65141475
diff --git a/3503.jsonl b/3503.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..a373fde7606a4d7a8447887f69e919e7969a87b4
--- /dev/null
+++ b/3503.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55047b973442b255f6deacf9c9990ca9b20ef7b36123ee938ad007bc355589f5
+size 25425357
diff --git a/3504.jsonl b/3504.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..70f45666a75bae655b427c888c9d3c7b84eb2b8a
--- /dev/null
+++ b/3504.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b97912faf353106c371dfb16610a7155c6d7b465b9a83f8cf9002b71d5cb04d
+size 64692307
diff --git a/3505.jsonl b/3505.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..5c2953c49ecb2e6b002e9fe627a613075125811b
--- /dev/null
+++ b/3505.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc74243e8daa42db69711f6eaa1871772a65c39044ffa3d7456c7a3215dc9ff9
+size 55834519
diff --git a/3507.jsonl b/3507.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..04af8bfe0063b30477634dd0870179354e3b8f73
--- /dev/null
+++ b/3507.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89c8352d4e0c9b0a6746d747aef7947c3b57275637f1e54a3983676dad2f6ecc
+size 44619656
diff --git a/3508.jsonl b/3508.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..ad97ab8b374ed0b2af4ffeb4b976ce8350e21a8a
--- /dev/null
+++ b/3508.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8848795dc774aa30bda2d739e0d5f5ba90528a359558c17b60d54b8b56d612f0
+size 63123787
diff --git a/3509.jsonl b/3509.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..9d6a9ba427f5ac9a304e4fd95e0bcbe03980d352
--- /dev/null
+++ b/3509.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f8b87cccedc9f608b64f2840b6863e773e4933c495244f793cc4a3037b927a7
+size 22969949
diff --git a/351.jsonl b/351.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..4da060cd9230f1c851b864a842676c334b449d00
--- /dev/null
+++ b/351.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8e2905bbaa0b88dbef7b207a859797c0dd44766cea23d8fe5a5e68c598b0eab
+size 59200893
diff --git a/3511.jsonl b/3511.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..9b42288c066c90fccce2d9334dfc856e113edf47
--- /dev/null
+++ b/3511.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1f8bb14b04a6d02bd5d12d382611b96380cdb8009f2b18242e0dda1f400fb67
+size 54649880
diff --git a/3517.jsonl b/3517.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..704da29da1fbe57a3b7958d6bbec9fb54f5118f4
--- /dev/null
+++ b/3517.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df77b7a91a3162e4ce3ef28aeca6230e958939b8a47bff62ccbd7062dfe6eb00
+size 59627666
diff --git a/3518.jsonl b/3518.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e26c2402cf5bdab9b96624da5fe4c89195f332b6
--- /dev/null
+++ b/3518.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01baf48bc3ac24313fe0d2e472e392d5f4b5480e7cf544b44b3c87d2b5dd170d
+size 54366017
diff --git a/3519.jsonl b/3519.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..a7c07836480f15258723a449da625b174a25f03c
--- /dev/null
+++ b/3519.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7461ff4226f2a73f99e093d8b191880680465d18950a04b3d1c574cbcff89c8
+size 24605879
diff --git a/352.jsonl b/352.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..a65d328cafe559cbc1dc4fb41c09a36895b6b2d6
--- /dev/null
+++ b/352.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ccd504ecd7a330b7ed42c9761f51a2e1cad03dde8aeca64f7a27766a2db692c
+size 51128077
diff --git a/3520.jsonl b/3520.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..5757f1db0569b07c001dd9747f2ca78ca73e4eca
--- /dev/null
+++ b/3520.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8637ae19739544cefadc105ee001e282e066905ff5b8189e9041696598a18cd
+size 33043150
diff --git a/3524.jsonl b/3524.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..cc15d780ac862e17110f9393a891a8d2a5201e1d
--- /dev/null
+++ b/3524.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4e563ae92bcec5a3a05f76bc1840dd1a8fab33dcb7084f8dd4508461d2a03bc
+size 51079290
diff --git a/3528.jsonl b/3528.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..abb1dd8b8657531dacb6ef9e38556d4658aa1fde
--- /dev/null
+++ b/3528.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:265d231aab8720adca3015067a62a3719cc77f038c8dcb43b092cbc8d5352bf8
+size 56237439
diff --git a/3531.jsonl b/3531.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..973a29a6b23bc8bbb89eb8a5a9046442f6c16350
--- /dev/null
+++ b/3531.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7fe39ee6666abe992f2b6b36192c08c56999a6909d6b1a7ae606d3eabd0b8dc
+size 21253778
diff --git a/4758.jsonl b/4758.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..763307e00ef884be11606f0154b0decca38928ca
--- /dev/null
+++ b/4758.jsonl
@@ -0,0 +1,649 @@
+{"seq_id":"548708695","text":"import requests\n\nclass PlacesAPI(object):\n\n\tapikey = \"AIzaSyDlv0keEQsyv8KzDpRSFQti3Kiq5Anlff4\"\n\t#endpoint = \"https://maps.googleapis.com/maps/api/geocode/json\"\n\tendpoint_search = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json\"\n\tendpoint_detail = \"https://maps.googleapis.com/maps/api/place/details/json\"\n\n\n\tdef __init__(self):\n\t\treturn\n\n\tdef getNearPlaces(self, latlong):\n\t\t\n\t\tparameters = {\n\t\t\t\"location\":latlong, \n\t\t\t\"key\":self.apikey, \n\t\t\t\"radius\":1000,\n\t\t\t\"types\":\"airport|bar|cafe|beauty_salon|book_store|bus_station|casino|city_hall|dentist|department_store|doctor|gym|hospital|library|museum|night_club|park|restaurant|school|shopping_mall|stadium|store|university|zoo\"\n\t\t}\n\t\tresults = requests.get(self.endpoint_search, params=parameters).json()\n\t\t\n\t\treturn results['results']\n\n\tdef getPlaceDetail(self, place_id):\n\t\t\n\t\tparameters = {\n\t\t\t\"placeid\":place_id, \n\t\t\t\"key\":self.apikey, \n\t\t}\n\t\t\n\t\tresult = requests.get(self.endpoint_detail, params=parameters).json()\n\t\t\n\t\treturn result['result']","sub_path":"Places/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"636492248","text":"#!/usr/bin/env python\n\nimport argparse\nfrom glob import glob\nfrom tqdm import tqdm\nimport json\nimport os\nfrom pathlib import Path\nfrom mirdata.validate import md5\n\nDATASET_INDEX_PATH = \"../mirdata/datasets/indexes/openmic2018_index.json\"\n\n\ndef make_dataset_index(dataset_data_path):\n path = Path(dataset_data_path)\n\n # top-key level metadata\n metadata_checksum = md5(path / Path(\"openmic-2018-metadata.csv\"))\n classmap_checksum = md5(path / Path(\"class-map.json\"))\n label_checksum = md5(path / Path(\"openmic-2018-aggregated-labels.csv\"))\n response_checksum = md5(path / Path(\"openmic-2018-individual-responses.csv\"))\n train_split = path / Path(\"partitions\") / Path(\"split01_train.csv\")\n
test_split = path / Path(\"partitions\") / Path(\"split01_test.csv\")\n train_checksum = md5(train_split)\n test_checksum = md5(test_split)\n\n index_metadata = {\n \"metadata\": {\n \"openmic-metadata\": (\"openmic-2018-metadata.csv\", metadata_checksum),\n \"openmic-classmap\": (\"class-map.json\", classmap_checksum),\n \"openmic-labels\": (\"openmic-2018-aggregated-labels.csv\", label_checksum),\n \"openmic-responses\": (\"openmic-2018-individual-responses.csv\", response_checksum),\n \"openmic-train\": (str(train_split.relative_to(path)), train_checksum),\n \"openmic-test\": (str(test_split.relative_to(path)), test_checksum),\n }\n }\n\n # top-key level tracks\n index_tracks = {}\n for audio_file in tqdm(sorted(path.rglob(\"*.ogg\"))):\n\n audio_checksum = md5(audio_file)\n arelpath = audio_file.relative_to(path)\n track_id = audio_file.stem\n\n vggish_file = (path / Path(\"vggish\") / arelpath.parent.stem / track_id).with_suffix(\".json\")\n vggish_checksum = md5(vggish_file)\n vrelpath = vggish_file.relative_to(path)\n\n index_tracks[track_id] = {\n \"audio\": (str(arelpath), audio_checksum),\n \"vggish\": (str(vrelpath), vggish_checksum),\n }\n\n # top-key level version\n dataset_index = {\"version\": \"1.0.0\"}\n\n # combine all in dataset index\n dataset_index.update(index_metadata)\n dataset_index.update({\"tracks\": index_tracks})\n\n with open(DATASET_INDEX_PATH, \"w\") as fhandle:\n json.dump(dataset_index, fhandle, indent=2)\n\n\ndef main(args):\n make_dataset_index(args.dataset_data_path)\n\n\nif __name__ == \"__main__\":\n PARSER = argparse.ArgumentParser(description=\"Make dataset index file.\")\n PARSER.add_argument(\n \"dataset_data_path\", type=str, help=\"Path to dataset data folder.\"\n )\n\n main(PARSER.parse_args())\n","sub_path":"scripts/make_openmic2018_index.py","file_name":"make_openmic2018_index.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"237996274","text":"from functools import reduce\nimport math\n\n\nclass State:\n def __init__(self, x, y, direction):\n self.x = x\n self.y = y\n self.direction = direction\n\n\nclass Action:\n def __init__(self, command, steps):\n self.command = command\n self.steps = steps\n\n def __str__(self):\n return self.command + \" - \" + str(self.steps)\n\n\nclass Tower:\n def __init__(self, x, y, damage, attack_range, locked):\n self.x = x\n self.y = y\n self.damage = damage\n self.attack_range = attack_range\n self.locked = locked\n\n\nclass Alien:\n def __init__(self, x, y, health, speed, spawn_time, dead):\n self.x = x\n self.y = y\n self.health = health\n self.speed = speed\n self.spawn_time = spawn_time\n self.dead = dead\n\n\ndef apply_action(state, action, bounds):\n new_state = State(state.x, state.y, state.direction)\n if action.command == 'F':\n if state.direction == 0:\n new_state.y -= action.steps\n elif state.direction == 1:\n new_state.x += action.steps\n elif state.direction == 2:\n new_state.y += action.steps\n elif state.direction == 3:\n new_state.x -= action.steps\n else:\n new_state.direction = (new_state.direction + action.steps) % 4\n\n new_state.x = max(min(bounds[0], new_state.x), 0)\n new_state.y = max(min(bounds[1], new_state.y), 0)\n\n return new_state\n\n\ndef break_action(action):\n actions = []\n\n if(action.command == 'F'):\n for _ in range(action.steps):\n actions.append(Action(action.command, 1))\n else:\n actions.append(action)\n\n return actions\n\n\ndef parse_input_file(in_file):\n actions = 
[]\n bounds = ()\n initial_state = None\n aliens = []\n towers = []\n with open(in_file, \"r\") as f:\n actions = []\n lines = f.readlines()\n bounds = (int(lines[0].split()[0]), int(lines[0].split()[1]))\n initial_state = State(\n int(lines[1].split()[0]), int(lines[1].split()[1]), 1)\n actions_str = lines[2].split()\n commands_str = actions_str[0::2]\n steps_str = actions_str[1::2]\n\n for act_cmd, act_step in zip(commands_str, steps_str):\n actions.append(Action(act_cmd, int(act_step)))\n\n health, speed = (float(lines[3].split()[0]),\n float(lines[3].split()[1]))\n\n n_aliens = int(lines[4])\n\n aliens = [Alien(initial_state.x, initial_state.y, health, speed, int(spawn_time), False)\n for spawn_time in lines[5: 5 + n_aliens]]\n\n tower_damage, tower_range, tower_cost = (\n float(lines[5 + n_aliens].split()[0]), float(lines[5 + n_aliens].split()[1]), int(lines[5+n_aliens].split()[2]))\n\n gold = int(lines[5 + n_aliens + 1])\n\n return bounds, initial_state, actions, aliens, tower_damage, tower_range, tower_cost, gold\n\n\ndef compute_path(bounds, initial_state, actions):\n final_state = State(initial_state.x, initial_state.y,\n initial_state.direction)\n\n new_actions = reduce(lambda l1, l2: l1 + l2,\n [break_action(a) for a in actions])\n\n path = []\n path.append([final_state.x, final_state.y])\n\n for a in new_actions:\n final_state = apply_action(final_state, a, bounds)\n if a.command == 'F':\n path.append([final_state.x, final_state.y])\n\n return path\n\n\ndef compute_alien_position(path, tick, alien):\n if tick < alien.spawn_time:\n return -2, -2\n\n position_idx = min(len(path), math.floor(\n max((tick - alien.spawn_time), 0) * alien.speed))\n\n return path[position_idx][0], path[position_idx][1]\n\n\ndef euclidian_distance(x1, y1, x2, y2):\n return math.sqrt((x2 - x1)*(x2-x1) + (y2 - y1)*(y2-y1))\n\n\ndef lock_tower_target(tower, aliens):\n if tower.locked > -1:\n alien = aliens[tower.locked]\n if euclidian_distance(tower.x, tower.y, alien.x, alien.y) <= tower.attack_range and not alien.dead:\n # return for valid target\n return\n\n # no target\n tower.locked = -1\n min_alien_distance = euclidian_distance(\n tower.x, tower.y, aliens[0].x, aliens[0].y) + 1\n\n # find target\n for a, i in zip(aliens, range(len(aliens))):\n distance = euclidian_distance(tower.x, tower.y, a.x, a.y)\n if not a.dead and distance < min_alien_distance and distance <= tower.attack_range:\n min_alien_distance = distance\n tower.locked = i\n\n\ndef shoot_aliens(towers, aliens):\n for t in towers:\n if t.locked > -1:\n aliens[t.locked].health -= t.damage\n\n for a in aliens:\n if a.health <= 0:\n a.dead = True\n\n\ndef update_alien_positions(aliens, path):\n for a in aliens:\n if a.dead:\n continue\n alien_x, alien_y = compute_alien_position(path, tick, a)\n a.x = alien_x\n a.y = alien_y\n if a.x == a.y == -1:\n return \"LOSS\"\n\n return \"PENDING\"\n\n\ndef compute_path_points_in_range(pos, path, tower_range):\n count = 0\n for path_point in path:\n if (euclidian_distance(pos[0], pos[1], path_point[0], path_point[1])) <= tower_range:\n count += 1\n\n return count\n\n\ndef compute_towers(bounds, path, tower_damage, tower_range, tower_cost, gold):\n empty_positions = [[True for _ in range(bounds[0])]\n for _ in range(bounds[1])]\n\n empty_positions_list = []\n\n for p in path:\n empty_positions[p[0]][p[1]] = False\n\n for x in range(bounds[0]):\n for y in range(bounds[1]):\n if empty_positions[x][y]:\n empty_positions_list.append([x, y])\n\n number_of_towers = math.floor(gold/tower_cost)\n\n for pos in 
empty_positions_list:\n pos.append(compute_path_points_in_range(pos, path, tower_range))\n\n positions = sorted(empty_positions_list,\n key=lambda x: -x[2])[0:number_of_towers]\n\n towers = []\n for p in positions:\n towers.append(Tower(p[0], p[1], tower_damage, tower_range, False))\n\n return towers\n\n\nif __name__ == \"__main__\":\n in_file = 'level5_5.in'\n out_file = 'level5_5.out'\n\n bounds, initial_state, actions, aliens, tower_damage, tower_range, tower_cost, gold = parse_input_file(\n in_file)\n\n path = compute_path(bounds, initial_state, actions)\n path.append([-1, -1])\n\n towers = compute_towers(bounds, path, tower_damage,\n tower_range, tower_cost, gold)\n\n tick = -1\n result = \"PENDING\"\n while True:\n tick += 1\n result = update_alien_positions(aliens, path)\n if result == \"LOSS\":\n break\n for t in towers:\n lock_tower_target(t, aliens)\n\n if tick > 0:\n shoot_aliens(towers, aliens)\n\n if reduce(lambda x, y: x and y, [a.dead for a in aliens]):\n result = \"WIN\"\n break\n\n print(result)\n\n with open(out_file, \"w\") as f:\n for t in towers:\n f.write(str(t.x) + \" \" + str(t.y) + '\\n')\n","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":7045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"614084163","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models\n# from odoo.addons.l10n_mx_edi.hooks import _load_xsd_files\n\n\nclass ResCompany(models.Model):\n _inherit = 'res.company'\n\n l10n_mx_edi_pac = fields.Selection(\n selection=[('finkok', 'Finkok'), ('solfact',\n 'Solucion Factible'), ('prodigia', 'Prodigia')],\n string='PAC',\n help='The PAC that will sign/cancel the invoices',\n )\n\n l10n_mx_edi_pac_contract = fields.Char(\n string='Contrato Prodigia',\n help='La clave del contrato de Prodigia')\n","sub_path":"prodigia-facturacion/models/res_company.py","file_name":"res_company.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"160589669","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#==============================================================================\n# setup.py\n# Copyright (C) 2020 Scott P Morton (spm3c at mtmail.mtsu.edu)\n# \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n# \n#==============================================================================\n\n\"\"\"\nCreated on Mon Feb 18 18:47:13 2019\n\n@author: Scott P. 
Morton\nCenter for Computational Science\nMiddle Tennessee State University\n\"\"\"\n\nimport os, sys\n\nmessage = \"./setup.py 'the/target study/Directory'\\n or ./setup.py -c 'the/target study/Directory'\\nto copy the example folder from this repo\"\n\nanalysisDirs = [\"Analysis/Additional_Plots\", # contains any additional plots such as EVM screeplot\n \"Analysis/BE\", # Contains binding energies plots and data\n \"Analysis/BESI\", # Contains BESI related plots\n \"Analysis/Datasets\", # All correlated data saved in various formats\n \"Analysis/EVM_images\", # EVM images\n \"Analysis/FingerPrints\", # Electrophoretic Fingerprints (Stieh et al.)\n \"Analysis/HXB2\", # HXB2 alignment data\n \"Analysis/Logos\", # Logos plot data\n \"Analysis/Residues\", # Residue plots\n \"Analysis/seqfiles\", # sequence files\n \"Analysis/VLoops\", # Vloop plots and data\n \"Analysis/Types\", # Place your types file here\n \"Analysis/PCA\",\n \"Analysis/PCA/FingerPrints\"] \nstudyDirs = [\"ABseqfiles\",\n \"ABtemplates\",\n \"Analysis\",\n \"seqfiles\",\n \"src\",\n \"Structures\",\n \"targets\",\n \"templates\",\n \"tools\"]\n\n\ndef _cp(src,dest):\n try:\n os.system('cp -r %s %s' % (src,dest))\n except Exception as err:\n print('(_cp)OS error: {0}'.format(err))\ndef _Mkdir(path):\n try:\n os.makedirs(path)\n except Exception as err:\n print('(_Mkdir)OS error: {0}'.format(err))\ndef _SetWD(path):\n try:\n os.chdir(path)\n print('Current working dir is: %s' % (path))\n except Exception as err:\n print('(_SetWD)OS error: {0}'.format(err))\ndef ProcessCMD(cmd,args, message):\n if len(message) > 0:\n print(message)\n wDir = os.getcwd() \n rc = os.fork()\n if rc == -1:\n print('fork failed')\n raise OSError('os.fork() failed')\n elif rc == 0:\n _SetWD(wDir)\n sys.stdout.flush()\n try:\n os.execvp(cmd,args)\n except Exception as err:\n print('---Child process unable to execute command \"%s\"\\n, Error is-%s' % (cmd,err))\n else:\n status = os.waitpid(rc,0)\n if not(status[1] == 0):\n sys.exit(-1)\n #return status[1]\n else:\n print('Command %s with message %s completed\\n' % (cmd,message))\n return 0\n\ndef main():\n print(sys.argv[0][0:sys.argv[0].rfind('/')])\n copyExample = False\n root = ''\n if(len(sys.argv) > 1):\n for i in range(len(sys.argv)):\n if(sys.argv[i].upper() == '--HELP' or sys.argv[i].upper() == '-H'):\n print(message)\n sys.exit(0)\n elif(sys.argv[i].upper() == '-C'):\n copyExample = True\n else:\n root = sys.argv[i]\n elif(len(sys.argv) == 1) :\n print(message)\n sys.exit(0)\n _Mkdir(root)\n if(copyExample):\n srcDir = '%s/%s/Docs/Example/.' % (os.getcwd(),sys.argv[0][0:sys.argv[0].rfind('/')])\n destDir = '%s/.' 
% (root)\n cmd = ['cp','-R',srcDir,destDir]\n ProcessCMD(cmd[0],cmd,'Copy example folder from repo')\n \n else:\n for folder in studyDirs:\n _Mkdir('%s/%s' % (root,folder))\n for folder in analysisDirs:\n _Mkdir('%s/%s' % (root,folder))\n\n \n \n \nif __name__ == \"__main__\":\n main() \n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"582233138","text":"import asyncio\nimport time\n\nimport aiohttp\n\nfrom aioselenium import Remote, Keys\nfrom selenium import webdriver\n\n\ndef _create_new_driver_connection():\n driver = webdriver.Firefox()\n # we need to keep these in memory to the session doesn't get closed\n executor_url = driver.command_executor._url\n return driver, executor_url\n\n\n'''\ndef connect_to_selenium_session(session_id, executor_url):\n from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver\n\n # Save the original function, so we can revert our patch\n org_command_execute = RemoteWebDriver.execute\n\n def new_command_execute(self, command, params=None):\n if command == \"newSession\":\n # Mock the response\n return {'success': 0, 'value': None, 'sessionId': session_id}\n else:\n return org_command_execute(self, command, params)\n\n # Patch the function before creating the driver object\n RemoteWebDriver.execute = new_command_execute\n\n new_driver = webdriver.Remote(\n command_executor=executor_url, desired_capabilities={}\n )\n new_driver.session_id = session_id\n\n # Replace the patched function with original function\n RemoteWebDriver.execute = org_command_execute\n\n return new_driver\n'''\n\n\nasync def scraper():\n capabilities = {\n \"browserName\": \"firefox\",\n }\n\n # command_executor = os.getenv('SELENIUM_CLUSTER')\n driver_obj, command_executor = _create_new_driver_connection()\n\n async with aiohttp.ClientSession() as session:\n remote = await Remote.create(\n command_executor, capabilities, session, reconnect=driver_obj.session_id\n )\n async with remote as driver:\n await driver.set_window_size(1920, 1080)\n await driver.get(\"http://www.youtube.com\")\n print('Loaded:',await driver.get_title())\n #element = await driver.find_element_by_xpath('//input[@id=\"search\"]')\n #await element.send_keys(search, Keys.ENTER)\n #video_titles = await driver.find_elements_by_xpath('//a[@id=\"video-title\"]')\n link_elems = await driver.find_elements_by_xpath('//a')\n for e in link_elems[:5]:\n txt = await e.text()\n outer = await e.get_attribute('outerHTML')\n print(txt[:80])\n print(outer[:100])\n\n\n#async def main(search_fields):\n# await asyncio.gather(*[scraper(search) for search in search_fields])\n\n\nif __name__ == \"__main__\":\n\n #search_fields = [\"Soccer\", \"Guatemala\"]##, \"Guitar\", \"Computer\", \"Van Gogh\"]\n #asyncio.run(main(search_fields))\n\n asyncio.run(scraper())\n","sub_path":"visual_webscraper/pipeline_new2/asyncio_test.py","file_name":"asyncio_test.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"526691141","text":"import pika\r\nimport sys #for sys.argv\r\n\r\nconnection=pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\r\nchannel=connection.channel()\r\n\r\nchannel.exchange_declare(exchange='recitations', exchange_type='fanout')\r\n\r\nmessage=str()\r\nfor i in sys.argv[1:]:\r\n message+=i+' '\r\n\r\n\r\nchannel.basic_publish(exchange='recitations', 
routing_key='', body=message)\r\nprint(f'Sent!:{str(message)}')\r\nconnection.close()","sub_path":"TSIS10/Third Tutorial/Tribe_Leader.py","file_name":"Tribe_Leader.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"520086903","text":"import numpy as np\nimport os\nfrom matplotlib import pyplot as plt\n\ndef read_points(map_file, offset_x=0.0, offset_y=0.0):\n \"\"\"\n Reads a file with the map data in the RNDF Format\n :return: generator of x, y position tuples\n \"\"\"\n resolution =0.01\n Width=3.85\n Length=5.67\n ar = np.zeros((240,320))\n # detect waypoints x.y.z and store latitude / longitude as x / y\n with open(map_file) as m_file:\n for line in m_file:\n x, y = line.split('\\t')[1:3]\n #ar[float(x) + offset_x, float(y) + offset_y]=1\n ar[int(float(y)/Width*ar.shape[0]) , int(float(x)/Length*ar.shape[1])]=1\n #ndimage.binary_dilation(ar)\n ar = ndimage.binary_dilation(ar, iterations=1).astype(ar.dtype)\n np.save(\"map.npy\",ar)\n \n#read_points(\"new_map.txt\",\"r\")\nmap_mat = np.load(\"map.npy\")\nplt.imshow(map_mat)\nplt.show()\n","sub_path":"src/assignment9/src/create_npy_map.py","file_name":"create_npy_map.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"586894809","text":"from __future__ import unicode_literals\nfrom datetime import datetime\n\nexec(compile(open('youtube_dlc/version.py').read(), 'youtube_dlc/version.py', 'exec'))\n\n_LATEST_VERSION = locals()['__version__']\n\n_OLD_VERSION = _LATEST_VERSION.replace('-', '.').split(\".\", 4)\n\nold_ver = '.'.join(_OLD_VERSION[:3])\nold_rev = _OLD_VERSION[3] if len(_OLD_VERSION) > 3 else ''\n\nver = datetime.now().strftime(\"%Y.%m.%d\")\nrev = str(int(old_rev or 0) + 1) if old_ver == ver else ''\n\nversion = '.'.join((ver, rev)) if rev else ver\n\nprint('::set-output name=ytdlc_version::' + version)\n\nfile_version_py = open('youtube_dlc/version.py', 'rt')\ndata = file_version_py.read()\ndata = data.replace(_LATEST_VERSION, version)\nfile_version_py.close()\n\nfile_version_py = open('youtube_dlc/version.py', 'wt')\nfile_version_py.write(data)\nfile_version_py.close()\n","sub_path":"scripts/update-version-workflow.py","file_name":"update-version-workflow.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"502111666","text":"# Project modules\nimport aws_cost_collector\nimport cloudcheckr\n\n\ndef id():\n return \"ec2\"\n\n\ndef log(message):\n print(id() + \": \" + message)\n\n\ndef getTeamCost(team_name,configMap,debug):\n team_cost = dict(individual=dict())\n days_to_report = configMap['global']['days_to_report']\n\n log(\"getting team cost for team: %s for %i days\" % (team_name,days_to_report))\n\n data = None\n for config_plugin in configMap['plugins']:\n if config_plugin['name'] == id():\n if debug: log(\"plugin info found in config file\")\n if 'data_url' in config_plugin:\n # get the data url for the plugin\n data_url = config_plugin['data_url']\n log(\" getting data from cloudcheckr\")\n # get the report data from cloudcheckr which is by tag\n data = cloudcheckr.loadData(data_url, days_to_report, \"Groupings\", debug)\n elif 'aws_ce' in config_plugin:\n group_by_tag = None\n if 'group_by_tag' in config_plugin['aws_ce']:\n group_by_tag = config_plugin['aws_ce']['group_by_tag']\n group_by = None\n if group_by_tag:\n group_by = [\n 
{\n \"Type\": \"TAG\",\n \"Key\": group_by_tag\n }\n ]\n granularity = 'DAILY'\n if 'filter' in config_plugin['aws_ce']:\n filter = config_plugin['aws_ce']['filter']\n if group_by_tag and filter:\n # get the report data from aws cost explorer\n log(\" getting data from aws using cost explorer\")\n data = aws_cost_collector.get_costs(days_to_report=days_to_report,\n granularity=granularity,\n filter=filter,\n group_by=group_by,\n debug=debug)\n # We need to convert this to cloudcheckr format\n data = cloudcheckr.convert(data, group_by_tag, debug)\n\n if data:\n tag_to_match = None\n if debug: log(\"%i tags returned\" % len(data['Groupings']))\n\n # Find our team info in the config file\n for team in configMap['teams']:\n if team['name'] == team_name:\n team_members = team['members']\n tag_to_match = team[id()]['include_tag']\n\n if debug: log(\"Looking in report data for tag name %s\" % tag_to_match)\n\n # look in the data from the report for a tag value matching that we are looking for\n for tag in data['Groupings']:\n # Assume the memberID/email in the config file is lower case\n member_id = str(tag['Name'].split(tag_to_match)[1]).strip().lower()\n if debug: log(\"member_id in report data is %s\" % member_id)\n if member_id in team_members:\n if debug: log(\"team member found in report data: %s\" % member_id)\n\n # Now we can get their cost from the report data\n totalCost = 0\n for costitem in tag['Costs']:\n totalCost = totalCost + costitem['Amount']\n\n if debug: log(\"total cost for %s is %s\" % (member_id,totalCost))\n\n team_cost['individual'][member_id] = format(float(totalCost),'.2f')\n\n return team_cost\n","sub_path":"team-cost-reporter/plugins/ec2/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"100589663","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n#Assignment 9 Part 1\n\n\nimport urllib.request\nimport sys\nfrom bs4 import BeautifulSoup\n\nurl = \"https://www.cbssports.com/nfl/stats/playersort/nfl/year-2019-season-regular-category-touchdowns\"\npage = urllib.request.urlopen(url)\nsoup = BeautifulSoup(page.read(), \"lxml\")\n\n\nfind_table = soup.find_all('table', {'class': 'data'})\n\nfor i in find_table:\n table = i\n\n\ntRows = table.find_all('tr', {'class': lambda L: L and L.startswith('row')})\n\n\nalldata = []\n\n\nfor i in tRows:\n if (len(alldata)) < 20:\n tData = i.find_all('td')\n name = str(tData[0].get_text())\n ppos = str(tData[1].get_text())\n pteam = str(tData[2].get_text())\n tdown = str(tData[6].get_text())\n alldata.append('Player:{}, Position:{}, Team:{}, Touchdowns:{}'.format(name, ppos, pteam, tdown))\n print('Player:{}, Position:{}, Team:{}, Touchdowns:{}'.format(name, ppos, pteam, tdown))\n else:\n break\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"football_stats.py","file_name":"football_stats.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"432536499","text":"\nimport sys\nimport io\nimport time\nimport picamera\nfrom PIL import Image\nimport zbar\n\n# Create the in-memory stream\nstream = io.BytesIO()\n# create a reader\nscanner = zbar.ImageScanner()\n# configure the reader\nscanner.parse_config('enable')\n\nwith picamera.PiCamera() as camera:\n #configure camera\n camera.video_stabilization = True\n camera.sharpness = 50\n camera.contrast = 30\n\n #start preview window\n camera.start_preview()\n\n 
#initialize stream reader\n stream = io.BytesIO()\n try:\n for foo in camera.capture_continuous(stream, format='jpeg'):\n # Truncate the stream to the current position (in case\n # prior iterations output a longer image)\n stream.truncate()\n stream.seek(0)\n\n # obtain image data\n pil = Image.open(stream).convert('L')\n width, height = pil.size\n raw = pil.tostring()\n\n # wrap image data\n image = zbar.Image(width, height, 'Y800', raw)\n\n # scan the image for barcodes\n scanner.scan(image)\n\n # extract results\n for symbol in image:\n # do something useful with results\n print ('decoded', symbol.type, 'symbol', '\"%s\"' % symbol.data)\n\n # clean up\n del(image)\n\n #sleep to avoid 100% cpu usage\n time.sleep(0.05)\n finally:\n camera.stop_preview()","sub_path":"PiCamera/rpi-barcode-reader.py","file_name":"rpi-barcode-reader.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"632207238","text":"from styx_msgs.msg import TrafficLight\n\nimport numpy as np\nimport os\nimport sys\nimport tensorflow as tf\n\nfrom collections import defaultdict\nfrom io import StringIO\n\nfrom utilities import label_map_util\nfrom utilities import visualization_utils as vis_util\n\n#from light_classification.utilities import label_map_util\n#from light_classification.utilities import visualization_utils as vis_util\n\n#import cv2\n\n#Testing\n#from matplotlib import pyplot as plt\n\n\nclass TLClassifier(object):\n \n def __init__(self, *args):\n\n\n #self.current_light = TrafficLight.RED\n self.current_light = 0\n #cwd = os.path.dirname(os.path.realpath(__file__))\n\n # Path to frozen detection graph. This is the actual model that is used for the object detection.\n #base_path = os.path.dirname(os.path.abspath(__file__))\n MODEL_NAME = 'ssd_mobilenet_tl/'\n PATH_TO_CKPT = os.path.join('light_classification/', MODEL_NAME, 'frozen_inference_graph.pb')\n \n\n # Load label map\n \n PATH_TO_LABELS = os.path.join('light_classification/', MODEL_NAME, 'traffic_lights_label_map.pbtxt')\n NUM_CLASSES = 14\n label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,\n use_display_name=True)\n self.category_index = label_map_util.create_category_index(categories)\n\n # Build network\n self.detection_graph = tf.Graph()\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True # https://github.com/tensorflow/tensorflow/issues/6698\n\n with self.detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n self.sess = tf.Session(graph=self.detection_graph, config=config)\n\n # Definite input and output Tensors for detection_graph\n self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\n self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\n self.num_detections = 
self.detection_graph.get_tensor_by_name('num_detections:0')\n\n print(\"Classifier initialisation completed !\")\n\n\n def get_classification(self, image):\n \"\"\"Determines the color of the traffic light in the image\n\n Args:\n image (cv::Mat): image containing the traffic light\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n\n image_np_expanded = np.expand_dims(image, axis=0)\n\n # Perform network inference\n with self.detection_graph.as_default():\n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores,\n self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: image_np_expanded})\n\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n\n # Check the detections. If it has a good score\n # then set the current light to the detected label. The\n # first one is alwasy the best (they are returned sorted \n # in score order).\n # Note that we have trained for 14 categories, including\n # left/right arrows etc. Here we are only looking for \n # standard red, yellow and green light and ignore others.\n for i in range(boxes.shape[0]):\n if scores is None or scores[i] > .05:\n print('scores ', classes[i])\n #classname = self.category_index[classes[i]]['name']\n\n if classes[i] == 1:\n self.current_light = 2 #TrafficLight.GREEN\n elif classes[i] == 7:\n self.current_light = 1 #TrafficLight.YELLOW\n elif classes[i] == 2:\n self.current_light = 0 #TrafficLight.RED\n else:\n self.current_light = 4 #TrafficLight.UNKNOWN\n else:\n self.current_light = 4\n break\n\n print ('==== current Detected light ', self.current_light)\n return self.current_light\n","sub_path":"ros/src/tl_detector/light_classification/tl_classifier.py","file_name":"tl_classifier.py","file_ext":"py","file_size_in_byte":4919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"394238707","text":"\"\"\"\nDesign a search autocomplete system for a search engine. Users may input a sentence (at least one word and end with a special character '#'). For each character they type except '#', you need to return the top 3 historical hot sentences that have prefix the same as the part of sentence already typed. Here are the specific rules:\n\nThe hot degree for a sentence is defined as the number of times a user typed the exactly same sentence before.\nThe returned top 3 hot sentences should be sorted by hot degree (The first is the hottest one). If several sentences have the same degree of hot, you need to use ASCII-code order (smaller one appears first).\nIf less than 3 hot sentences exist, then just return as many as you can.\nWhen the input is a special character, it means the sentence ends, and in this case, you need to return an empty list.\nYour job is to implement the following functions:\n\nThe constructor function:\n\nAutocompleteSystem(String[] sentences, int[] times): This is the constructor. The input is historical data. Sentences is a string array consists of previously typed sentences. Times is the corresponding times a sentence has been typed. Your system should record these historical data.\n\nNow, the user wants to input a new sentence. The following function will provide the next character the user types:\n\nList input(char c): The input c is the next character typed by the user. The character will only be lower-case letters ('a' to 'z'), blank space (' ') or a special character ('#'). 
Also, the previously typed sentence should be recorded in your system. The output will be the top 3 historical hot sentences that have prefix the same as the part of sentence already typed.\n\n\nExample:\nOperation: AutocompleteSystem([\"i love you\", \"island\",\"ironman\", \"i love coding\"], [5,3,2,2])\nThe system have already tracked down the following sentences and their corresponding times:\n\"i love you\" : 5 times\n\"island\" : 3 times\n\"ironman\" : 2 times\n\"i love coding\" : 2 times\nNow, the user begins another search:\n\nOperation: input('i')\nOutput: [\"i love you\", \"island\",\"i love coding\"]\nExplanation:\nThere are four sentences that have prefix \"i\". Among them, \"ironman\" and \"i love coding\" have same hot degree. Since ' ' has ASCII code 32 and 'r' has ASCII code 114, \"i love coding\" should be in front of \"ironman\". Also we only need to output top 3 hot sentences, so \"ironman\" will be ignored.\n\nOperation: input(' ')\nOutput: [\"i love you\",\"i love coding\"]\nExplanation:\nThere are only two sentences that have prefix \"i \".\n\nOperation: input('a')\nOutput: []\nExplanation:\nThere are no sentences that have prefix \"i a\".\n\nOperation: input('#')\nOutput: []\nExplanation:\nThe user finished the input, the sentence \"i a\" should be saved as a historical sentence in system. And the following input will be counted as a new search.\n\nNote:\nThe input sentence will always start with a letter and end with '#', and only one blank space will exist between two words.\nThe number of complete sentences that to be searched won't exceed 100. The length of each sentence including those in the historical data won't exceed 100.\nPlease use double-quote instead of single-quote when you write test cases even for a character input.\nPlease remember to RESET your class variables declared in class AutocompleteSystem, as static/class variables are persisted across multiple test cases. 
Please see here for more details.\n\"\"\"\n\nclass AutocompleteSystem(object):\n\n def __init__(self, sentences, times):\n \"\"\"\n :type sentences: List[str]\n :type times: List[int]\n \"\"\"\n self.trie = {}\n self.cnt = collections.Counter()\n for sentence, time in zip(sentences, times):\n self.buildtrie(sentence, time)\n self.string = ''\n self.node = self.trie\n\n def input(self, c):\n \"\"\"\n :type c: str\n :rtype: List[str]\n \"\"\"\n def findword(node, re):\n if 'end' in node:\n res.append(re)\n for nxt in node:\n if nxt != 'end':\n findword(node[nxt], re+nxt)\n return\n\n res = []\n if c == '#':\n self.reset()\n return []\n elif c not in self.node:\n self.string += c\n self.node = {}\n return []\n else:\n self.string += c\n self.node = self.node[c]\n findword(self.node, self.string)\n res.sort(key=lambda x:(-self.cnt[x],x) )\n return res[:3]\n\n def reset(self):\n self.buildtrie(self.string, 1)\n self.string = ''\n self.node = self.trie\n\n def buildtrie(self, sentence, time):\n node =self.trie\n for c in sentence:\n if c not in node:\n node[c] = {}\n node = node[c]\n node['end'] = 1\n self.cnt[sentence] += time\n","sub_path":"Python/Trie/DesignSearchAuto2.py","file_name":"DesignSearchAuto2.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"468161385","text":"import csv\nfrom random import seed\nfrom random import randint\nimport time\nimport os\n\ndef tell_story():\n\n file_to_reference = \"stories.csv\"\n real_path = os.path.join(os.path.dirname(__file__),file_to_reference)\n titles = []\n stories = []\n with open(real_path) as csv_file: \n csv_reader = csv.reader(csv_file, delimiter=',') \n for row in csv_reader:\n titles.append(row[0])\n stories.append(row[1])\n\n length = len(titles)\n randomNum = randint(0,length-1)\n title = titles[randomNum]\n story = stories[randomNum]\n\n print(\"Lets hear a story about \" + title)\n time.sleep(1)\n print(story)\n\ndef activationPhrases():\n act = []\n\n act.append(\"tell me a story\")\n act.append(\"what's a good story\")\n\n return act\n\n \n \n","sub_path":"quick_commands/stories/tell_stories.py","file_name":"tell_stories.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"124500397","text":"from django.shortcuts import render, Http404\nfrom django.core.paginator import Paginator\n\nfrom news.models import NewsPost\n\n\ndef index(req, page=\"1\"):\n try:\n page_id = int(page)\n except ValueError:\n raise Http404\n posts = NewsPost.objects.all().order_by('-created')\n paginator = Paginator(posts, 10)\n if page_id not in paginator.page_range:\n raise Http404\n cur_page = paginator.page(page_id)\n return render(req, 'news/posts.html', {\n \"news\": cur_page,\n \"total_pages\": paginator.num_pages,\n \"current_page\": page_id\n })\n\n\ndef post(req, post_id):\n try:\n post = NewsPost.objects.get(id=post_id)\n except NewsPost.DoesNotExist:\n raise Http404\n return render(req, 'news/post.html', {\n \"post\": post\n })","sub_path":"CSchoolSite/news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"252554680","text":"# utils_covariance\n# author: Jungtaek Kim (jtkim@postech.ac.kr)\n# last updated: April 15, 2019\n\nimport numpy as np\n\nfrom bayeso import constants\n\n\ndef _get_list_first():\n \"\"\"\n It provides list of 
strings.\n The strings in that list require two hyperparameters, `signal` and `lengthscales`.\n We simply call it as `list_first`.\n\n :returns: list of strings, which satisfy some requirements we mentioned above.\n :rtype: list\n\n \"\"\"\n\n list_first = ['eq', 'se', 'matern32', 'matern52']\n list_first += ['set_' + str_ for str_ in list_first]\n return list_first\n\ndef get_hyps(str_cov, int_dim, is_ard=True):\n \"\"\"\n It returns a dictionary of default hyperparameters for covariance function, where `str_cov` and `int_dim` are given. If `is_ard` is True, the length scales would be `int_dim`-dimensional vector.\n\n :param str_cov: the name of covariance function.\n :type str_cov: str.\n :param int_dim: dimensionality of the problem we are solving.\n :type int_dim: int.\n :param is_ard: flag for automatic relevance determination.\n :type is_ard: bool., optional\n\n :returns: dictionary of default hyperparameters for covariance function.\n :rtype: dict.\n\n :raises: AssertionError\n\n \"\"\"\n\n assert isinstance(str_cov, str)\n assert isinstance(int_dim, int)\n assert isinstance(is_ard, bool)\n assert str_cov in constants.ALLOWED_GP_COV\n\n hyps = dict()\n hyps['noise'] = constants.GP_NOISE\n\n list_first = _get_list_first()\n\n if str_cov in list_first:\n hyps['signal'] = 1.0\n if is_ard:\n hyps['lengthscales'] = np.ones(int_dim)\n else:\n # TODO: It makes bunch of erros. I should fix it.\n hyps['lengthscales'] = 1.0\n else:\n raise NotImplementedError('get_hyps: allowed str_cov, but it is not implemented.')\n return hyps\n\ndef get_range_hyps(str_cov, int_dim,\n is_ard=True,\n is_fixed_noise=False\n):\n \"\"\"\n It returns default optimization ranges of hyperparameters for Gaussian process regression.\n\n :param str_cov: the name of covariance function.\n :type str_cov: str.\n :param int_dim: dimensionality of the problem we are solving.\n :type int_dim: int.\n :param is_ard: flag for automatic relevance determination.\n :type is_ard: bool., optional\n :param is_fixed_noise: flag for fixing a noise.\n :type is_fixed_noise: bool., optional\n\n :returns: list of default optimization ranges for hyperparameters.\n :rtype: list\n\n :raises: AssertionError\n\n \"\"\"\n\n assert isinstance(str_cov, str)\n assert isinstance(int_dim, int)\n assert isinstance(is_ard, bool)\n assert isinstance(is_fixed_noise, bool)\n assert str_cov in constants.ALLOWED_GP_COV\n\n range_hyps = []\n\n list_first = _get_list_first()\n\n if not is_fixed_noise:\n range_hyps += constants.RANGE_NOISE\n\n if str_cov in list_first:\n range_hyps += constants.RANGE_SIGNAL # for signal scale\n if is_ard: # for lengthscales\n for _ in range(0, int_dim):\n range_hyps += constants.RANGE_LENGTHSCALES\n else:\n range_hyps += constants.RANGE_LENGTHSCALES\n else:\n raise NotImplementedError('get_hyps: allowed str_cov, but it is not implemented.')\n\n return range_hyps\n\ndef convert_hyps(str_cov, hyps, is_fixed_noise=False):\n \"\"\"\n It converts hyperparameters dictionary, `hyps` to numpy array.\n\n :param str_cov: the name of covariance function.\n :type str_cov: str.\n :param hyps: dictionary of hyperparameters for covariance function.\n :type hyps: dict.\n :param is_fixed_noise: flag for fixing a noise.\n :type is_fixed_noise: bool., optional\n\n :returns: converted array of the hyperparameters given by `hyps`.\n :rtype: numpy.ndarray\n\n :raises: AssertionError\n\n \"\"\"\n\n assert isinstance(str_cov, str)\n assert isinstance(hyps, dict)\n assert isinstance(is_fixed_noise, bool)\n assert str_cov in 
constants.ALLOWED_GP_COV\n\n list_hyps = []\n if not is_fixed_noise:\n list_hyps.append(hyps['noise'])\n\n list_first = _get_list_first()\n\n if str_cov in list_first:\n list_hyps.append(hyps['signal'])\n for elem_lengthscale in hyps['lengthscales']:\n list_hyps.append(elem_lengthscale)\n else:\n raise NotImplementedError('convert_hyps: allowed str_cov, but it is not implemented.')\n return np.array(list_hyps)\n\ndef restore_hyps(str_cov, hyps, is_fixed_noise=False, fixed_noise=constants.GP_NOISE):\n \"\"\"\n It restores hyperparameters array, `hyps` to dictionary.\n\n :param str_cov: the name of covariance function.\n :type str_cov: str.\n :param hyps: array of hyperparameters for covariance function.\n :type hyps: numpy.ndarray\n :param is_fixed_noise: flag for fixing a noise.\n :type is_fixed_noise: bool., optional\n :param fixed_noise: fixed noise value.\n :type fixed_noise: float, optional\n\n :returns: restored dictionary of the hyperparameters given by `hyps`.\n :rtype: numpy.ndarray\n\n :raises: AssertionError\n\n \"\"\"\n\n assert isinstance(str_cov, str)\n assert isinstance(hyps, np.ndarray)\n assert isinstance(is_fixed_noise, bool)\n assert isinstance(fixed_noise, float)\n assert len(hyps.shape) == 1\n assert str_cov in constants.ALLOWED_GP_COV\n\n dict_hyps = dict()\n if not is_fixed_noise:\n dict_hyps['noise'] = hyps[0]\n ind_start = 1\n else:\n dict_hyps['noise'] = fixed_noise\n ind_start = 0\n\n list_first = _get_list_first()\n\n if str_cov in list_first:\n dict_hyps['signal'] = hyps[ind_start]\n list_lengthscales = []\n for ind_elem in range(ind_start+1, len(hyps)):\n list_lengthscales.append(hyps[ind_elem])\n dict_hyps['lengthscales'] = np.array(list_lengthscales)\n else:\n raise NotImplementedError('restore_hyps: allowed str_cov, but it is not implemented.')\n return dict_hyps\n\ndef validate_hyps_dict(dict_hyps, str_cov, int_dim):\n \"\"\"\n It validates hyperparameters dictionary, `dict_hyps`.\n\n :param dict_hyps: dictionary of hyperparameters for covariance function.\n :type dict_hyps: dict.\n :param str_cov: the name of covariance function.\n :type str_cov: str.\n :param int_dim: dimensionality of the problem we are solving.\n :type int_dim: int.\n\n :returns: a tuple of valid hyperparameters and validity flag.\n :rtype: (dict., bool.)\n\n :raises: AssertionError\n\n \"\"\"\n\n assert isinstance(dict_hyps, dict)\n assert isinstance(str_cov, str)\n assert isinstance(int_dim, int)\n assert str_cov in constants.ALLOWED_GP_COV\n\n is_valid = True\n\n if 'noise' not in dict_hyps:\n is_valid = False\n else:\n if not isinstance(dict_hyps['noise'], float):\n is_valid = False\n else:\n if np.abs(dict_hyps['noise']) >= constants.BOUND_UPPER_GP_NOISE:\n dict_hyps['noise'] = constants.BOUND_UPPER_GP_NOISE\n\n if str_cov == 'eq' or str_cov == 'se' or str_cov == 'matern32' or str_cov == 'matern52':\n if 'lengthscales' not in dict_hyps:\n is_valid = False\n else:\n if isinstance(dict_hyps['lengthscales'], np.ndarray) and dict_hyps['lengthscales'].shape[0] != int_dim:\n is_valid = False\n if not isinstance(dict_hyps['lengthscales'], np.ndarray) and not isinstance(dict_hyps['lengthscales'], float):\n is_valid = False\n if 'signal' not in dict_hyps:\n is_valid = False\n else:\n if not isinstance(dict_hyps['signal'], float):\n is_valid = False\n else:\n is_valid = False\n return dict_hyps, is_valid\n\ndef validate_hyps_arr(arr_hyps, str_cov, int_dim):\n \"\"\"\n It validates hyperparameters array, `arr_hyps`.\n\n :param arr_hyps: array of hyperparameters for covariance function.\n 
:type arr_hyps: numpy.ndarray\n :param str_cov: the name of covariance function.\n :type str_cov: str.\n :param int_dim: dimensionality of the problem we are solving.\n :type int_dim: int.\n\n :returns: a tuple of valid hyperparameters and validity flag.\n :rtype: (numpy.ndarray, bool.)\n\n :raises: AssertionError\n\n \"\"\"\n\n assert isinstance(arr_hyps, np.ndarray)\n assert isinstance(str_cov, str)\n assert isinstance(int_dim, int)\n assert str_cov in constants.ALLOWED_GP_COV\n\n# is_valid = True\n\n raise NotImplementedError('validate_hyps_arr in utils_covariance.py')\n","sub_path":"bayeso/utils/utils_covariance.py","file_name":"utils_covariance.py","file_ext":"py","file_size_in_byte":8438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"641064715","text":"#29\nimport math\ntab = [2, 3, 5, 2, 9, 8, 1, 3, 9, 1, 1, 4, 7, 7, 1, 4]\n#print(len(tab))\ndef mediana(tablica):\n tablica.sort()\n #print(len(tablica)/2)\n if len(tablica)%2 == 0:\n #print(tablica[int(len(tablica)/2)])\n #print(\"test\")\n return((tablica[int(len(tablica)/2)] + tablica[int(len(tablica)/2) - 1])/2)\n else:\n #print(tablica[int(len(tablica)/2)])\n #print(\"test\")\n #print(math.floor(len(tablica)/2))\n return(tablica[int(math.floor(len(tablica)/2))])\n\ndef dominanta(tablica):\n tablica.sort()\n a = tablica[0]\n d = tablica[0]\n iloscA = 0\n iloscD = 0\n for x in tablica:\n #print(x)\n \n if d == x:\n iloscD += 1\n elif a == x:\n iloscA += 1\n else:\n a = x\n iloscA = 1\n if iloscA > iloscD:\n d = a\n iloscD = iloscA\n \n \n #print(\"test\")\n #print(a,d,iloscA,iloscD)\n return(d)\n\nprint(f'Mediana: {mediana(tab)}, Dominanta: {dominanta(tab)}')\n#print(dominanta(tab))","sub_path":"04-Subroutines/zadanie29.py","file_name":"zadanie29.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"248828112","text":"\"\"\"\nGBDX IDAHO Interface.\n\nContact: nate.ricklin@digitalglobe.com\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom builtins import str\nfrom builtins import object\nfrom past.utils import old_div\n\nfrom shapely.wkt import loads\nimport codecs\nimport json\nimport os\nimport requests\n\nfrom gbdxtools.catalog import Catalog\n\nclass Image(object):\n \"\"\" \n Strip Image Class \n Collects metadata on all image parts, groupd pan and ms bands from idaho\n\n \n \"\"\"\n\n def __init__(self, interface):\n self.interface = interface\n self.gbdx_connection = self.interface.gbdx_connection\n self.logger = interface.logger\n\n def __call__(self, cat_id):\n self.cat_id = cat_id\n self._fetch_metadata()\n return self\n\n def _fetch_metadata(self):\n props = self.interface.catalog.get(self.cat_id)['properties']\n f = loads(props['footprintWkt'])\n geom = f.__geo_interface__\n del props['footprintWkt']\n idaho = self.interface.idaho.get_images_by_catid(self.cat_id)\n parts = self.interface.idaho.describe_images(idaho)[self.cat_id]['parts']\n idaho = {k['identifier']: k for k in idaho['results']}\n \n props['parts'] = []\n for p, info in parts.iteritems():\n part = {}\n for key, img in info.iteritems():\n if img['id'] in idaho:\n part[key] = idaho[img['id']]['properties']\n part[key]['bounds'] = loads(idaho[img['id']]['properties']['footprintWkt']).bounds\n props['parts'].append(part)\n\n self.metadata = {\n 'properties': props,\n 'geometry': geom\n }\n\n def vrt(self):\n print('Create a vrt from image parts')\n # look for a vrt on disk 
else create one\n # return path the vrt\n\n def _generate_vrt(self):\n cols = str(self.darr.shape[-1])\n rows = str(self.darr.shape[1])\n (minx, miny, maxx, maxy) = rasterio.windows.bounds(self._roi, self._src.transform)\n affine = [c for c in rasterio.transform.from_bounds(minx, miny, maxx, maxy, int(cols), int(rows))]\n transform = [affine[2], affine[0], 0.0, affine[5], 0.0, affine[4]]\n\n vrt = ET.Element(\"VRTDataset\", {\"rasterXSize\": cols, \"rasterYSize\": rows})\n ET.SubElement(vrt, \"SRS\").text = str(self._src.crs['init']).upper()\n ET.SubElement(vrt, \"GeoTransform\").text = \", \".join(map(str, transform))\n for i in self._src.indexes:\n band = ET.SubElement(vrt, \"VRTRasterBand\", {\"dataType\": self._src.dtypes[i-1].title(), \"band\": str(i)})\n src = ET.SubElement(band, \"SimpleSource\")\n ET.SubElement(src, \"SourceFilename\").text = \"HDF5:{}://{}_{}_{}\".format(self._filename, self._gid, self.node, self.level)\n ET.SubElement(src, \"SourceBand\").text =str(i)\n ET.SubElement(src, \"SrcRect\", {\"xOff\": \"0\", \"yOff\": \"0\",\n \"xSize\": cols, \"ySize\": rows})\n ET.SubElement(src, \"DstRect\", {\"xOff\": \"0\", \"yOff\": \"0\",\n \"xSize\": cols, \"ySize\": rows})\n\n ET.SubElement(src, \"SourceProperties\", {\"RasterXSize\": cols, \"RasterYSize\": rows,\n \"BlockXSize\": \"128\", \"BlockYSize\": \"128\", \"DataType\": self._src.dtypes[i-1].title()})\n vrt_str = ET.tostring(vrt)\n\n with open(self.vrt, \"w\") as f:\n f.write(vrt_str)\n\n return self.vrt\n\n def aoi(self, bbox=None, geometry=None, pansharpen=False):\n if bbox is None:\n print('Missing either a bbox or a geometry to define an AOI')\n return None\n else:\n return AOI(self, bbox)\n #W, S, E, N = (-95.06904982030392, 29.7187207124839, -95.06123922765255, 29.723901202069023)\n #chip_geo = 'houston_geo.tif'\n #self.interface.idaho.get_chip(coordinates=[W, S, E, N], catid = catid, chip_type='PAN', filename=chip_geo)\n\n\nclass AOI(object):\n def __init__(self, image, bbox):\n self.image = image\n self.bbox = bbox\n self.metadata = metadata\n \n\nif __name__ == '__main__': \n from gbdxtools import Interface\n import json\n gbdx = Interface()\n\n cat_id = '104001001838A000'\n img = gbdx.image(cat_id)\n\n #print(json.dumps(img.metadata, indent=4))\n img.vrt()\n img.aoi()\n","sub_path":"gbdxtools/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"573087857","text":"#!/usr/bin/env python\n\"\"\"\nScrape the NFL fantasy league stats.\n\"\"\"\nimport sys\nimport csv\nimport re\nimport requests\nimport bs4\n\n# original URL:\n# https://fantasy.nfl.com/research/scoringleaders?position=O&statCategory=stats\n# &statSeason=2019&statType=seasonStats&statWeek=5\n\nURL_TEMPLATE = (\n \"https://fantasy.nfl.com/research/scoringleaders\"\n \"?position=O&statCategory=stats&statSeason={}\"\n \"&statType=seasonStats&statWeek={}\"\n)\n\nRX_PLAYER = re.compile(r'player-\\d{7}')\n\nCLASSES_TO_FIELDS = [\n # tag, css class, label\n ('a', 'playerNameFull', 'Player Name'),\n ('td', 'playerOpponent', 'Opponent'),\n ('span', 'statId-5', 'Passing Yards'),\n ('span', 'statId-6', 'Passing TDs'),\n ('span', 'statId-7', 'Passing Interceptions'),\n ('span', 'statId-14', 'Rushing Yards'),\n ('span', 'statId-15', 'Rushing TDs'),\n ('span', 'statId-20', 'Receiving Receptions'),\n ('span', 'statId-21', 'Receiving Yards'),\n ('span', 'statId-22', 'Receiving TDs'),\n ('span', 'statId-28', 'Returns'),\n ('span', 'statId-29', 
'Fumble TDs'),\n ('span', 'statId-32', '2 PTS'),\n ('span', 'statId-32', 'Fumbles lost'),\n ('span', 'playerSeasonTotal', 'Fantasy points'),\n]\n\nFIELD_NAMES = [field.replace(' ', '_') for tag, css_class, field in CLASSES_TO_FIELDS]\n\n\ndef main(args):\n \"\"\"\n Program entry point.\n\n Finds each player on specified URL, then parses out player's stats.\n\n :param args: command line arguments -- year and week of stats\n :return: None\n \"\"\"\n url = URL_TEMPLATE.format(args[0], args[1]) # fill in URL with year and week\n\n response = requests.get(url) # make HTTP request\n if response.status_code == requests.codes.OK: # if HTTP request OK\n soup = bs4.BeautifulSoup(response.content, features='lxml') # make soup\n\n with open('fantasy_stats.csv', 'w') as stats_out: # open file for CSV\n wtr = csv.writer(stats_out) # create CSV writer\n wtr.writerow(FIELD_NAMES) # write CSV header\n\n for player_html in soup.findAll('tr', {'class': RX_PLAYER}):\n data_row = [] # data for this player for CSV\n # uncomment to see raw player data:\n # print(\"*\" * 50)\n # print(player_raw)\n # print(\"*\" * 50)\n for tag, css_class, field in CLASSES_TO_FIELDS:\n data = player_html.find(tag, {'class': css_class}).text\n print(field, data)\n data_row.append(data)\n\n wtr.writerow(r.replace('-', '0') for r in data_row) # write to CSV file\n\n print('-' * 60)\n else:\n print(\"Unable to retrieve page\")\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n\njunk = \"\"\"\nresponse = requests.get(\n URL,\n auth=(key, value),\n headers={'headername': 'headervalue', 'k2': 'v2'},\n params={...},\n data={....},\n proxies={'https': 'proxy.nike.com:2345'},\n cookies={...},\n)\n\"\"\"\n","sub_path":"parse_fantasy_football.py","file_name":"parse_fantasy_football.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"76825608","text":"\"\"\"\nYou are presented with an integer number larger than 5. Your goal is to identify the longest side\npossible in a right triangle whose sides are not longer than the number you are given.\n\nFor example, if you are given the number 15, there are 3 possibilities to compose right triangles:\n\n1. [3, 4, 5]\n2. [6, 8, 10]\n3. [5, 12, 13]\n\nThe following function shows one way to solve the problem but the code is not ideal or efficient.\nRefactor the code based on what you have learned about code simplicity and efficiency.\n\"\"\"\n#Wrapped everything up inside the function and changed its logic. It was bothering me that we had 3 nested for loops and wanted to reduce those.\n#The previous function used the for loops to create a list with all the combinations possible within the range 3,X (considering a right angle triangle\n#cannot have an integer side that is smaller than 3. After having the list, it applied the pytagoras theorem to all the elements in the list.\n#It seemed a waste of resources to do all that, so I decided to decompose the theorem and use 1 while loop to check my conditions, from 3 to the limit X.\ndef my_function():\n #added input to the function\n y = int(input('What is the maximal length of the triangle side? Enter any integer larger than 5: '))\n #added condition that I could not have a maximal length smaller than 5.\n if y < 5:\n return 'Please enter an integer larger than 5.'\n #added initial values to one of my sides (a) and defined my hipotenuse with any value (it will be updated later). 
Also created a list to store hipotenuse values.\n a = 3\n c = 0\n c_lst = []\n #the loop will continue while my hipotenuse (c) is smaller than the input y.\n while c <= y :\n #if my side a is an even number, then it means that the hipotenuse minus a is necessarily equal to 2. If I transform the theroem in a\n # 2nd degree pair of equations, it is the following system.\n #If a is even, I will apply the equations to the other sides and will check if the results of b and c are integers.\n #if that happens, then I will add my hipotenuse c to a list of valid hipotenuses.\n if a % 2 == 0:\n b = a ** 2 / 4 - 1\n c = a ** 2 / 4 + 1\n if b % 1 == 0 and c % 1 == 0:\n c_lst.append(c)\n #doing the same with the applicable condition if a is an odd number.\n elif a % 2 == 1:\n b = (a ** 2 - 1) / 2\n c = (a ** 2 + 1) / 2\n if b % 1 == 0 and c % 1 == 0:\n c_lst.append(c)\n #after applying the conditions and adding c to the list (or not), I will add 1 to a, so I can test with another set of variables.\n a += 1\n #in the end, when I reach my input, I will have an ordered list with all my valid hipotenuses. I return the last element of the list.\n return c_lst[-1]\n\nprint(my_function())\n","sub_path":"your-code/challenge-3.py","file_name":"challenge-3.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"311296539","text":"from pathlib import Path\n\nimport airflow.utils.dates\nfrom airflow import DAG\nfrom airflow.contrib.sensors.python_sensor import PythonSensor\nfrom airflow.operators.dummy_operator import DummyOperator\n\ndag = DAG(\n dag_id=\"chapter6_couponing_app_figure612\",\n start_date=airflow.utils.dates.days_ago(3),\n schedule_interval=\"0 16 * * *\",\n description=\"A batch workflow for ingesting supermarket promotions data.\",\n default_args={\"depends_on_past\": True},\n)\n\n\ndef _wait_for_supermarket(supermarket_id_):\n supermarket_path = Path(\"/data/\" + supermarket_id_)\n data_files = supermarket_path.glob(\"data-*.csv\")\n success_file = supermarket_path / \"_SUCCESS\"\n return data_files and success_file.exists()\n\n\nfor supermarket_id in [1, 2, 3, 4]:\n wait = PythonSensor(\n task_id=f\"wait_for_supermarket_{supermarket_id}\",\n python_callable=_wait_for_supermarket,\n op_kwargs={\"supermarket_id\": f\"supermarket{supermarket_id}\"},\n provide_context=True,\n dag=dag,\n )\n copy = DummyOperator(task_id=f\"copy_to_raw_supermarket_{supermarket_id}\", dag=dag)\n process = DummyOperator(task_id=f\"process_supermarket_{supermarket_id}\", dag=dag)\n generate_metrics = DummyOperator(\n task_id=f\"generate_metrics_supermarket_{supermarket_id}\", dag=dag\n )\n compute_differences = DummyOperator(\n task_id=f\"compute_differences_supermarket_{supermarket_id}\", dag=dag\n )\n update_dashboard = DummyOperator(\n task_id=f\"update_dashboard_supermarket_{supermarket_id}\", dag=dag\n )\n notify_new_data = DummyOperator(\n task_id=f\"notify_new_data_supermarket_{supermarket_id}\", dag=dag\n )\n\n wait >> copy >> process >> generate_metrics >> [\n compute_differences,\n notify_new_data,\n ]\n compute_differences >> update_dashboard\n","sub_path":"apache-airflow/materials/data-pipelines-with-apache-airflow-master/chapters/chapter6/dags/couponing_app_figure612.py","file_name":"couponing_app_figure612.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"460164797","text":"import sys\n\nsys.stdin = 
open('input.txt', 'r')\n\n\ndef merge_sort(list):\n if len(list) <= 1:\n return list\n mid = len(list) // 2\n leftList = list[:mid]\n rightList = list[mid:]\n leftList = merge_sort(leftList)\n rightList = merge_sort(rightList)\n return merge(leftList, rightList)\n\n\ndef merge(left, right):\n global count\n result = []\n i = 0\n j = 0\n if left[-1] > right[-1]:\n count += 1\n while (i < len(left)) and (j < len(right)):\n if left[i] < right[j]:\n result.append(left[i])\n i += 1\n else:\n result.append(right[j])\n j += 1\n while len(left) > i:\n result.append(left[i])\n i += 1\n while len(right) > j:\n result.append(right[j])\n j += 1\n return result\n\n\nT = int(input())\n\nfor test_case in range(1, T+1):\n num_count = int(input())\n my_list = list(map(int, input().split()))\n count = 0\n print('#{} {} {}'.format(test_case, merge_sort(my_list)[num_count//2], count))","sub_path":"Algorithm/개념정리/정렬/5204. 병합 정렬/병합정렬.py","file_name":"병합정렬.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"52566514","text":"#encoding:utf-8\n\nfrom PIL import Image\n\nimages_nums = 200\n\ndef binary_diff(img):\n\n img = img.convert('RGBA')\n pix = img.load()\n color = []\n\n # for test\n for x in xrange(img.size[0]):\n color.append(reduce(lambda x,y:x+y, pix[x,11][0:3]) / 3)\n # print color\n # raw_input('wait...')\n \n for x in xrange(img.size[0]):\n for y in xrange(img.size[1]):\n if pix[x,y][0] == 255 and (pix[x,y][1] > 0 and pix[x,y][1] < 200) and (pix[x,y][2] >= 0 and pix[x,y][2] <= 100):\n pix[x,y] = (0, 0, 0, 255)\n else:\n pix[x,y] = (255, 255, 255, 255)\n return img\n\n\n\ndef cut_pic(filename): #图片处理(灰度化,二值化,切割图片)\n # filepath = filename\n im = Image.open(filename)\n out = binary_diff(im)\n # imgry = im.convert('L') #灰度化\n #imgry.show()\n #二值化\n # threshold = 130\n # table = []\n cut = []\n realcut = []\n #out.show()\n \n #分割图片\n width = out.width\n height = out.height\n #取有像素0的列\n for x in range(0,width):\n for y in range(0,height):\n if out.getpixel((x, y))[0] == 0:\n cut.append(x)\n break\n else:\n continue\n #保存要切割的列\n realcut.append(cut[0]-1)\n for i in range(0,len(cut)-1):\n if cut[i+1]-cut[i] > 1:\n realcut.append(cut[i]+1)\n realcut.append(cut[i+1]-1)\n else:\n continue\n realcut.append(cut[-1]+1)\n #切割图片\n count = [0,2,4,6]\n child_img_list = []\n for i in count:\n child_img = out.crop((realcut[i],0,realcut[i+1],height))\n child_img_list.append(child_img)\n #保存切割的图片\n #for i in range(0,4):\n #child_img_list[i].save(\"E:\\%d.jpg\" % i)\n \n #横向切割\n cut_second =[]\n final_img_list = []\n for i in range(0,4):\n width = child_img_list[i].width\n height = child_img_list[i].height\n #取有像素0的列\n for y in range(0,height):\n for x in range(0,width):\n if child_img_list[i].getpixel((x, y)) == 0:\n cut_second.append(y)\n break\n else:\n continue\n #切割图片\n final_img = child_img_list[i].crop((0,cut_second[0]-1,width,cut_second[-1]+1))\n final_img_list.append(final_img)\n # 返回切割的图片\n return final_img_list\n\nif __name__ == '__main__':\n for i in xrange(images_nums):\n name = 'test_images/'+str(i)+'.gif'\n cut_pic(name)\n","sub_path":"captcha/yiren.py","file_name":"yiren.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"267818617","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nimport os\nimport subprocess\n\nfrom gen_cell import rndgen_lattice, calc_latvec, 
calc_cos\nfrom with_spg.fw import fw_input, gen_wypos\nfrom wo_spg.gen_coordinate import rndgen_coord\nfrom ..struc_util import out_poscar\n\n\ndef rndgen_wo_spg(nstruc, natot, atype, nat, id_offset=0, minlen=4, maxlen=10, dangle=20, mindist=1.5,\n maxcnt=200, symprec=0.001, init_pos_path=None):\n '''\n Randomly generate structures without space group information\n '''\n\n # ---------- initialize\n init_struc_data = {}\n spgnum = 0\n\n # ---------- cd gen_struc\n if not os.path.isdir('gen_struc'):\n os.mkdir('gen_struc')\n os.chdir('gen_struc')\n\n # ---------- generate structures\n while len(init_struc_data) < nstruc:\n spg_in, a, b, c, alpha, beta, gamma = rndgen_lattice(spgnum, minlen, maxlen, dangle)\n va, vb, vc = calc_latvec(a, b, c, alpha, beta, gamma)\n tmp_struc = rndgen_coord(natot, atype, nat, va, vb, vc, mindist, maxcnt)\n if tmp_struc is not None: # success of generation\n # ------ check actual space group using pymatgen\n try:\n spg_sym, spg_num = tmp_struc.get_space_group_info(symprec=symprec)\n except TypeError:\n spg_num = 0\n spg_sym = None\n # ------ register the structure in pymatgen format\n cID = len(init_struc_data) + id_offset\n init_struc_data[cID] = tmp_struc\n print('Structure ID {0:>8} was generated. Space group: {1:>3} --> {2:>3} {3}'.format(\n cID, spg_in, spg_num, spg_sym))\n # ------ save poscar\n if init_pos_path is not None:\n out_poscar(tmp_struc, cID, init_pos_path)\n # ---------- go back to ..\n os.chdir('../')\n\n return init_struc_data\n\n\ndef rndgen_spg(nstruc, natot, atype, nat, spgnum='all', id_offset=0,\n minlen=4, maxlen=10, dangle=20, mindist=1.5,\n maxcnt=200, symprec=0.001,\n init_pos_path=None, fwpath='./find_wy'):\n '''\n Randomly generate structures with space group information\n '''\n\n # ---------- initialize\n init_struc_data = {}\n\n # ---------- cd gen_struc\n if not os.path.isdir('gen_struc'):\n os.mkdir('gen_struc')\n os.chdir('gen_struc')\n\n # ---------- generate structures\n while len(init_struc_data) < nstruc:\n spg_in, a, b, c, alpha, beta, gamma = rndgen_lattice(spgnum, minlen, maxlen, dangle)\n cosa, cosb, cosg = calc_cos(alpha, beta, gamma)\n fw_input(atype, nat, spg_in, a, b, c, cosa, cosb, cosg)\n\n # ------ loop for same fw_input\n cnt = 0\n while cnt <= maxcnt:\n # -- run find_wy\n with open('sublog', 'w') as f:\n subprocess.call([fwpath, 'input'], stdout=f, stderr=f)\n\n # -- generate a structure using POS_WY_SKEL_ALL.json\n if not os.path.isfile('POS_WY_SKEL_ALL.json'):\n wyflag = False\n break\n wyflag, tmp_struc = gen_wypos(atype, mindist, maxcnt)\n if wyflag is False: # Failure\n os.remove('POS_WY_SKEL_ALL.json')\n cnt += 1\n continue\n else: # Success\n rm_files() # clean\n break # break fw_input loop\n\n if wyflag is False: # maximum trial or no POS_WY_SKEL_ALL.json file\n rm_files() # clean\n continue # to new fw_input\n\n # ------ check actual space group using pymatgen\n try:\n spg_sym, spg_num = tmp_struc.get_space_group_info(symprec=symprec)\n except TypeError:\n spg_num = 0\n spg_sym = None\n\n # ------ register the structure in pymatgen format\n cID = len(init_struc_data) + id_offset\n init_struc_data[cID] = tmp_struc\n print('Structure ID {0:>8} was generated. 
Space group: {1:>3} --> {2:>3} {3}'.format(\n cID, spg_in, spg_num, spg_sym))\n\n # ------ save poscar\n if init_pos_path is not None:\n out_poscar(tmp_struc, cID, init_pos_path)\n\n # ------ clean\n rm_files()\n\n # ---------- go back to ..\n os.chdir('../')\n\n return init_struc_data\n\n\ndef rm_files(files=['input', 'POS_WY_SKEL_ALL.json']):\n for rfile in files:\n if os.path.isfile(rfile):\n os.remove(rfile)\n","sub_path":"CrySPY/gen_struc/random/rndgen.py","file_name":"rndgen.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"352308274","text":"# Given a sequence of non-negative integers, where each number is written in a separate line. The sequence ends with 0. Print the number of elements of the sequence that are greater than their neighbors above. \n\ndef greater_than_prev():\n prev = next = int(input())\n count = 0 \n while next != 0:\n if prev > next:\n count += 1\n prev, next = next, int(input())\n return count\n\n\ndef greater_than_prev2():\n a = int(input())\n nums = []\n while a !=0:\n nums.append(a)\n a = int(input())\n \n count = 0 \n for i in range(1,len(nums)):\n if nums[i] > nums[i-1]:\n count+=1\n \n return count\n\nprint(greater_than_prev2())\n# 1\n# 2\n# 3\n# 4\n# 5\n# 0\n\n# Answer: 4 ","sub_path":"Warm-Ups/While_Loops/greater_than_prev.py","file_name":"greater_than_prev.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"48455641","text":"#!/usr/bin/python3 \nimport webbrowser\nimport time\nimport subprocess\noption='''\nPress 1 to start your vlc media player :\nPress 2 to play your fav song in youtube : \nPress 3 to search something on google : \nPress 4 to send whatsapp message to your fav number : \nPress 5 to check current time and date :\npress 6 to reboot your machine : \n'''\nprint(option)\n\n# taking input from user \n# 1 st\nchoice=input()\n# input function will take input in str format \n# conditional state\nif choice == '5':\n # to connect our BIOS time \n current_time=time.ctime()\n print(current_time)\n\nelif choice == '1':\n # to connect os level application we use subprocess \n subprocess.getoutput('vlc')\n\nelif choice == '3':\n data=input(\"type your search :---> \")\n webbrowser.open_new_tab('https://www.google.com/search?q='+data)\n\nelif choice == '2':\n data=input(\"type your fav song :---> \")\n webbrowser.open_new_tab('https://www.youtube.com/results?search_query='+data)\n\n\nelif choice == '4':\n #webbrowser.open_new_tab('https://wa.me/+919511537588')\n webbrowser.open_new_tab('https://api.whatsapp.com/send?phone=919511537588&text=Hello%20there')\n\nelif choice == '6':\n cmdCommand = \"shutdown -h now\"\n process = subprocess.Popen(cmdCommand.split(), stdout=subprocess.PIPE)\n\nelse : \n print(\"hiii\")\n\n","sub_path":"python1.py","file_name":"python1.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"81122192","text":"import requests\nimport bs4\ncity='bangalore'\nlinks=[]\nfor page in range(1, 3):\n root_url = 'http://www.99acres.com/property-in-'+city+'-ffid-page-'+str(page)\n index_url = root_url\n print(index_url)\n response = requests.get(index_url)\n soup = bs4.BeautifulSoup(response.text)\n for link in soup.find_all('a',class_='sName'):\n links.append(link.get('href'))\nprint(links)","sub_path":"Web Crawler.py","file_name":"Web 
Crawler.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"384847893","text":"import csv\n\nrunFirstPart = True #This collects the keypress sequence from all of the CSV files.\nrunSecondPart = True #This calculates the similarity of the keystroke sequences of participants' first and last games\nrunThirdPart = True #This calculates the frequency of keypress made by all of the participants for all of their games\n\n\ntextFilename = \"sequences.txt\" #This will be the text file which will store all keystroke sequence of all files\nsimilarityFilename = \"similarity_sequence.txt\"\npercentFilename = \"similarity_percentage.txt\"\nfrequencyFilename = \"keypress_frequency.txt\"\nfileNumber = 1 #This is the counter for the filename\nfirstGamesList = [1,7,13,19,25,31,37,43,49,55,61,67,73,79,85,91,97,103,109,115]\n\nif runFirstPart == True:\n #Iterates through each csv files and collects the keypress sequence\n for i in range(120):\n with open(\"Data_\" + str(fileNumber) + \".csv\") as csvFile:\n csvReader = csv.reader(csvFile, delimiter = ',')\n counter = 0\n \n sequenceText = \"\" #This will store the full keypress sequence that will be stored in the new text file.\n \n for row in csvReader:\n if counter == 0:\n counter += 1\n\n else:\n sequenceText += row[3] #This appends the keypress to the variable sequenceText\n\n \n textFile = open(textFilename,\"a\") #Opens the text file\n textFile.write(sequenceText + \"\\n\") #Writes keypress sequence to the text file\n textFile.close()\n\n if fileNumber % 6 == 0 or fileNumber in firstGamesList:\n similarityFile = open(similarityFilename, \"a\")\n similarityFile.write(sequenceText + \"\\n\")\n similarityFile.close()\n \n fileNumber += 1 #This increments along with the data file number\n\n\n\n#============================================================================\n#============================= VARIABLES USED ===============================\n#============================================================================\n\n#sequences = List of all of the sequences of the first and last games of all participants (40 sequences)\n#userFirstGames = List of all sequences from the first games (20 sequences)\n#userLastGames = List of all sequences from the last games (20 sequences)\n#firstAndLastGamePairList = List of first and last game pairings for each participant (20 pairs)\n#firstSequence = Stores the sequence of the first game (used in for loop)\n#lastSequence = Stores the sequence of the last game (used in for loop)\n#shortestLength = Stores an integer value of the shortest length between the pair of sequences\n\nif runSecondPart == True:\n with open('similarity_sequence.txt', 'r') as f:\n sequences = [line.strip() for line in f]\n\n userFirstGames = sequences[0::2] #This is a list which stores all the first games of all participants\n userLastGames = sequences[1::2] #This is a list which stores all the last games of all participants\n\n firstAndLastGamePairZip = zip(userFirstGames, userLastGames) #Combines the two lists element-wise\n firstAndLastGamePairList = list(firstAndLastGamePairZip) #Converts the zip to a list\n\n #Iterates through each list in the pairing list\n loopCounter = 1\n meanValue = 0\n for alist in firstAndLastGamePairList:\n firstSequence = alist[0] #Sequence of the first game\n firstSequenceLength = len(firstSequence) #Length of sequence of first game\n \n lastSequence = alist[1] #Sequence of the last game\n lastSequenceLength = 
len(lastSequence) #Length of sequence of last game\n\n #Calculates the absolute difference (no negative result) between the two sequence length\n sequenceLengthDifference = abs(firstSequenceLength - lastSequenceLength) \n \n #Identifies the shortest length of sequence between the two given sequences\n shortestLength = 0\n if firstSequenceLength < lastSequenceLength:\n shortestLength = firstSequenceLength\n else:\n shortestLength = lastSequenceLength\n\n #Compares the two sequences character by character and calculates the accuracy (similarity) between the two\n numberOfCorrect = 0 #Counts the total number of matching characters on the sequence\n similarityPercentage = 0 #Stores the percentage of similarity\n for i in range(shortestLength):\n firstSequenceChar = firstSequence[i] #Stores current character of first sequence\n lastSequenceChar = lastSequence[i] #Stores current character of last sequence\n\n #If the current characters are the same (MATCH)\n if firstSequenceChar == lastSequenceChar:\n numberOfCorrect += 1 #Increase the number of correct\n\n #Calculates the percentage of accuracy \n similarityPercentage = (100 / firstSequenceLength) * numberOfCorrect\n similarityPercentage = round(similarityPercentage, 3)\n meanValue += similarityPercentage\n string = \"Participant \" + str(loopCounter) + \" Accuracy : \" + str(similarityPercentage) + \"%\"\n \n #Stores the percentages on a text file\n percentFile = open(percentFilename,\"a\") #Opens the text file\n percentFile.write(string + \"\\n\") #Writes keypress sequence to the text file\n percentFile.close()\n\n loopCounter += 1\n\n #Calculates the mean value of all the percentages and stores result value into the similarity_percentage text file.\n meanValue = round((meanValue / 20), 3)\n meanValueString = \"Mean Value of Percentages : \" + str(meanValue) + \"%\"\n\n percentFile = open(percentFilename,\"a\") #Opens the text file\n percentFile.write(\"\\n\" + meanValueString + \"\\n\") #Writes keypress sequence to the text file\n percentFile.close()\n\n\nif runThirdPart == True:\n with open('sequences.txt', 'r') as f:\n allSequences = [line.strip() for line in f]\n\n #Finds the length of the longest sequence\n longestSequence = 0\n for sequence in allSequences:\n if len(sequence) > longestSequence:\n longestSequence = len(sequence)\n\n #Initialises list of lists\n frequencyList = [[0,0,0,0] for x in range(longestSequence)]\n\n for sequence in allSequences: #Iterates through all sequences\n aCounter = 0\n\n for character in sequence: #Iterates through all characters per sequence\n characterFrequencyList = [0,0,0,0]\n\n #Increments the correct index within the list depending on the keypress\n if character == 'W':\n characterFrequencyList[0] += 1\n\n if character == 'A':\n characterFrequencyList[1] += 1\n\n if character == 'S':\n characterFrequencyList[2] += 1\n\n if character == 'D':\n characterFrequencyList[3] += 1\n \n #Adds the current character frequencies to the main list\n for i in range(4):\n frequencyList[aCounter][i] = frequencyList[aCounter][i] + characterFrequencyList[i]\n \n aCounter += 1\n\n frequencyFile = open(frequencyFilename,\"a\") #Opens the text file\n frequencyFile.write(str(frequencyList)) #Writes keypress frequency to the text file\n frequencyFile.close()\n print(frequencyList)\n\n","sub_path":"Data Analyser/mainReader.py","file_name":"mainReader.py","file_ext":"py","file_size_in_byte":7810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"631812861","text":"\"\"\"\nIntersection of Two Linked Lists\nhttps://leetcode.com/problems/intersection-of-two-linked-lists/\n\nTime O(n)\nSpace O(1)\n\"\"\"\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:\n lenA = self.lenght(headA)\n lenB = self.lenght(headB)\n \n nodeA, nodeB = headA, headB\n if lenA > lenB:\n nodeA = self.moveForward(headA, lenA - lenB)\n elif lenB > lenA:\n nodeB = self.moveForward(headB, lenB - lenA) \n \n while nodeA and nodeB:\n if nodeA == nodeB:\n return nodeA\n nodeA = nodeA.next\n nodeB = nodeB.next\n \n return None\n \n def lenght(self, head) -> int:\n lenght = 0\n node = head\n while node:\n lenght += 1\n node = node.next\n return lenght\n \n def moveForward(self, head, skip) -> ListNode:\n node = head\n while node and skip > 0:\n skip -= 1\n node = node.next\n return node\n","sub_path":"python/intersection_of_two_linked_lists.py","file_name":"intersection_of_two_linked_lists.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"502183196","text":"#\r\n# Copyright 2013 Intel Corp\r\n#\r\n# Authors: Lianhao Lu \r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\r\n# not use this file except in compliance with the License. You may obtain\r\n# a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\r\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\r\n# License for the specific language governing permissions and limitations\r\n# under the License.\r\n\r\nfrom ceilometer.hardware.pollsters import cpu\r\nfrom ceilometer import sample\r\nfrom ceilometer.tests.hardware.pollsters import base\r\n\r\n\r\nclass TestCPUPollsters(base.TestPollsterBase):\r\n def test_1min(self):\r\n self._check_get_samples(cpu.CPULoad1MinPollster,\r\n 'hardware.cpu.load.1min',\r\n 0.99, sample.TYPE_GAUGE,\r\n expected_unit='process')\r\n\r\n def test_5min(self):\r\n self._check_get_samples(cpu.CPULoad5MinPollster,\r\n 'hardware.cpu.load.5min',\r\n 0.77, sample.TYPE_GAUGE,\r\n expected_unit='process')\r\n\r\n def test_15min(self):\r\n self._check_get_samples(cpu.CPULoad15MinPollster,\r\n 'hardware.cpu.load.15min',\r\n 0.55, sample.TYPE_GAUGE,\r\n expected_unit='process')\r\n","sub_path":"CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Demo/ceilometer/ceilometer/tests/hardware/pollsters/test_cpu.py","file_name":"test_cpu.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"178147668","text":"import psycopg2\nimport sys\n\n#Establish connection with the database. \n#In case the database 'unbabel' doesn't exist, print message and stop execution.\ntry:\n\tconn = psycopg2.connect(database=\"postgres\", user = \"postgres\",\n password = \"root\", host = \"127.0.0.1\", port = \"5432\")\nexcept psycopg2.OperationalError:\n\tsys.exit('\\n\\n\\n THERE IS NO DATABASE \"postgres\". PLEASE CREATE A DATABASE WITH THAT NAME. \\n\\n\\n')\n\n#Create a cursor to execute querries.\ncur = conn.cursor()\n\n#Create a table with multiple columns. 
Print error message in case table is already created.\ntry:\n\n\t\tcur.execute('''CREATE TABLE company\n (Transaction_ID TEXT PRIMARY KEY,\n Created_at DATE,\n Start_Date DATE,\n End_Date DATE,\n Amount_USD REAL,\n Status CHAR(20),\n Revenue_Type CHAR(3) );''')\nexcept psycopg2.ProgrammingError :\n\t\tprint('\\n TABLE \"COMPANY\" ALREADY EXISTS \\n')\n\n#Commit the alteration to the database and close the open connections.\nconn.commit()\ncur.close()\nconn.close()\n","sub_path":"create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"16302680","text":"# coding=utf-8\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport gettext\n\ngettext.install('messages', './locale')\n\nimport sys\n\nimport queries as q\nfrom ui import choice_question\n\n\ndef main(database, command, *args):\n commands = {\n 'import': import_file,\n 'list': list_words,\n 'top': list_top,\n 'cram': cram,\n }\n q.use(database)\n commands[command](*args)\n\ndef import_file(filename):\n ''' Import words from file\n with pairs separated by \" - \" (dash with spaces)\n '''\n print(_('Loading \"%s\"' % filename))\n with open(filename, 'r', encoding='utf-8') as f:\n print(_('Loaded %s words' % q.add_words_list(f)))\n\ndef list_words():\n for row in q.list_words():\n print(row[0], '-', row[1])\n\ndef list_top(count):\n for row in q.select_remembered(count):\n print(*row)\n\ndef cram(count):\n words_ids = q.select_most_hard_words_ids(count)\n for word_id in words_ids:\n variant_ids = q.get_translation_variants(word_id, 4)\n word, t = q.get_word_and_translation(word_id)\n variants = [q.get_word_and_translation(id)[1] for id in variant_ids]\n answer = choice_question(word, variants)\n answer_id = variant_ids[answer]\n q.record_answer(word_id, variant_ids[answer])\n if answer_id == word_id:\n print(_('Correct!'))\n else:\n print(_('Wrong! 
\"{word}\" is correctly translated as \"{translation}\"').format(word=word, translation=t))\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 3:\n print(_('You need to pass database name and command'))\n sys.exit(1)\n main(*sys.argv[1:])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"189960443","text":"from concurrent.futures import TimeoutError\r\nfrom google.cloud import pubsub_v1, bigquery\r\nimport os\r\nimport json\r\nimport datetime as dt\r\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"C:/Users/62895/Desktop/week4/key.json\"\r\n\r\nproject_id = \"sunlit-amulet-318910\"\r\ndataset_id = \"week_4\"\r\nsubscription_id = \"subscription-week4subscription\"\r\ntimeout = 30.0\r\nos.getenv('GOOGLE_APPLICATION_CREDENTIALS')\r\nsubscriber = pubsub_v1.SubscriberClient()\r\nsubscription_path = subscriber.subscription_path(project_id, subscription_id)\r\n\r\ndef callback(message):\r\n str_data = str(message.data).replace(\"\\\\\", \"\").replace(\r\n \"rn\", \"\").replace(\"b\\'b\\'\", \"\").replace(\"\\'\\'\", \"\").replace(\" \", \"\")\r\n json_data = json.loads(str_data)\r\n insert = [obj for obj in json_data[\"activities\"]\r\n if obj[\"operation\"] == \"insert\"]\r\n delete = [obj for obj in json_data[\"activities\"]\r\n if obj[\"operation\"] == \"delete\"]\r\n for items in range(len(insert)):\r\n insert[items]['col_names'].append('time')\r\n insert[items]['col_types'].append('STRING')\r\n insert[items]['col_values'].append(str(dt.datetime.now()))\r\n for k in range(len(insert[items][\"col_types\"])):\r\n if insert[items]['col_types'][k] == 'TEXT':\r\n insert[items]['col_types'][k] = 'STRING'\r\n for items2 in range(len(delete)):\r\n for l in delete[items2]['old_value']['col_types']:\r\n if l == 'TEXT':\r\n l = 'STRING'\r\n client = bigquery.Client()\r\n for j in range(len(insert)):\r\n tables = client.list_tables(project_id + \".week_4\")\r\n bq_table_name = [table.table_id for table in tables]\r\n table_id = project_id + \".week_4.\" + insert[j][\"table\"]\r\n ins = {}\r\n for i in range(len(insert[j][\"col_names\"])):\r\n ins[insert[j][\"col_names\"][i]] = insert[j][\"col_values\"][i]\r\n rows_to_insert = [ins]\r\n if insert[j]['table'] in bq_table_name:\r\n if client.insert_rows_json(table_id, rows_to_insert)[0]['errors'][0]['message'] == f\"no such field: {client.insert_rows_json(table_id, rows_to_insert)[0]['errors'][0]['location']}.\":\r\n table = client.get_table(table_id)\r\n new_column = client.insert_rows_json(table_id, rows_to_insert)[0]['errors'][0]['location']\r\n col_names = insert[j][\"col_names\"]\r\n col_types = insert[j][\"col_types\"]\r\n original_schema = table.schema\r\n new_schema = original_schema[:]\r\n new_schema.append(bigquery.SchemaField(\r\n new_column, col_types[col_names.index(new_column)]))\r\n table.schema = new_schema\r\n alter_table_query = (f'''\r\n ALTER TABLE {project_id}.{dataset_id}.{table_id}\r\n {table.schema}''')\r\n client.query(alter_table_query).result()\r\n else:\r\n create_tbl_query = (f'''\r\n CREATE TABLE IF NOT EXISTS {project_id}.{dataset_id}.{table_id}\r\n ({original_schema})\r\n ''')\r\n client.query(create_tbl_query).result()\r\n else:\r\n schema = []\r\n for i in range(len(insert[j][\"col_names\"])):\r\n field = bigquery.SchemaField(\r\n insert[j][\"col_names\"][i], insert[j][\"col_types\"][i])\r\n schema.append(field)\r\n table = bigquery.Table(table_id, schema=schema)\r\n table = 
client.create_table(table)\r\n print(\r\n f\"Created table {table.project}.{table.dataset_id}.{table.table_id}\")\r\n errors = client.insert_rows_json(\r\n f\"{table.project}.{table.dataset_id}.{table.table_id}\", rows_to_insert)\r\n if errors == []:\r\n print(\"New rows have been added.\")\r\n else:\r\n print(\"Encountered errors while inserting rows: {}\".format(errors))\r\n for j in range(len(delete)):\r\n tables = client.list_tables(project_id + \".week_4\")\r\n bq_table_name = [table.table_id for table in tables]\r\n table_id = project_id + \".week_4.\" + delete[j][\"table\"]\r\n cond = []\r\n for i in range(len(delete[j][\"old_value\"][\"col_names\"])):\r\n if str(delete[j][\"old_value\"][\"col_types\"][i]) == \"TEXT\":\r\n where = str(delete[j][\"old_value\"][\"col_names\"][i]) + \\\r\n \" = '\" + str(delete[j][\"old_value\"][\"col_values\"][i]) + \"'\"\r\n cond.append(where)\r\n else:\r\n where = str(delete[j][\"old_value\"][\"col_names\"][i]) + \\\r\n \" = \" + str(delete[j][\"old_value\"][\"col_values\"][i])\r\n cond.append(where)\r\n condition = ' AND '.join(cond)\r\n delete_query = f\"\"\" DELETE FROM `{table_id}` WHERE {condition}\"\"\"\r\n if delete[j]['table'] in bq_table_name:\r\n query_job = client.query(delete_query)\r\n query_job.result()\r\n print(f\"Deletion on {delete[j]['table']} success.\")\r\n else:\r\n print(f\"Table does not exist\")\r\n\r\n\r\nstreaming_pull_future = subscriber.subscribe(\r\n subscription_path, callback=callback)\r\nprint(f\"Listening for messages on {subscription_path}..\\n\")\r\nwith subscriber:\r\n try:\r\n streaming_pull_future.result(timeout=timeout)\r\n except TimeoutError:\r\n streaming_pull_future.cancel()","sub_path":"week4/gatekeeper.py","file_name":"gatekeeper.py","file_ext":"py","file_size_in_byte":5384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"614200785","text":"from .exceptions import *\nimport random\n\n\nclass GuessAttempt(object):\n def __init__(self, letter, hit=None, miss=None):\n if hit and miss:\n raise InvalidGuessAttempt('Your guess is not valid')\n \n self.letter = letter\n self.hit = hit\n self.miss = miss\n \n def is_hit(self):\n if self.hit:\n return True\n return False\n\n def is_miss(self):\n if self.miss:\n return True\n return False\n\nclass GuessWord(object):\n def __init__(self, answer):\n self.answer = answer\n self.masked = '*'*len(self.answer)\n if len(self.answer) == 0 or len(self.masked)==0:\n raise InvalidWordException()\n \n def uncover_word(self, letter):\n \n new_word = self.masked\n letter = letter.lower()\n self.answer = self.answer.lower()\n\n for idx, char in enumerate(self.answer):\n if char == letter:\n new_word = self.masked[:idx] + char + self.masked[idx+1:]\n self.masked = new_word\n \n self.masked = new_word\n \n return self.masked\n \n def perform_attempt(self, letter):\n \n if len(letter)>1:\n raise InvalidGuessedLetterException()\n if len(self.answer) != len(self.masked):\n raise InvalidWordException()\n \n if letter.lower() in self.answer.lower():\n attempt = GuessAttempt(letter, hit = True)\n self.masked = self.uncover_word(letter)\n else:\n attempt = GuessAttempt(letter, miss = True)\n return attempt\n \n self.answer = word\n self.masked -'*' * len(word)\n \n\n\nclass HangmanGame(object): \n WORD_LIST = ['rmotr', 'python', 'awesome']\n \n \n def __init__(self, word_list = WORD_LIST, number_of_guesses = 5):\n self.word_list = word_list\n self.remaining_misses = number_of_guesses\n self.previous_guesses = []\n selected_word= 
self.select_random_word(word_list)\n self.word = GuessWord(selected_word)\n \n def guess(self, letter):\n \n if self.is_finished():\n raise GameFinishedException()\n \n self.previous_guesses.append(letter.lower()) \n attempt = self.word.perform_attempt(letter)\n \n if attempt.is_miss():\n self.remaining_misses -=1\n if self.remaining_misses < 1:\n raise GameLostException()\n \n if self.is_won():\n raise GameWonException()\n \n return attempt\n \n @classmethod\n def select_random_word(cls, word_list):\n if not word_list:\n raise InvalidListOfWordsException()\n return random.choice(word_list)\n \n def is_won (self):\n if self.word.answer == self.word.masked:\n return True\n return False\n \n def is_lost(self):\n if self.remaining_misses ==0 and self.word.answer != self.word.masked:\n return True\n return False\n \n def is_finished(self):\n if self.is_lost() or self.is_won():\n return True\n return False\n","sub_path":"hangman/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"399282465","text":"from __future__ import division\nimport json\nimport locale\nimport datetime\nimport time\nimport random\nimport logging\nimport web\nfrom math import ceil\n\nimport random\nimport string\n\nfrom utils.decorator import authentication\nfrom utils.error import RESTError\nfrom base import Base, NBase\nfrom dbapi.EventClient import EventClient\n\ndef randomStr(size = 6, chars=string.ascii_letters + string.digits):\n return ''.join(random.choice(chars) for x in range(size))\n\nclass EventBase(Base):\n def __init__(self):\n logging.debug('EventBase.__init__')\n self.eventobj = EventClient()\n\nclass EventCount(EventBase):\n @authentication\n def POST(self, **kwagrs):\n logging.debug('EventCount.POST')\n web.header('Content-Type', 'application/json')\n param = self.validateParams()\n ret = {'time': [], 'data': [[]]}\n count = self.eventobj.EventCountOverTime(param, param['collector'])\n if not count:\n count = []\n for line in count:\n m = int(line['start_time'])\n ret['time'].append(datetime.datetime.fromtimestamp(m).strftime(self.fmt))\n ret['data'][0].append(int(line['event_count']))\n return json.dumps(ret)\n\nclass EventService(EventBase):\n @authentication\n def POST(self, **kwagrs):\n logging.debug('EventService.POST')\n web.header('Content-Type', 'application/json')\n param = self.validateParams()\n count = self.eventobj.EventCountByServices(param, param['collector'])\n if not count:\n count = []\n ret = []\n for line in count:\n ret.append({'value': line['service_name'], 'number': line['event_count']})\n return json.dumps(ret)\n\n##class EventSeverity(EventBase):\n## @authentication\n## def POST(self, **kwagrs):\n## logging.debug('EventSeverity.POST')\n## web.header('Content-Type', 'application/json')\n## param = self.validateParams()\n## count = self.eventobj.EventCountBySeverity(param, param['collector'])\n## if not count:\n## count = []\n## ret = []\n## for line in count:\n## ret.append({'value': line['severity_level'], 'number': line['event_count']})\n## return json.dumps(ret)\n\nclass EventType(EventBase):\n @authentication\n def POST(self, **kwagrs):\n logging.debug('EventType.POST')\n web.header('Content-Type', 'application/json')\n param = self.validateParams()\n count = self.eventobj.EventCountByType(param, param['collector'])\n if not count:\n count = []\n ret = []\n for line in count:\n ret.append({'value': line['event_type'], 'number': line['event_count']})\n 
return json.dumps(ret)\n\nclass EventServer(EventBase):\n @authentication\n def POST(self, **kwagrs):\n logging.debug('EventServer.POST')\n web.header('Content-Type', 'application/json')\n param = self.validateParams()\n count = self.eventobj.EventCountByServer(param, param['collector'])\n if not count:\n count = []\n ret = []\n for line in count:\n ret.append({'value': line['server_ip'], 'number': line['event_count']})\n return json.dumps(ret)\n\nclass EventLog(NBase):\n def __init__(self):\n logging.debug('EventLog.__init__')\n self.eventobj = EventClient()\n\n @authentication\n def POST(self, **kwargs):\n logging.debug('EventLog.POST')\n web.header('Content-Type', 'application/json')\n from utils import schema\n param = self.validateParams(schema.EVENT_LOG_POST_SCHEMA)\n logging.debug('before query log: %d' % time.time())\n param['collector_id'] = param['collector']\n param['service_id'] = param['service']\n logs = self.eventobj.LogQuery(param)\n logging.debug('after query log: %d' % time.time())\n if not logs:\n logs = []\n total = 0 # total pages\n records = self.eventobj.GetRecordCounts(param) # total rows\n page = param['page_no']\n page_limits = param['page_limits']\n logging.info('records: %r, page: %r, page_limits: %r' % (records, page, page_limits))\n if records > 0:\n total = ceil(records / page_limits)\n if page > total:\n page = total\n ret = {\n 'total': total,\n 'page': page,\n 'records': records,\n 'rows': []\n }\n keys = ['event_id', 'event_type', 'event_time', 'event_name', 'service_name', 'source_ip', 'service_id', 'service_host']\n #keys = ['event_time', 'event_name', 'severity_level', 'service_id', 'service_name', 'event_counts']\n for log in logs:\n ret['rows'].append(dict((key, log[key]) for key in keys))\n #logging.info('log length: %d' % len(ret))\n logging.debug('after process log: %d' % time.time())\n ret['rows'].reverse()\n ret = json.dumps(ret)\n logging.debug('after dumps log: %d' % time.time())\n return ret\n\nclass EventLogDetail(object):\n def __init__(self):\n logging.debug('EventLogDetail.__init__')\n self.eventobj = EventClient()\n\n @authentication\n def GET(self, cid, sid, eid, **kwargs):\n logging.debug('EventLogDetail.GET')\n logging.info('collector id: %s, service id: %s, event id: %s' % (cid, sid, eid))\n ret = self.eventobj.LogDetails(int(eid), int(cid), int(sid))\n if not ret:\n raise RESTError('INFO.404.ITEM_NOT_FOUND')\n return json.dumps(ret)\n","sub_path":"backend/handlers/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":5536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"645865638","text":"from django.shortcuts import render\nfrom .forms import LostForm, FoundForm\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n\n\ndef home(request):\n template = 'home.html'\n context = locals()\n return render(request, template, context)\n\n\ndef lost(request):\n form = LostForm(request.POST or None)\n confirm_message = None\n if form.is_valid():\n name = form.cleaned_data['name']\n description = form.cleaned_data['description']\n subject = '[ Lost Found ] Test Subject'\n message = '%s %s' % (description, name)\n email_from = form.cleaned_data['email']\n email_to = ['raj75092@gmail.com']\n send_mail(subject, message, email_from, email_to, fail_silently=True)\n confirm_message = \"Your Request has Processed. 
We will be right back to you.\"\n        form = None\n    context = {'form': form, 'confirm_message': confirm_message, }\n    template = 'lost.html'\n    return render(request, template, context)\n\n\ndef found(request):\n    form = FoundForm(request.POST or None)\n\n    if form.is_valid():\n        print(request.POST)\n    template = 'found.html'\n    context = locals()\n    return render(request, template, context)\n","sub_path":"box/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"18004802","text":"\ndef main():\n    print(one_away(\"ABCD\", \"ABD\"))\n    print(one_away(\"bbcd\", \"bcd\"))\n    print(one_away(\"ABCD\", \"ABFG\"))\n\n\ndef one_away(string_1, string_2):\n    if string_1.__len__() + 1 == string_2.__len__():\n        return added_char(string_2, string_1)\n\n    if string_1.__len__() - 1 == string_2.__len__():\n        return added_char(string_1, string_2)\n\n    if string_1.__len__() == string_2.__len__():\n        return replaced_char(string_1, string_2)\n\n    return False\n\n\ndef replaced_char(string_1, string_2):\n    iterator = 0\n    is_replaced = False\n    while iterator < string_1.__len__():\n        if string_1[iterator] != string_2[iterator]:\n            if is_replaced:\n                return False\n            is_replaced = True\n        iterator += 1\n    return True\n\n\n# string_2 must be bigger than string_1\n\n\ndef added_char(string_1, string_2):\n    index_1 = 0\n    index_2 = 0\n\n    # walk both strings in step with logical 'and' (bitwise '&' binds tighter\n    # than '<' and would compare the wrong values)\n    while index_1 < string_1.__len__() and index_2 < string_2.__len__():\n        if string_1[index_1] != string_2[index_2]:\n            if index_1 != index_2:\n                return False\n            else:\n                index_2 += 1\n        else:\n            index_1 += 1\n            index_2 += 1\n    return True\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"c1e1-5.py","file_name":"c1e1-5.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"469777860","text":"import numpy as np\nfrom itertools import product\n\n\nclass Backgammon(object):\n    \"\"\"docstring for Game\"\"\"\n\n    def __init__(self, name=\"Backgammon\", start=1):\n        self.name = name\n        self.state = [0 for _ in range(28)]\n        self.side = 0\n        self.pair_counter = 0\n        self.result = dict(finished=False, msg=None)\n        self.start = start\n        self.state[start + 0] = 2\n        self.state[start + 11] = 5\n        self.state[start + 16] = 3\n        self.state[start + 18] = 5\n\n        self.state[start + 23] = -2\n        self.state[start + 12] = -5\n        self.state[start + 7] = -3\n        self.state[start + 5] = -5\n\n    def get_number_of_out_pots(self, side):\n        if side == 1:\n            return self.state[0]\n        if side == -1:\n            return self.state[24]\n        raise Exception(\"{} side is invalid\".format(side))\n\n    def turn_manager(self, dice_pair=None):\n        self.test()\n        if self.side == 0:\n            self.side = np.random.choice([-1, 1])\n            print(\"player:\", self.side, \"starts the game.\")\n            return\n        if dice_pair[0] == dice_pair[1]:\n            if self.pair_counter == 0:\n                print(\"player: \", self.side, \"had pair, it is his turn again.\")\n                self.pair_counter += 1\n                return\n            else:\n                print(\"player: \", self.side, \"turn for pair finished.\")\n                self.pair_counter = 0\n        self.side *= -1\n        print(\"It is player: \", self.side, \"turn.\")\n        return\n\n    def move(self, side, movements, current_state=''):\n        for movement in movements:\n            if len(movement) == 1:\n                continue\n            position = movement[1]\n            if side * self.state[position] < 0:\n                self.hit(-side, position)\n            self.state[movement[0]] -= side\n            self.state[position] += side\n\n    def hit(self, side, position):\n        idx = 0 if side == 1 else 25\n        print(\"hitting:\", side, \"at:\", 
position, \"moving to:\", idx)\n self.state[idx] += side\n self.state[position] -= side\n\n def check_status(self, side):\n idx = self.get_outbox_index(side)\n if self.state[idx] != 0:\n return 'out'\n index_range = range(self.start + 18) if side == 1 else range(self.start + 6, 26)\n for i in index_range:\n if side * self.state[i] > 0:\n return 'normal'\n return 'final'\n\n def check_moves(self, movements, side, status):\n moves = []\n for movement in movements:\n if movement[0][0] == movement[1][0]:\n if side * self.state[movement[0][0]] > 1:\n moves.append(movement)\n else:\n moves.append([movement[0], [0]])\n moves.append([[0], movement[1]])\n else:\n moves.append(movement)\n movements = moves\n if status in ['final', 'normal']:\n return movements\n idx = self.get_outbox_index(side)\n entrace_moves = list(filter(lambda x: x[0][0] == idx or x[1][0] == idx, movements))\n if side * self.state[idx] == 1:\n return entrace_moves\n return list(filter(lambda x: x[0][0] == idx and x[1][0] == idx, entrace_moves))\n\n def possible_moves(self, side, dice_result, current_state):\n final_result = []\n for dice in dice_result:\n result = []\n for i, state in enumerate(current_state[:-2]):\n if not side * state > 0:\n continue\n # print(\"possible for dice:\", dice, \"move at: \", i)\n checking_possition = i + side * dice\n if not 0 < checking_possition < 25:\n continue\n if side * current_state[checking_possition] < -1:\n continue\n # print(\"we can move to\", checking_possition)\n result.append([i, checking_possition])\n final_result.append(result if len(result) > 0 else [[0]])\n return list(product(final_result[0], final_result[1]))\n\n def possible_final_moves(self, side, dice_result, current_state):\n final_result = []\n for dice in dice_result:\n result = []\n for i, state in enumerate(current_state[:-2]):\n if not side * state > 0:\n continue\n checking_possition = i + side * dice\n if side == 1:\n if i > 24:\n continue\n if checking_possition > 24:\n checking_possition = 26\n else:\n if checking_possition < 1:\n checking_possition = 27\n if side * current_state[checking_possition] < -1:\n continue\n result.append([i, checking_possition])\n final_result.append(result if len(result) > 0 else [[0]])\n return list(product(final_result[0], final_result[1]))\n\n def get_outbox_index(self, side):\n return 0 if side == 1 else 25\n\n def get_movements(self, side, dice_pair):\n status = self.check_status(side)\n movements = self.possible_moves(side=side, dice_result=dice_pair, current_state=self.state)\n if status == 'final':\n movements = self.possible_final_moves(side=side, dice_result=dice_pair, current_state=self.state)\n # return self.check_moves(movements, side, status)\n return self.check_moves(movements, side, status)\n\n def test(self):\n if self.state[26] == 15:\n self.result = dict(finished=True, msg=\"player 1 won!\")\n # raise Exception(\"player 1 won!\")\n if self.state[27] == -15:\n self.result = dict(finished=True, msg=\"player -1 won!\")\n # raise Exception(\"player -1 won!\")\n if self.state[0] < 0:\n self.result = dict(finished=True, msg=\"-1 side at: 0\")\n # raise Exception(\"-1 side at: 0\")\n if self.state[25] > 0:\n self.result = dict(finished=True, msg=\"1 side at: 25\")\n # raise Exception(\"1 side at: 25\")\n for side in [-1, 1]:\n guys = filter(lambda x: x * side > 0, self.state)\n if side * sum(guys) != 15:\n self.result = dict(finished=True, msg=\"how many guys for: {} ??\")\n # raise Exception(\"how many guys for: {} ??\".format(side))\n\n def 
get_number_of_single_pots(self, side):\n return len(list(filter(lambda x: x * side == 1, self.state[self.start: self.start + 24])))\n\n def rate_current_side_state(self, side):\n out_pots = abs(self.get_number_of_out_pots(side=side))\n # single\n","sub_path":"backgammon/game_manager.py","file_name":"game_manager.py","file_ext":"py","file_size_in_byte":6712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"414652897","text":"from django.conf.urls import url\nfrom . import views\n\napp_name = 'book'\nurlpatterns = [\n url(r'^$', views.index, name=\"index\"),\n url(r'^upload/$', views.upload_file, name=\"upload\"),\n url(r'^upload_book/$', views.upload_book, name=\"upload_book\"),\n url(r'^add_author/$', views.add_author, name=\"add_author\"),\n url(r'^list_book/$', views.list_book, name=\"list_book\"),\n url(r'^list_author/$', views.list_author, name=\"list_author\"),\n url(r'^all_book/$', views.all_book, name=\"all_book\"),\n url(r'^download$', views.download, name=\"download\"),\n ]\n","sub_path":"backend/python/framework/django/mysite/book/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"468563349","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\n'''\r\n The MIT License:\r\n\r\n Permission is hereby granted, free of charge, to any person obtaining a copy of this software and \r\n associated documentation files (the \"Software\"), to deal in the Software without restriction, \r\n including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, \r\n and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, \r\n subject to the following conditions:\r\n\r\n The above copyright notice and this permission notice shall be included in all copies or substantial \r\n portions of the Software.\r\n\r\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT \r\n NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
\r\n IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\r\n WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION \r\n WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r\n\r\n Copyright 2019,2020 Ali Erkan IMREK \r\n'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport os\r\nimport importlib\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass KBConfig:\r\n\r\n\r\n\r\n\tdef __init__(self, fn = \"default\", path = \"\"):\r\n\t\tfilename = fn\r\n\t\tif path == \"\":\r\n\t\t\tpath, filename = os.path.split(os.path.abspath(__file__))\r\n\t\t\tself._fn = os.path.join(path, fn+\".conf\")\r\n\t\telse:\r\n\t\t\tself._fn = os.path.join(path, fn+\".conf\")\r\n\t\ttry:\r\n\t\t\tself._checklist = __import__(filename)\r\n\t\texcept:\r\n\t\t\tself._checklist = None\r\n\t\tself.reload()\r\n\t\t\r\n\t\t\r\n\t\r\n\t\r\n\tdef _load(self):\r\n\t\ttry:\r\n\t\t\tfh = open(self._fn, \"r\")\r\n\t\t\tlines = fh.read().splitlines()\r\n\t\t\traw = []\r\n\t\t\tfor line in lines:\r\n\t\t\t\tif len(line.strip()) > 0:\r\n\t\t\t\t\tif line.strip()[0] != \"#\":\r\n\t\t\t\t\t\traw.append(line.strip())\r\n\t\texcept OSError:\r\n\t\t\traise\r\n\t\texcept:\r\n\t\t\traise Exception(\"File error\", self._fn)\r\n\t\treturn(raw)\r\n\t\t\r\n\t\t\t\r\n\t\t\t\t\r\n\t\t\r\n\tdef _build(self, raw):\r\n\t\tconf = {}\r\n\t\tsection = \"\"\r\n\t\tclass obj:\r\n\t\t\tpass\r\n\r\n\t\tfor l in raw:\r\n\t\t\tif len(l.split(\"=\")) == 2:\tv=True\r\n\t\t\telse:\tv=False\r\n\t\t\tif l[0] == \"[\" and l[len(l)-1] == \"]\":\ts=True\r\n\t\t\telse:\ts=False\r\n\r\n\t\t\tif v and not s and section != \"\":\r\n\t\t\t\tvar = l.split(\"=\")[0].strip()\r\n\t\t\t\tval = l.split(\"=\")[1].strip()\r\n\t\t\t\tself._makevar(section, var, val)\r\n\t\t\telif s and not v:\r\n\t\t\t\tsection=l[1:len(l)-1]\r\n\t\t\t\tif section not in self._checklist.__dir__():\r\n\t\t\t\t\traise Exception(\"Section error\", section)\t\r\n\t\t\t\tif section not in self.__dir__():\r\n\t\t\t\t\tvars(self)[section] = obj()\r\n\t\t\telse:\r\n\t\t\t\traise Exception(\"Value error\", l)\r\n\r\n\t\tfor s in self._checklist.__dir__():\r\n\t\t\tif s[0] != \"_\" and type(vars(self._checklist)[s]) == \"dict\":\r\n\t\t\t\tfor v in vars(self._checklist)[s].keys():\r\n\t\t\t\t\tif v not in vars(self)[s].__dir__():\r\n\t\t\t\t\t\tself._makevar(s, v, vars(self._checklist)[s][v][\"default\"])\r\n\r\n\r\n\t\r\n\r\n\t\r\n\r\n\tdef _makevar(self, s, var, val):\r\n\t\tvalid, value = self._check(s, var, val)\r\n\t\tif valid:\r\n\t\t\tvars(vars(self)[s])[var] = value\r\n\t\telse:\r\n\t\t\traise Exception(\"Invalid value\", str(var) + \" \" +str(value))\t\t\t\r\n\t\r\n\t\r\n\t\r\n\tdef _check(self, s, var, val):\r\n\t\tif self._checklist == None:\r\n\t\t\treturn(True, val)\r\n\t\telse:\r\n\t\t\tif vars(self._checklist)[s][var][\"values\"] != []:\r\n\t\t\t\tif val not in vars(self._checklist)[s][var][\"values\"]:\r\n\t\t\t\t\treturn(False, vars(self._checklist)[s][var][\"values\"])\r\n\t\t\ttry:\r\n\t\t\t\tif vars(self._checklist)[s][var][\"type\"] == \"int\":\r\n\t\t\t\t\tval = int(val)\r\n\t\t\texcept:\r\n\t\t\t\treturn(False, vars(self._checklist)[s][var][\"type\"])\r\n\t\t\tif vars(self._checklist)[s][var][\"type\"] == \"int\":\r\n\t\t\t\tif vars(self._checklist)[s][var][\"range\"] != []:\r\n\t\t\t\t\tr1 = vars(self._checklist)[s][var][\"range\"][0]\r\n\t\t\t\t\tr2 = vars(self._checklist)[s][var][\"range\"][1]\r\n\t\t\t\t\tif val not in list(range(r1, r2)):\r\n\t\t\t\t\t\treturn(False, 
vars(self._checklist)[s][var][\"range\"])\r\n\t\t\treturn(True, val)\r\n\t\treturn(False, \"\")\r\n\t\t\r\n\r\n\t\t\r\n\tdef reload(self):\r\n\t\traw = self._load()\r\n\t\tself._build(raw)\r\n\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\ndef test():\r\n\timport time\r\n\tt = KBConfig(\"testconf\")\r\n\tprint(t.TESTSECTION.testvar)\r\n\tprint(t.TESTSECTION.testvar2 +5)\r\n\ttime.sleep(10)\r\n\tt.reload()\r\n\tprint(t.TESTSECTION2.testvar3)\r\n\t\r\n\r\n\t\r\n#test()","sub_path":"src/rpct/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":4252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"615974606","text":"\"\"\"\n8-7. Album: Write a function called make_album() that builds a dictionary\ndescribing a music album. The function should take in an artist name and an\nalbum title, and it should return a dictionary containing these two pieces of\ninformation. Use the function to make three dictionaries representing different\nalbums. Print each return value to show that the dictionaries are storing the\nalbum information correctly.\nUse None to add an optional parameter to make_album() that allows you to\nstore the number of songs on an album. If the calling line includes a value for\nthe number of songs, add that value to the album’s dictionary. Make at least\none new function call that includes the number of songs on an album.\n\"\"\"\nif __name__ == '__main__':\n def make_album(artist_name, album_title, count=None):\n album = {\n 'artist_name': artist_name,\n \"album_title\": album_title\n }\n if count:\n album.update({\"count\": count})\n return album\n print(make_album(\"Red Hot Chili Peppers\", \"Stadium Arcadium\"), make_album(\"Nirvana\", \"Nevermind\", 13),\n make_album(\"Heymoonshakers\", \"Noir\"), sep=\"\\n\")\n","sub_path":"Chapter 8/8.7.py","file_name":"8.7.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"281955086","text":"from skimage import io\n\nimport torch \nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\n\nimport torchvision\nfrom torchvision import transforms\n\nfrom ..base.custom_vgg import CustomVGG\nfrom ..utils.dataset_loader import WikiartDataset\nfrom .transform_image import Rescale, RandomCrop\n\ndef train(batch, model, optimizer):\n model.train()\n optimizer.zero_grad()\n output = model(batch['image'].float()) \n loss = F.multilabel_soft_margin_loss(output, batch['artist'].float())\n loss.backward()\n optimizer.step()\n return loss.item()\n\n\ndef test(batch, model):\n model.test()\n output = model(batch['image'].float())\n loss = F.nll_loss(output, batch['artist'].float())\n return loss.item()\n\n\nif __name__ == '__main__':\n # parameters\n min_dim = 225\n num_epochs = 1\n display_step = 100\n\n wikiart_preprocessing = transforms.Compose([Rescale(2*min_dim), \n RandomCrop(min_dim),\n transforms.ToTensor()])\n dataset = WikiartDataset('data/wikiart', io.imread, transform=wikiart_preprocessing)\n dataloader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=1)\n \n model = CustomVGG(torchvision.models.vgg16(), dataset.num_artists)\n optimizer = torch.optim.SGD(model.parameters(), lr=0.001)\n\n # iterate through data set in batches\n for epoch in range(num_epochs):\n for i_step, sampled_batch in enumerate(dataloader):\n train_loss = train(sampled_batch, model, optimizer)\n print('TRAIN', i_step, 
train_loss)\n\n","sub_path":"code/models/artist_predictor.py","file_name":"artist_predictor.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"489685708","text":"import random\nprint(\"Let's play a game!\")\nporta = int(input(\"Choose a door [1] [2] [3]: \"))\nprêmio = random.randrange(1, 4)\npf1 = list(range(1, 4))\npf1.remove(prêmio)\nprint(pf1)\nportafalsa1 = random.choice(pf1)\npf1.remove(portafalsa1)\nportafalsa2 = random.choice(pf1)\nif prêmio == porta:\n    nada = random.choice([portafalsa1,portafalsa2])\n    print(\"There is nothing behind door %i\"%nada)\nif portafalsa1 == porta:\n    nada = portafalsa2\n    print(\"There is nothing behind door %i\"%nada)\nif portafalsa2 == porta:\n    nada = portafalsa1\n    print(\"There is nothing behind door %i\"%nada)\ntroca = input(\"Do you want to switch doors? (Y/N) \")\nif troca == \"Y\":\n    tv = list(range(1,4))\n    # remove both the revealed empty door and the player's current door,\n    # then switch to the one door that remains\n    tv.remove(nada)\n    tv.remove(porta)\n    porta = random.choice(tv)\nif prêmio == porta:\n    print(\"You won!\")\nelse:\n    print(\"You lost :(\")\n\n\n\n","sub_path":"Ignorância Zero/024Exercício2NãoGambiarra.py","file_name":"024Exercício2NãoGambiarra.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"537011749","text":"import pandas as pd\nimport numpy as np\n\nnp.random.seed(0)\nfrom AdaBoost.Algorithm.AdaBoostRandom import AdaBoostRandom\nfrom AdaBoost.Algorithm.AdaBoost_decision_stump import AdaBoost_decison_stump\n\nclass Preprocess:\n    def __init__(self):\n        pass\n\n\n    def _read_data_crx(self, filename):\n        dataset_train = pd.read_csv(filename, sep = '\\s', header=None)\n        dataset_train = dataset_train.values\n        dataset_train = np.where(dataset_train == '?', 0.0, dataset_train)\n        dataset_train = 
pd.DataFrame(dataset_train)\n temp_dataset = pd.get_dummies(dataset_train.iloc[:,:-1], columns=[0,3,4,5,6,8,9,11,12], dtype=float)\n\n dataset = np.c_[temp_dataset.values, dataset_train.iloc[:,-1].values]\n dataset[:,-1] = np.where(dataset[:,-1] == \"+\", 1, -1)\n dataset = np.delete(dataset, [6, 9, 13, 17, 32,], axis=1)\n dataset = dataset.astype(float)\n np.random.shuffle(dataset)\n return dataset\n\n def _read_data_vote(self, filename):\n dataset_train = pd.read_csv(filename, sep = '\\s', header = None)\n dataset_train = dataset_train.values\n dataset_train = np.where(dataset_train == '?', 0.0, dataset_train)\n dataset_train = pd.DataFrame(dataset_train)\n temp_dataset = pd.get_dummies(dataset_train.iloc[:, :-1], dtype=float)\n dataset = np.c_[temp_dataset.values, dataset_train.iloc[:, -1].values]\n dataset[:, -1] = np.where(dataset[:, -1] == \"d\", 1, -1)\n dataset = np.delete(dataset, [0,3,6,9,12,15,18,21,24,27,30,33,36,39,42,45], axis = 1)\n dataset = dataset.astype(float)\n np.random.shuffle(dataset)\n return dataset\n\n def _read_data_spam(self, filename):\n dataset_train = pd.read_csv(filename, header=None)\n dataset = dataset_train.values\n np.random.shuffle(dataset)\n return dataset\n\n\n def normalise_data(self, dataset):\n for i in range(dataset.shape[1] - 1):\n temp = dataset[:, i]\n dataset[:, i] = (temp - np.mean(temp)) / np.std(temp)\n return dataset\n\n\n\n def run(self, normalise, epochs, random_classifier, database):\n with open(\"HW_5.2_vote_random.txt\", \"w\") as f:\n filenames = [\"../data/spambase.data.txt\", \"../data/crx.data\", \"../data/vote.data\"]\n if(database == 1):\n data = self._read_data_crx(filenames[database])\n elif(database == 0):\n data = self._read_data_spam(filenames[database])\n indices = np.where(data[:, -1] == 0)\n data[:, -1][indices] = -1\n elif(database == 2):\n data = self._read_data_vote(filenames[database])\n if(normalise):\n data = self.normalise_data(data)\n test_size = int(data.shape[0] * .1)\n test_data = data[:test_size,:]\n training_data = data[test_size:, :]\n random_percentages = [5,10,15,20,30,50,80]\n for i in range(len(random_percentages)):\n training_size = int(data.shape[0] * (random_percentages[i] / 100))\n random_sampled_training_data = training_data[:training_size,:]\n if (random_classifier):\n boosting = AdaBoostRandom()\n else: boosting = AdaBoost_decison_stump()\n training_accuracy, testing_accuracy = boosting.boost(random_sampled_training_data, test_data, epochs, f)\n f.write(\"\\ntraining accuracy at \" + str(random_percentages[i]) + \" percentage is = \" + str(training_accuracy))\n f.write(\"\\ntesting accuracy at \" + str(random_percentages[i]) + \" percentage is = \" + str(testing_accuracy))\n print(\"training accuracy at \",random_percentages[i],\" percentage is = \", training_accuracy)\n print(\"testing accuracy at \", random_percentages[i], \" percentage is = \", testing_accuracy)\n c = []\n\nboost = Preprocess()\nboost.run(normalise = False,epochs = 101, random_classifier= False, database = 2)\n\n\n\n\n\n\n","sub_path":"AdaBoost/AdaBoost_different_datasets.py","file_name":"AdaBoost_different_datasets.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"260626957","text":"import psycopg2\nimport db.QUERIES as queries\nfrom db.db_config import config\nfrom db.birthday import Birthday\n\n\ndef __create_connection():\n conn = psycopg2.connect(\n database=config['database'],\n user=config['user'],\n 
password=config['password'],\n host=config['host'],\n port=config['port']\n )\n return conn\n\n\ndef create_tables():\n conn = __create_connection()\n cursor = conn.cursor()\n cursor.execute(queries.create_birthday_table)\n conn.commit()\n cursor.close()\n conn.close()\n\n\ndef drop_tables():\n conn = __create_connection()\n cursor = conn.cursor()\n cursor.execute(queries.delete_birthday_table)\n conn.commit()\n cursor.close()\n conn.close()\n\n# #############\n\n\ndef create_birthday(user_id, day, month, year):\n conn = __create_connection()\n cursor = conn.cursor()\n cursor.execute(queries.create_birthday, (user_id, day, month, year))\n conn.commit()\n cursor.close()\n conn.close()\n\n\ndef get_birthday_all():\n conn = __create_connection()\n cursor = conn.cursor()\n cursor.execute(queries.get_birthday_all)\n rows = cursor.fetchall()\n birthdays = []\n for row in rows:\n birthdays.append(Birthday(user_id=row[0], day=row[1], month=row[2], year=row[3]))\n cursor.close()\n conn.close()\n return birthdays\n\n\ndef get_birthday_one(user_id):\n conn = __create_connection()\n cursor = conn.cursor()\n cursor.execute(queries.get_birthday_one, (user_id,))\n birthday_data = cursor.fetchone()\n result = None\n if birthday_data:\n result = Birthday(birthday_data[0], birthday_data[1], birthday_data[2], birthday_data[3])\n cursor.close()\n conn.close()\n return result\n\n\ndef update_birthday(user_id, day, month, year):\n conn = __create_connection()\n cursor = conn.cursor()\n cursor.execute(queries.update_birthday, (month, day, year, user_id))\n conn.commit()\n cursor.close()\n conn.close()\n\n\ndef delete_birthday(user_id):\n conn = __create_connection()\n cursor = conn.cursor()\n cursor.execute(queries.delete_birthday, (user_id,))\n conn.commit()\n cursor.close()\n conn.close()\n","sub_path":"db/db_adapter.py","file_name":"db_adapter.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"550318787","text":"import src.lib.command_headers as command_headers\nfrom src.lib.queries.command_queries import edit_command\n\n\ndef edit(args, **kwargs):\n command = args[0].lower()\n user_level = args[1]\n response = \" \".join(args[2:])\n creator = kwargs.get(\"username\", \"testuser\")\n channel = kwargs.get(\"channel\", \"testchannel\")\n if command[0] is \"!\":\n if command not in command_headers.commands:\n if user_level == \"reg\" or user_level == \"mod\":\n return edit_command(command, creator, user_level, response, channel)\n else:\n return \"User level must be 'reg' or 'mod'\"\n else:\n return \"{} already built in to Lorenzo.\".format(command)\n else:\n return \"Command must begin with '!'\"\n","sub_path":"src/lib/commands/edit.py","file_name":"edit.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"256484347","text":"from flask_sqlalchemy import SQLAlchemy\nfrom flask import current_app\n\ndb = SQLAlchemy(current_app)\n\nclass Spectre(db.Model):\n __tablename__ = \"spectres\"\n\n id = db.Column(db.Integer, primary_key=True)\n gid = db.Column(db.String(8), unique=True)\n active = db.Column(db.Boolean, default=False)\n is_root = db.Column(db.Boolean(), default=False)\n\n haunt_id = db.Column(db.Integer, db.ForeignKey(\"haunts.id\"))\n parent_id = db.Column(db.Integer, db.ForeignKey(\"spectres.id\"))\n\n children = db.relationship(\"Spectre\", backref=db.backref('parent', remote_side=[id]))\n\n def 
as_dict(self):\n return dict(id=self.id, parent_id=self.parent_id, active=self.active, is_root=self.is_root, children=[i.id for i in self.children])\n\nclass Haunt(db.Model):\n __tablename__ = \"haunts\"\n\n id = db.Column(db.Integer, primary_key=True)\n\n ghosts = db.relationship(\"Spectre\", backref=db.backref('haunt', remote_side=[id]))\n","sub_path":"ghosts/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"425491434","text":"#!usr/bin/env python\n#-*- coding:utf-8 _*-\n\"\"\"\n@author: WILLIAM\n@file: fratio_dataset.py\n@Time: 2020/9/29 \n@From: ASUS Win10\n@Overview: \n\"\"\"\nimport random\nfrom torch.utils.data import Dataset\nimport os\nfrom kaldi_io import read_mat\nimport numpy as np\n\n\nclass SpeakerDataset(Dataset):\n def __init__(self, dir, samples_per_speaker, transform, loader=read_mat,\n return_uid=False):\n self.return_uid = return_uid\n\n feat_scp = dir + '/feats.scp'\n spk2utt = dir + '/spk2utt'\n # utt2spk = dir + '/utt2spk'\n utt2num_frames = dir + '/utt2num_frames'\n # utt2dom = dir + '/utt2dom'\n\n if not os.path.exists(feat_scp):\n raise FileExistsError(feat_scp)\n if not os.path.exists(spk2utt):\n raise FileExistsError(spk2utt)\n\n invalid_uid = []\n with open(utt2num_frames, 'r') as f:\n for l in f.readlines():\n uid, num_frames = l.split()\n if int(num_frames) < 50:\n invalid_uid.append(uid)\n\n dataset = {}\n with open(spk2utt, 'r') as u:\n all_cls = u.readlines()\n for line in all_cls:\n spk_utt = line.split()\n spk_name = spk_utt[0]\n if spk_name not in dataset.keys():\n dataset[spk_name] = [x for x in spk_utt[1:] if x not in invalid_uid]\n\n # pdb.set_trace()\n\n speakers = [spk for spk in dataset.keys()]\n speakers.sort()\n print('==> There are {} speakers in Dataset.'.format(len(speakers)))\n spk_to_idx = {speakers[i]: i for i in range(len(speakers))}\n idx_to_spk = {i: speakers[i] for i in range(len(speakers))}\n\n uid2feat = {} # 'Eric_McCormack-Y-qKARMSO7k-0001.wav': feature[frame_length, feat_dim]\n with open(feat_scp, 'r') as f:\n for line in f.readlines():\n uid, feat_offset = line.split()\n if uid in invalid_uid:\n continue\n uid2feat[uid] = feat_offset\n\n print(' There are {} utterances in Dataset, where {} utterances are removed.'.format(len(uid2feat),\n len(invalid_uid)))\n self.speakers = speakers\n self.dataset = dataset\n self.uid2feat = uid2feat\n self.spk_to_idx = spk_to_idx\n self.idx_to_spk = idx_to_spk\n self.num_spks = len(speakers)\n\n self.loader = loader\n self.feat_dim = loader(uid2feat[dataset[speakers[0]][0]]).shape[1]\n self.transform = transform\n self.samples_per_speaker = samples_per_speaker\n\n def __getitem__(self, sid):\n # start_time = time.time()\n spk = self.idx_to_spk[sid]\n utts = self.dataset[spk]\n num_utt = len(utts)\n\n y = np.array([[]]).reshape(0, self.feat_dim)\n uid = utts[np.random.randint(0, num_utt)]\n\n feature = self.loader(self.uid2feat[uid])\n y = np.concatenate((y, feature), axis=0)\n\n while len(y) < self.samples_per_speaker:\n uid = utts[np.random.randint(0, num_utt)]\n feature = self.loader(self.uid2feat[uid])\n y = np.concatenate((y, feature), axis=0)\n # transform features if required\n feature = self.transform(y)\n label = sid\n\n return feature, label\n\n def __len__(self):\n return len(self.speakers) # 
return the number of speakers\n","sub_path":"Process_Data/Subband/fratio_dataset.py","file_name":"fratio_dataset.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"431726117","text":"import requests, json, random\n\nclass ESTClient(object):\n\tendpoint = \"https://cloud.estimote.com\"\n\n\tcommon_headers = {\n\t\t\"Content-Type\": \"application/json\",\n\t\t\"User-Agent\": \"EstimoteApp (iPhone; iPhone OS 9.3.5) EstimoteSDK/4.14.0\"\n\t}\n\n\tdef __init__(self, email, password):\n\t\tself.email = email\n\t\tself.password = password\n\t\tself.cookie = \"\"\n\n\tdef login(self):\n\t\tcredentials = json.dumps({\"username\": self.email, \"password\": self.password})\n\t\treq = requests.post(self.endpoint + \"/v1/login\", headers=self.common_headers, data=credentials)\n\t\ttry:\n\t\t\tself.cookie = req.headers[\"Set-Cookie\"]\n\t\texcept Exception as e:\n\t\t\tprint(\"Invalid Credentials!\")\n\t\t\treturn False\n\t\treturn True\n\nclass ESTInterface(object):\n\tdef __init__(self, client):\n\t\tself.client = client\n\t\tself.cookie = client.cookie.split(\";\")[0]\n\t\tadapted_headers = client.common_headers\n\t\tadapted_headers[\"Cookie\"] = self.cookie\n\t\tself.headers = adapted_headers\n\n\tdef get(self, path, headers):\n\t\treq = requests.get(self.client.endpoint + path, headers=headers)\n\t\treturn req.text\n\n\tdef get_devices(self):\n\t\treturn json.loads(self.get(\"/v2/devices\", self.headers))\n\nclass ESTDevice(object):\n\tdef __init__(self, identifier, interface):\n\t\tself.identifier = identifier\n\t\tself.interface = interface\n\n\tdef get_all(self):\n\t\treq = requests.get(self.interface.client.endpoint + \"/v3/devices/\" + self.identifier, headers=self.interface.headers)\n\t\treturn json.loads(req.text)\n\n\tdef parent_get(self, key):\n\t\treturn self.get_all()[key]\n\n\tdef parent_set(self, value, for_key):\n\t\tupdate = {\n\t\t\t\"identifier\": self.identifier,\n\t\t\tfor_key: value\n\t\t}\n\n\t\treq = requests.post(self.interface.client.endpoint + \"/v2/devices/\" + self.identifier, headers=self.interface.headers, data=json.dumps(update))\n\t\tsuccess = json.loads(req.text)[\"success\"]\n\t\treturn success\n\n\tdef shadow_get(self, key):\n\t\tres = self.get_all()[\"data\"]\n\t\treturn res[\"shadow\"][key]\n\n\tdef shadow_set(self, value, for_key):\n\t\tupdate = {\n\t\t\t\"identifier\": self.identifier,\n\t\t\t\"shadow\": {\n\t\t\t\tfor_key: value\n\t\t\t}\n\t\t}\n\t\treq = requests.post(self.interface.client.endpoint + \"/v2/devices/\" + self.identifier, headers=self.interface.headers, data=json.dumps(update))\n\t\tsuccess = json.loads(req.text)[\"success\"]\n\t\treturn success\n\n\tdef get_name(self):\n\t\treturn self.shadow_get(\"name\")\n\n\tdef set_name(self, name):\n\t\treturn self.shadow_set(name, \"name\")\n\n\tdef get_location(self):\n\t\treturn self.shadow_get(\"location\")\n\n\tdef set_location(self, timezone, country, zipcode, state, state_code, city, street_name, street_number, formatted_address, lat, lng):\n\t\tlocation = {\n\t\t\t\"timezone\": timezone,\n\t\t\t\"location_id\": random.randint(0, 100000),\n\t\t\t\"country\": country,\n\t\t\t\"zipcode\": zipcode,\n\t\t\t\"state\": state,\n\t\t\t\"state_code\": state_code,\n\t\t\t\"city\": city,\n\t\t\t\"street_name\": street_name,\n\t\t\t\"street_number\": street_number,\n\t\t\t\"formatted_address\": formatted_address,\n\t\t\t\"latitude\": lat,\n\t\t\t\"longitude\": lng\n\t\t}\n\t\treturn self.shadow_set(json.dumps(location), 
\"location\")\n\n\tdef get_color(self):\n\t\treturn self.get_all()[\"data\"][\"color\"]\n\n","sub_path":"estcloud/estcloud.py","file_name":"estcloud.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"572302600","text":"t = int(input())\nfor _ in range(t):\n n = int(input())\n graph = [[] for _ in range(n+1)]\n\n last_year = list(map(int, input().split())) #n번째 -> n번 등수인 팀\n\n indegree = [0] * (n+1)\n for i in range(len(last_year)):\n for j in range(-len(last_year) + 1 + i, 0):\n graph[last_year[i]].append(last_year[j])\n indegree[last_year[j]] += 1\n\n c = int(input())\n for _ in range(c):\n a, b = map(int, input().split())\n if b in graph[a]: #작년에 a팀 등수가 b팀보다 높았다.\n graph[a].remove(b)\n graph[b].append(a)\n indegree[a] += 1\n indegree[b] -= 1\n else: #작년에 a팀 등수가 b팀보다 낮았다.\n graph[b].remove(a)\n graph[a].append(b)\n indegree[a] -= 1\n indegree[b] += 1\n q = []\n result = []\n flag = 0\n for i in range(1, n+1):\n if indegree[i] == 0:\n q.append(i)\n\n for i in range(n):\n if len(q) == 0:\n print(\"IMPOSSIBLE\")\n flag = 1\n break\n elif len(q) >= 2:\n print(\"?\")\n flag = 1\n break\n checked = q.pop()\n result.append(checked)\n for i in graph[checked]:\n indegree[i] -= 1\n if indegree[i] == 0:\n q.append(i)\n\n if flag == 0:\n for i in result:\n print(i, end = \" \")\n print()\n","sub_path":"Q45_최종 순위.py","file_name":"Q45_최종 순위.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"22697210","text":"# -*- coding: utf-8 -*-\n# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport proto # type: ignore\n\n\n__protobuf__ = proto.module(\n package=\"google.ads.googleads.v12.errors\",\n marshal=\"google.ads.googleads.v12\",\n manifest={\"UrlFieldErrorEnum\",},\n)\n\n\nclass UrlFieldErrorEnum(proto.Message):\n r\"\"\"Container for enum describing possible url field errors.\n \"\"\"\n\n class UrlFieldError(proto.Enum):\n r\"\"\"Enum describing possible url field errors.\"\"\"\n UNSPECIFIED = 0\n UNKNOWN = 1\n INVALID_TRACKING_URL_TEMPLATE = 2\n INVALID_TAG_IN_TRACKING_URL_TEMPLATE = 3\n MISSING_TRACKING_URL_TEMPLATE_TAG = 4\n MISSING_PROTOCOL_IN_TRACKING_URL_TEMPLATE = 5\n INVALID_PROTOCOL_IN_TRACKING_URL_TEMPLATE = 6\n MALFORMED_TRACKING_URL_TEMPLATE = 7\n MISSING_HOST_IN_TRACKING_URL_TEMPLATE = 8\n INVALID_TLD_IN_TRACKING_URL_TEMPLATE = 9\n REDUNDANT_NESTED_TRACKING_URL_TEMPLATE_TAG = 10\n INVALID_FINAL_URL = 11\n INVALID_TAG_IN_FINAL_URL = 12\n REDUNDANT_NESTED_FINAL_URL_TAG = 13\n MISSING_PROTOCOL_IN_FINAL_URL = 14\n INVALID_PROTOCOL_IN_FINAL_URL = 15\n MALFORMED_FINAL_URL = 16\n MISSING_HOST_IN_FINAL_URL = 17\n INVALID_TLD_IN_FINAL_URL = 18\n INVALID_FINAL_MOBILE_URL = 19\n INVALID_TAG_IN_FINAL_MOBILE_URL = 20\n REDUNDANT_NESTED_FINAL_MOBILE_URL_TAG = 21\n MISSING_PROTOCOL_IN_FINAL_MOBILE_URL = 22\n INVALID_PROTOCOL_IN_FINAL_MOBILE_URL = 23\n MALFORMED_FINAL_MOBILE_URL = 
24\n MISSING_HOST_IN_FINAL_MOBILE_URL = 25\n INVALID_TLD_IN_FINAL_MOBILE_URL = 26\n INVALID_FINAL_APP_URL = 27\n INVALID_TAG_IN_FINAL_APP_URL = 28\n REDUNDANT_NESTED_FINAL_APP_URL_TAG = 29\n MULTIPLE_APP_URLS_FOR_OSTYPE = 30\n INVALID_OSTYPE = 31\n INVALID_PROTOCOL_FOR_APP_URL = 32\n INVALID_PACKAGE_ID_FOR_APP_URL = 33\n URL_CUSTOM_PARAMETERS_COUNT_EXCEEDS_LIMIT = 34\n INVALID_CHARACTERS_IN_URL_CUSTOM_PARAMETER_KEY = 39\n INVALID_CHARACTERS_IN_URL_CUSTOM_PARAMETER_VALUE = 40\n INVALID_TAG_IN_URL_CUSTOM_PARAMETER_VALUE = 41\n REDUNDANT_NESTED_URL_CUSTOM_PARAMETER_TAG = 42\n MISSING_PROTOCOL = 43\n INVALID_PROTOCOL = 52\n INVALID_URL = 44\n DESTINATION_URL_DEPRECATED = 45\n INVALID_TAG_IN_URL = 46\n MISSING_URL_TAG = 47\n DUPLICATE_URL_ID = 48\n INVALID_URL_ID = 49\n FINAL_URL_SUFFIX_MALFORMED = 50\n INVALID_TAG_IN_FINAL_URL_SUFFIX = 51\n INVALID_TOP_LEVEL_DOMAIN = 53\n MALFORMED_TOP_LEVEL_DOMAIN = 54\n MALFORMED_URL = 55\n MISSING_HOST = 56\n NULL_CUSTOM_PARAMETER_VALUE = 57\n VALUE_TRACK_PARAMETER_NOT_SUPPORTED = 58\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n","sub_path":"google/ads/googleads/v12/errors/types/url_field_error.py","file_name":"url_field_error.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"330630416","text":"# Script to create segmented images using binary images and original images\n\nimport imageio\nimport numpy as np\nimport os\n\nfor name in os.listdir('original_data/'):\n if name != '180404_125102_RB180':\n for imname in os.listdir('original_data/' + name):\n origImg = imageio.imread('original_data/' + name + '/' + imname);\n origImg = origImg/65535\n origImg = origImg*255\n origImg = np.uint8(origImg)\n\n bwimname = imname[0:-3] + 'png'\n bwImg = imageio.imread('binary_data/' + name + '/' + bwimname)\n bwImg = bwImg[:,:,0]\n \n newImg = np.zeros(bwImg.shape,dtype=np.uint8)\n if np.all(np.unique(bwImg) == [0,255]) or np.all(np.unique(bwImg) == [0]):\n segmentedInds = np.where(bwImg == 255)\n newImg[segmentedInds] = origImg[segmentedInds] # set segmented regions in BW image to original values\n savename = name + '_' + bwimname\n imageio.imwrite('./new_data/' + savename,newImg)\n \n else:\n raise ValueError('Black-white was not really black and white. 
Fix.')\n \n\n \n","sub_path":"data/segmented_cells_data/create_segmented_images.py","file_name":"create_segmented_images.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"470706600","text":"def merge_arr(arr, l, m, r):\n s, e = l, m\n inv = 0\n temp_arr = []\n while s <= (m-1) and e <= r:\n if arr[s] > arr[e]:\n inv += (m - s)\n temp_arr.append(arr[e])\n e += 1\n else:\n temp_arr.append(arr[s])\n s += 1\n while s <= (m-1):\n temp_arr.append(arr[s])\n s += 1\n while e <= r:\n temp_arr.append(arr[e])\n e += 1\n for k in range(l, r+1):\n arr[k] = temp_arr[k-l]\n return inv\n\n\n\ndef count_array_inv_ms(arr, l, r):\n inv = 0\n if l < r:\n m = l + (r - l) // 2\n inv = count_array_inv_ms(arr, l, m)\n inv += count_array_inv_ms(arr, m+1, r)\n inv += merge_arr(arr, l, m+1, r)\n return inv\n\n\n\ndef count_array_inv(arr):\n return count_array_inv_ms(arr, 0, len(arr)-1)\n\n\n# Driver Code\narr = [1, 20, 6, 4, 5]\nn = len(arr)\nprint(\"Number of inversions are {}\".format(\n count_array_inv(arr)))\n\n\"\"\"\n3:30\n\nfor every element count number of elements lower than that on the right side\nO(N^2)\n\n- modified merge sort - whenever inversion; everything greater will also be inverted\nso get inversions from left and right \nand add merge step inverson\n\"\"\"","sub_path":"Prev/num_array_inv.py","file_name":"num_array_inv.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"385636353","text":"import sys, os, argparse, glob\nsys.path.insert(0, '/home/grigoryanlab/home/fzheng/modules_py')\nimport General, Analyze, Constants, PDB\nfrom mymodules import designscore\n\npar = argparse.ArgumentParser()\npar.add_argument('--p', required = True, help = 'a whole pdb file')\npar.add_argument('--n', type = int, default = 50, help = 'top n sequences to choose from the second search')\npar.add_argument('--src', required = True, help = 'the directory of the first search')\npar.add_argument('--hs', required = True, help = 'head of .seq files of the first search')\npar.add_argument('--hd', required = True, help = 'head of .seq files of the second search')\npar.add_argument('--uniq', action = 'store_true', help = 'if making uniq from the given .seq files')\npar.add_argument('--o', required = True, help = 'output file')\npar.add_argument('--raw', action = 'store_true', help = 'if it is true, output raw score')\npar.add_argument('--excluded', nargs = '*', default = [], help = 'a list, which include residues whose results not added to the final score')\n\nargs = par.parse_args()\n\nargs.src = General.absPath(args.src)\n\n# hash to store designability\nabd = {} # smoothed designability, calculated in the same way as design score\nnn = {}\nnewhd = 'uniq_'+args.hd if args.uniq else args.hd\n\nbase = General.getBase( General.removePath(args.p) )\nconres = PDB.ConRes(args.p)\n\nfor res in conres:\n cid, resnum = res.getChid(), res.getResnum()\n if cid + ',' + str(resnum) in args.excluded: \n continue\n abd[ cid + ',' + str(resnum) ] = 0\n nn[ cid + ',' + str(resnum) ] = 0\n\nout = open(args.o, 'w')\n\nfor res in conres:\n cid, resnum, resname = res.getChid(), res.getResnum(), res.getResname()\n if cid + ',' + str(resnum) in args.excluded: \n continue\n if cid == ' ':\n fragpdb = base + '_-' + str(resnum) + '.pdb'\n else:\n fragpdb = base + '_' + cid + str(resnum) + '.pdb'\n seqf = args.hd + '_' + General.changeExt(fragpdb, 'seq')\n 
srcseqf = args.src + '/' + args.hs + '_' + General.changeExt(fragpdb, 'seq')\n    \n    if not os.path.isfile(srcseqf):\n        continue\n    if not os.path.isfile(seqf):\n        freq = 0\n    else:\n        if args.uniq:\n            Analyze.trimByUniqSeq(seqf, args.hd, 'uniq_'+args.hd)\n            seqf = seqf.replace(args.hd, 'uniq_'+args.hd)\n        col = Analyze.readColumn(seqf, 0, top = args.n) # bbrmsd is sorted\n        if len(col) - 1 > 0:\n            rmsdn, nact = float(col[-1]), len(col)-1\n            srccol = Analyze.readColumn(srcseqf, 0)\n            nsrc = Analyze.underRMSD(srccol, rmsdn)\n            freq = min(float(nsrc)/nact, 1)\n        else:\n            freq = 0\n    \n    if args.raw:\n        out.write(cid +','+str(resnum) + ':' +str('%.3f'%freq) + ' ')\n    \n    # add this score to all residues which have this residue as a neighbor\n    neighbors = designscore.neighborList(args.p, cid + ',' + str(resnum), os.path.dirname(args.src + '/' + fragpdb))\n    \n    for nb in neighbors:\n        if nb in args.excluded:\n            continue\n        if args.raw:\n            out.write(nb + ' ')\n        else:\n            abd[nb] += freq\n            nn[nb] += 1\n    if args.raw:\n        out.write('\\n') \n    \nif not args.raw: \n    for res in conres:\n        k = res.getChid()+','+str(res.getResnum())\n        if k in args.excluded:\n            continue\n        if nn[k] != 0:\n            abd[k] /= float(nn[k])\n        out.write( k + ' : ' + str('%.3f' %abd[k]) + '\\n' ) \n    \n    \n\n\n","sub_path":"calculateDesignability.py","file_name":"calculateDesignability.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"5737755","text":"#\n# Prelab2Ejercicio1.py\n# Description: The following program computes the absolute value of a\n# Author:\n# Douglas Torres\n#\n# Last modified: 19/04/2014\n#\n#\n# Constants\n# \t\ta : int // Number whose absolute value will be computed \t\t\t\n# Variables\n# \t\tb: int // Variable that stores the absolute value of a\n\n# Initial values\n\na = int(input(\"Give a value for a and you will get its absolute value. \"))\n\n\n# Precondition\ntry:\n\tassert (True)\nexcept:\n\tprint (\"The precondition does not hold\")\n\tquit()\n\n# Computation\n\nif a >= 0:\n\tb = a\nelse: \n\tb = -1*a\n\n# Postcondition\ntry:\n\tassert (b == abs(a))\nexcept:\n\tprint(\"The postcondition does not hold\")\n\tquit()\n# Output\nprint (\"The absolute value of \",a,\" is \",b)\n","sub_path":"Python/Laboratorio-2/Prelab2ejercicio1.py","file_name":"Prelab2ejercicio1.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"169875582","text":"# python3\r\n\r\nimport sys, threading\r\nsys.setrecursionlimit(10**7) # max depth of recursion\r\nthreading.stack_size(2**27)  # new thread will get stack of such size\r\n\r\nclass TreeHeight:\r\n    def __init__(self):\r\n        self.n = 0\r\n        self.parent = []\r\n        self.cache = []\r\n\r\n    def read(self):\r\n        \"\"\"Reads data from standard input.\"\"\"\r\n        self.n = int(sys.stdin.readline())\r\n        self.parent = list(map(int, sys.stdin.readline().split()))\r\n        self.cache = [0] * self.n\r\n\r\n    def path_len(self, node_id):\r\n        \"\"\"Returns path length from given node to the root.\"\"\"\r\n        parent = self.parent[node_id]\r\n        if parent == -1:\r\n            return 1\r\n\r\n        if self.cache[node_id]:\r\n            return self.cache[node_id]\r\n\r\n        self.cache[node_id] = 1 + self.path_len(self.parent[node_id])\r\n        return self.cache[node_id]\r\n\r\n    def compute_height(self):\r\n        \"\"\"Computes the tree height.\"\"\"\r\n        return max([self.path_len(i) for i in range(self.n)])\r\n\r\n    #the following one is provided by the class, it's rather slow! 
Do not use it!!\r\n # def read(self):\r\n # self.n = int(sys.stdin.readline())\r\n # self.parent = list(map(int, sys.stdin.readline().split()))\r\n #\r\n # def compute_height(self):\r\n # # Replace this code with a faster implementation\r\n # maxHeight = 0\r\n # for vertex in range(self.n):\r\n # height = 0\r\n # i = vertex\r\n # while i != -1:\r\n # height += 1\r\n # i = self.parent[i]\r\n # maxHeight = max(maxHeight, height)\r\n # return maxHeight\r\n\r\ndef main():\r\n tree = TreeHeight()\r\n tree.read()\r\n print(tree.compute_height())\r\n\r\nthreading.Thread(target=main).start()\r\n","sub_path":"Programming-Assignment-1/tree_height/tree-height.py","file_name":"tree-height.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"631354401","text":"from sklearn.feature_selection import SelectKBest, f_regression\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.preprocessing import StandardScaler\nimport pandas as pd\n\n\ndef constraints(val):\n return 0 if val < 0 else val\n\n\ndef generate_submission_regression(features, target, testing_data):\n scaler = StandardScaler()\n scaler.fit(features)\n features = scaler.transform(features)\n\n dim = SelectKBest(f_regression, k=10)\n features = dim.fit_transform(features, target)\n\n mlp = MLPRegressor(\n activation='relu', alpha=0.10000000000000001, batch_size='auto',\n beta_1=0.9, beta_2=0.999, early_stopping=False, epsilon=1e-08,\n hidden_layer_sizes=(100,), learning_rate='adaptive',\n learning_rate_init=0.10000000000000001, max_iter=200, momentum=0.9,\n nesterovs_momentum=True, power_t=0.5, random_state=None,\n shuffle=True, solver='sgd', tol=0.0001, validation_fraction=0.1,\n verbose=False, warm_start=False\n )\n\n mlp.fit(features, target)\n\n testing_data = scaler.transform(testing_data)\n testing_data = dim.transform(testing_data)\n\n predictions = mlp.predict(testing_data)\n predictions = list(map(constraints, predictions))\n pd.DataFrame(predictions).to_csv('kaggle/results.csv')\n","sub_path":"regression/code/regression_best.py","file_name":"regression_best.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"284626989","text":"# This is just a throwaway program to convert a specific\n# movie script I found on the internet into semantic XML.\n#\n# The script was an HTML file with the entire text in\n# a
<pre> tag.  Not even broken into paragraphs (<p>,\n# <br>).\n# Several lines were bolded with <b> tags,\n# which always appeared on the left margin.\n#\n# I noted that different kinds of text were indented\n# by different amounts, so this script semantically \n# labels the parts via their indentation. \n# \n# The only ambiguous ones were scene titles and descriptive\n# text, and I used some context to discern which is which.\n# Scene titles are always after a blank line, and always\n# in ALL CAPS.\n\ndef noTag(ln):\n    \"\"\"strips out B tags\"\"\"\n    if ln.startswith('<b>'):\n        return noTag(ln[3:])\n    if ln.startswith('</b>'):\n        return noTag(ln[4:])\n    return ln\n\ndef firstLetter(ln):\n    \"\"\"Gives the position for the first non-space\"\"\"\n    n = 0\n    for c in ln:\n        if c != ' ':\n            return n\n        n = n + 1\n    # nothing found\n    return 0\n\ndef inCaps(ln):\n    \"\"\"tells if a line has all its letters capitalized\"\"\"\n    return all(c.isupper() for c in ln if c.isalpha())\n\nclass TagState:\n    def __init__(self):\n        self.tags = ['-1']\n        self.__tag('script')\n        self.llb = True # last line blank\n    def __tag(self,t):\n        self.tags.append(t)\n        print(f'\\n<{t}>', end='')\n    def __close(self):\n        t = self.tags.pop()\n        print(f'</{t}>', end='')\n    def closeTo(self,what):\n        \"\"\"close up to but not including the named tag\"\"\"\n        t = self.tags[-1]\n        while t != what:\n            self.__close()\n            t = self.tags[-1]\n    def newBlank(self):\n        if self.tags[-1] == 'p':\n            self.__close()\n    def newScene(self,l):\n        self.closeTo('script') \n        self.__tag('scene')\n        print(f'{l.strip()}')\n    def newSpeaker(self,s):\n        self.closeTo('scene')\n        print()\n        self.__tag('speaker')\n        print(f'{s.strip()}',end='')\n    def newSpeech(self,s):\n        # close and open stage directions\n        if self.tags[-1] == 'sdir':\n            self.__close()\n        # open a paragraph if we aren't in one\n        if self.tags[-1] != 'p':\n            self.__tag('p')\n        print(s.strip())\n    def newDesc(self,d):\n        if self.tags[-1] != 'p':\n            # we are not already in a paragraph...\n            if self.tags[-1] != 'desc':\n                # we aren't in a desc, so back up to scene level\n                self.closeTo('scene')\n                print()\n                self.__tag('desc')\n            self.__tag('p')\n        print(d.strip())\n    def newStageDir(self, sd):\n        if self.tags[-1] != 'sdir':\n            if self.tags[-1] == 'p': \n                self.__close()\n            self.__tag('sdir')\n        print(sd.strip())\n    def done(self):\n        self.closeTo('-1')\n\n# ################\n# 0 blank\n# 15 description / SCENE\n# 25 speech\n# 30 stagedir\n# 37 speaker \n# ################\ndef procline(ts, l):\n    \"\"\"process line 'l', using TagState 'ts'\"\"\"\n    loc = firstLetter(l) \n    if loc == 0:\n        ts.newBlank() \n    elif loc == 15 and ts.llb and inCaps(l):\n        ts.newScene(l)\n    elif loc == 15:\n        ts.newDesc(l)\n    elif loc == 25:\n        ts.newSpeech(l)\n    elif loc == 30:\n        ts.newStageDir(l)\n    elif loc == 37:\n        ts.newSpeaker(l)\n    else:\n        print('**********')\n        print(f'**********BAD LINE {loc} <{l}>')\n        print('**********')\n    ts.llb = (loc == 0) # remember if the line was blank\n    \ndef process(fn):\n    with open(fn,\"r\") as ifile:\n        ts = TagState()\n        for line in ifile:\n            line = noTag(line)\n            procline(ts,line)\n        ts.done()\n\nprocess('Script.input')\n\n","sub_path":"random_exercises/movieScript/mscript.py","file_name":"mscript.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"122676831","text":"#!/usr/bin/env python\n\n\"\"\"\nROS node to make the turtle draw a circle\n\"\"\"\nimport rospy\nfrom turtlesim.msg import Pose\nfrom geometry_msgs.msg import Twist\nimport math\n\n\nclass TurtleController(object):\n    \"\"\"\n    Controls the turtle to draw a circle\n    
\"\"\"\n def __init__(self):\n # turtle current position\n self.turtle_x = 0\n self.turtle_y = 0\n self.turtle_theta = 0\n\n # initial turtle coordinates\n self.initial_turtle_x = None\n self.initial_turtle_y = None\n self.initial_turtle_theta = None\n\n self.initial_coords_updated = False\n\n # turtle angular speed (rad/s)\n self.turtle_speed = 0.8\n # radius of the circle to be drawn\n self.circle_radius = 1\n\n rospy.init_node(\"node_turtle_revolve\", anonymous=False)\n\n # Subscriber for \"turtle1/pose\" to get current location and speed.\n rospy.Subscriber(\"/turtle1/pose\", Pose, self.update_pose)\n\n # Publisher for \"turtle1/cmd_vel\" to publish angular and linear vel.\n self.velocity_pub = rospy.Publisher(\"/turtle1/cmd_vel\", Twist,\n queue_size=10)\n\n def update_pose(self, msg):\n \"\"\"\n Callback funtion for topic \"/turtle1/pose\".\n Updates x, y and theta.\n \"\"\"\n self.turtle_x = msg.x\n self.turtle_y = msg.y\n self.turtle_theta = msg.theta\n\n if not self.initial_coords_updated:\n self.initial_turtle_x = msg.x\n self.initial_turtle_y = msg.y\n self.initial_turtle_theta = msg.theta\n self.initial_coords_updated = True\n\n def run(self):\n msg = Twist()\n current_distance = 0\n circumfence = 2 * math.pi * self.circle_radius\n\n msg.linear.x = self.turtle_speed\n msg.angular.z = self.turtle_speed\n\n rate = rospy.Rate(5)\n\n t0 = rospy.Time.now().to_sec()\n while not rospy.is_shutdown() and current_distance <= circumfence:\n # calculate the position and update\n rospy.loginfo(\"Peri: %f Distance Travelled: %f\",\n circumfence, current_distance)\n rospy.loginfo(\"x: %f y: %f theta: %f\", self.turtle_x,\n self.turtle_y, self.turtle_theta)\n\n self.velocity_pub.publish(msg)\n t1 = rospy.Time.now().to_sec()\n current_distance = (self.turtle_speed * self.circle_radius)\n current_distance *= (t1 - t0)\n\n rate.sleep()\n\t# calculate the position and update\n rospy.loginfo(\"Peri: %f Distance Travelled: %f\",\n circumfence, current_distance)\n rospy.loginfo(\"x: %f y: %f theta: %f\", self.turtle_x,\n self.turtle_y, self.turtle_theta)\n\n self.velocity_pub.publish(msg)\n t1 = rospy.Time.now().to_sec()\n current_distance = (self.turtle_speed * self.circle_radius)\n current_distance *= (t1 - t0)\n\n rate.sleep()\n\n msg.linear.x = 0\n msg.angular.z = 0\n self.velocity_pub.publish(msg)\n\n rospy.loginfo(\"Goal reached\")\n\n\nif __name__ == \"__main__\":\n try:\n controller = TurtleController()\n controller.run()\n rospy.spin()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"move_turtle_circle.py","file_name":"move_turtle_circle.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"578072357","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.conf.urls.i18n import i18n_patterns\n\nimport html5_appcache\nhtml5_appcache.autodiscover()\n\nfrom .sitemap import NewsSitemap\nfrom .views import NewsListView, NewsDetailView\n\nsitemaps = {\n 'news': NewsSitemap,\n}\n\nurlpatterns = i18n_patterns('',\n url(\"^list/$\", NewsListView.as_view(), name=\"news_list\"),\n url(\"^(?P\\d+)/live/$\", NewsListView.as_view(), name=\"news_detail_live\"),\n url(\"^(?P\\d+)/$\", NewsDetailView.as_view(), name=\"news_detail\"),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^sitemap.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),\n)\nif html5_appcache.settings.DJANGOCMS:\n urlpatterns += 
i18n_patterns('',\n        url(r'^', include('cms.urls')),\n    )","sub_path":"html5_appcache/test_utils/testapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"184911375","text":"# Configuration file where you can set the parameter default values and\n# descriptions. This is used by both the standalone RPCA and QIIME 2 RPCA sides\n# of DEICODE.\nDEFAULT_RANK = 3\nDEFAULT_MSC = 500\nDEFAULT_MFC = 10\nDEFAULT_ITERATIONS = 5\n\nDESC_RANK = (\"The underlying low-rank structure (suggested: 1 < rank < 10)\"\n             \" [minimum 2]\")\nDESC_MSC = \"Minimum sum cutoff of sample across all features\"\nDESC_MFC = \"Minimum sum cutoff of features across all samples\"\nDESC_ITERATIONS = (\"The number of iterations to optimize the solution\"\n                   \" (suggested to be below 100; beware of overfitting)\"\n                   \" [minimum 1]\")\n","sub_path":"deicode/_rpca_defaults.py","file_name":"_rpca_defaults.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"376764482","text":"# /usr/bin/python\n# coding=utf-8\n\n'''\nPython's pickle module implements basic serialization and deserialization of data.\nSerialization with the pickle module lets us save the objects of a running program to a file for permanent storage.\nDeserialization lets us recreate, from that file, the objects the program saved last time.\nBasic interface:\n    pickle.dump(obj, file[, protocol])\n\n'''\n\nimport pickle\n# Use the pickle module to save data objects to a file\ndata1 = {'a': [1, 2.0, 3, 4+6j],\n         'b': ('string', u'Unicode string'),\n         'c': None}\n\nlist1 = [1,2,3]\nlist1.append(list1)\n\n# Open the output file\noutput = open('data.pkl', 'wb')\n\n# Pickle dictionary using protocol 0.\npickle.dump(data1, output)\n# Pickle the list using the highest protocol available.\npickle.dump(list1, output, -1)\n# close\noutput.close()\n\n\n\n# Use the pickle module to reconstruct Python objects from the file\nimport pprint,pickle\n# Open the file\npkl_file = open('data.pkl', 'rb')\n\n\n\ndata3 = pickle.load(pkl_file)\npprint.pprint(data3)\nprint(type(data3))\nprint(data3)\n\n\ndata2 = pickle.load(pkl_file)\npprint.pprint(data2)\n\n# close\npkl_file.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Python 3.x/输入与输出/pickle模块.py","file_name":"pickle模块.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"408948525","text":"from algorithm import Algorithm\nimport heapq\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\n\nWMAX = 1e3\ndx = [-1, 0, 1, 1, 1, 0, -1, -1]\ndy = [1, 1, 1, 0, -1, -1, -1, 0]\n\nclass UCS(Algorithm):\n    def __init__(self, start_point, end_point, ENV):\n        super().__init__(start_point, end_point, ENV)\n        self.d = {}\n        for i in range(self.E.xmax + 1):\n            for j in range(self.E.ymax + 1):\n                self.d[(i, j)] = WMAX\n                self.fre[(i, j)] = 1\n                self.trace[(i, j)] = -1\n\n        self.d[self.start_point] = 0\n\n    def output(self):\n        if (self.trace[self.end_point] == -1):\n            print(\"There is no path from {} to {}\".format(self.start_point, self.end_point))\n            return []\n        else:\n            trace_path = []\n            while (self.start_point != self.end_point):\n                trace_path.append(self.end_point)\n                self.end_point = self.trace[self.end_point]\n\n            trace_path.append(self.start_point)\n            return np.array(trace_path)\n\n    def run(self,mode = 1):\n        plt.title(\"UCS Algorithm\")\n        time_start = time.time()\n        pq = []\n        heapq.heappush(pq, (0, self.start_point))\n        while len(pq) > 0:\n            w, p = heapq.heappop(pq)\n\n            if (self.fre[p] == 0): continue\n            if (p == self.end_point): break\n            self.fre[p] = 0\n            px, py = p\n            for i in range(8):\n                next_p = (px + 
dx[i], py + dy[i])\n w_move = 1\n if ((i == 0) | (i == 2) | (i == 4) | (i == 6)): # diagonal move\n w_move = np.sqrt(2)\n if ((self.E.is_valid_point(next_p) == True) & (self.E.is_valid_move(p, next_p))):\n if ((self.fre[next_p] == 1) & (self.d[next_p] > self.d[p] + w_move)):\n self.d[next_p] = self.d[p] + w_move\n heapq.heappush(pq, (self.d[next_p], next_p))\n self.trace[next_p] = p\n if mode :\n plt.plot((px, px + dx[i]), (py, py + dy[i]), color='r')\n plt.pause(0.00000001)\n\n self.cost = self.d[self.end_point]\n self.timeProcessing = (time.time() - time_start)\n\n if mode >= 0:\n path = self.output()\n if (len(path) > 0):\n self.E.draw_path(path)\n # plt.show()\n","sub_path":"ucs.py","file_name":"ucs.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"478691575","text":"import argparse\n\n\nclass CLI:\n \"\"\" Command Line Interface \"\"\"\n\n @staticmethod\n def get_args():\n \"\"\" Loading arguments given by user \"\"\"\n\n # Creating parser\n parser = argparse.ArgumentParser(prog='my_program',\n description='Working with movies DB.')\n parser.add_argument('--version', action='version', version='1.0.0')\n\n # Sorting records\n parser.add_argument('--sort_by', help='sort records', action='store', nargs=2,\n type=str)\n\n # Filtering records\n parser.add_argument('--filter_by', help='filter records', action='store',\n nargs='+', type=str,\n metavar=('column', 'value'))\n\n # Comparing records\n parser.add_argument('--compare_by', help='compare records', action='store',\n nargs='+', type=str,\n metavar=('compare_type', 'value'))\n\n # Highscores\n parser.add_argument('--highscores', help='show highscores', action='store_true')\n\n # Adding title to the database\n parser.add_argument('--add', help='add title to the database', action='store',\n nargs='+', type=str,\n metavar=('title1', 'title2...'))\n\n args = parser.parse_args()\n\n commands = {'sort_by': args.sort_by,\n 'filter_by': args.filter_by,\n 'compare_by': args.compare_by,\n 'highscores': args.highscores,\n 'add': args.add}\n\n return commands\n","sub_path":"modules/cli_interface.py","file_name":"cli_interface.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"28561123","text":"import argparse\nimport cv2\nimport glob\nimport logging\nimport numpy as np\nimport os\nimport torch\nfrom typing import Tuple\n\nimport detectron2\nfrom detectron2 import model_zoo\nfrom detectron2.engine import DefaultPredictor\nfrom detectron2.config import get_cfg\nfrom detectron2.utils.logger import setup_logger\nsetup_logger()\n\nlogging_fmt=\"%(asctime)s %(levelname)s: %(message)s\"\nlogging.basicConfig(format=logging_fmt)\nlogger=logging.getLogger(__name__)\nlogger.setLevel(level=logging.INFO)\n\ndevice=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef get_region_features_single(\n raw_image:np.ndarray,\n predictor:DefaultPredictor)->Tuple[torch.Tensor,torch.Tensor]:\n \"\"\"\n Extracts region features from a single image.\n \"\"\"\n with torch.no_grad():\n raw_height,raw_width=raw_image.shape[:2]\n\n image=predictor.aug.get_transform(raw_image).apply_image(raw_image)\n image=torch.as_tensor(image.astype(\"float32\").transpose(2,0,1))\n inputs=[{\"image\":image,\"height\":raw_height,\"width\":raw_width}]\n images=predictor.model.preprocess_image(inputs)\n\n model=predictor.model\n\n # Generate feature maps from the backbone.\n feature=model.backbone(images.tensor)\n # Generate region proposals.\n 
proposals,_=model.proposal_generator(images,feature)\n instances,_=model.roi_heads(images,feature,proposals)\n # Generate RoI features.\n pred_boxes=[x.pred_boxes for x in instances]\n box_features=model.roi_heads.box_pooler(\n [feature[f] for f in feature if f!=\"p6\"],pred_boxes\n )\n box_features=model.roi_heads.box_head(box_features) # Output of the FC layer (num RoIs, feature dim)\n\n box_coords=torch.empty(0,4).to(device) # RoI coordinates\n for boxes in pred_boxes:\n box_coords=torch.cat([box_coords,boxes.tensor],dim=0)\n\n return box_coords,box_features\n\ndef main(args):\n image_dir:str=args.image_dir\n boxes_save_dir:str=args.boxes_save_dir\n features_save_dir:str=args.features_save_dir\n index_lower_bound:int=args.index_lower_bound\n index_upper_bound:int=args.index_upper_bound\n\n model_name=\"COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml\"\n cfg=get_cfg()\n cfg.merge_from_file(model_zoo.get_config_file(model_name))\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST=0.7\n cfg.MODEL.DEVICE=str(device)\n cfg.MODEL.WEIGHTS=model_zoo.get_checkpoint_url(model_name)\n predictor=DefaultPredictor(cfg)\n\n os.makedirs(boxes_save_dir,exist_ok=True)\n os.makedirs(features_save_dir,exist_ok=True)\n\n pathname=os.path.join(image_dir,\"*.jpg\")\n files=glob.glob(pathname)\n for idx,file in enumerate(files):\n if idx<index_lower_bound:\n continue\n if index_upper_bound>=0 and idx>=index_upper_bound:\n break\n\n logger.info(\"{}\\t{}\".format(idx,file))\n\n image=cv2.imread(file)\n if image is None:\n logger.warn(\"Could not open the image.\\t{}\".format(file))\n continue\n \n boxes,features=get_region_features_single(image,predictor)\n\n save_filename=os.path.basename(os.path.splitext(file)[0])+\".pt\"\n boxes_save_filepath=os.path.join(boxes_save_dir,save_filename)\n features_save_filepath=os.path.join(features_save_dir,save_filename)\n\n torch.save(boxes,boxes_save_filepath)\n torch.save(features,features_save_filepath)\n\nif __name__==\"__main__\":\n parser=argparse.ArgumentParser()\n parser.add_argument(\"--image_dir\",type=str)\n parser.add_argument(\"--boxes_save_dir\",type=str)\n parser.add_argument(\"--features_save_dir\",type=str)\n parser.add_argument(\"--index_lower_bound\",type=int,default=-1)\n parser.add_argument(\"--index_upper_bound\",type=int,default=-1)\n args=parser.parse_args()\n\n main(args)\n","sub_path":"stair_feature_extractor.py","file_name":"stair_feature_extractor.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"217225866","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport sys\nimport sqlite3\nimport datetime\nfrom flask import Flask, render_template, request, session, redirect, g, flash\n\n\n#sys.path.append(\"\")\n\nusername = \"user\"\npassword = \"password\"\nDATABASE = \"blogdata\"\n\napp = Flask(__name__)\n\ndef get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect(DATABASE)\n return db\n\n\n@app.teardown_appcontext\ndef close_connection(exception):\n db = getattr(g, '_database', None)\n if db is not None:\n db.close()\n\n@app.context_processor\ndef cur_date():\n return {'cur_date': datetime.date.today()}\n\n\n\n@app.route('/', methods = ['GET'])\ndef index():\n cur = get_db().cursor()\n cur.execute('select TITLE, AUTHOR, CONTENT, DATETIME from POST')\n p_list = [dict(title = row[0], author = row[1], content = row[2], datetime = row[3]) for row in cur.fetchall()]\n return render_template('index.html', p_list = p_list)\n \n\n\n@app.route('/login', methods = ['GET', 'POST']) \ndef login():\n error = None\n if request.method == 'GET':\n
return render_template('login.html')\n if request.method == 'POST':\n if request.form['username'] == 'user' and request.form['password'] == 'password':\n session['logged_in'] = True\n return redirect('/dashboard')\n else:\n flash('Invalid login')\n return render_template('login.html', error = error)\n \n \n\n@app.route('/dashboard', methods = ['GET'])\ndef dashboard():\n if session['logged_in'] == True:\n cur = get_db().cursor()\n cur.execute('select ID, TITLE from POST')\n d_list = [dict(id = row[0], title = row[1]) for row in cur.fetchall()]\n return render_template(\"dashboard.html\", d_list = d_list)\n else:\n return redirect('/login')\n\n \n \n@app.route('/post/add', methods = ['GET','POST'])\ndef addnewpost():\n error = None\n if session['logged_in'] == True:\n if request.method == 'GET':\n return render_template(\"addpost.html\")\n elif request.method == 'POST':\n cur = get_db().cursor()\n cur.execute('insert into POST (TITLE, AUTHOR, CONTENT, DATETIME) values (?,?,?,?)',(request.form['title'],request.form['author'], request.form['content'], request.form['datetime']))\n get_db().commit()\n return redirect('/dashboard')\n else:\n flash(\"Error adding Post\")\n return redirect('/post/add')\n else:\n return redirect('/login')\n \n\n \n@app.route('/post/<id>', methods = ['GET','POST'])\ndef modifypost(id):\n error = None\n cur = get_db().cursor()\n if session['logged_in'] == True:\n if request.method == 'GET':\n cur.execute('select TITLE, AUTHOR, CONTENT from POST where ID = ?',(id,))\n pp_list = [dict(title = row[0], author = row[1], content = row[2]) for row in cur.fetchall()]\n return render_template(\"modifypost.html\", pp_list = pp_list)\n elif request.method == 'POST':\n t = request.form['title']\n a = request.form['author']\n c = request.form['content']\n d = request.form['datetime']\n i = id\n cur.execute('update post SET TITLE = ?, AUTHOR = ?, CONTENT = ?, DATETIME = ? WHERE id = ?', (t, a, c, d, i))\n get_db().commit()\n return redirect('/dashboard')\n else:\n flash(\"Error updating Post\")\n return redirect('/post/')\n else:\n return redirect('/login')\n\n\n@app.route('/delete/<id>', methods = ['POST'])\ndef delpost(id):\n cur = get_db().cursor()\n cur.execute('DELETE from POST where ID = ?',(id,))\n get_db().commit()\n return redirect('/dashboard')\n \n \n \n\nif __name__ == '__main__':\n app.secret_key = 'jhntp92uvnp948yubq'\n app.run()\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Final_project.py","file_name":"Final_project.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"61906570","text":"#\n#\n#\n\n# This library contains direction metadata to better interface with the game.\nfrom hlt.positionals import Direction\n\nDEBUG_NONE = 0\nDEBUG_GAME = 1\nDEBUG_SHIP = 2\nDEBUG_NAV = 4\nDEBUG_NAV_METRICS = 8\nDEBUG_GAME_METRICS = 16\nDEBUG_COMMANDS = 32\nDEBUG_STATES = 64\nDEBUG_OUTPUT_GAME_METRICS = 128\n\nDEBUG_ALL = DEBUG_GAME | DEBUG_SHIP | DEBUG_NAV | DEBUG_NAV_METRICS | DEBUG_GAME_METRICS | DEBUG_COMMANDS | DEBUG_STATES | DEBUG_OUTPUT_GAME_METRICS\n\nDEBUG = DEBUG_NONE\n\n# convert a Direction obj back to a string\nDIRECTIONS = {\n \"n\": Direction.North,\n \"s\": Direction.South,\n \"e\": Direction.East,\n \"w\": Direction.West,\n \"o\": Direction.Still\n}\n\nMIN_LOITER = 4\nMAX_LOITER = 64\n\nMAX_SHIPS = 24\n\nSTATS_DIR = 'stats'","sub_path":"bots/v13/myutils/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"449392863","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport filebrowser.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('products', '0009_auto_20150927_1611'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='choicecategory',\n name='abbrev',\n field=models.CharField(help_text=b'For prefixes on choice display names - typically 3 chars, uppercase', max_length=10, blank=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='choicecategory',\n name='name',\n field=models.CharField(max_length=80),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='image',\n name='image',\n field=filebrowser.fields.FileBrowseField(unique=True, max_length=200),\n preserve_default=True,\n ),\n ]\n","sub_path":"apps/products/migrations/0010_auto_20150927_1918.py","file_name":"0010_auto_20150927_1918.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"240250067","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 17/8/3 17:34\r\n# @Author : Wei Jian\r\n# @Email : jesseweifj@gmail.com\r\n# @File : empirical.py\r\n\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport datetime as dt\r\nimport scipy.stats as scs\r\nimport matplotlib.dates as mdates\r\n\r\n\r\ndef normality_test(arr):\r\n print(\"Skew of dataset %14.3f\" % scs.skew(arr))\r\n print(\"Skew test p-value %14.3f\" % scs.skewtest(arr)[1])\r\n print(\"Kurt of dataset %14.3f\" % scs.kurtosis(arr))\r\n print(\"Kurt test p-value %14.3f\" % scs.kurtosistest(arr)[1])\r\n print(\"Norm test p-value %14.3f\" % scs.normaltest(arr)[1])\r\n\r\n\r\nclass TimeSeries:\r\n \"\"\"\r\n This class is designed
to evaluate the performance of given strategy.\r\n The main evaluation indicators are the same as the ones in module \"empyrical\" of package zipline.\r\n The main improvements of this class include\r\n 1. Don't need to care about the frequency of data.\r\n 2. Evaluation indicators can be annual indicators or semiannual ones if you call the class method \"set_annual_unit\"\r\n\r\n Note that you should provide one pd.Series object with datetime index to initialize this class.\r\n\r\n \"\"\"\r\n Annual_Unit = dt.timedelta(365)\r\n\r\n @classmethod\r\n def set_annual_unit(cls, timedelta):\r\n if not isinstance(timedelta, dt.timedelta):\r\n raise TypeError(\"The parameter should be datetime.timedelta! \")\r\n cls.Annual_Unit = timedelta\r\n\r\n def __init__(self, TimeSeries):\r\n \"\"\"\r\n Initialization. If warns were given, you may be unable to execute the member functions\r\n :param TimeSeries: pd.Series\r\n \"\"\"\r\n if not type(TimeSeries) is pd.Series:\r\n raise TypeError(\"The TimeSeries should be type pd.Series!\")\r\n self.TimeSeries = TimeSeries.dropna()\r\n if self.TimeSeries.index.duplicated().any():\r\n print(\"Warning: time index duplication!! \")\r\n self._len = len(self.TimeSeries)\r\n if self._len < 1:\r\n print(\"Warning: number of effective data is 0!! \")\r\n self._frequency = self._annual_factor()\r\n\r\n def _growth(self):\r\n return self.TimeSeries / self.TimeSeries[0]\r\n\r\n def _trunc_factor_series(self, factor_series):\r\n factor_series = factor_series.loc[self.TimeSeries.index]\r\n if factor_series.isnull().any():\r\n raise ValueError(\"Factor Series doesn't contain all required date points! \")\r\n return factor_series\r\n\r\n def _other_series(self, factor_series):\r\n temp = self._trunc_factor_series(factor_series)\r\n return type(self)(temp)\r\n\r\n def _factor_returns(self, factor_series):\r\n return self._other_series(factor_series).simple_returns()\r\n # factor_returns = self.pct_change(factor_series)\r\n # return factor_returns\r\n\r\n def _annual_factor(self):\r\n \"\"\"\r\n Returns annual factor which is equal to data frequency per year \r\n :return: float\r\n \"\"\"\r\n if self._len < 1:\r\n return 0\r\n num_years = (self.TimeSeries.index[-1] - self.TimeSeries.index[0]) / self.Annual_Unit\r\n if num_years == 0:\r\n return 1\r\n return self._len / num_years\r\n\r\n def simple_returns(self):\r\n \"\"\"\r\n Gives the simple returns for all time points\r\n :return: \r\n \"\"\"\r\n return self.pct_change(self.TimeSeries)\r\n\r\n def cum_returns(self):\r\n df_cum = self.simple_returns().add(1).cumprod()\r\n return df_cum - 1\r\n\r\n def max_drawback(self):\r\n \"\"\"\r\n Determines the maximum drawback of a strategy.\r\n :return: float; \r\n \"\"\"\r\n max_value = np.maximum.accumulate(self.TimeSeries)\r\n max_draw = np.min((self.TimeSeries - max_value) / max_value)\r\n return max_draw\r\n\r\n def annual_returns(self):\r\n \"\"\"\r\n :param TimeSeries: pd.Series\r\n :return: float; annual return rate\r\n \"\"\"\r\n if self._len < 1:\r\n return np.nan\r\n # len / frequency is equal to the number of years\r\n return (self.TimeSeries[-1] / self.TimeSeries[0]) ** (self._frequency / self._len) - 1\r\n\r\n def annual_volatility(self):\r\n \"\"\"\r\n :param TimeSeries: pd.Series\r\n :return: float; annual volatility rate\r\n \"\"\"\r\n if self._len < 1:\r\n return np.nan\r\n r = self.simple_returns()\r\n return r.std() * np.sqrt(self._frequency)\r\n\r\n def accumulate_return(self):\r\n \"\"\"\r\n Returns the total return rate of whole investment period\r\n :return: 
float;\r\n \"\"\"\r\n if self._len < 1:\r\n return np.nan\r\n return (self.TimeSeries.iloc[-1] / self.TimeSeries.iloc[0]) - 1\r\n\r\n def sharpe_ratio(self, risk_free=0.0):\r\n \"\"\"\r\n Determines the Sharpe ratio of a strategy.\r\n :param risk_free: float or pd.Series; annual rate free rate\r\n :return: float;\r\n \"\"\"\r\n if self._len < 2:\r\n return np.nan\r\n rf = (1 + risk_free) ** (1 / self._frequency) - 1\r\n ret = self._adjust_returns(self.simple_returns(), rf).mean()\r\n vol = self.simple_returns().std()\r\n if vol == 0:\r\n return np.nan\r\n return ret / vol * np.sqrt(self._frequency)\r\n\r\n def sortino_ratio(self, required_return=0.0, _downside_risk=None):\r\n \"\"\"\r\n Determines the Sortino ratio of a strategy.\r\n :param required_return: float\r\n Required annual return rate\r\n :param _downside_risk: float\r\n The downside risk of the given inputs\r\n :return: float\r\n Sortino ratio\r\n \"\"\"\r\n if self._len < 2:\r\n return np.nan\r\n adj_returns = self._adjust_returns(self.simple_returns(), required_return)\r\n mu = np.nanmean(adj_returns, axis=0)\r\n dsr = (_downside_risk if _downside_risk is not None\r\n else self.downside_risk(required_return))\r\n sortino = mu / dsr\r\n return sortino * self._frequency\r\n\r\n def downside_risk(self, required_return=0.0):\r\n \"\"\"\r\n Determines the downside deviation below a threshold\r\n :param required_return: float\r\n Required annual return rate\r\n :return: float\r\n Downside deviation\r\n \"\"\"\r\n if self._len < 1:\r\n return np.nan\r\n returns = self.simple_returns()\r\n downside_diff = self._adjust_returns(returns, required_return)\r\n mask = downside_diff > 0\r\n downside_diff[mask] = 0\r\n squares = np.square(downside_diff)\r\n mean_squares = np.nanmean(squares, axis=0)\r\n dside_risk = np.sqrt(mean_squares) * np.sqrt(self._frequency)\r\n\r\n return dside_risk\r\n\r\n def excess_sharpe(self, factor_series):\r\n \"\"\"\r\n Determines the Excess Sharpe of a strategy.\r\n The excess Sharpe is a simplified Information Ratio that uses\r\n tracking error rather than \"active risk\" as the denominator.\r\n :param factor_series: pd.Series\r\n Benchmark value series which should be at least including all date points of given time series \r\n :return: float\r\n The excess sharpe.\r\n \"\"\"\r\n if self._len < 2:\r\n return np.nan\r\n returns = self.simple_returns()\r\n factor_returns = self._factor_returns(factor_series)\r\n\r\n active_return = self._adjust_returns(returns, factor_returns)\r\n tracking_error = np.nanstd(active_return, ddof=1)\r\n\r\n if tracking_error == 0 or tracking_error is np.nan:\r\n return np.nan\r\n return np.nanmean(active_return) / tracking_error\r\n\r\n def aggregate_returns(self, convert_to):\r\n \"\"\"\r\n Aggregates returns by week, month, or year.\r\n :param convert_to: str\r\n Can be 'W', 'M', or 'Y' \r\n :return: pd.Series\r\n Daily returns of the strategy, noncumulative.\r\n \"\"\"\r\n\r\n def cumulate_returns(TimeSeries):\r\n return TimeSeries.add(1).prod() - 1\r\n\r\n if convert_to.upper() == \"W\":\r\n grouping = [lambda x: x.year, lambda x: x.isocalendar()[1]]\r\n elif convert_to.upper() == \"M\":\r\n grouping = [lambda x: x.year, lambda x: x.month]\r\n elif convert_to.upper() == \"Y\":\r\n grouping = [lambda x: x.year]\r\n else:\r\n raise ValueError(\r\n 'convert_to must be {}, {} or {}'.format(\"W: WEEKLY\", \"M: MONTHLY\", \"Y: YEARLY\")\r\n )\r\n\r\n return self.simple_returns().groupby(grouping).apply(cumulate_returns)\r\n\r\n def beta(self, factor_series, risk_free=0.0):\r\n 
\"\"\"\r\n Determines the beta between strategy returns and factor returns\r\n :param factor_series: pd.Series\r\n :param risk_free: float or pd.Series\r\n Annual risk free rate\r\n :return: float\r\n Beta\r\n \"\"\"\r\n if self._len < 2 or len(factor_series) < 2:\r\n return np.nan\r\n rf = (1 + risk_free) ** (1 / self._frequency) - 1\r\n adj_returns = self._adjust_returns(self.simple_returns(), rf)\r\n adj_factor_returns = self._adjust_returns(self._factor_returns(factor_series), rf)\r\n\r\n return self._beta(adj_returns, adj_factor_returns)\r\n\r\n def alpha(self, factor_series, risk_free=0.0, _beta=None):\r\n \"\"\"\r\n Determines the alpha between strategy returns and factor returns\r\n :param factor_series: pd.Series\r\n :param risk_free: float or pd.Series\r\n Annual risk free rate\r\n :return: float\r\n Annual alpha\r\n \"\"\"\r\n if self._len < 2:\r\n return np.nan\r\n if _beta is None:\r\n _beta = self.beta(factor_series, risk_free)\r\n\r\n rf = (1 + risk_free) ** (1 / self._frequency) - 1\r\n factor_returns = self._factor_returns(factor_series)\r\n adj_returns = self._adjust_returns(self.simple_returns(), rf)\r\n adj_factor_returns = self._adjust_returns(factor_returns, rf)\r\n\r\n alpha_series = adj_returns - (_beta * adj_factor_returns)\r\n\r\n return np.nanmean(alpha_series) * self._frequency\r\n\r\n def value_at_risk(self, cutoff=0.05):\r\n \"\"\"\r\n Value at risk (VaR).\r\n :param cutoff: float, optional\r\n Decimal representing the percentage cutoff for the bottom percentile of\r\n returns. Defaults to 0.05.\r\n :return: float\r\n The VaR value.\r\n \"\"\"\r\n return np.percentile(self.simple_returns(), 100 * cutoff)\r\n\r\n def conditional_value_at_risk(self, cutoff=0.05):\r\n \"\"\"\r\n Conditional value at risk (CVaR).\r\n :param cutoff: float, optional\r\n Decimal representing the percentage cutoff for the bottom percentile of\r\n returns. Defaults to 0.05.\r\n :return: float\r\n The CVaR value.\r\n \"\"\"\r\n cutoff_index = int((self._len - 1) * cutoff)\r\n return np.mean(np.partition(self.simple_returns(), cutoff_index)[:cutoff_index + 1])\r\n\r\n def draw_growth(self, factor_series=None):\r\n \"\"\"\r\n Draws the strategy value growth path. 
\r\n If factors_series was provided, function would draw them both.\r\n :param factor_series: pd.Series\r\n :return: None\r\n \"\"\"\r\n fz = 12\r\n fig = plt.figure()\r\n growth = self._growth()\r\n ax = growth.plot(kind=\"line\", linewidth=1, c='b')\r\n plt.title(r\"$Strategy\\ Growth$\", fontsize=fz)\r\n plt.ylabel(r\"$Growth$\", fontsize=fz)\r\n if not factor_series is None:\r\n factor_growth = self._other_series(factor_series)._growth()\r\n plt.plot(factor_growth, c='r')\r\n f_name = factor_growth.name\r\n if f_name is None:\r\n f_name = r\"Factor\"\r\n plt.legend([\"Strategy\", f_name], loc=\"best\")\r\n plt.xlabel(r\"$Date$\", fontsize=fz)\r\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y%m%d'))\r\n plt.gcf().autofmt_xdate()\r\n plt.show()\r\n return fig\r\n\r\n def draw_simple_returns(self):\r\n \"\"\"\r\n Draws the distribution of the simple returns\r\n :return: None\r\n \"\"\"\r\n fz = 12\r\n fig = plt.figure()\r\n ret = self.simple_returns()\r\n ret.plot(kind=\"kde\", c=\"b\", linewidth=2)\r\n n, bins, patches = plt.hist(ret, 20, normed=1, facecolor='g', alpha=0.75)\r\n plt.xlabel(r'$Return\\ Rate$', fontsize=fz)\r\n plt.ylabel(r'$Probability$', fontsize=fz)\r\n plt.title(r'$Histogram\\ of\\ Return\\ Rate$', fontsize=fz)\r\n plt.text(bins[0], max(n),\r\n r'$\\mu={:.4f}\\%, \\ \\sigma={:.4f}\\%$'.format(ret.mean() * 100, ret.std() * 100),\r\n fontsize=fz)\r\n plt.grid(True)\r\n plt.show()\r\n return fig\r\n\r\n def draw_aggregate_returns(self, convert_to):\r\n \"\"\"\r\n Draws aggregate returns\r\n :param convert_to: str\r\n Can be 'W', 'M', or 'Y' \r\n :return: None\r\n \"\"\"\r\n convert_to = convert_to.upper()\r\n Flag = {\"W\": \"WEEKLY\", \"M\": \"MONTHLY\", \"Y\": \"YEARLY\"}\r\n\r\n agg = self.aggregate_returns(convert_to)\r\n\r\n fig = plt.figure()\r\n ax = agg.plot(kind=\"bar\")\r\n labels = agg.index.values\r\n x_ticks = ax.get_xticks()\r\n N = len(x_ticks)\r\n step = max(int(N / 12), 1)\r\n\r\n # old_dates=pd.Series(labels).map(lambda x:dt.datetime(x[0],x[1],1))\r\n # agg.index=old_dates\r\n # plt.figure()\r\n # ax = plt.bar(left=agg.index,height=agg.values,width=20)\r\n # plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y%m%d'))\r\n # plt.gcf().autofmt_xdate()\r\n\r\n new_x_ticks = x_ticks[range(0, N, step)]\r\n new_x_labels = labels[range(0, N, step)]\r\n _ = ax.xaxis.set_ticks(new_x_ticks)\r\n _ = ax.set_xticklabels(new_x_labels)\r\n\r\n y_ticks = ax.get_yticks()\r\n new_y_ticks = map(lambda x: \"{:.2%}\".format(x), y_ticks)\r\n _ = ax.set_yticklabels(new_y_ticks)\r\n\r\n fig.autofmt_xdate()\r\n _ = ax.set_title(Flag[convert_to].title() + \" Aggregate Returns\")\r\n plt.show()\r\n return fig\r\n\r\n def draw_up_down_compare(self, factor_series, risk_free=0.0):\r\n \"\"\"\r\n Draws the returns scatter plot and beta line from up and down regression\r\n :param factor_series: pd.Series\r\n :param risk_free: float or pd.Series \r\n :return: None\r\n \"\"\"\r\n\r\n def plot_poly(x, y, **kwargs):\r\n p = np.polyfit(x, y, 1)\r\n plt.plot(x, np.polyval(p, x), '-', **kwargs)\r\n return p\r\n\r\n fig = plt.figure()\r\n\r\n fz = 12\r\n rf = (1 + risk_free) ** (1 / self._frequency) - 1\r\n adj_factor_returns = self._adjust_returns(self._factor_returns(factor_series), rf)\r\n adj_returns = self._adjust_returns(self.simple_returns(), rf)\r\n idx = np.argsort(adj_factor_returns)\r\n x = adj_factor_returns[idx]\r\n y = adj_returns[idx]\r\n\r\n plt.plot(x, y, \"o\", alpha=0.75, c=\"k\", markersize=4, label=\"Returns\")\r\n idx_down = x < 0\r\n x_down, y_down 
= x[idx_down], y[idx_down]\r\n idx_up = x > 0\r\n x_up, y_up = x[idx_up], y[idx_up]\r\n\r\n p_down = plot_poly(x_down, y_down, c=\"g\", linewidth=2, label=\"Down\")\r\n p_up = plot_poly(x_up, y_up, c=\"r\", linewidth=2, label=\"Up\")\r\n plt.legend(loc=\"best\")\r\n plt.xlabel(\"Factor returns\", fontsize=fz)\r\n plt.ylabel(\"Strategy returns\", fontsize=fz)\r\n plt.title(\"Returns against Factor Returns\", fontsize=fz)\r\n plt.text(x[-1], np.polyval(p_up, x[-1]), r'$\\beta_{up}=%.3f$' % (p_up[0]), fontsize=fz)\r\n plt.text(x[0], np.polyval(p_down, x[0]), r'$\\beta_{down}=%.3f$' % (p_down[0]), fontsize=fz)\r\n\r\n return fig\r\n\r\n def indicators(self):\r\n from collections import OrderedDict\r\n ind_dict = OrderedDict(\r\n [\r\n ('total return', self.accumulate_return()),\r\n ('annual return', self.annual_returns()),\r\n ('annual vol', self.annual_volatility()),\r\n ('sharpe ratio', self.sharpe_ratio()),\r\n ('max drawback', self.max_drawback()),\r\n ('VaR', self.value_at_risk()),\r\n ]\r\n )\r\n # ind_series=pd.Series(ind_dict)\r\n\r\n return ind_dict\r\n\r\n def describe(self, factor_series=None, risk_free=0.0):\r\n \"\"\"\r\n Prints the statistics of given strategy.\r\n :param factor_series: pd.Series \r\n :param risk_free: float or pd.Series\r\n :return: None \r\n \"\"\"\r\n\r\n ind_dict = self.indicators()\r\n print(\"======: Strategy profile: ======\")\r\n fmt = \"{:>14s} {:15.2%}\"\r\n for key, value in ind_dict.items():\r\n print (fmt.format(key, value))\r\n\r\n # print(\"======: Strategy profile: ======\")\r\n # print(\"%14s %15.2f%%\" % ('total return', 100 * self.accumulate_return()))\r\n # print(\"%14s %15.2f%%\" % ('annual return', 100 * self.annual_returns()))\r\n # print(\"%14s %15.2f%%\" % ('annual vol', 100 * self.annual_volatility()))\r\n # print(\"%14s %15.5f\" % ('sharpe ratio', self.sharpe_ratio()))\r\n # print(\"%14s %15.2f%%\" % ('max drawback', 100 * self.max_drawback()))\r\n # print(\"%14s %15.2f%%\" % ('VaR', 100 * self.value_at_risk()))\r\n\r\n if not factor_series is None:\r\n print()\r\n print(\"======: Compare with factor :======\")\r\n\r\n print(\"%14s %15.2f%%\" % ('factor return', 100 * self._other_series(factor_series).accumulate_return()))\r\n print(\"%14s %15.5f\" % ('beta', self.beta(factor_series, risk_free)))\r\n print(\"%14s %15.5f\" % ('alpha', self.alpha(factor_series, risk_free)))\r\n\r\n print(\"\")\r\n ret = self.simple_returns()\r\n print(\"======: Simple return rate :======\")\r\n self.print_statistics(ret)\r\n\r\n def up_capture(self, factor_series, **kwargs):\r\n \"\"\"\r\n Calculates the performance of strategy when the factor goes up\r\n :param factor_series: pd.Series \r\n :param kwargs: \r\n :return: float\r\n \"\"\"\r\n factor_returns = self._other_series(factor_series).simple_returns()\r\n returns = self.simple_returns()\r\n return self._trend(returns, factor_returns, self._capture, up=True, **kwargs)\r\n\r\n def down_capture(self, factor_series, **kwargs):\r\n \"\"\"\r\n Calculates the performance of strategy when the factor goes down\r\n :param factor_series: pd.Series \r\n :param kwargs: \r\n :return: float\r\n \"\"\"\r\n factor_returns = self._other_series(factor_series).simple_returns()\r\n returns = self.simple_returns()\r\n return self._trend(returns, factor_returns, self._capture, up=False, **kwargs)\r\n\r\n def up_alpha_beta(self, factor_series, risk_free=0.0):\r\n \"\"\"\r\n Determines the alpha and beta when factor goes up\r\n :param factor_series: pd.Series\r\n :param risk_free: float or pd.Series\r\n Annual risk free 
rate\r\n :return: tuple\r\n (Annual alpha, beta)\r\n \"\"\"\r\n rf = (1 + risk_free) ** (1 / self._frequency) - 1\r\n adj_factor_returns = self._adjust_returns(self._factor_returns(factor_series), rf)\r\n adj_returns = self._adjust_returns(self.simple_returns(), rf)\r\n beta = self._trend(adj_returns, adj_factor_returns, self._beta, up=True)\r\n alpha = np.mean(adj_returns - adj_factor_returns * beta) * self._frequency\r\n return (alpha, beta)\r\n\r\n def down_alpha_beta(self, factor_series, risk_free=0.0):\r\n \"\"\"\r\n Determines the alpha and beta when factor goes down\r\n :param factor_series: pd.Series\r\n :param risk_free: float or pd.Series\r\n Annual risk free rate\r\n :return: tuple\r\n (Annual alpha, beta)\r\n \"\"\"\r\n rf = (1 + risk_free) ** (1 / self._frequency) - 1\r\n adj_factor_returns = self._adjust_returns(self._factor_returns(factor_series), rf)\r\n adj_returns = self._adjust_returns(self.simple_returns(), rf)\r\n beta = self._trend(adj_returns, adj_factor_returns, self._beta, up=False)\r\n alpha = np.mean(adj_returns - adj_factor_returns * beta) * self._frequency\r\n return (alpha, beta)\r\n\r\n @staticmethod\r\n def _trend(returns, factor_returns, func, up, **kwargs):\r\n \"\"\"\r\n Calls function func when the factor goes up or down \r\n :param returns: pd.Series\r\n :param factor_returns: pd.Series\r\n :param func: callable object\r\n :param up: boolean\r\n :param kwargs: \r\n :return: depends on the callable object\r\n \"\"\"\r\n if up:\r\n idx = factor_returns > 0\r\n else:\r\n idx = factor_returns < 0\r\n returns = returns[idx]\r\n factor_returns = factor_returns[idx]\r\n return func(returns, factor_returns, **kwargs)\r\n\r\n @staticmethod\r\n def pct_change(TimeSeries):\r\n \"\"\"\r\n Gives the return rate. Default simple return rate. 
(Log-form to be added)\r\n :param TimeSeries: pd.Series\r\n :return: pd.Series\r\n \"\"\"\r\n return (TimeSeries.diff() / TimeSeries.shift()).dropna()\r\n\r\n @staticmethod\r\n def _adjust_returns(returns, adjust_factors=0.0):\r\n \"\"\"\r\n Adjust the returns\r\n :param returns: pd.Series\r\n :param adjust_factors: float or pd.Series\r\n :return: pd.Series\r\n \"\"\"\r\n if type(adjust_factors) in (int, float):\r\n return returns - adjust_factors\r\n # if two series didn't match, error would be raised\r\n return returns - adjust_factors[returns.index]\r\n\r\n @staticmethod\r\n def _capture(returns, factor_returns):\r\n \"\"\"\r\n reference: http://www.investorwords.com/11536/capture_ratio.html\r\n However, I changed the definition since original version is not so obvious\r\n Recommend to use up_alpha_beta and down_alpha_beta to measure it.\r\n :param returns: \r\n :param factor_returns: \r\n :return: \r\n \"\"\"\r\n return returns.mean() / factor_returns.mean()\r\n\r\n @staticmethod\r\n def _beta(adj_returns, adj_factor_returns):\r\n \"\"\"\r\n Calculate the beta\r\n :param adj_returns: pd.Series \r\n :param adj_factor_returns: pd.Series\r\n :return: float\r\n \"\"\"\r\n joint = np.vstack([adj_returns, adj_factor_returns])\r\n joint = joint[:, ~np.isnan(joint).any(axis=0)]\r\n\r\n if joint.shape[1] < 2:\r\n return np.nan\r\n cov = np.cov(joint, ddof=0)\r\n if np.absolute(cov[1, 1]) < 1.0e-30:\r\n return np.nan\r\n return cov[0, 1] / cov[1, 1]\r\n\r\n @staticmethod\r\n def print_statistics(array):\r\n sta = scs.describe(array)\r\n # print(\"{:14s} {:15d}\".format(\"size\",sta[0]))\r\n # print(\"{:14s} {:15.3%}\".format(\"min\",sta[1][0]))\r\n # print(\"{:14s} {:15.3%}\".format(\"max\",sta[1][1]))\r\n # print(\"{:14s} {:15.3%}\".format(\"mean\",sta[2]))\r\n # print(\"{:14s} {:15.3%}\".format(\"std\",np.sqrt(sta[3])))\r\n # print(\"{:14s} {:15.3f}\".format(\"skew\",sta[4]))\r\n # print(\"{:14s} {:15.3f}\".format(\"kurtosis\",sta[5]))\r\n\r\n print(\"%14s %15d\" % ('size', sta[0]))\r\n print(\"%14s %15.5f%%\" % ('min', 100 * sta[1][0]))\r\n print(\"%14s %15.5f%%\" % ('max', 100 * sta[1][1]))\r\n print(\"%14s %15.5f%%\" % ('mean', 100 * sta[2]))\r\n print(\"%14s %15.5f%%\" % ('std', 100 * np.sqrt(sta[3])))\r\n print(\"%14s %15.5f\" % ('skew', sta[4]))\r\n print(\"%14s %15.5f\" % ('kurtosis', sta[5]))\r\n\r\n\r\ndef test_using():\r\n import pandas_datareader.data as web\r\n import empyrical\r\n import datetime\r\n start = datetime.datetime(2010, 1, 1)\r\n end = datetime.datetime(2013, 1, 27)\r\n\r\n data = web.DataReader(\"F\", 'google', start, end)\r\n\r\n factor_series = web.DataReader(\"DJIA\", 'fred', start, end).iloc[:, 0] # market index\r\n risk_free = 0.0 # annualized risk-free rate\r\n\r\n series = data.iloc[:, 0] # NAV data of our strategy (renamed to avoid shadowing the TimeSeries class)\r\n series.dropna(inplace=True)\r\n\r\n t = TimeSeries(series)\r\n t.simple_returns()\r\n t.accumulate_return()\r\n t.cum_returns()\r\n t.aggregate_returns(\"W\")\r\n t.max_drawback()\r\n t.annual_returns()\r\n t.annual_volatility()\r\n t.sharpe_ratio()\r\n t.alpha(factor_series, risk_free)\r\n t.beta(factor_series, risk_free)\r\n\r\n t.up_alpha_beta(factor_series, risk_free)\r\n t.down_alpha_beta(factor_series, risk_free)\r\n\r\n # draw pictures\r\n t.draw_aggregate_returns(convert_to=\"M\")\r\n t.draw_growth(factor_series)\r\n t.draw_simple_returns()\r\n t.draw_up_down_compare(factor_series, risk_free)\r\n\r\n # print statistics\r\n t.describe(factor_series=factor_series, risk_free=risk_free)\r\n\r\n\r\ndef my_unit_test():\r\n import pandas_datareader.data as web\r\n import empyrical\r\n import datetime\r\n start = datetime.datetime(2010, 1, 1)\r\n end = datetime.datetime(2013, 1, 27)\r\n\r\n data = web.DataReader(\"F\", 'google', start, end)\r\n\r\n factor_series = web.DataReader(\"DJIA\", 'fred', start, end).iloc[:, 0]\r\n series = data.iloc[:, 0]\r\n series.dropna(inplace=True)\r\n\r\n r = (series.diff() / series.shift()).dropna()\r\n t = TimeSeries(series)\r\n # self = t\r\n\r\n factor_returns = t._factor_returns(factor_series)\r\n general_decimal = 8\r\n annual_decimal = 3\r\n risk_free = 0\r\n # simple return, total return\r\n np.testing.assert_almost_equal(np.sum(t.simple_returns() - r), 0.0, decimal=general_decimal)\r\n np.testing.assert_almost_equal(empyrical.cum_returns_final(r), t.accumulate_return(), decimal=general_decimal)\r\n\r\n # accumulate return, aggregate return\r\n np.testing.assert_almost_equal((t.cum_returns()).values, empyrical.cum_returns(r).values, decimal=general_decimal)\r\n np.testing.assert_almost_equal(t.aggregate_returns(\"W\").values, empyrical.aggregate_returns(r, \"weekly\").values,\r\n decimal=general_decimal)\r\n\r\n # max drawback, downside_risk\r\n np.testing.assert_almost_equal(t.max_drawback(), empyrical.max_drawdown(r), decimal=general_decimal)\r\n np.testing.assert_almost_equal(t.downside_risk(), empyrical.downside_risk(r), decimal=annual_decimal)\r\n\r\n # annual return, annual volatility\r\n np.testing.assert_almost_equal(t.annual_returns(), empyrical.annual_return(r), decimal=annual_decimal)\r\n np.testing.assert_almost_equal(t.annual_volatility(), empyrical.annual_volatility(r), decimal=annual_decimal)\r\n\r\n # sharpe ratio, sortino ratio\r\n np.testing.assert_almost_equal(t.sharpe_ratio(), empyrical.sharpe_ratio(r), decimal=annual_decimal)\r\n np.testing.assert_almost_equal(t.sortino_ratio(), empyrical.sortino_ratio(r), decimal=annual_decimal)\r\n\r\n # alpha, beta\r\n np.testing.assert_almost_equal(t.beta(factor_series), empyrical.beta(r, factor_returns), decimal=annual_decimal)\r\n np.testing.assert_almost_equal(t.alpha(factor_series), empyrical.alpha(r, factor_returns), decimal=annual_decimal)\r\n\r\n # draw pictures\r\n t.draw_aggregate_returns(convert_to=\"M\")\r\n t.draw_growth(factor_series)\r\n t.draw_simple_returns()\r\n t.draw_up_down_compare(factor_series, risk_free)\r\n\r\n # print statistics\r\n t.describe(factor_series=factor_series, risk_free=risk_free)\r\n\r\n # up capture\r\n empyrical.up_capture(r, factor_returns)\r\n empyrical.down_capture(r, factor_returns)\r\n empyrical.capture(r, factor_returns)\r\n t.up_capture(factor_series)\r\n t.down_capture(factor_series)\r\n t._capture(t.simple_returns(), t._other_series(factor_series).simple_returns())\r\n\r\n t.up_alpha_beta(factor_series, risk_free)\r\n t.down_alpha_beta(factor_series, risk_free)\r\n","sub_path":"Backtest_Framework/utils/empirical.py","file_name":"empirical.py","file_ext":"py","file_size_in_byte":27714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"210136389","text":"import matplotlib.pyplot as plt\n\n# dfcars is assumed to be loaded by the surrounding lab notebook\ny_mpg = dfcars.mpg\nx_wt = dfcars.wt\nx_hp = dfcars.hp\n\nfig_wt, ax_wt = plt.subplots(1,1, figsize=(10,6))\nax_wt.scatter(x_wt, y_mpg)\nax_wt.set_xlabel(r'Car Weight')\nax_wt.set_ylabel(r'Car MPG')\n\nfig_hp, ax_hp = plt.subplots(1,1, figsize=(10,6))\nax_hp.scatter(x_hp, y_mpg)\nax_hp.set_xlabel(r'Car HP')\nax_hp.set_ylabel(r'Car 
MPG')\n","sub_path":"content/labs/lab03/notebook/solutions/cars_simple_EDA.py","file_name":"cars_simple_EDA.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"283845448","text":"\"\"\"\nThis File contains the custom errors\n\"\"\"\nfrom datahub_lib.framework.job_status_writer import JobStatusWriter\n\n\nclass JobError(RuntimeError):\n \"\"\"\n This RuntimeError based class is used for throwing exceptions\n \"\"\"\n INTERNAL_ERROR_SHORT = \"500\"\n INTERNAL_ERROR = \"Job internal error\"\n\n\n def __init__(self, value: object, code: str, is_transient):\n self.value = value or self.INTERNAL_ERROR\n self.code = str(code) or self.INTERNAL_ERROR_SHORT\n self.is_transient = is_transient\n\n # __str__ is to print() the value\n\n\n def __str__(self):\n return repr(self.value)\n\n\n @staticmethod\n def write_to_status_file(err: Exception, blob_url: str):\n \"\"\"\n Writes given error info from exception to status file at blob_url.\n If given exception is of JobError type then richer info is written\n Otherwise only basic info\n Returns the status file contents as dict\n \"\"\"\n writer = JobStatusWriter(blob_url)\n if isinstance(err, JobError):\n writer.set_error(err.code, err.value, err.is_transient)\n else:\n # Assume all unknown errors to be transient; err on the side of hope\n writer.set_error(JobError.INTERNAL_ERROR_SHORT,\n JobError.INTERNAL_ERROR,\n is_transient=True)\n return writer.flush()\n","sub_path":"datahub_lib/framework/job_errory.py","file_name":"job_errory.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"513644626","text":"# Node class\nclass Node:\n \n # Constructor to initialize the node object\n def __init__(self, data):\n self.data = data\n self.next = None\n \nclass LinkedList:\n def __init__(self):\n self.head = None\n self.size = 0\n \n def push(self,value):\n node = Node(value)\n if not self.head:\n self.head = node\n else:\n node.next = self.head\n self.head = node\n self.size += 1\n \n def search(self, value):\n current = self.head\n while current != None:\n if current.data == value:\n return current\n else:\n current = current.next\n raise Exception('Value does not exist')\n \n def print_me(self):\n current = self.head\n while current != None:\n print(current.data)\n current = current.next\n \n def reverse(self):\n current = self.head\n reversed_head = None\n forward_head = current\n \n while current != None:\n forward_head = current.next\n current.next = reversed_head\n reversed_head = current\n current = forward_head\n \n self.head = reversed_head","sub_path":"preparation/foundation_blocks_1/lists_stack_queue/reverse-linked-list.py","file_name":"reverse-linked-list.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"415330244","text":"import codecs\nimport xml.etree.ElementTree as etree\n\ntree = etree.parse('speakers.xml')\nroot = tree.getroot()\nprint(root)\n\noutputpath = 'Z:\\\\_speakers\\\\drafts\\\\'\nfilename = None\ntitle = None\ntwitter = None\nwww = None\nemail = None\npagecontent = None\n\n\nfor speaker in root:\n # We're looking at the individual speakers now.\n for child in speaker:\n # This is the properties of each speaker:\n # Email, Phone, Name, etc.\n if child.tag == 'filename':\n filename = child.text\n if child.tag == 'title':\n title = child.tag + ': \\\"' 
+ child.text + '\\\"'\n if child.tag == 'twitter':\n if child.text is not None:\n twitter = child.tag + ': \\\"https://twitter.com/' + child.text + '\\\"'\n else:\n twitter = None\n if child.tag == 'www':\n if child.text is not None:\n www = child.tag + ': \\\"' + child.text + '\\\"'\n else:\n www = None\n if child.tag == 'email':\n if child.text is not None:\n email = child.tag + ': \\\"' + child.text + '\\\"'\n else:\n email = None\n if child.tag == 'pagecontent':\n if child.text is not None:\n content = child.text.encode(\"utf8\")\n pagecontent = content\n else:\n pagecontent = None\n # Once all of the tags are set, we can write the speaker to the md file.\n print (filename)\n targetpath = outputpath + filename\n target = open(targetpath, 'w')\n target.truncate()\n target.write('---')\n target.write('\\n')\n target.write(title)\n target.write('\\n')\n target.write(\"social: \")\n target.write('\\n')\n if twitter is not None:\n target.write(\" {}\".format(twitter))\n target.write('\\n')\n if www is not None:\n target.write(\" {}\".format(www))\n target.write('\\n')\n if email is not None:\n target.write(\" {}\".format(email))\n target.write('\\n')\n target.write('---')\n if pagecontent is not None:\n target.write('\\n')\n target.write(pagecontent)\n target.write('\\n')\n target.write('')\n target.write('\\n')\n target.write('')\n else:\n target.write('\\n')\n target.write('No bio provided.')\n target.write('\\n')\n target.close()\n\n\n# ---\n# title: \"Greg Major\"\n# image-sm: \"/images/speakers/greg-major.jpg\"\n# social:\n# github: \"https://github.com/gregmajor\"\n# facebook: \"https://www.facebook.com/greg.major\"\n# linkedin: \"https://www.linkedin.com/in/gregmajor\"\n# twitter: \"https://twitter.com/gregmajor\"\n# www: \"http://www.gregmajor.com\"\n# ---\n#\n# I'm a polyglot software architect, husband, father, woodworker,\n# homebrewer, amateur radio operator (KD0FEP), guitar player, runner,\n# and cyclist. 
I've lived in Stillwater, OK, Corpus Christi, TX, Houston,\n# TX, Caribou, ME, Minneapolis, MN, and now I call Houston home sweet home.\n# \n# \n","sub_path":"OldSiteConversionScript/ConvertOldSpeakers.py","file_name":"ConvertOldSpeakers.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"333991050","text":"from __future__ import print_function\nimport argparse\nimport os\nfrom train import get_smallNORB_test_data, get_smallNORB_train_data\nimport torchvision\n\nparser = argparse.ArgumentParser(description='Saves the processed smallNORB images')\nparser.add_argument('--data-folder', type=str, default='./data', metavar='DF',\n help='Path to where the smallNORB dataset is stored')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n\nclasses = ['animal', 'human', 'plane', 'truck', 'car']\nelevations = [30, 35, 40, 45, 50, 55, 60, 65, 70]\n\ndef main(_args):\n path = os.path.join(_args.data_folder, 'smallNORB')\n args = [path, 1]\n kwargs = {'cuda':not _args.no_cuda, 'shuffle':False}\n save_path= os.path.join(path, 'images')\n \n if not os.path.exists(os.path.join(save_path, 'train')):\n os.makedirs(os.path.join(save_path, 'train'))\n\n for image, label, meta in get_smallNORB_train_data(*args, **kwargs):\n meta = meta.squeeze()\n name = '%s_%d_%d_%d_%d.jpg' % (classes[label], meta[0], elevations[meta[1]], meta[2]*10, meta[3])\n torchvision.utils.save_image(image, os.path.join(save_path, 'train', name))\n \n if not os.path.exists(os.path.join(save_path, 'test')):\n os.makedirs(os.path.join(save_path, 'test'))\n \n for image, label, meta in get_smallNORB_test_data(*args, **kwargs):\n meta = meta.squeeze()\n name = '%s_%d_%d_%d_%d.jpg' % (classes[label], meta[0], elevations[meta[1]], meta[2]*10, meta[3])\n torchvision.utils.save_image(image, os.path.join(save_path, 'test', name))\n\nif __name__ == '__main__':\n main(parser.parse_args())","sub_path":"save_smallnorb_images.py","file_name":"save_smallnorb_images.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"422190056","text":"\"\"\"\n[Homework]\n1. Map two lists into a dictionary for day of week\nlist1 = ['MON','TUE','WED','THU','FRI','SAT','SUN']\nlist2 = ['MONDAY','TUESDAY','WEDNESDAY','THURSDAY','FRIDAY','SATURDAY','SUNDAY']\n2. Create a dictionary properly and sorting by age in ascending order, then sorting by score in descending order.\nBoth results are required to print out.\nname age score\nAmy 22 92\nLily 24 100\nSandy 21 87\nPeter 23 96\nJack 22 94\n3. 
There are 3 candidates in the election for President in ABC country.\nThey got voted in every state/province in ABC country.\nYou are asked to calculate who got the highest vote in the election.\nName Voted in State1 Voted in State2 Voted in State3\nJason 300 360 270\nBill 280 340 291\nWilliam 350 310 324\n\"\"\"\n\n# Question 1.\nprint(\"Question 1.\")\nlist1 = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']\nlist2 = ['MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY', 'FRIDAY', 'SATURDAY', 'SUNDAY']\nmydict = dict(zip(list1, list2))\nprint(mydict)\n\n# Question 2.\nprint(\"Question 2.\")\n\"\"\"\nname age score\nAmy 22 92\nLily 24 100\nSandy 21 87\nPeter 23 96\nJack 22 94\n\"\"\"\n\n# redundant\n\nage_student = {\n \"Amy\": 22,\n \"Lily\": 24,\n \"Sandy\": 21,\n \"Peter\": 23,\n \"Jack\": 22\n}\n\nscore_student = {\n \"Amy\": 92,\n \"Lily\": 100,\n \"Sandy\": 87,\n \"Peter\": 96,\n \"Jack\": 94\n}\n\nimport operator\n\nsorted_age_student = dict(sorted(age_student.items(), key=operator.itemgetter(1)))\nprint(\"Student's age in ascending order\", sorted_age_student)\n\nsorted_score_student = dict(sorted(score_student.items(), key=operator.itemgetter(1), reverse=True))\nprint(\"Student's score in descending order\", sorted_score_student)\n\n# Question 3.\nprint(\"Question 3. \")\n\n\"\"\"\nName Voted in State1 Voted in State2 Voted in State3\nJason 300 360 270\nBill 280 340 291\nWilliam 350 310 324\n\"\"\"\n\n\n\n\n","sub_path":"py200912b_python2m6/day05_201010/homework/stem_1402b_python_homework_4_Kevin.py","file_name":"stem_1402b_python_homework_4_Kevin.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"448366338","text":"import random\nfrom .exceptions import *\n\n\nclass GuessAttempt(object):\n def __init__(self, character, miss=None, hit=None):\n# self.game = game\n \n if miss and hit:\n raise InvalidGuessAttempt('Invalid Attempt')\n \n self.character = character\n self.miss = miss\n self.hit = hit\n \n def is_miss(self):\n return bool(self.miss)\n \n def is_hit(self):\n return bool(self.hit)\n \n\nclass GuessWord(object):\n def __init__(self, word):\n if not word:\n raise InvalidWordException('Please enter a word')\n \n self.answer = word.lower()\n self.masked = self._mask_word(self.answer)\n \n def perform_attempt(self, character):\n self.character = character.lower()\n if len(self.character) > 1:\n raise InvalidGuessedLetterException('That letter is invalid.')\n \n unmasked = self.masked\n if self.character in self.answer:\n for idx, char in enumerate(self.answer):\n if self.character == char:\n unmasked = self.masked[:idx] + char + self.masked[idx+1:]\n self.masked = unmasked.lower()\n return GuessAttempt(character, hit=True)\n else: \n return GuessAttempt(character, miss=True)\n\n def _mask_word(self, word):\n if not word:\n raise InvalidWordException('That word is invalid.')\n return '*' * len(self.answer)\n\n\nclass HangmanGame(object):\n WORD_LIST = ['rmotr', 'python', 'awesome'] \n \n def __init__(self, word_list=None, number_of_guesses=5):\n self.number_of_guesses = number_of_guesses\n \n if not word_list:\n word_list = self.WORD_LIST\n \n self.word = GuessWord(self.select_random_word(word_list))\n self.previous_guesses = []\n self.remaining_misses = number_of_guesses\n \n def is_won(self):\n return self.word.masked == self.word.answer\n \n def is_lost(self):\n return self.word.masked != self.word.answer and self.remaining_misses == 0\n \n def is_finished(self):\n return 
self.is_lost() or self.is_won()\n \n def guess(self, character):\n self.character = character.lower()\n \n if self.character not in self.previous_guesses:\n self.previous_guesses.append(self.character)\n else:\n raise InvalidGuessedLetterException()\n \n if self.is_finished():\n raise GameFinishedException('Game over.')\n \n attempt = self.word.perform_attempt(self.character)\n \n if attempt.is_miss():\n self.remaining_misses -= 1\n \n if self.is_won():\n raise GameWonException('You won!')\n \n if self.is_lost():\n raise GameLostException('You lose!')\n \n return attempt\n \n @classmethod\n def select_random_word(cls, word_list=None):\n if not word_list:\n raise InvalidListOfWordsException('Must enter a word')\n\n return random.choice(word_list)\n \n","sub_path":"hangman/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"190063424","text":"# -*- coding: utf-8 -*-\n\nimport math\n\nfrom config import Config_dev\n\n\nclass Evcard(object):\n def __init__(self, distance=Config_dev.evcard_distance, db=Config_dev.db_evcard):\n self.distance_th = distance # 查找evcard的距离\n self.db = db\n self.shanghai_all = None\n if self.shanghai_all is None:\n self.shanghai_all = self.read_data_from_mongo()\n\n def read_data_from_mongo(self):\n try:\n data = self.db.shanghai.find()\n return [i for i in data]\n except Exception:\n return None\n\n @classmethod\n def init(cls, app):\n app.evcard_algo = cls()\n\n def plan_trip(self, o_lat, o_lng):\n dis,key = self.find_nearest_evcard_station(o_lat, o_lng, self.distance_th)\n return self.evcard_out_put(dis,key)\n\n def find_nearest_evcard_station(self, o_lat, o_lng, dis):\n o_dmin = [dis, None]\n ominlat, omaxlat, ominlng, omaxlng = self.get_area(o_lat, o_lng, dis / 1000) # 只需要起点的\n # dminlat, dmaxlat, dminlng, dmaxlng = self.get_area(d_lat, d_lng, dis / 1000)\n if self.shanghai_all is None:\n self.shanghai_all = self.read_data_from_mongo()\n if self.shanghai_all is None:\n return o_dmin\n for key in self.shanghai_all:\n t_lat, t_lng = key['lat_bd'], key['long_bd']\n if t_lat > ominlat and t_lat < omaxlat and t_lng > ominlng and t_lng < omaxlng:\n o_d = self.get_distance(o_lat, o_lng, t_lat, t_lng)\n o_dmin = ([o_d, key] if o_d < o_dmin[0] else o_dmin)\n\n return o_dmin\n\n def evcard_out_put(self, dis, key):\n \"\"\"\n :param dis: 距离\n :param key: 站点信息\n :return:\n \"\"\"\n ret = {}\n ret['dis'] = dis\n if key is None:\n ret['shop'] = {}\n return ret\n ret['shop'] = {}\n for k in Config_dev.evcard_filds:\n try:\n ret['shop'][k] = key[k]\n except KeyError:\n ret['shop'][k] = None\n return ret\n\n def get_area(self, latitude, longitude, dis):\n r = 6371.137\n dlng = 2 * math.asin(math.sin(dis / (2 * r)) / math.cos(latitude))\n dlng = math.degrees(dlng)\n dlat = dis / r\n dlat = math.degrees(dlat)\n minlat = latitude - dlat\n maxlat = latitude + dlat\n minlng = longitude - dlng\n maxlng = longitude + dlng\n return minlat, maxlat, minlng, maxlng\n\n # 根据经纬度坐标获取两点间距离(米)\n def get_distance(self, lat1, lng1, lat2, lng2):\n lng1, lat1, lng2, lat2 = map(math.radians, [lng1, lat1, lng2, lat2])\n dlng = lng2 - lng1\n dlat = lat2 - lat1\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlng / 2) ** 2\n c = 2 * math.asin(math.sqrt(a))\n r = 6371\n return c * r * 1000\n\nif __name__ == '__main__':\n lat,long = 31.2851314007,121.1705021627\n ev = Evcard()\n print(ev.plan_trip(lat,long))\n # dis,key = ev.plan_trip(lat,long)\n # 
print(int(dis),key)","sub_path":"util_v_0_6/evcard.py","file_name":"evcard.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"429652444","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.backends.cudnn as cudnn\r\nimport torch.optim as optim\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom visdom import Visdom\r\n\r\nimport time\r\nimport sys\r\nimport os\r\nos.environ['FOR_DISABLE_CONSOLE_CTRL_HANDLER'] = '1'\r\n\r\nfrom model import Word2Vec\r\nfrom dataloader import SkipGramDataset\r\nfrom util import TSNE_Wordsim, make_plot, log, compute_similarity, calc_spearman\r\nfrom loss import sigmoid_dot_loss, neg_loss\r\n\r\nfrom wordsim353 import WordSim353\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nfrom scipy.stats import spearmanr\r\n\r\nuse_cuda = torch.cuda.is_available()\r\nuse_cuda = True\r\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\r\ncudnn.benchmark = True \r\n\r\nuse_visdom = False\r\n\r\nexpt_no = 6\r\nif not os.path.exists(f\"models\\\\{expt_no}\"):\r\n os.system(f'mkdir models\\\\{expt_no}')\r\nif not os.path.exists(f\"tsne_plots\\\\{expt_no}\"):\r\n os.system(f'mkdir tsne_plots\\\\{expt_no}')\r\n\r\nif len(sys.argv) < 2:\r\n mode = 'Test'\r\nelse:\r\n mode = sys.argv[1]\r\n\r\nprint(f'Mode is {mode}')\r\n\r\nvocab_size = 298822\r\nembed_size = 300\r\ncontext_size = 5\r\n\r\nMAX_EPOCHS = 100\r\nBATCH_SIZE = 16\r\n\r\nparams = [vocab_size, embed_size, context_size, MAX_EPOCHS, BATCH_SIZE]\r\n\r\nlog(expt_no, params, True)\r\n\r\nload_prev = False\r\n\r\nif mode == 'Train':\r\n print(f'Beginning training with vocab_size: {vocab_size}, embed_size: {embed_size}')\r\n\r\n if use_visdom:\r\n viz = Visdom()\r\n\r\n if load_prev:\r\n #net = torch.load('./models/4/model_0_6000.pth')\r\n net = torch.load('./models/5/model_1_0.pth')\r\n else:\r\n net = Word2Vec(vocab_size, embed_size)\r\n if use_cuda:\r\n net = net.to(device)\r\n net.train()\r\n #print(net.embedding(torch.tensor([1]).cuda()).size())\r\n dataset = SkipGramDataset('./../dicts/simple_wiki', context_size)\r\n num_titles = dataset.total_articles()\r\n\r\n tsne_plotter = TSNE_Wordsim('./..', use_visdom)\r\n optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)\r\n\r\n prev_title = 0\r\n try:\r\n epoch = 0\r\n iters = 0\r\n running_loss = 0.0\r\n loss_array = []\r\n spearmans_array = []\r\n while(epoch < MAX_EPOCHS):\r\n\r\n for g in optimizer.param_groups:\r\n g['lr'] = 0.001 * np.exp(-(iters/70000)) * (1-epoch) + 2.5e-4\r\n # if epoch == 1:\r\n # g['lr'] = 2.5e-5\r\n #inputs = dataset.get_next(BATCH_SIZE)\r\n pinputs, ninputs = dataset.get_next(BATCH_SIZE)\r\n if pinputs.size()[0] == 0:\r\n print('found nothing')\r\n continue\r\n # print(pinputs.size())\r\n # print(ninputs.size())\r\n\r\n optimizer.zero_grad()\r\n if use_cuda:\r\n poutputs = net(pinputs.cuda()[:,0], pinputs.cuda()[:,1], 1)\r\n noutputs = net(ninputs.cuda()[:,0], ninputs.cuda()[:,1], -1)\r\n else:\r\n poutputs = net(pinputs[:,0], pinputs[:,1], 1)\r\n noutputs = net(ninputs[:,0], ninputs[:,1], -1)\r\n\r\n # print(poutputs)\r\n # print(noutputs)\r\n #loss = sigmoid_dot_loss(outputs, use_cuda)\r\n loss = neg_loss(poutputs, noutputs)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n running_loss += loss.item()\r\n\r\n print(f'{epoch}_{iters} : {loss/(poutputs.size()[0]+noutputs.size()[0])}')\r\n\r\n title_idx, line_idx = dataset.current_position()\r\n if prev_title > title_idx:\r\n epoch += 1\r\n iters = 
0\r\n prev_title = title_idx\r\n\r\n if iters % 100 == 0:\r\n running_loss/= 100\r\n loss_array.append(running_loss)\r\n make_plot(viz, loss_array, 'loss', epoch, iters, use_visdom)\r\n running_loss = 0.0\r\n\r\n if iters % 10000 == 0:\r\n torch.save(net, f'./models/{expt_no}/model_{epoch}_{iters}.pth')\r\n\r\n if iters % 10000 == 10:\r\n print('Generating tsne....')\r\n net.eval()\r\n tsne_plotter.apply_tsne(net.u_embedding, viz, epoch, iters, expt_no)\r\n coeff = calc_spearman(net.u_embedding)\r\n spearmans_array.append(coeff)\r\n make_plot(viz, spearmans_array, 'spearman', epoch, iters, use_visdom)\r\n net.train()\r\n\r\n iters += 1\r\n\r\n except KeyboardInterrupt:\r\n torch.save(net, f'./models/{expt_no}/exp_quitsave.pth')\r\n\r\n # except:\r\n # print(f'failed at {title_idx}.{line_idx}')\r\n\r\n log(expt_no, f'{title_idx} : {line_idx}', False)\r\n torch.save(net, f'./models/{expt_no}/exp_exitsave.pth')\r\n\r\nelif mode == 'Test':\r\n\r\n #for j in range(32):\r\n j=31\r\n net = torch.load(f'./models/5/model_1_70000.pth')\r\n #net = torch.load('./models/5/exp_quitsave.pth')\r\n net.eval()\r\n #net= net.cpu()\r\n\r\n w = WordSim353('./../wordsim353/combined.csv')\r\n scores = []\r\n result_file = f'results/results_{time.time()}.csv'\r\n sorted_data = w.sort()\r\n words = []\r\n for i in range(w.len()):\r\n w1 = sorted_data.iloc[i, 0].lower()\r\n w2 = sorted_data.iloc[i, 1].lower()\r\n words.append([w1, w2])\r\n scores = compute_similarity(net.u_embedding, words, './../')\r\n scores = scores.detach().cpu().numpy()\r\n #scores.append(score)\r\n for i in range(w.len()): \r\n with open(result_file, 'a') as f:\r\n w1 = sorted_data.iloc[i, 0].lower()\r\n w2 = sorted_data.iloc[i, 1].lower()\r\n score = scores[i]\r\n f.write(f'{w1}, {w2}, {score*10}\\n')\r\n\r\n scores = np.array(scores)\r\n\r\n #spearmans = spearmanr(scores, sorted_data.iloc[:,2])\r\n\r\n #print(scores.shape)\r\n # idxs = np.argsort(scores)[::-1] + 1\r\n\r\n #np.save('word2vec_idx', idx-1)\r\n #print(idxs)\r\n true_idxs = np.arange(1, 1+idxs.shape[0], 1)\r\n\r\n # diffs = abs(true_idxs-idxs)\r\n # diff_idxs = np.argsort(diffs)[::-1]\r\n\r\n # for a in diff_idxs[:10]:\r\n # print(sorted_data.iloc[idxs[a]])\r\n # print(a, idxs[a], scores[a])\r\n\r\n spearmans = 1- 6* ((true_idxs-idxs)**2).sum()/((idxs.shape[0]**2-1) * idxs.shape[0])\r\n #pearson = np.corrcoef(np.array([scores, sorted_data.iloc[:, 2]]))\r\n print(f'spearmans coeff : {spearmans}')\r\n #print(f'pearson coeff : {pearson}')","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"102519567","text":"import pandas as pd\nimport pylab as plt\n\ndef analyze(input):\n df = pd.read_csv(input, sep = '\\t', low_memory=False)\n\n dates = {'11/21/2015':'r^',\n '11/22/2015':'k^',\n '11/23/2015':'b^',\n '11/24/2015':'g^',\n '11/25/2015':'m^',\n '11/26/2015':'r*',\n '11/27/2015':'k*',\n '11/28/2015':'b*',\n '11/29/2015':'g*',\n '11/30/2015':'ko',\n '12/01/2015':'go',\n '12/02/2015':'bo',\n '12/03/2015':'ro'\n }\n for key in dates.keys():\n ax = plt.subplot(\"311\")\n plt.plot(df.ix[df['date'] == key, 'hotel_id'], df.ix[df['date'] == key, 'txn'], dates[key], label=key)\n ax.set_title(\"LM txn\")\n ax = plt.subplot(\"312\")\n plt.plot(df.ix[df['date'] == key, 'hotel_id'], df.ix[df['date'] == key, 'eff'], dates[key], label=key)\n ax.set_title(\"LM eff\")\n ax = plt.subplot(\"313\")\n plt.plot(df.ix[df['date'] == key, 'hotel_id'], df.ix[df['date'] == 
key, 'click'], dates[key], label=key)\n ax.set_title(\"LM click\")\n plt.plot(df.ix[df['date'] == key, 'hotel_id'], df.ix[df['date'] == key, 'mc'], dates[key], label=key)\n ax.set_title(\"LM mc\")\n ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.show()\n\nanalyze(\"LM_Click_TXN/input_nz.csv\")\n#analyze(\"LM_Click_TXN/input_au.csv\")","sub_path":"python/Lastminute.py","file_name":"Lastminute.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"420687409","text":"from math import floor\nfrom random import shuffle\nfrom nltk import word_tokenize\nimport string\n\nbawe_sentences = open(\"./dataset_processing/bawe/sentences.txt\", \"r\").readlines()\nbawe_scores = open(\"./dataset_processing/bawe/multi_scores.txt\", \"r\").readlines()\n\nyork_sentences = open(\"./dataset_processing/york/sentences.txt\", \"r\").readlines()\nyork_scores = open(\"./dataset_processing/york/multi_scores.txt\", \"r\").readlines()\n\ntotal_samples = len(bawe_sentences) + len(york_sentences)\n\nn_test_samples = floor(total_samples * 0.3)\nn_train_samples = total_samples - n_test_samples\n\nprint(total_samples)\nprint(n_test_samples)\nprint(n_train_samples)\n\ntest_sentences_file = open(\"./dataset_processing/combined/multi_objective/sentences.test.txt\", \"w+\")\ntest_scores_file = open(\"./dataset_processing/combined/multi_objective/scores.test.txt\", \"w+\")\n\ntrain_sentences_file = open(\"./dataset_processing/combined/multi_objective/sentences.train.txt\", \"w+\")\ntrain_scores_file = open(\"./dataset_processing/combined/multi_objective/scores.train.txt\", \"w+\")\n\nsent_scores = list(zip(york_sentences, york_scores))\nshuffle(sent_scores)\nyork_sentences, york_scores = zip(*sent_scores)\n\ncount = 0\nfor sentence, score in zip(york_sentences, york_scores):\n sentence = sentence.strip(\"\\n\")\n score = score.strip(\"\\n\")\n if count < n_test_samples:\n test_sentences_file.write(f\"{sentence}\\n\")\n test_scores_file.write(f\"{score}\\n\")\n else:\n train_sentences_file.write(f\"{sentence}\\n\")\n train_scores_file.write(f\"{score}\\n\")\n count += 1\n\nfor sentence, score in zip(bawe_sentences, bawe_scores):\n sentence = sentence.translate(str.maketrans('','', string.punctuation)).lower().strip(\"\\n\").strip()\n sentence = \" \".join(word_tokenize(sentence))\n score = score.strip(\"\\n\")\n train_sentences_file.write(f\"{sentence}\\n\")\n train_scores_file.write(f\"{score}\\n\")\n","sub_path":"scripts/training/reflection/training/dataset_processing/multi_objective/multi_split.py","file_name":"multi_split.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"403531211","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='home'),\n path('about/', views.about, name='about'),\n path('terms_of_use/', views.terms_of_use, name='terms_of_use'),\n path('privacy_policy/', views.privacy_policy,\n name='privacy_policy'),\n path('faq/', views.faq, name='faq'),\n]\n","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"598760796","text":"import matplotlib.pyplot as plt, geopandas as gpd\nfrom sklearn.preprocessing import MinMaxScaler\n\n\ndef plot_points_shape_file(\n shape_file, list_values, list_longitude, list_latitude, title\n):\n scaler = MinMaxScaler(feature_range=(50, 1000))\n list_final = scaler.fit_transform([[i] for i in (list_values)])\n gdf = gpd.read_file(shape_file)\n fig, ax = plt.subplots(figsize=(20, 10))\n ax.axis(\"off\")\n for i in range(len(list_values)):\n plt.scatter(x=[list_longitude[i]], y=[list_latitude[i]], s=list_final[i])\n plt.title(title)\n plt.tight_layout()\n gdf.plot(facecolor=\"none\", ax=ax, legend=True, linewidth=1, edgecolor=\"black\")\n","sub_path":"common/plot_points_shp.py","file_name":"plot_points_shp.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"437761946","text":"# \"String Matching Algo problem, figuring out whether there is a right bracket\n# matching the left bracket.\n\n# question from: https://github.com/starandtina/backend-interview-questions\n\n\ndef brackets_do_match(s, left_bracket='[', right_bracket=']'):\n '''O(log(n)) storage, O(n) performance\n '''\n\n unmatched_lefts = 0\n for i, char in enumerate(s):\n if char == left_bracket:\n unmatched_lefts += 1\n elif char == right_bracket:\n unmatched_lefts += -1\n\n if unmatched_lefts < 0:\n return False\n\n if unmatched_lefts > len(s) - i:\n return False\n\n return unmatched_lefts == 0\n\n\ndef all_brackets_do_match(s):\n '''O(n) storage, O(n) performance\n '''\n left_brackets_enum = {'[': 1, '(': 2, '{': 3}\n right_brackets_enum = {']': -1, ')': -2, '}': -3}\n\n bracket_stack = []\n for char in s:\n code = 0\n if char in left_brackets_enum:\n code = left_brackets_enum[char]\n elif char in right_brackets_enum:\n code = right_brackets_enum[char]\n if code != 0:\n if len(bracket_stack) > 0 and bracket_stack[-1] == -code:\n bracket_stack.pop()\n else:\n bracket_stack.append(code)\n return len(bracket_stack) == 0\n\n\n################################################################################\nimport unittest\n\n\nclass Test_Brackets_Do_Match(unittest.TestCase):\n def test_true(self):\n self.assertTrue(brackets_do_match('[]'))\n self.assertTrue(brackets_do_match('[[test]]'))\n\n def test_false(self):\n self.assertFalse(brackets_do_match('[]]'))\n self.assertFalse(brackets_do_match('[[]'))\n\n\nclass Test_All_Brackets_Do_Match(unittest.TestCase):\n def test_true(self):\n self.assertTrue(all_brackets_do_match('[]'))\n self.assertTrue(all_brackets_do_match('{[[(test)]]}'))\n\n def test_false(self):\n self.assertFalse(all_brackets_do_match('[]())'))\n self.assertFalse(all_brackets_do_match('[]}'))\n\n\n# This slightly odd call lets me run the tests in jupyter.\nif __name__ == '__main__':\n unittest.main(argv=['first-arg-is-ignored'], 
exit=False)\n","sub_path":"bracket_matching.py","file_name":"bracket_matching.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"601967320","text":"from flask import Flask, render_template, request, json\n\n\napp = Flask(__name__)\n\n\n# The index page. In this case, the only GET request\n@app.route('/', methods=['GET'])\ndef index():\n return render_template('index.html')\n\n@app.route('/calculate', methods=['POST'])\ndef calculate():\n form_data = request.form\n full_equation = form_data['full-equation']\n\n if full_equation == '':\n return 'Error! You have submitted an empty equation'\n\n split_equation = full_equation.split(' ')\n numbers = split_equation[::2]\n operators = split_equation[1::2]\n\n if len(numbers) != len(operators) + 1:\n return 'Error! You have ended the equation with an operator. Please finish with a number'\n\n # test data!\n # make sure they are numbers\n try:\n for num in numbers:\n int(num)\n except (KeyError, ValueError):\n return 'Error! One of the values is not a number. Please try again.'\n\n # make sure the operator is as expected\n for operator in operators:\n if operator not in ['+', '-', '*', '/']:\n return 'Error! Somehow the operators are not correct'\n\n # calculate total\n total = get_total(numbers, operators)\n\n # alert for division by 0\n try:\n if not total.isnumeric():\n return total\n except AttributeError:\n # numbers don't have the method isnumeric(), so we want to pass\n pass\n\n log_calculation('{} = {}'.format(full_equation, total))\n return 'success'\n\n\n# returns the output for all users on the index page\n@app.route('/get_output', methods=['POST'])\ndef get_output():\n return read_last_10_entries()\n\n\n# writes entries to a log file\ndef log_calculation(entry):\n f = open('calculations.txt', 'a+')\n f.write('{}\\n'.format(entry))\n f.close()\n return True\n\n\n# returns the 10 most recent entries, last to first\ndef read_last_10_entries():\n try:\n file = open('calculations.txt', 'r')\n except FileNotFoundError:\n return ''\n lines = file.readlines()\n\n # get the most recent 10 items from a list\n entries = reversed(lines[-10:])\n entries = strip_new_lines_from_list_elements(entries)\n return render_template('output.html', entries=entries)\n\n\n\n# strip the new lines from list elements\ndef strip_new_lines_from_list_elements(list):\n new_list = []\n for item in list:\n new_list.append(item.strip())\n return new_list\n\n\n# do calculations\ndef get_total(numbers, operators):\n # do operators in this order\n for operator_loop in ['*', '/', '+', '-']:\n index = 0\n while operator_loop in operators[:]:\n if operator_loop == operators[index]:\n # no division by 0 here!\n if operator_loop == '/' and numbers[index+1] == '0':\n return 'Error! 
You are attempting to divide by 0.'\n # calculate new number\n new_num = eval(\"{}{}{}\".format(numbers[index], operator_loop, numbers[index+1]))\n # condense lists, as we combined the numbers already\n del operators[index]\n del numbers[index+1]\n del numbers[index]\n # add new number to the list.\n numbers.insert(index,new_num)\n else:\n # only push index if no calculation was made, since you might do an operation on the same index\n index += 1\n\n return numbers[0]\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"339527989","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\" Create the Wheel Package \"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport stat\nimport shutil\nfrom setuptools import sandbox\nimport six\n\nPATH_HERE = os.path.dirname(os.path.realpath(__file__))\nPATH_TEMP = os.path.join(PATH_HERE, \"Temp\")\nUNIXLE_EXTENSIONS = [\".py\", \".xml\", \".json\", \".pml\", \".txt\", \".htm\", \".html\", \".css\"]\nUNIXLE_EXCLUDED = [\".git\", \".dll\", \".so\", \".pyd\", \".dylib\"]\n\ndef unix_line_ending(folder, extensions=None, excluded=None):\n \"\"\" Replace Windows Line Endings with Unix Line Endings \"\"\"\n if extensions is None:\n extensions = UNIXLE_EXTENSIONS\n if excluded is None:\n excluded = UNIXLE_EXCLUDED\n for dname, _dirs, files in os.walk(folder):\n if dname in excluded:\n continue\n for fname in files:\n if fname in excluded:\n continue\n fext = os.path.splitext(fname)[-1]\n if fext not in extensions and fname not in extensions:\n continue\n fpath = os.path.join(dname, fname)\n with open(fpath, \"rb\") as filer:\n fdata = filer.read()\n filer.close()\n try:\n # fdata is bytes (file opened \"rb\"), so compare against bytes literals\n if b\"\\r\\n\" not in fdata:\n continue\n except Exception:\n continue\n with open(fpath, \"wb\") as filew:\n filew.write(fdata.replace(b\"\\r\\n\", b\"\\n\"))\n filew.close()\n print(\"Unix Line Ending : %s\" % fpath)\n\ndef folder_cleanup(folder, names=None, extensions=None):\n \"\"\" Remove all sub folders and files by name or extension \"\"\"\n if not isinstance(names, list):\n names = []\n if not isinstance(extensions, list):\n extensions = []\n for dname, dirs, files in os.walk(folder):\n for dirname in dirs:\n if dirname not in names:\n continue\n dirpath = os.path.join(dname, dirname)\n if os.path.isdir(dirpath):\n if not os.access(dirpath, os.W_OK):\n os.chmod(dirpath, stat.S_IWRITE)\n shutil.rmtree(dirpath, ignore_errors=True)\n for fname in files:\n fpath = os.path.join(dname, fname)\n if fname in names:\n if os.path.isfile(fpath):\n if not os.access(fpath, os.W_OK):\n os.chmod(fpath, stat.S_IWRITE)\n os.remove(fpath)\n fext = os.path.splitext(fname)[-1]\n if fext in extensions or fname in extensions:\n if os.path.isfile(fpath):\n if not os.access(fpath, os.W_OK):\n os.chmod(fpath, stat.S_IWRITE)\n os.remove(fpath)\n\ndef clean_access_rights(path):\n \"\"\" Allow write access to a path \"\"\"\n if os.path.isfile(path):\n if not os.access(path, os.W_OK):\n os.chmod(path, stat.S_IWRITE)\n os.chmod(path, stat.S_IWUSR)\n elif os.path.isdir(path):\n if not os.access(path, os.W_OK):\n os.chmod(path, stat.S_IWRITE)\n os.chmod(path, stat.S_IWUSR)\n for dname, dirs, files in os.walk(path):\n for dirname in dirs:\n dirpath = os.path.join(dname, dirname)\n if os.path.isdir(dirpath):\n if not os.access(dirpath, os.W_OK):\n os.chmod(dirpath, 
stat.S_IWRITE)\n os.chmod(dirpath, stat.S_IWUSR)\n for fname in files:\n fpath = os.path.join(dname, fname)\n if os.path.isfile(fpath):\n if not os.access(fpath, os.W_OK):\n os.chmod(fpath, stat.S_IWRITE)\n os.chmod(fpath, stat.S_IWUSR)\n\ndef cleanup_folders(dist=False):\n \"\"\" Prepare the Folders for a platform \"\"\"\n if dist is True:\n path_dist = os.path.join(PATH_HERE, \"dist\")\n if os.path.isdir(path_dist):\n shutil.rmtree(path_dist, ignore_errors=True)\n path_egg = os.path.join(PATH_HERE, \"qicore.egg-info\")\n if os.path.isdir(path_egg):\n shutil.rmtree(path_egg, ignore_errors=True)\n path_build = os.path.join(PATH_HERE, \"build\")\n if os.path.isdir(path_build):\n shutil.rmtree(path_build, ignore_errors=True)\n path_temp = os.path.join(PATH_HERE, \"temp\")\n if os.path.isdir(path_temp):\n shutil.rmtree(path_temp, ignore_errors=True)\n\ndef create():\n \"\"\" Create a Package \"\"\"\n print(\"- Clean Access Rights\")\n clean_access_rights(PATH_HERE)\n print(\"- Remove Building Folders\")\n cleanup_folders(dist=True)\n print(\"- Clean .pyc Files\")\n folder_cleanup(PATH_HERE, names=None, extensions=[\".pyc\"])\n unix_line_ending(PATH_HERE, extensions=None, excluded=None)\n print(\"- Build Package\")\n sandbox.run_setup(os.path.join(PATH_HERE, \"setup.py\"), [\"bdist_wheel\"])\n print(\"- Remove Building Folders\")\n cleanup_folders()\n print(\"- Package Generation Finished\")\n\nif __name__ == \"__main__\":\n create()\n","sub_path":"python-qicore/wheel_create.py","file_name":"wheel_create.py","file_ext":"py","file_size_in_byte":5081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"21131987","text":"\"\"\"adds Show model\n\nRevision ID: 9ece6f9fa978\nRevises: ad210f366e77\nCreate Date: 2021-05-24 19:19:01.694816\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '9ece6f9fa978'\ndown_revision = 'ad210f366e77'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('Show',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('artist_id', sa.Integer(), nullable=True),\n sa.Column('venue_id', sa.Integer(), nullable=True),\n sa.Column('start_time', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('Show')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/9ece6f9fa978_adds_show_model.py","file_name":"9ece6f9fa978_adds_show_model.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"546568678","text":"#%%\nimport pickle as pk\nfrom gensim.models import KeyedVectors\nimport numpy as np\nfrom scipy.spatial.distance import cosine\n\n# Rules:\n# If a keyword has more than one word, split it, sum the vectors, and take the cosine distance of that\n# Does this work? 
🤔🤔🤔🤔\n# If covid appears, we classify it as respiratory\n# Covid is not in the model's vocabulary because the model is old\n# If a word is not in the model, it is skipped and we record which one it was\n\n\n# Decision making\n# Cosine distance between each category and the terms; check which is closest\n\nclass generic_class():\n def __init__(self):\n pass\n def load(self, filename):\n with open(filename, 'rb') as input:\n tmp_dict = pk.load(input)\n self.__dict__.update(tmp_dict)\n \n\ndef process_keywords(keywords: list, model):\n # results for the keywords of the article\n results = {}\n\n for key in keywords:\n if ' ' in key:\n k = key.split(' ')\n for w in k:\n try:\n model.get_vector(w)\n except Exception:\n results['skipped'] = True\n\n\ndef main():\n arts = generic_class()\n arts.load('../data-collection/articles.pkl')\n\n model = KeyedVectors.load_word2vec_format(\n './GoogleNews-vectors-negative300.bin', \n binary=True\n )\n\n categorias = [\n 'cardiovascular', 'respiratory', 'gastric', 'immunologic', 'trauma',\n 'neurologic', 'genetic', 'cancer', 'hormonal', 'epidemiology'\n ]\n\n categorias_vec = [model.get_vector(c) for c in categorias]\n\n for k, v in arts.articles.items():\n process_keywords(v['keywords'], model)\n\n","sub_path":"data-processing/OLD-process_articles.py","file_name":"OLD-process_articles.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"539928250","text":"import numpy as np\nimport matplotlib.pyplot as plt\n#import matplotlib\nimport tensorflow as tf\nimport os\n#from sklearn.preprocessing import OneHotEncoder\nimport PIL\nfrom PIL import Image, ImageOps\nfrom numpy import *\n\nfrom sklearn.utils import shuffle\n#from sklearn.model_selection import train_test_split\n\nimport keras\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.optimizers import Adam\nfrom keras import backend as K\n\n# visualisation imports\n#from keras.callbacks import TensorBoard\nfrom time import time\n\nfrom datetime import datetime\n\n# input image dimensions\nimg_rows, img_cols = 227, 227\n\n# number of channels\nimg_channels = 3\n\n# directory for output and logs\nlog_dir = 'Optimizer_2'\n\n# data\npath1 = 'BaumbilderBA_augmented_training_resized_227'\npath2 = 'BaumbilderBA_augmented_training_resized_227_rotated'\npath_val = 'BaumbilderBA_augmented_validation_resized_227'\npath_valrot = 'BaumbilderBA_augmented_validation_resized_227_rotated'\n\n# find images\nimlist1 = os.listdir(path1)\nimlist2 = os.listdir(path2)\nimlist = imlist1 + imlist2\n\nimlistval1 = os.listdir(path_val)\nimlistvalrot = os.listdir(path_valrot)\nimlistval = imlistval1 + imlistvalrot\n\nif '.DS_Store' in imlist:\n imlist.remove('.DS_Store')\n\n# get the number of images\nimnbr = len(imlist)\nimnval = len(imlistval)\n#print(\"Number of training files:\")\n#print(imnbr)\n#print(\"Number of validation files:\")\n#print(imnval)\n\n# test with some images\n#imnbr = 40000\n#imlist1 = imlist[:40000]\n\n#imnbr = 10000\n#imnval = imnbr\n#imlistval = imlistval[:10000]\n\n# create matrix to store all flattened images\nimmatrix1 = array([array(Image.open(path1 + '/' + im2)).flatten()\n for im2 in imlist1],'f')\nimmatrix2 = array([array(Image.open(path2 + '/' + im3)).flatten()\n for im3 in imlist2], 'f')\nimmatrix_val1 = array([array(Image.open(path_val + '/' + im4)).flatten()\n for im4 in 
imlistval1],'f')\nimmatrix_valrot = array([array(Image.open(path_valrot + '/' + im5)).flatten()\n for im5 in imlistvalrot], 'f')\nprint(\"Shape von immatrix1, 2 und gesamt\")\nprint(immatrix1.shape)\nprint(immatrix2.shape)\nimmatrix = np.concatenate((immatrix1, immatrix2))\nprint(immatrix.shape)\nnum_samples = imnbr\n\n\nprint(\"Shape von immatrix_val1, _valrot und gesamt\")\nimmatrix_val = np.concatenate((immatrix_val1, immatrix_valrot))\nprint(immatrix_val1.shape)\nprint(immatrix_valrot.shape)\nprint(immatrix_val.shape)\nnum_samples_val = imnval\n\n# label\nlabel=np.ones((num_samples,),dtype = int)\nlabel_val=np.ones((imnval,),dtype = int)\n\nprint(\"Label setzen\")\ndef get_label(imlist,label):\n i=0\n while i < len(imlist):\n fileName = imlist[i]\n #print(str(i) + \"->\" + fileName)\n if \"Bergahorn\" in fileName:\n label[i] = 0\n #print(\"Bergahorn\")\n if \"Spitzahorn\" in fileName:\n label[i] = 1\n #print(\"Spitzahorn\")\n if \"Feldahorn\" in fileName:\n label[i] = 2\n #print(\"Feldahorn\")\n if \"Buche\" in fileName:\n label[i] = 3\n #print(\"Buche\")\n if \"Birke\" in fileName:\n label[i] = 4\n #print(\"Birke\")\n if \"Eiche\" in fileName:\n label[i] = 5\n #print(\"Eiche\")\n if \"Stechpalme\" in fileName:\n label[i] = 6\n #print(\"Stechpalme\")\n if \"Ulme\" in fileName:\n label[i] = 7\n #print(\"Ulme\")\n if \"Linde\" in fileName:\n label[i] = 8\n #print(\"Linde\")\n if \"Kirsche\" in fileName:\n label[i] = 9\n #print(\"Kirsche\")\n if \"Esche\" in fileName:\n label[i] = 10\n #print(\"Esche\")\n #print(str(i) + \"->\" + imlist[i])\n #print(label[i])\n i += 1\n\nget_label(imlist, label)\nget_label(imlistval, label_val)\n\n# prepare data and labels\n# the method shuffle() randomizes the items of a list in place.\ndata,Label = shuffle(immatrix,label, random_state=2)\ntrain_data = [data,Label]\n\n\n# prepare parameters and data\nbatch_size = 100\nnum_classes = 11\nepochs = 50\n\n# prepare training data\n(X_train, y_train) = (train_data[0],train_data[1])\n\n# prepare validation data\nvaldata,vallabel = shuffle(immatrix_val,label_val, random_state=2)\ntest_data = [valdata,vallabel]\n\n(X_test, y_test) = (test_data[0],test_data[1])\n\nX_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, img_channels)\nX_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, img_channels)\n\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\n\nX_train /= 255\nX_test /= 255\n\nprint('X_train shape:', X_train.shape)\nprint('X_test shape:', X_test.shape)\nprint(X_train.shape[0], 'train samples')\nprint(X_test.shape[0], 'test samples')\n\n# save class labels to disk to color data points in TensorBoard accordingly\n#with open(log_dir +'/metadata.tsv', 'w') as f:\n# np.savetxt(f, y_test)\n\n# prepare cnn\n# convert class vectors to binary class matrices\ny_train = tf.keras.utils.to_categorical(y_train, num_classes)\ny_test = tf.keras.utils.to_categorical(y_test, num_classes)\n\ninput_shape = (img_rows, img_cols, img_channels)\n\n# build cnn\nmodel = Sequential()\n\n# convolutional layer 1\nconv_1 = model.add(Conv2D(filters=96, kernel_size=(11, 11), strides=4, activation='relu', input_shape=input_shape))\n# max pooling 1\nmax_1 = model.add(MaxPooling2D(pool_size=(3, 3), strides=2))\n\n# convolutional layer 2\nconv_2 = model.add(Conv2D(filters=256, kernel_size=(5, 5), strides=1, activation='relu'))\n# max pooling 2\nmax_2 = model.add(MaxPooling2D(pool_size=(3, 3), strides=2))\n# convolutional layer 3\nconv_3 = model.add(Conv2D(filters=384, kernel_size=(3, 3), strides=1,\n 
activation='relu'))\n# convolutional layer 4\nconv_4 = model.add(Conv2D(filters=384, kernel_size=(3, 3), strides=1,\n activation='relu'))\n# convolutional layer 5\nconv_5 = model.add(Conv2D(filters=256, kernel_size=(3, 3), strides=1,\n activation='relu'))\n#max pooling 3\nmax_3 = model.add(MaxPooling2D(pool_size=(3, 3), strides=2))\n# dropout\ndropout_1 = model.add(Dropout(0.5))\nflatten = model.add(Flatten())\n# fully connected layer 1\nfully_1 = model.add(Dense(256*6*6, activation='relu')) #ohne dropout 12*12?\ndropout_2 = model.add(Dropout(0.5))\n# fully connected layer 2\nfully_2= model.add(Dense(4096, activation='relu'))\n# fully connected layer 3\nfully_3 = model.add(Dense(num_classes, activation='softmax'))\n\n# Visualisation\n# Launch the graph in a session.\n#sess = tf.Session()\n# Create a summary writer, add the 'graph' to the event file.\n#writer = tf.summary.FileWriter(log_dir, sess.graph)\n\n#embedding_layer_names = set(layer.name\n# for layer in model.layers\n# if layer.name.startswith('conv'))\n\n#tensorboard = TensorBoard(log_dir=log_dir, histogram_freq=5, batch_size = batch_size, write_graph = True, write_grads = True, write_images = True, embeddings_freq = 25, embeddings_layer_names = embedding_layer_names, embeddings_data=X_test, update_freq='epoch')\n# embeddings_metadata = [], embeddings_data = []\n# embeddings_layer_names:\n# a list of names of layers to keep eye on. If NULL or empty list all the embedding layers will be watched.\n\n#embeddings = tf.Variable(tf.random_normal([num_samples, num_classes], -1.0, 1.0, name='tree_embedding'))\n#random_uniform?\n\nmodel.summary()\n\ndatagen_train = ImageDataGenerator(\n #featurewise_std_normalization=True,\n\n #width_shift_range=0.2, # randomly shift images horizontally \n #height_shift_range=0.2,# randomly shift images vertically \n\n horizontal_flip=True, # randomly flip images horizontally\n vertical_flip=True)\n\n# fit augmented image generator on data\ndatagen_train.fit(X_train)\n\noptimizer = Adam(lr=0.0005)\n# initialize\nmodel.compile(optimizer=optimizer,\n loss='categorical_crossentropy',\n #optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n\n# train and evaluate\n'''history = model.fit(X_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(X_test, y_test)#,\n # callbacks=[tensorboard]\n )'''\nhistory = model.fit_generator(datagen_train.flow(X_train, y_train, batch_size=batch_size),\n validation_data=(X_test, y_test),\n epochs=epochs,\n steps_per_epoch=X_train.shape[0]/batch_size,\n verbose=1)\n #callbacks=[checkpointer,lrate,tensorboard], verbose=1\n\nscore = model.evaluate(X_test, y_test, verbose=0)\n#model.save('21-12-18_3_Augmented_150epochs.h5')\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n\nprint(datetime.now())\n\n\n# https://www.kaggle.com/amarjeet007/visualize-cnn-with-keras\n# plotting training and validation loss\nloss = history.history['loss']\nval_loss = history.history['val_loss']\nepochs = range(1, len(loss) + 1)\nplt.plot(epochs, loss, color='red', label='Training loss')\nplt.plot(epochs, val_loss, color='green', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\n#plt.show()\nplt.savefig(log_dir+'/loss.png')\nplt.clf()\n\n# plotting training and validation accuracy\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nplt.plot(epochs, acc, color='blue', label='Training acc')\nplt.plot(epochs, val_acc, color='magenta', label='Validation 
acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend()\n#plt.show()\nplt.savefig(log_dir+'/accuracy.png')\n\nprint(\"on validation data\")\npred1=model.evaluate(X_test,y_test)\nprint(\"accuracy\", str(pred1[1]*100))\nprint(\"Total loss\",str(pred1[0]*100))\n\n# predict results\n#results = model.predict(X_test)\n\n# select the index with the maximum probability\n#results = np.argmax(results,axis = 1)\n\n#submissions=pd.DataFrame({\"ImageId\": list(range(1,len(results)+1)),\n# \"Label\": results})\n#submissions.to_csv(\"validierung.csv\", index=False, header=True)\n\nmodelpath = log_dir+'/10-01-19_optimizer.hdf5'\nprint(\"Model saved to: \")\nprint(modelpath)\nmodel.save(modelpath)\n","sub_path":"DGX Station/10-01-19_Optimizer_2.py","file_name":"10-01-19_Optimizer_2.py","file_ext":"py","file_size_in_byte":10368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"161095125","text":"import random\nimport string\n\nimport redis as _redis\nfrom flask import session, g\nfrom flask_session import Session as FlaskSession, RedisSessionInterface\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy as SQLAlchemyBase\nfrom werkzeug.contrib.cache import RedisCache\nfrom itsdangerous import JSONWebSignatureSerializer\n\n\nmigrate = Migrate()\n\n\nclass RedisCacheExt(object):\n _redis_cache = None\n\n def init_app(self, my_app):\n self._redis_cache = RedisCache(\n host=my_app.config['REDIS_HOST']\n )\n\n def set(self, key, value, timeout=None):\n self._redis_cache.set(key, value, timeout)\n\n def get(self, key):\n return self._redis_cache.get(key)\n\n\nredis_cache = RedisCacheExt()\n\n\nclass RedisExt(_redis.Redis):\n\n def init_app(self, my_app):\n self.__init__(host=my_app.config['REDIS_HOST'])\n\n\nredis = RedisExt()\n\n\nclass SQLAlchemy(SQLAlchemyBase):\n \"\"\"PgBouncer will be used; configure NullPool so SQLAlchemy does not manage its own pool.\"\"\"\n\n def apply_driver_hacks(self, app, info, options):\n super(SQLAlchemy, self).apply_driver_hacks(app, info, options)\n from sqlalchemy.pool import NullPool\n options['poolclass'] = NullPool\n options.pop('pool_size', None)\n options.pop('max_overflow', None)\n\n\ndb = SQLAlchemy()\n\n\nclass JWSerializerEX(object):\n \"\"\"Creates the instance for the JWSSignature serializer; other versions may change the\n default algorithm name (it already changed between 0.24 .. 
1.1.0); it is better to specify it explicitly.\"\"\"\n\n def __init__(self):\n self.jws: JSONWebSignatureSerializer = None\n\n def init_app(self, my_app):\n self.jws = JSONWebSignatureSerializer(my_app.config['SECRET_KEY'], algorithm_name='HS512')\n\n def loads(self, s):\n return self.jws.loads(s)\n\n def dumps(self, obj):\n return self.jws.dumps(obj)\n\n\njws_serializer = JWSerializerEX()\n\n\nclass RedisSessionHacked(RedisSessionInterface):\n \"\"\"Custom class to avoid creating and keeping\n the session keys in redis on every request when\n the public APIs are consumed; instead, the request\n sends the authorization token.\"\"\"\n\n def save_session(self, my_app, the_session, response):\n if g.get('no_session'):\n return\n\n super().save_session(my_app, the_session, response)\n\n\nclass SessionHacked(FlaskSession):\n \"\"\"Custom class to directly assign redis\n as the backend for the sessions.\"\"\"\n\n def init_app(self, my_app):\n config = my_app.config.copy()\n # my_app.session_interface = self._get_interface(my_app)\n my_app.session_interface = RedisSessionHacked(\n redis=config['SESSION_REDIS'],\n key_prefix=config['SESSION_KEY_PREFIX'],\n use_signer=config['SESSION_USE_SIGNER'],\n permanent=config['SESSION_PERMANENT']\n )\n\n\ndef generate_csrf_token():\n if 'csrf_token' not in session:\n csrf_token = ''.join([random.choice(string.ascii_letters + string.digits)\n for i in range(32)])\n session['csrf_token'] = csrf_token\n return session['csrf_token']\n\n","sub_path":"app/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"311418908","text":"from flask import Flask, render_template\nimport bs4 as bs\nimport urllib.request\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n driver_data = [] #initiates driver data\n points_data = [] #initiates points data\n\n source = urllib.request.urlopen('https://www.bbc.com/sport/formula1/drivers-world-championship/standings').read() #current standings on bbc\n soup = bs.BeautifulSoup(source, 'lxml')\n\n table = soup.find('table') #finds the first table\n\n table_rows = table.find_all('tr') #finds all table rows\n\n for points in table_rows: #finds points in table rows\n cols = points.find_all('td', class_=\"table__cell table__cell--right\")\n cols = [ele.text.strip() for ele in cols]\n points_data.append([ele for ele in cols if ele])\n\n for driver in table_rows: #finds driver names in table rows\n cols = driver.find_all('abbr', class_=\"medium-abbr-off\")\n cols = [ele.text.strip() for ele in cols]\n driver_data.append([ele for ele in cols if ele])\n\n points_data.remove([]) #removes fluff\n\n points = [] #creates new lists which will store the flattened lists\n driver = []\n\n for sublist in points_data: #flattens the points data list\n for item in sublist:\n points.append(item)\n\n for sublist in driver_data: #flattens the driver names list\n for item in sublist:\n driver.append(item)\n\n return render_template('index.html', points=points, names=driver) #returns template; assigns the lists to the objects points and names\n\nif __name__ == '__main__':\n app.run()\n\n#used for when debugging\n\"\"\"if __name__ == '__main__':\n app.run(debug=True)\"\"\" \n","sub_path":"site_init.py","file_name":"site_init.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"290112592","text":"import requests\nimport threading\nimport time\nfrom flask import Flask\nfrom db_management import new_flat_checker\nfrom spectral_investor import app\nfrom flask import render_template\nfrom spectral_investor import admin\n\n\n#New flat checker ran before flask app start\n#@app.before_first_request\ndef activate_job():\n thread = threading.Thread(target=new_flat_checker,args=(True,'wroclaw',1))\n thread.start()\n\n@app.route(\"/\")\ndef index():\n return render_template('home.html')\n\n\ndef start_runner():\n def start_loop():\n not_started = True\n while not_started:\n print('In start loop')\n try:\n r = requests.get('http://127.0.0.1:5000/')\n print(r.status_code)\n if r.status_code == 200:\n print('Server started, quiting start_loop')\n not_started = False\n print(r.status_code)\n except:\n print('Server not yet started')\n time.sleep(2)\n print('Started runner')\n thread = threading.Thread(target=start_loop)\n thread.start()\n\n#app starter\nif __name__ == \"__main__\":\n #start_runner()\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"36945251","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 18 20:12:38 2020\n\n@author: manan\n\"\"\"\nimport nltk\nimport re\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom nltk.tokenize import PunktSentenceTokenizer\n\n\n\n#reading the data:\npst = PunktSentenceTokenizer()\nopenfile = open(\"data.txt\")\ndata = openfile.read()\ndata=data.decode('utf-8')\n\n#tokenizing:\ntokenized_sentence = pst.tokenize(data)\n\n#making a list for storing pos vs original word\nwordlist = []\n\n\n#chunk formation and converting to a string:\nstringlist = []\nfor i in tokenized_sentence:\n #try:\n words = nltk.word_tokenize(i)\n tagged = nltk.pos_tag(words)\n count=0\n for i in words:#loop to map pos form to original word\n #print(tagged[count][0]+\"/\"+tagged[count][1])\n if len(tagged[count])==2:\n wordlist.append([tagged[count][0]+\"/\"+tagged[count][1]])\n count = count+1\n\n chunkGram = r\"\"\"Chunk: {{0,2}{0,1}{1,2}{0,1}{0,2}{0,1}}\"\"\"\n chunkParser = nltk.RegexpParser(chunkGram)\n chunked = chunkParser.parse(tagged)\n print(chunked)\n stringlist.append(chunked.pformat().encode('ascii','ignore'))\n #except Exception as e:\n # print(str(e))\n\n\n\n\n#string extraction:\nindex = 0\nlistoflist = []\nfor f in stringlist:\n String = f\n chunklist = []\n iter = re.finditer(r\"\\Chunk\\b\", String)\n indices = [m.start(0) for m in iter]\n for x in indices:\n j=1\n temp =\"\"\n while(stringlist[index][x+5+j]!=')'):\n temp = temp + stringlist[index][x+5+j]\n j = j+1\n chunklist.append(temp)\n index = index + 1\n listoflist.append(chunklist)\n#print(listoflist)\n\n\n\n\n\n\n#graph connection :\nsource = []\ntarget = []\n\nfor i in listoflist:\n temp_source = []\n temp_target = []\n for j in i:\n temp_source.append(j)\n temp_target.append(j)\n if len(temp_source)!=0 and len(temp_target)!=0:\n temp_source.pop(len(temp_source)-1)\n temp_target.pop(0)\n for x in temp_source:\n source.append(x)\n for y in temp_target:\n target.append(y)\nfinal_source = []\nfinal_target = []\nfor i in source:\n final_source.append('('+ i +')')\n\nfor i in target:\n final_target.append('('+ i + ')')\n\n\n\n\n#putting data into the data frame and creating graph:\nkg_df = pd.DataFrame({'source':final_source, 
'target':final_target})\nprint(\"printing pandas data frame--------\")\nprint(kg_df)\nG=nx.from_pandas_edgelist(kg_df, \"source\", \"target\")\n\n\n\nmax = 0\nfinal_k1=0\nfinal_k2=0\nfinal_k3=0\nmatch_value=0\n#doing data analysis:\nfor k1 in range(0,3):\n for k2 in range(0,3):\n for k3 in range(0,3):\n somelist = [[0,\"\"]]\n done = []\n for i in final_source :\n temp_sum = 0\n conn_nodes = 0\n temp_count = 0\n for j in final_target:\n try:\n temp_sum = temp_sum + nx.shortest_path_length(G,i,j)\n if nx.shortest_path_length(G,i,j) == 1 :\n conn_nodes = conn_nodes + 1#directly connected nodes\n for temp in final_source :\n if temp == i :\n temp_count = temp_count+1\n except:\n temp_sum = temp_sum + 0\n if i not in done :\n somelist.append((k1*(conn_nodes)+k2*(temp_count)+k3*(len(i)),i))\n done.append((i))\n #somelist.sort()\n #print(\"sort()\")\n #for i in somelist:\n #print(i)\n somelist.sort(reverse = True)\n #print(\"reverse_sort()\")\n #for i in somelist:\n #print(i)\n\n\n ##extract info for control loop\n output = []\n temp_somlist_count = 0\n for i in somelist:\n if temp_somlist_count==10:\n break\n else :\n output.insert(temp_somlist_count,i)\n temp_somlist_count = temp_somlist_count+1\n\n print(output)\n\n #find if matching is possible:\n expected_output = [\"committee of decision\",\"ensemble learning\",\"machine learning model\",\"help of source\",\"high confidence\",\"different decision ensemble\",\"training data\",\"updated sample distribution\",\"weighted majority voting\",\"artificial training example\"]\n final_count = 0\n for i in output:\n for j in expected_output:\n #print(j)\n relist = re.split(r'\\s',j)\n #print(relist)\n for temp in relist:\n if re.search(temp,i[1]):\n #print(\"finding in\")\n #print(i[1])\n final_count = final_count+1\n \n print(str(final_count)+str(\":\")+str(k1)+str(\":\")+str(k2)+str(\":\")+str(k3))\n if final_count>max:\n max = final_count\n print(\"improved:\")\n print(str(max)+str(\":\")+str(k1)+str(\":\")+str(k2)+str(\":\")+str(k3))\n match_value = max\n final_k1 = k1\n final_k2 = k2\n final_k3 = k3\nprint(\"final_values\")\nprint(match_value)\nprint(final_k1)\nprint(final_k2)\nprint(final_k3)\n#plotting the graph:\nplt.figure(figsize=(12,12))\npos = nx.spring_layout(G)\nnx.draw(G, with_labels=True, node_color='skyblue', edge_cmap=plt.cm.Blues, pos = pos)\n#plt.show()\n","sub_path":"kntool/test_version.py","file_name":"test_version.py","file_ext":"py","file_size_in_byte":5551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"217756915","text":"from django import forms\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\nfrom flowback.models import Well_Profile, Well_Data\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field, Fieldset\nfrom crispy_forms.bootstrap import AppendedText, PrependedText, FormActions\n\nclass CrispyModelForm(forms.ModelForm):\n class Meta:\n model = Well_Profile\n fields = '__all__'\n widgets = {\n 'well_name': forms.TextInput(attrs={'placeholder': 'Please Enter Well Name', 'required': True}),\n }\n\n def __init__(self, *args, **kwargs):\n super(CrispyModelForm, self).__init__(*args, **kwargs)\n self.fields['initial_shut_in_psi'].label = \"Initial Shut In Pressure (psi)\"\n self.fields['initial_res_psi'].label = \"Initial Reservoir Pressure (psi)\"\n self.fields['total_frac_fluid'].label = \"Total Frac Fluid Pumped (bbl)\"\n 
self.fields['total_sand_pumped'].label = \"Total Frac Sand Pumped (lb)\"\n self.fields['prod_path_diameter'].label = \"Diameter of flowpath (in)\"\n # If you pass FormHelper constructor a form instance\n # It builds a default layout with all its fields\n self.helper = FormHelper(self)\n # You can dynamically adjust your layout\n self.helper.layout.append(Submit('Submit', 'Submit'))\n\n\nclass CrispyDataForm(forms.ModelForm):\n class Meta:\n model = Well_Data\n fields = ('data_well_name','data_hour','data_tubing_psi','data_csg_psi','data_choke_size','data_sep_psi','data_oil_rate','data_water_rate','data_gas_rate','data_flowline_psi','data_chlorides','data_sand_percent','data_h2s','data_remarks')\n widgets = {\n #'well_name': forms.ChoiceField(choices=well_choice_gen()),\n }\n def __init__(self, *args, **kwargs):\n super(CrispyDataForm, self).__init__(*args, **kwargs)\n self.fields['data_well_name'].label = \"Well name\"\n #self.fields['data_date'].label = \"Date (mm/dd/yyyy)\"\n self.fields['data_hour'].label = \"Total hours on flowback\"\n self.fields['data_tubing_psi'].label = \"Tubing pressure (psi)\"\n self.fields['data_csg_psi'].label = \"Casing pressure (psi)\"\n self.fields['data_choke_size'].label = \"Choke size (xx/64 or xxx/128)\"\n self.fields['data_sep_psi'].label = \"Separator pressure (psi)\"\n self.fields['data_oil_rate'].label = \"Oil rate (bbl/hr)\"\n self.fields['data_water_rate'].label = \"Water rate (bbl/hr)\"\n self.fields['data_gas_rate'].label = \"Gas rate (mcf/d)\"\n self.fields['data_flowline_psi'].label = \"Flowline pressure (psi)\"\n self.fields['data_chlorides'].label = \"Chlorides (ppm)\"\n self.fields['data_sand_percent'].label = \"Sand percent (%/10mL)\"\n self.fields['data_h2s'].label = \"H2S (ppm)\"\n self.fields['data_remarks'].label = \"Comments\"\n # If you pass FormHelper constructor a form instance\n # It builds a default layout with all its fields\n self.helper = FormHelper(self)\n # You can dynamically adjust your layout\n self.helper.layout = Layout(\n Div(\n Div('data_well_name', css_class='span12'),\n #Div('data_date', css_class='span12'),\n Div('data_hour', css_class='span12'),\n Div('data_tubing_psi', css_class='span12'),\n Div('data_csg_psi', css_class='span12'),\n Div('data_choke_size', css_class='span12'),\n Div('data_sep_psi', css_class='span12'),\n Div('data_oil_rate', css_class='span12'),\n Div('data_water_rate', css_class='span12'),\n Div('data_gas_rate', css_class='span12'),\n Div('data_flowline_psi', css_class='span12'),\n Div('data_chlorides', css_class='span12'),\n Div('data_sand_percent', css_class='span12'),\n Div('data_h2s', css_class='span12'),\n Div('data_remarks', css_class='span12'),\n css_class='row'),\n )\n self.helper.layout.append(Submit('Submit', 'Submit'))\n","sub_path":"tensorai_root/flowback/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"526523218","text":"# -*- coding: utf-8 -*-\n\"\"\"\nLanguage: python3\nGoal: Draw the rects for face detect & recognization\n\"\"\"\nimport os\nimport sys\nimport time\nimport cv2\nfrom concurrent.futures import ProcessPoolExecutor, wait\nimport numpy as np\n\nprint(sys.version)\nprint(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))\n### Defs region\nclass FaceRects:\n def __init__(self, faceID):\n self.mFaceID = faceID\n self.faceName = None\n self.detScore = None\n self.recgScore = None\n self.mFaceL = None\n self.mFaceT = None\n self.mFaceR = None\n 
self.mFaceB = None\n return\n\nclass FrameRects:\n def __init__(self, frameID):\n self.mFrameID = frameID\n self.mFile = None\n self.detRects = []\n self.recgRects = []\n self.recgRectsIV = []\n self.recgRectsV = []\n self.recgRectsE = []\n self.recgRectsC = []\n return\n\n def printSelf(self):\n print(self.mFrameID)\n print(self.mFile)\n for it in self.detRects:\n print(\"Detection: \" + str(it.mFaceID))\n tmpS = str(it.mFaceL) + ' ' + str(it.mFaceT) + ' ' \\\n + str(it.mFaceR) + ' ' + str(it.mFaceB)\n print(tmpS)\n\n for it in self.recgRectsIV:\n print(\"Invalid: \" + str(it.mFaceID))\n tmpS = str(it.mFaceL) + ' ' + str(it.mFaceT) + ' ' \\\n + str(it.mFaceR) + ' ' + str(it.mFaceB)\n print(tmpS)\n\n for it in self.recgRectsV:\n print(\"Valid: \" + str(it.mFaceID))\n tmpS = str(it.mFaceL) + ' ' + str(it.mFaceT) + ' ' \\\n + str(it.mFaceR) + ' ' + str(it.mFaceB)\n print(tmpS)\n print(it.faceName)\n print(str(it.recgScore))\n return\n\ndef getRectInfo(fOriInfo, fRectsMap):\n tmpI = int(fOriInfo.split(' ')[4])\n tmpR = FaceRects(tmpI)\n tmpR.mFaceL = int(fOriInfo.split()[5])\n tmpR.mFaceT = int(fOriInfo.split()[6])\n tmpR.mFaceR = int(fOriInfo.split()[7])\n tmpR.mFaceB = int(fOriInfo.split()[8])\n\n idx = fOriInfo.split(\"|D||frameID: \")[1].split(\" status: \")[0]\n if idx not in fRectsMap.keys():\n fRectsMap[idx] = FrameRects(idx)\n return tmpR, idx\n\ndef saveAssist(fStoreDst, fReplace, fNewOne):\n storeOne = fStoreDst.replace(fReplace, fNewOne)\n storeRoute = os.path.split(storeOne)\n if not os.path.exists(storeRoute[0]):\n os.makedirs(storeRoute[0])\n return storeOne\n\ndef saveRects(fImg, fDst, fDate, fReplace):\n for rects in fDate.detRects:\n cv2.putText(fImg, str(round(100 * rects.detScore, 2)), \\\n (rects.mFaceL + 3, rects.mFaceT + 13), cv2.FONT_ITALIC, 0.5, \\\n (255, 255, 255), thickness = 1)\n cv2.rectangle(fImg, (rects.mFaceL, rects.mFaceT), \\\n (rects.mFaceR, rects.mFaceB), (255, 255, 255), 2)\n faceRoi = fImg[rects.mFaceT:rects.mFaceB, rects.mFaceL:rects.mFaceR]\n tmpStore = fDst.replace(\".jpg\", \"_D.jpg\")\n cv2.rectangle(faceRoi, (0, 0), (faceRoi.shape[1], faceRoi.shape[0]), \\\n (255, 255, 255), 2)\n cv2.imwrite(tmpStore, faceRoi, cvImgSaver)\n storeRect = saveAssist(tmpStore, fReplace, rectsFolder)\n cv2.imwrite(storeRect, faceRoi, cvImgSaver)\n\n for rects in fDate.recgRectsC:\n cv2.putText(fImg, str(rects.recgScore), \\\n (rects.mFaceL, rects.mFaceT - 10), cv2.FONT_ITALIC, 0.5, \\\n (255, 0, 0), thickness = 1)\n cv2.rectangle(fImg, (rects.mFaceL, rects.mFaceT), \\\n (rects.mFaceR, rects.mFaceB), (0, 255, 0), 2)\n faceRoi = fImg[rects.mFaceT:rects.mFaceB, rects.mFaceL:rects.mFaceR]\n tmpStore = fDst.replace(\".jpg\", \"_R.jpg\")\n cv2.rectangle(faceRoi, (0, 0), (faceRoi.shape[1], faceRoi.shape[0]), \\\n (0, 255, 0), 2)\n cv2.imwrite(tmpStore, faceRoi, cvImgSaver)\n storeRect = saveAssist(tmpStore, fReplace, rectsFolder)\n cv2.imwrite(storeRect, faceRoi, cvImgSaver)\n\n for rects in fDate.recgRectsE:\n cv2.putText(fImg, str(rects.recgScore), \\\n (rects.mFaceL, rects.mFaceT - 10), cv2.FONT_ITALIC, 0.5, \\\n (255, 0, 0), thickness = 1)\n cv2.rectangle(fImg, (rects.mFaceL, rects.mFaceT), \\\n (rects.mFaceR, rects.mFaceB), (0, 0, 255), 2)\n faceRoi = fImg[rects.mFaceT:rects.mFaceB, rects.mFaceL:rects.mFaceR]\n tmpStore = fDst.replace(\".jpg\", \"_E.jpg\")\n cv2.rectangle(faceRoi, (0, 0), (faceRoi.shape[1], faceRoi.shape[0]), \\\n (0, 0, 255), 2)\n cv2.imwrite(tmpStore, faceRoi, cvImgSaver)\n storeRect = saveAssist(tmpStore, fReplace, rectsFolder)\n cv2.imwrite(storeRect, 
faceRoi, cvImgSaver)\n\n for rects in fDate.recgRectsIV:\n cv2.rectangle(fImg, (rects.mFaceL, rects.mFaceT), \\\n (rects.mFaceR, rects.mFaceB), (0, 255, 255), 2)\n faceRoi = fImg[rects.mFaceT:rects.mFaceB, rects.mFaceL:rects.mFaceR]\n tmpStore = fDst.replace(\".jpg\", \"_I.jpg\")\n cv2.rectangle(faceRoi, (0, 0), (faceRoi.shape[1], faceRoi.shape[0]), \\\n (0, 255, 255), 2)\n cv2.imwrite(tmpStore, faceRoi, cvImgSaver)\n storeRect = saveAssist(tmpStore, fReplace, rectsFolder)\n cv2.imwrite(storeRect, faceRoi, cvImgSaver)\n\n return fImg\n\ndef concurrentJob(fTmpS, fOriData, fObj):\n fTmpS = fTmpS.replace(suffix, \".jpg\")\n store = saveAssist(fTmpS, dataFolder, dstImgRoot)\n storeWhole = saveAssist(store, level3Folder, fullImgFolder)\n if not os.path.exists(store):\n imgYuv = np.fromstring(fOriData, dtype = np.uint8)\n imgYuv = np.reshape(imgYuv, (-1, cols))\n imgBgr = cv2.cvtColor(imgYuv, cv2.COLOR_YUV2BGR_NV21)\n else:\n imgBgr = cv2.imread(store, cv2.IMREAD_COLOR)\n\n saveRects(imgBgr, store, fObj, level3Folder)\n cv2.imwrite(store, imgBgr, cvImgSaver)\n cv2.imwrite(storeWhole, imgBgr, cvImgSaver)\n\n### Params region\nlogFolder = \"/home/devin/Desktop/TestResults/\"\ndataFolder = \"/media/devin/OpenImage600/face3/\"\nlevel3Folder = \"cameraData\"\nrectsFolder = \"Rects\"\nfullImgFolder = \"FullImage\"\nnameMap = {\"yanchangjian\" : \"颜长建\", \"guangming\" : \"广明\", \"yukeke\" : \"珂珂\"}\nobjs = [\"yanchangjian\", \"guangming\", \"yukeke\"]\n\ncols = 1280\nrows = 720\ndstImgRoot = \"/home/devin/Desktop/tmpPng/\"\nsuffix = \".nv21\"\nfileBytes = cols * rows * 3 / 2\ncvImgSaver = [int(cv2.IMWRITE_JPEG_QUALITY), 90]\n\n### Job region\nif __name__ == '__main__':\n futuresList = []\n executor = ProcessPoolExecutor(max_workers = 3)\n for obj in objs:\n oriData = None\n tmpList = []\n rectsMap = {}\n dataSet = dataFolder + obj + \"/\" + level3Folder + \"/\"\n logSet = logFolder + obj + \"/\"\n for rt, dirs, files in os.walk(logSet):\n for name in files:\n # print(name)\n with open(os.path.join(rt, name), 'r') as f:\n for line in f.readlines():\n if -1 != line.find(\"status: detectInfo\") \\\n or -1 != line.find(\"status: recgInvalidRoi\") \\\n or -1 != line.find(\"status: recgValidRoi\") \\\n or -1 != line.find(\"file: \"):\n tmpList.append(line)\n\n for line in tmpList:\n if -1 != line.find(\"status: detectInfo\"):\n tmpR, idx = getRectInfo(line, rectsMap)\n tmpR.detScore = float(line.split()[9])\n rectsMap.get(idx).detRects.append(tmpR)\n\n if -1 != line.find(\"status: recgInvalidRoi\"):\n tmpR, idx = getRectInfo(line, rectsMap)\n rectsMap.get(idx).recgRectsIV.append(tmpR)\n\n if -1 != line.find(\"status: recgValidRoi\"):\n tmpR, idx = getRectInfo(line, rectsMap)\n tmpR.faceName = line.split()[10]\n tmpR.recgScore = float(line.split()[12])\n\n if -1 != tmpR.faceName.find(nameMap.get(obj)):\n rectsMap.get(idx).recgRectsC.append(tmpR)\n else:\n rectsMap.get(idx).recgRectsE.append(tmpR)\n\n for it in rectsMap.keys():\n for line in tmpList:\n idx = line.split(\"|D||frameID: \")[1].split(\" file: \")[0]\n if -1 != line.find(\"file: \") and idx == it:\n tmpS = line.split(\"/\")[3] + '/'\n tmpS += line.split(\"/\")[4].split(\" status:\")[0]\n # print(tmpS)\n rectsMap.get(it).mFile = tmpS\n\n # for it in rectsMap.keys():\n # rectsMap.get(it).printSelf()\n print(obj + \" Record frames Num: \" + str(len(rectsMap)))\n\n if os.path.exists(dataSet):\n for it in rectsMap.keys():\n tmpS = dataSet + rectsMap.get(it).mFile\n objX = rectsMap.get(it)\n if not os.path.exists(tmpS):\n print(\"no file: \" + tmpS)\n 
continue\n else:\n with open(tmpS, 'rb') as fr:\n oriData = fr.read()\n if not len(oriData) == fileBytes:\n print(\"Size mismatch: \" + tmpS)\n continue\n\n future = executor.submit(concurrentJob, tmpS, oriData, objX)\n futuresList.append(future)\n else:\n print(\"No Source!!!\")\n sys.exit(0)\n wait(futuresList)\n\nprint(os.linesep)\nprint(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))","sub_path":"jobTool/AnalysisRect.py","file_name":"AnalysisRect.py","file_ext":"py","file_size_in_byte":9444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"318706548","text":"import datetime\r\nimport os\r\nimport subprocess\r\nfrom tempfile import NamedTemporaryFile\r\nfrom typing import Tuple\r\n\r\nfrom KeyManagementToolLib.Helpers.Crypto.Primitives.AbstractKey import AbstractKey\r\nfrom KeyManagementToolLib.Helpers.Crypto.Primitives.RawCertificate import RawCertificate\r\nfrom KeyManagementToolLib.Helpers.Crypto.Primitives.RawKeyRevocationList import RawKeyRevocationList\r\nfrom KeyManagementToolLib.Helpers.Crypto.Primitives.RawPrivateKey import RawPrivateKey\r\nfrom KeyManagementToolLib.Helpers.Crypto.Primitives.RawPublicCAKey import RawPublicCAKey\r\nfrom KeyManagementToolLib.Helpers.Crypto.Primitives.RawPublicKey import RawPublicKey\r\nfrom KeyManagementToolLib.Helpers.Crypto.Primitives.Signer.DatetimeNow import correct_datetime, datetime_now\r\nfrom KeyManagementToolLib.Helpers.Crypto.Primitives.Signer.Errors import SignError\r\nfrom .SignerInterface import SignerInterface\r\n\r\n\r\nclass SshKeygenSignerBackend(SignerInterface):\r\n TIMEFORMAT = '%Y%m%d%H%M%S'\r\n\r\n @classmethod\r\n def makePublicPrivateKeyPair(cls):\r\n # type: () -> Tuple[RawPublicKey, RawPrivateKey]\r\n with NamedTemporaryFile(mode='r') as privatekey_file:\r\n # print(\"created tmpfile @\", privatekey_file.name)\r\n privatekey_filename = os.path.basename(privatekey_file.name)\r\n tmpdir = os.path.dirname(privatekey_file.name)\r\n # ssh-keygen -b 4096 -t rsa -f ~/.ssh/id_rsa\r\n # ssh-keygen -b 4096 -t rsa -f example-com-ca -C \"CA key for example.com\"\r\n yes_proc = subprocess.Popen(['echo', 'y'], stdout=subprocess.PIPE)\r\n result = subprocess.run(['ssh-keygen', '-b', '4096', '-t', 'rsa', '-N', '', '-f', privatekey_file.name],\r\n shell=False,\r\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=yes_proc.stdout)\r\n yes_proc.wait()\r\n yes_proc.stdout.close()\r\n if result.returncode != 0:\r\n raise SignError(result.stderr.decode('utf-8') if result.stderr else \"ssh-keygen failed unexpectedly\")\r\n publickey_filename = os.path.join(tmpdir, privatekey_filename + '.pub')\r\n if not os.path.exists(publickey_filename):\r\n raise SignError(\"file not found \" + publickey_filename)\r\n with open(publickey_filename, 'r') as f:\r\n publickey = RawPublicKey(f.read())\r\n privatekey = RawPrivateKey(privatekey_file.file.read())\r\n os.remove(publickey_filename)\r\n return publickey, privatekey\r\n\r\n @classmethod\r\n def generatePublicPrivateCAKey(cls, hostname):\r\n with NamedTemporaryFile(mode='r') as privatekey_file:\r\n # print(\"created tmpfile @\", privatekey_file.name)\r\n privatekey_filename = os.path.basename(privatekey_file.name)\r\n # print(privatekey_filename)\r\n tmpdir = os.path.dirname(privatekey_file.name)\r\n # print(tmpdir)\r\n # ssh-keygen -b 4096 -t rsa -f example-com-ca -C \"CA key for example.com\"\r\n comment = hostname\r\n comment = comment.replace(' ', '')\r\n yes_proc = subprocess.Popen(['echo', 'y'], stdout=subprocess.PIPE)\r\n result = 
subprocess.run(['ssh-keygen', '-b', '4096', '-t', 'rsa', '-f', privatekey_file.name, '-C',\r\n comment, '-N', ''], shell=False,\r\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=yes_proc.stdout)\r\n # fixes a bug where a warning gets printed, that a subprocess (popen) is still running\r\n yes_proc.wait()\r\n yes_proc.stdout.close()\r\n assert yes_proc.returncode is not None\r\n\r\n if result.returncode != 0:\r\n raise SignError(result.stderr.decode('utf-8') if result.stderr else \"ssh-keygen failed unexpectedly\")\r\n publickey_filename = os.path.join(tmpdir, privatekey_filename + '.pub')\r\n if not os.path.exists(publickey_filename):\r\n raise SignError(\"file not found \" + publickey_filename)\r\n with open(publickey_filename, 'r') as f:\r\n publickey = RawPublicCAKey(f.read())\r\n privatekey = RawPrivateKey(privatekey_file.file.read())\r\n os.remove(publickey_filename)\r\n return publickey, privatekey\r\n\r\n @classmethod\r\n def signPublicKeyWithPrivateKeyForUserForProject(cls, publickey, privatekey, principals, key_id, serial,\r\n duration=datetime.timedelta(days=90),\r\n critical_options=None,\r\n extensions=None):\r\n # print(\"singing\")\r\n # print(publickey.str_repr)\r\n # print(\"with\")\r\n # print(privatekey.str_repr)\r\n # print(\"for user\", username, \"group\", groupname, \"in\", projectname, \"@\", projectdomain, \"serial\", serial)\r\n with cls._abstract_key_to_tmp_file(publickey) as publickey_file:\r\n publickey_filename = os.path.basename(publickey_file.name)\r\n tmpdir = os.path.dirname(publickey_file.name)\r\n with cls._abstract_key_to_tmp_file(privatekey) as privatekey_file:\r\n # ssh-keygen -s example-com-ca -n user -V +52w -I example.com-user id_rsa.pub\r\n if type(duration) is datetime.timedelta:\r\n start = datetime_now() # - datetime.timedelta(days=1)\r\n end = start + duration\r\n elif (type(duration) in (tuple, list)\r\n and len(duration) == 2\r\n and type(duration[0]) is datetime.datetime\r\n and type(duration[1]) is datetime.datetime):\r\n start, end = duration\r\n start = correct_datetime(start) # - datetime.timedelta(days=1)\r\n end = correct_datetime(end) # + datetime.timedelta(days=1)\r\n # start = start-datetime.timedelta(days=1)\r\n else:\r\n raise TypeError(repr(duration))\r\n delta = start.strftime(cls.TIMEFORMAT) + ':' + end.strftime(cls.TIMEFORMAT)\r\n args = ['ssh-keygen',\r\n '-s', privatekey_file.name,\r\n '-n', ','.join(principals),\r\n '-V', delta,\r\n '-z', str(serial),\r\n '-I', key_id]\r\n args.extend(['-O', 'clear'])\r\n if critical_options:\r\n for n, v in critical_options.items():\r\n if v:\r\n args.extend(['-O', 'critical:' + str(n) + '=' + str(v)])\r\n else:\r\n args.extend(['-O', 'critical:' + str(n)])\r\n if extensions:\r\n for n, v in extensions.items():\r\n if v:\r\n args.extend(['-O', 'extension:' + str(n) + '=' + str(v)])\r\n else:\r\n args.extend(['-O', 'extension:' + str(n)])\r\n args.append(publickey_file.name)\r\n result = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n # Todo: pipe stderr to null\r\n if result.returncode != 0:\r\n # print(privatekey_file.name, open(privatekey_file.name).read())\r\n # print(publickey_file.name, open(publickey_file.name).read())\r\n raise SignError(\r\n result.stderr.decode('utf-8') if result.stderr else \"ssh-keygen failed unexpectedly\")\r\n cert_file = os.path.join(tmpdir, publickey_filename + '-cert.pub')\r\n if not os.path.exists(cert_file):\r\n raise SignError(\"file not found \" + cert_file)\r\n with open(cert_file, 'r') as f:\r\n cert_content = 
f.read()\r\n os.remove(cert_file)\r\n # print(\"cert is\")\r\n # print(cert_content)\r\n return RawCertificate(cert_content, key_id, serial, start, end)\r\n\r\n @classmethod\r\n def generateKRL(cls, serial_list, key_id_list, publiccakey):\r\n # if not serial_list and not key_id_list:\r\n # return RawKeyRevocationList(b'', [])\r\n with NamedTemporaryFile('w+b') as ids_to_revoke, NamedTemporaryFile(\r\n 'rb') as revoked_keys, cls._abstract_key_to_tmp_file(publiccakey) as publicca_key:\r\n if serial_list:\r\n for serial in serial_list:\r\n ids_to_revoke.file.write(str(\"serial: \" + str(serial) + os.linesep).encode('utf-8'))\r\n if key_id_list:\r\n for key_id in key_id_list:\r\n ids_to_revoke.file.write(str(\"id: \" + str(key_id) + os.linesep).encode('utf-8'))\r\n ids_to_revoke.file.flush()\r\n\r\n result = subprocess.run(['ssh-keygen',\r\n '-k',\r\n '-f', revoked_keys.name,\r\n '-s', publicca_key.name,\r\n ids_to_revoke.name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n if result.returncode != 0:\r\n # print(ids_to_revoke.name)\r\n # print(revoked_keys.name)\r\n # print(publicca_key.name)\r\n raise SignError(\r\n result.stderr.decode('utf-8') if result.stderr else \"ssh-keygen failed unexpectedly\")\r\n return RawKeyRevocationList(revoked_keys.file.read(), serial_list)\r\n\r\n @classmethod\r\n def _abstract_key_to_tmp_file(cls, abstract_key):\r\n # type: (AbstractKey) -> NamedTemporaryFile\r\n tmp_file = NamedTemporaryFile(mode='w')\r\n tmp_file.file.write(abstract_key.str_repr)\r\n tmp_file.file.flush()\r\n tmp_file.file.seek(0) # for reading, return to 0 index\r\n return tmp_file\r\n","sub_path":"KeyManagementToolLib/Helpers/Crypto/Primitives/Signer/SshKeygenSignerBackend.py","file_name":"SshKeygenSignerBackend.py","file_ext":"py","file_size_in_byte":10022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"108314160","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = u'KonSona'\nSITENAME = u'The Sorcerer on the Hill'\nSITEURL = 'http://www.hillsorcerer.com'\n\nPATH = 'content'\n\nTIMEZONE = 'America/Los_Angeles'\n\nDEFAULT_LANG = u'en'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\nTYPOGRIFY = True\n\n# Blogroll\nLINKS = (('Pelican', 'http://getpelican.com/'),\n ('Python.org', 'http://python.org/'),\n ('Jinja2', 'http://jinja.pocoo.org/'),\n ('You can modify those links in your config file', '#'),)\n\n# Social widget\nSOCIAL = (('You can add links in your config file', '#'),\n ('Another social link', '#'),)\n\nDEFAULT_PAGINATION = False\n\n# Uncomment following line if you want document-relative URLs when developing\nRELATIVE_URLS = True\n\nTHEME = 'sorcerous-elegant'\n\nDIRECT_TEMPLATES = (('index', 'tags', 'categories','archives', 'search', '404'))\n\nSTATIC_PATHS = ['downloads',\n 'downloads/magick',\n 'images',\n 'extra/robots.txt',\n 'extra/favicon.ico',\n 'extra/CNAME',\n 'extra/google77a40defb75ab765.ht']\n\nEXTRA_PATH_METADATA = {\n 'extra/google77a40defb75ab765.ht': {'path': 'google77a40defb75ab765.html'},\n 'extra/robots.txt': {'path': 'robots.txt'},\n 'extra/favicon.ico': {'path': 'favicon.ico'},\n 'extra/CNAME': {'path': 'CNAME'}\n}\n\n#import mdx_pullquote\n#pullquote = mdx_pullquote.makeExtension()\n\nPAGE_EXCLUDES = ['downloads/runes/magick', 'extra/google77a40defb75ab765.html']\nARTICLE_EXCLUDES = 
PAGE_EXCLUDES\n\nMD_EXTENSIONS = ['pullquote',\n 'extra',\n 'tables',\n 'iconfonts(prefix=magick-)'\n]\n\nPLUGINS = ['tipue_search',\n 'sitemap',\n 'summary',\n 'neighbors']\n\nSITE_DESCRIPTION = \"A blog about sorcery, magic, religion, rituals, poetry and anything else related that catches my eye.\"\n\nDEFAULT_DATE_FORMAT = \"%d %b %Y\"\n\nSUMMARY_MAX_LENGTH = 20\n\nARTICLE_URL = '{category}/{slug}.html'\nARTICLE_SAVE_AS = '{category}/{slug}.html'\n\nPROJECTS = [\n {\n 'name' : \"Astrology\",\n 'url' : '/astrology/',\n 'description' : 'Reference and mnemonics'\n },\n {\n 'name' : \"Geomancy\",\n \"url\" : \"/geomancy/\",\n \"description\" : \"Geomancy Reference\"\n },\n {\n 'name': 'Runes',\n 'url': '/runes/',\n 'description': 'Resources, including desktop font, web font, and meanings'\n }]\n\nLANDING_PAGE_ABOUT = {\n 'title': 'Sorcery, Magic, Spirit Work and Such.',\n 'details': 'Sorcerer, Husband, Father, Programmer, Priest.'\n }\n\nSITEMAP = {'format': 'xml'}\n\nCOMMENTS_INTRO = 'Please, let me know what you think. (Click \"comments\" below to open.)'\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"480432173","text":"\nfrom os import listdir\nfrom os.path import isfile, join\nimport numpy\nimport cv2\nimport base64\nfrom flask import Flask, url_for\nfrom flask import jsonify\nimport time\n\napp = Flask(__name__)\n\n\ndef batchrequest(image_path='../images/'):\n image_batches = dict()\n # mypath = '/home/cpchung/recent-learning/python/flask'\n mypath = image_path\n onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n images = numpy.empty(len(onlyfiles), dtype=object)\n for i in range(0, len(onlyfiles)):\n\n images[i] = cv2.imread(join(mypath, onlyfiles[i]))\n\n l, w, c = images[i].shape\n # create a key as a UNIX timestamp w/ array shape appended to end of key delimited by '|'\n array_dtype = str(images[i].dtype)\n key = '{0}|{1}#{2}#{3}'.format(int(time.time()), array_dtype, l, w)\n\n val = base64.b64encode(images[i])\n image_batches[key] = val.decode('utf-8')\n\n # cv2.imshow('frame', images[n])\n # cv2.waitKey(0)\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # break\n # When everything done, release the capture\n # cv2.destroyAllWindows()\n return image_batches\n\n\n@app.route('/facebatchrequest')\ndef api_face_batch_request():\n face_path = '../images/'\n js = batchrequest(face_path)\n # print (type(js))\n return jsonify(js)\n\n\n@app.route('/bodybatchrequest')\ndef api_body_batch_request():\n body_path = '../images/'\n return jsonify(batchrequest(body_path))\n\n\n@app.route('/articles')\ndef api_articles():\n return 'List of ' + url_for('api_articles')\n\n\n@app.route('/')\ndef api_root():\n # return 'Welcome'\n test = {}\n test[1] = 2\n return jsonify(test)\n\n\n# @app.route('/articles/')\n# def api_article(articleid):\n# return 'You are reading ' + articleid\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run()\n","sub_path":"python/compact/examples/detection_producer.py","file_name":"detection_producer.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"189526790","text":"from django.conf.urls import patterns, include, url\nfrom tune import views\n\n# note: \"(?P...)\" Creates a named capturing group.\n\nurlpatterns = patterns('',\n\n ## Homepage ##\n\n # e.g. 
/\n url(r'^$', views.index, name='index'),\n\n ## User routes ##\n\n # e.g. /login - routes to login page\n url(r'^login$', views.userLogin, name='userLogin'),\n # e.g. /logout - routes to logout page\n url(r'^logout$', views.userLogout, name='userLogout'),\n # e.g. /validateUser - redirects to index after validation\n url(r'^validateUser$', views.validateUser, name='validateUser'),\n\n ## Search routes ##\n\n # e.g. /search?query=xyz\n url(r'^search', views.search, name='search'),\n # e.g. /detail/'href_stuff'\n url(r'^detail/(?P<href_stuff>.+)$', views.detail, name='detail'),\n\n ## Track manipulation routes ##\n\n # e.g. /addTracks - redirects to index after addition\n url(r'^addTracks$', views.addTracks, name='addTracks'),\n\n ## CRUD routes ##\n\n # e.g. /new\n url(r'^new$', views.new, name='new'),\n # e.g. /create - redirects to 'show' view after creation\n url(r'^create$', views.create, name='create'),\n # e.g. /show/2\n url(r'^show/(?P<id>\d+)$', views.show, name='show'),\n # e.g. /show/5/edit\n url(r'^show/(?P<id>\d+)/edit$', views.update, name='update'),\n # e.g. /show/5/delete - redirects to index after deletion\n url(r'^show/(?P<id>\d+)/delete$', views.delete, name='delete'),\n)\n\n","sub_path":"tune/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"253445036","text":"\n\nfrom xai.brain.wordbase.verbs._codify import _CODIFY\n\n#class header\nclass _CODIFIED(_CODIFY, ):\n\tdef __init__(self,): \n\t\t_CODIFY.__init__(self)\n\t\tself.name = "CODIFIED"\n\t\tself.specie = 'verbs'\n\t\tself.basic = "codify"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_codified.py","file_name":"_codified.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"126458974","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Importing libraries\n\nimport numpy as np\nfrom numpy import newaxis\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport streamlit as sl\nfrom sklearn.preprocessing import MinMaxScaler\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Dense, LSTM, Dropout\nimport streamlit.components.v1 as components\nfrom PIL import Image\n\n\n# # Loading and splitting the dataset\nsl.set_page_config(layout="wide")\nimg = Image.open("ENIGMA.png")\nsl.image(img)\n\nsl.write('''\n # Introduction\n\n #### What Is Diversification? \n Diversification is a risk management strategy that mixes a wide variety of investments within a portfolio. A diversified portfolio contains a mix of distinct asset types and investment vehicles in an attempt at limiting exposure to any single asset or risk. 
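For example, splitting money equally between two uncorrelated assets that have the same variance cuts the overall portfolio variance in half compared with holding either asset alone.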
The rationale behind this technique is that a portfolio constructed of different kinds of assets will, on average, yield higher long-term returns and lower the risk of any individual holding or security.\n\n''')\n\ncol1,col2 = sl.columns(2)\n\ncol1.header("Past Performance")\nimg2 = Image.open("chartcomb.jpeg")\ncol1.image(img2, use_column_width=True)\n\n\ncol2.header("How we chose our data")\ncol2.write('''\nWe have chosen 15 stocks, 3 each from five different sectors: automobile, banking, metals, medical, and technology.\n\nWe plan to optimize by diversifying our portfolio so as to decrease the risk, which we do by picking the stocks with the maximum returns calculated by the LSTM model and then checking how strongly they are correlated with their competitors.\nLet's understand this by taking a hypothetical example.\nI have to invest money in 2 stocks A and B, both tech giants, and I find stock A gives 20 percent higher returns compared to B. But I need to reduce my risk: because they are highly correlated, if one stock drops there is a higher probability that the other stock drops too, therefore I choose to invest in A and another stock which is less correlated to A to reduce my risk.\n''')\n\n\n\n\nsl.write(''' # Predicting Stock Prices''')\n\ncol3,col4 = sl.columns(2)\ncol3.header('LSTM Output')\ncol3.write('''\nWe have chosen 15 stocks, 3 each from five different sectors: automobile, banking, metals, medical, and technology.\n\nWe plan to optimize by diversifying our portfolio so as to decrease the risk, which we do by picking the stocks with the maximum returns calculated by the LSTM model and then checking how strongly they are correlated with their competitors.\nLet's understand this by taking a hypothetical example.\nI have to invest money in 2 stocks A and B, both tech giants, and I find stock A gives 20 percent higher returns compared to B. 
But i need to reduce my risk because they are highly correlated so if one stock drops there is a higher probability that the other stock drops too, therefore i choose to invest in A and another stock which is less likely correlated to A to reduce my risk.\n''')\n\ncol4.header(\"\")\nimg3 = Image.open(\"lstmoutput.jpeg\")\ncol4.image(img3, use_column_width=True)\n\n#################\n\ncol5,col6 = sl.columns((1,2))\ncol5.header('Comparing different models')\ncol5.write('''\nWe are using LSTM to predict the stock prices as it outperforms both, Arima and seq2seq models with the least Mean Absolute Error.\n''')\n\ncol6.header(\"\")\nimg4 = Image.open(\"lstmcompare.png\")\ncol6.image(img4, use_column_width=True)\n\n\n\n#Loading the data\ndata = pd.read_excel(r'Combined_Stocks.xlsx', date_parser = True)\ndata=data.dropna()\n# Taking data from 2000 to 2020 as training set\ndata_training = data[data['Date']<'01-01-2020'].copy()\n#0-3702\n\n#test data\ndata_test = data[data['Date']>='01-01-2020'].copy()\n\n# Data preprocessing of training data\ntraining_data = data_training.drop(['Date'], axis=1)\ntesting_data = data_test.drop(['Date'], axis=1)\n\n\n\n\n\n\nfrom keras.callbacks import EarlyStopping\nearlyStop=EarlyStopping(monitor=\"val_loss\",verbose=2,mode='min',patience=3)\n\n\n# # Using LSTM to predict stock prices\n\n\n# for i in range(0,len(training_data.columns)):\n# stock = training_data[training_data.columns[i]]\n# stocks_train = stock.to_frame()\n# stock_test = testing_data[testing_data.columns[i]]\n# stocks_test = stock_test.to_frame()\n \n# scaler = MinMaxScaler()\n# stocks = scaler.fit_transform(stocks_train)\n \n \n# X_train = []\n# y_train = []\n# for j in range(200, stocks.shape[0]):\n# X_train.append(stocks[j-200:j]) \n# y_train.append(stocks[j,0])\n \n# X_train, y_train = np.array(X_train), np.array(y_train)\n\n# past_200_days = stocks_train.tail(200)\n# df = past_200_days.append(stocks_test, ignore_index=True)\n \n# inputs = scaler.transform(df)\n\n# X_test = []\n# y_test = []\n \n# for j in range(200, inputs.shape[0]):\n# X_test.append(inputs[j-200:j])\n# y_test.append(inputs[j,0])\n \n# X_test, y_test = np.array(X_test), np.array(y_test)\n \n# model = Sequential()\n# model.add(LSTM(units = 60, activation = 'relu', return_sequences = True, input_shape = (X_train.shape[1],1)))\n# model.add(Dropout(0.2))\n\n# model.add(LSTM(units = 60, activation = 'relu', return_sequences = True))\n# model.add(Dropout(0.2))\n\n# model.add(LSTM(units = 80, activation = 'relu', return_sequences = True))\n# model.add(Dropout(0.2))\n\n# model.add(LSTM(units = 120, activation = 'relu'))\n# model.add(Dropout(0.2))\n\n# model.add(Dense(units = 1))\n# #model.summary()\n\n# model.compile(optimizer='adam', loss = 'mse', metrics = ['accuracy'])\n \n# history = model.fit(X_train, y_train, validation_data = (X_test, y_test), epochs=1, batch_size=32, verbose=2, callbacks=[earlyStop])\n\n \n# y_pred = model.predict(X_test)\n \n# scale = 1/scaler.scale_[0]\n \n# y_pred = y_pred*scale\n# y_test = y_test*scale\n \n# plt.figure(figsize=(14,5))\n# plt.plot(y_test, color = 'red')\n# plt.xlabel(data.columns[i+1])\n# plt.ylabel(\"values\")\n# plt.plot(y_pred, color = 'blue')\n\n# sl.pyplot(plt)\n\n \n# first=np.mean(y_pred[150:180])\n# last=np.mean(y_pred[373:403])\n# percentage=(last-first)/first*100\n \n# print(\"stock name= \"+ data.columns[i+1] )\n\n# print(percentage)\n\n# print(\"\")\n\n# if(i==0):\n# break\n\n\n\nsl.write(''' # Analysing the Predictions''')\nsl.write('''## Pearson Correlation - 
''')\nsl.write(data.corr(method='pearson'))\n\n\n\n\n\nsl.write(''' # Asset Allocation ''')\nsl.write(''' ## Efficient Frontier - ''')\n\ncol7,col8 = sl.columns((2,2))\ncol7.header(\"\")\nimg3 = Image.open(\"efchart.png\")\ncol7.image(img3, use_column_width=True)\n\n\nimg5 = Image.open(\"efoutput.png\")\ncol8.image(img5, use_column_width=True)\n\n\n\n\n\nsl.write(''' # Results ''')\nimport ffn\nprices = ffn.get('sbin.ns,ICICIBANK.NS,HEROMOTOCO.NS,wipro.ns,jswsteel.ns,SUNPHARMA.NS,tatasteel.ns', start='2005-01-01')\n\n\n#selected top stocks from that group on the basis of annual return if they were highly correlated\n#WE WILL USE RETURNS FROM LSTM AND CORRELATION TO FIND STOCKS TO SELECT IN PORTFOLIO\n#plotting stock performace since 2015\nax = prices.rebase().plot()\n\n\n\n\nnp.seterr(all='ignore')\nstats = prices.calc_stats()\n\n\n\n\n\n# We put random weight in starting which will be optimized further\nweights = np.asarray([0.2,0.2,0.1,0.1,0.1,0.1,0.2])\n\nreturns = prices.pct_change()\n \n# mean daily return and covariance of daily returns\nmean_daily_returns = returns.mean()\ncov_matrix = returns.cov()\n\n# portfolio return and volatility\npf_return = round(np.sum(mean_daily_returns * weights) * 252, 3)\npf_std_dev = round(np.sqrt(np.dot(weights.T, np.dot(cov_matrix, weights))) * np.sqrt(252), 3)\n\n\n\n\n\n\nprint(\"Volatility: \" + \"{:.1%}\".format(pf_std_dev))\n\nsl.write(\"Expected annualized return: \" + \"{:.1%}\".format(pf_return))\nsl.write(\"Volatility: \" + \"{:.1%}\".format(pf_std_dev))\n\n\nfrom pypfopt import expected_returns\nfrom pypfopt import risk_models\nfrom pypfopt import discrete_allocation\nfrom pypfopt.cla import CLA\n\nfrom pypfopt.efficient_frontier import EfficientFrontier\n\nimport matplotlib\nfrom matplotlib.ticker import FuncFormatter\n\nexp_returns = expected_returns.mean_historical_return(prices)\ncovar = risk_models.sample_cov(prices)\n\n#have to use sortino here, hopefully that gives us better returns\n\n# Optimise portfolio for maximum Sharpe Ratio\nef = EfficientFrontier(exp_returns, covar)\nraw_weights = ef.max_sharpe()\npf = ef.clean_weights()\nprint(pf)\n\nperf = ef.portfolio_performance(verbose=True)\n\n\n\nef = EfficientFrontier(exp_returns, covar, weight_bounds=(-1, 1))\npf = ef.efficient_return(target_return=perf[0])\nprint(pf)\nsl.write(pf)\nsl.write(\"***\")\nperf = ef.portfolio_performance(verbose=True)\n\n\n\n","sub_path":"nus.py","file_name":"nus.py","file_ext":"py","file_size_in_byte":8570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"488471361","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nclass Automaton(object):\n transitions = {}\n initialState = None\n finalStates = set()\n\n def __init__(self, path=None):\n if path != None:\n file = open(path, 'r')\n lines = file.readlines()\n self.setInitialState(lines[0].replace('\\n', ''))\n del lines[0]\n for elm in lines[-1].replace('\\n', '').split(' '):\n self.setFinalState(elm)\n del lines[-1]\n for line in lines:\n line = line.replace('\\n', '').split('->')\n initialState = line[0]\n transition = line[1].split(',')[0]\n resultState = line[1].split(',')[1].replace(' ','')\n self.setTransition(initialState, transition, resultState)\n\n def __str__(self):\n return \"Transições: {}\\nEstado Inicial: {}\\nEstados Finais: {}\".format(self.transitions,self.initialState,self.finalStates)\n\n def setTransition(self, initialState, transition, resultState):\n if initialState not in self.transitions:\n 
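 # first transition recorded for this state: start its transition table, e.g. setTransition('q0', 'a', 'q1') yields {'q0': {'a': 'q1'}}\n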
self.transitions[initialState] = {transition:resultState}\n else:\n self.transitions[initialState][transition] = resultState\n\n def setFinalState(self, finalState):\n self.finalStates.add(finalState)\n\n def setInitialState(self, state):\n self.initialState = state\n\n def isFinalState(self, state):\n \"\"\"Checks whether the given state is a final state\"\"\"\n return True if state in self.finalStates else False\n\n def isDFA(self):\n for transition in self.transitions[self.initialState]:\n pass\n\n def hasUnrechableState(self):\n \"\"\"Checks whether there is any unreachable state\"\"\"\n s = set()\n for state in self.transitions:\n for transition in self.transitions[state]:\n if self.transitions[state][transition] not in s:\n s.add(self.transitions[state][transition])\n return True if s < set(self.transitions) else False\n\n\n\nif __name__ == '__main__':\n a1 = Automaton(path='autom.txt')\n print(a1)\n \n #Test\n","sub_path":"automaton-listas.py","file_name":"automaton-listas.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"551671797","text":"#!/usr/bin/python\n\nimport argparse\n\n\n# takes a list of at least two prices as input and returns the best buy-then-sell profit\ndef find_max_profit(prices):\n # cheapest buying price seen so far\n min_price = prices[0]\n # best (sell - buy) difference seen so far\n max_profit = prices[1] - prices[0]\n for price in prices[1:]:\n if price - min_price > max_profit:\n max_profit = price - min_price\n if price < min_price:\n min_price = price\n\n # return f"Best buy price ${min_price}, max profit ${max_profit}"\n return max_profit\n\n\nprint(find_max_profit([100, 55, 4, 98, 10, 18,\n 90, 95, 43, 11, 47, 67, 89, 42, 49, 79]))\n\n\nif __name__ == '__main__':\n # This is just some code to accept inputs from the command line\n parser = argparse.ArgumentParser(\n description='Find max profit from prices.')\n parser.add_argument('integers', metavar='N', type=int,\n nargs='+', help='an integer price')\n args = parser.parse_args()\n\n print("A profit of ${profit} can be made from the stock prices {prices}.".format(\n profit=find_max_profit(args.integers), prices=args.integers))\n","sub_path":"stock_prices/stock_prices.py","file_name":"stock_prices.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"200589016","text":"# Detect face regions in an image and draw boxes around them\n\nimport cv2\nimport sys\n\ncascade_file = "haarcascade_frontalface_default.xml"\ncascade = cv2.CascadeClassifier(cascade_file)\n\nimage_file = "./data/face1.jpg"\nimage = cv2.imread(image_file)\nimage_gs = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\nface_list = cascade.detectMultiScale(image_gs, scaleFactor=1.1, minNeighbors=1, minSize=(150, 150))\n\nif len(face_list) > 0:\n print(face_list)\n color = (0, 0, 255) # rectangle color\n for face in face_list:\n x, y, w, h = face\n cv2.rectangle(image, (x,y), (x+w, y+h), color, thickness=8)\n cv2.imwrite("facedetect-output2.png",image)\nelse:\n print("no face")","sub_path":"ch04(openCV)/facedetect/cv_face_ex01.py","file_name":"cv_face_ex01.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"649313019","text":"import numpy as np\nimport IPython\nfrom itertools import cycle, islice\nfrom noise import pnoise1\nfrom cairo_utils.dcel.constants import VertE, EdgeE, FaceE, SampleE\nfrom cairo_utils.constants import QUARTERPI, TWOPI\nfrom cairo_utils.dcel.Line import Line\nimport 
cairo_utils.dcel.sample_specs as sample_specs\nfrom cairo_utils.math import bezier1cp, bezier2cp, get_midpoint\nfrom cairo_utils import easings\nimport cairo_utils as utils\nfrom . import heightmap\nfrom .operatorTemplate import OperatorTemplate\nimport logging as root_logger\nlogging = root_logger.getLogger(__name__)\n\n\nAMNT = 10\n\nclass RoadOperator(OperatorTemplate):\n \"\"\" Operator to create a road, ignoring intersections\n Potential Growth Rules: (*: selected)\n Random *\n Growth\n Cul de Sac\n Hole Closing\n Island Connecting\n Terrain Following\n Shape Laying\n Wheel\n Face Exploration\n Edge exploration and return\n \"\"\"\n\n def __init__(self, minMaxWidth, minMaxLength, amnt=AMNT, hbal=0.5, hbounds=None):\n super().__init__()\n self.minMaxWidth = minMaxWidth\n self.minMaxLength = minMaxLength\n self.useCount = 0\n self.targetAmnt = amnt\n self.hbal = hbal\n self.hbounds = hbounds\n\n if self.hbounds is None:\n self.hbounds = [200,500]\n \n def operate(self, draw=True, override=False):\n \"\"\" Performs the operator, returns all changes as a list \"\"\"\n self.delta = []\n edges = []\n maxmin, ranges, mids = super().setup_values(self.dc.bbox)\n \n \n if np.random.random() > self.hbal:\n #Choose two random points and draw\n ps = np.random.random((2,2)) * ranges\n v1, v2 = [self.dc.newVertex(x) for x in ps]\n edges.append(self.dc.newEdge(v1, v2, edata={\"road\" : True,\n EdgeE.TEXT : True}))\n else:\n #or make a horizontal line\n rLen = self.hbounds[0] + (np.random.random() * (self.hbounds[1] - self.hbounds[0]))\n amnt = np.array([rLen,0])\n pos = (maxmin[:,0] + amnt) + (np.random.random((2,)) * (maxmin[:,1] - amnt))\n edges.append(self.dc.createEdge(pos, pos + amnt, edata={\"road\":True,EdgeE.TEXT: True}))\n \n \n self.delta += edges \n return self.delta\n \n def is_oneshot(self):\n if self.useCount < self.targetAmnt:\n return False\n return True\n\n def __exit__(self, type, value, traceback):\n super().__exit__(type,value,traceback)\n if type is None:\n self.useCount += 1\n\n \n def unwind(self):\n self.dc.purge(targets=self.delta)\n self.delta = []\n","sub_path":"citygen/roadOperator.py","file_name":"roadOperator.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"8548637","text":"# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. 
All Rights Reserved\n\"\"\"\nThis script is used to convert the GQA annotations to COCO format as expected by MDETR.\ndata_path : path to original GQA annotations to be downloaded from https://cs.stanford.edu/people/dorarad/gqa/download.html\nimg_path : path to original GQA images to be downloaded from https://cs.stanford.edu/people/dorarad/gqa/download.html\nsg_path : path to original GQA scene graphs to be downloaded from https://cs.stanford.edu/people/dorarad/gqa/download.html\nvg_img_data_path : path to image info for VG images to be downloaded from https://visualgenome.org/static/data/dataset/image_data.json.zip\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport pickle\nimport re\nfrom collections import defaultdict\nfrom pathlib import Path\nimport sys\nPACKAGE_PARENT = \"..\"\nSCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))\nsys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))\nfrom tqdm import tqdm\nfrom utils.spans import consolidate_spans\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"Conversion script\")\n\n parser.add_argument(\n \"--data_path\",\n required=True,\n type=str,\n help=\"Path to the gqa dataset\",\n )\n parser.add_argument(\n \"--img_path\",\n required=True,\n type=str,\n help=\"Path to the gqa image dataset\",\n )\n parser.add_argument(\n \"--sg_path\",\n required=True,\n type=str,\n help=\"Path to the gqa dataset scene graph\",\n )\n\n parser.add_argument(\n \"--vg_img_data_path\",\n required=True,\n type=str,\n help=\"Path to image meta data for VG\"\n )\n\n parser.add_argument(\n \"--coco_path\",\n required=True,\n type=str,\n help=\"Path to coco 2014, 2015, 2017 dataset.\",\n )\n\n parser.add_argument(\n \"--out_path\",\n default=\"\",\n type=str,\n help=\"Path where to export the resulting dataset.\",\n )\n return parser.parse_args()\n\n\ndef convert(split, data_path, sg_path, output_path, imid2data, type, coco_path):\n\n if split == \"train\" and type == \"all\":\n data = {}\n for i in tqdm(range(10)):\n with open(data_path / f\"train_all_questions/train_all_questions_{i}.json\", \"r\") as f:\n data.update(json.load(f))\n print(len(data))\n else:\n with open(data_path / f\"{split}_{type}_questions.json\", \"r\") as f:\n data = json.load(f)\n\n if split in [\"train\", \"val\"]:\n with open(sg_path / f\"{split}_sceneGraphs.json\", \"r\") as f:\n sg_data = json.load(f)\n\n img2ann = defaultdict(dict)\n for k, v in data.items():\n img2ann[v[\"imageId\"]][k] = v\n\n if split in [\"train\", \"val\", \"testdev\"]:\n\n # Add missing annotations by inspecting the semantic field\n regexp = re.compile(r\"([0-9]+)\")\n regexp2 = re.compile(r\"([A-z]+)\")\n count = 0\n\n for k, v in img2ann.items():\n for ann_id, annotations in v.items():\n expected_boxes = []\n for item in annotations[\"semantic\"]:\n if item[\"operation\"] == \"select\":\n if len(regexp.findall(item[\"argument\"])) > 0:\n expected_boxes.append(\n (regexp2.findall(item[\"argument\"])[0].strip(), regexp.findall(item[\"argument\"])[0])\n )\n question_boxes = [v for k, v in annotations[\"annotations\"][\"question\"].items()]\n\n for name, box_id in expected_boxes:\n if box_id not in question_boxes:\n count += 1\n beg = annotations[\"question\"].find(name)\n end = beg + len(name)\n annotations[\"annotations\"][\"question\"][(beg, end)] = box_id\n\n # Add annotations for the questions where there is a box for the answer but not for the question (what/where/who questions)\n for k, v in img2ann.items():\n for 
ann_id, ann in v.items():\n question_objects = [vv for kk, vv in ann[\"annotations\"][\"question\"].items()]\n answer_objects = [vv for kk, vv in ann[\"annotations\"][\"answer\"].items()]\n if len(set(answer_objects) - set(question_objects)) > 0:\n\n for box_id in answer_objects:\n if box_id not in question_objects:\n\n if ann[\"question\"].find(\"What\") > -1:\n beg = ann[\"question\"].find(\"What\")\n end = beg + len(\"What\")\n elif ann[\"question\"].find(\"what\") > -1:\n beg = ann[\"question\"].find(\"what\")\n end = beg + len(\"what\")\n elif ann[\"question\"].find(\"Who\") > -1:\n beg = ann[\"question\"].find(\"Who\")\n end = beg + len(\"Who\")\n elif ann[\"question\"].find(\"who\") > -1:\n beg = ann[\"question\"].find(\"who\")\n end = beg + len(\"who\")\n elif ann[\"question\"].find(\"Where\") > -1:\n beg = ann[\"question\"].find(\"Where\")\n end = beg + len(\"Where\")\n elif ann[\"question\"].find(\"where\") > -1:\n beg = ann[\"question\"].find(\"where\")\n end = beg + len(\"where\")\n else:\n continue\n\n ann[\"annotations\"][\"question\"][(beg, end)] = box_id\n\n print(f\"Dumping {split}...\")\n next_img_id = 0\n next_id = 0\n\n annotations = []\n images = []\n\n d_name = \"gqa\"\n\n if split in [\"testdev\", \"test\", \"challenge\", \"submission\"]:\n with open(f\"{coco_path}/annotations/image_info_test2015.json\", \"r\") as f:\n iminfo = json.load(f)\n imid2data = {x[\"id\"]: x for x in iminfo[\"images\"]}\n\n for k, v in tqdm(img2ann.items()):\n\n for ann_id, annotation in v.items():\n question = annotation[\"question\"]\n questionId = ann_id\n\n filename = f\"{k}.jpg\"\n if split in [\"submission\"]:\n cur_img = {\n \"file_name\": filename,\n \"height\": 400,\n \"width\": 800,\n \"id\": next_img_id,\n \"original_id\": k,\n \"caption\": question,\n \"tokens_negative\": [(0, len(question))],\n \"dataset_name\": d_name,\n \"question_type\": None,\n \"answer\": None,\n \"questionId\": questionId,\n }\n\n elif split in [\"test\", \"challenge\", \"submission\"]:\n cur_img = {\n \"file_name\": filename,\n \"height\": imid2data[int(k.strip(\"n\"))][\"height\"],\n \"width\": imid2data[int(k.strip(\"n\"))][\"width\"],\n \"id\": next_img_id,\n \"original_id\": k,\n \"caption\": question,\n \"tokens_negative\": [(0, len(question))],\n \"dataset_name\": d_name,\n \"question_type\": None,\n \"answer\": None,\n \"questionId\": questionId,\n }\n\n elif split == \"testdev\":\n cur_img = {\n \"file_name\": filename,\n \"height\": imid2data[int(k.strip(\"n\"))][\"height\"],\n \"width\": imid2data[int(k.strip(\"n\"))][\"width\"],\n \"id\": next_img_id,\n \"original_id\": k,\n \"caption\": question,\n \"tokens_negative\": [(0, len(question))],\n \"dataset_name\": d_name,\n \"question_type\": annotation[\"types\"][\"semantic\"],\n \"answer\": annotation[\"answer\"],\n \"questionId\": questionId,\n }\n else:\n cur_img = {\n \"file_name\": filename,\n \"height\": imid2data[int(k)][\"height\"],\n \"width\": imid2data[int(k)][\"width\"],\n \"id\": next_img_id,\n \"original_id\": k,\n \"caption\": question,\n \"tokens_negative\": [(0, len(question))],\n \"dataset_name\": d_name,\n \"question_type\": annotation[\"types\"][\"semantic\"],\n \"answer\": annotation[\"answer\"],\n \"questionId\": questionId,\n }\n\n if (\n split not in [\"testdev\", \"test\", \"challenge\", \"submission\"]\n and len(annotation[\"annotations\"][\"question\"]) > 0\n ):\n\n for text_tok_id, box_anno_id in annotation[\"annotations\"][\"question\"].items():\n target_bbox = sg_data[k][\"objects\"][box_anno_id]\n x, y, h, w = 
target_bbox[\"x\"], target_bbox[\"y\"], target_bbox[\"h\"], target_bbox[\"w\"]\n target_bbox = [x, y, w, h]\n\n if isinstance(text_tok_id, str):\n if \":\" in text_tok_id:\n text_tok_id = text_tok_id.split(\":\")\n if isinstance(text_tok_id, list) and len(text_tok_id) > 1:\n beg = sum([len(x) for x in question.split()[: int(text_tok_id[0])]]) + int(text_tok_id[0])\n end = (\n sum([len(x) for x in question.split()[: int(text_tok_id[1]) - 1]])\n + int(text_tok_id[1])\n - 1\n )\n end = end + len(question.split()[int(text_tok_id[1]) - 1])\n else:\n beg = sum([len(x) for x in question.split()[: int(text_tok_id)]]) + int(text_tok_id)\n end = beg + len(question.split()[int(text_tok_id)])\n else:\n beg, end = text_tok_id\n\n cleaned_span = consolidate_spans([(beg, end)], question)\n\n cur_obj = {\n \"area\": h * w,\n \"iscrowd\": 0,\n \"category_id\": 1,\n \"bbox\": target_bbox,\n \"tokens_positive\": cleaned_span,\n \"image_id\": next_img_id,\n \"id\": next_id,\n }\n\n next_id += 1\n annotations.append(cur_obj)\n\n next_img_id += 1\n images.append(cur_img)\n\n ds = {\"info\": [], \"licenses\": [], \"images\": images, \"annotations\": annotations, \"categories\": []}\n with open(output_path / f\"finetune_gqa_{split}_{type}.json\", \"w\") as j_file:\n json.dump(ds, j_file)\n return next_img_id, next_id\n\n\ndef main(args):\n data_path = Path(args.data_path)\n sg_path = Path(args.sg_path)\n output_path = Path(args.out_path) if args.out_path is not None else data_path\n with open(f\"{args.vg_img_data_path}/image_data.json\", \"r\") as f:\n image_data = json.load(f)\n imid2data = {x[\"image_id\"]: x for x in image_data}\n\n os.makedirs(str(output_path), exist_ok=True)\n\n # GQA train made of train and val, results reported on testdev\n for split in [\"submission\", \"testdev\", \"test\", \"challenge\", \"val\", \"train\"]:\n for type in [\"all\", \"balanced\"]:\n convert(split, data_path, sg_path, output_path, imid2data, type=type, coco_path=args.coco_path)\n\n\nif __name__ == \"__main__\":\n main(parse_args())\n","sub_path":"scripts/fine-tuning/gqa_coco_format.py","file_name":"gqa_coco_format.py","file_ext":"py","file_size_in_byte":11902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"163339693","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2015 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_serialization import jsonutils\n\nfrom nailgun.test.base import BaseIntegrationTest\nfrom nailgun.utils import reverse\n\n\nclass TestHandlers(BaseIntegrationTest):\n\n def get_template(self, cluster_id, expect_errors=False):\n resp = self.app.get(\n reverse(\n 'TemplateNetworkConfigurationHandler',\n kwargs={'cluster_id': cluster_id}\n ),\n headers=self.default_headers,\n expect_errors=expect_errors\n )\n\n return resp\n\n def test_network_template_upload(self):\n cluster = self.env.create_cluster(api=False)\n template = {'template': 'test'}\n resp = self.app.put(\n reverse(\n 'TemplateNetworkConfigurationHandler',\n kwargs={'cluster_id': cluster.id},\n ),\n jsonutils.dumps(template),\n headers=self.default_headers\n )\n self.assertEqual(200, resp.status_code)\n\n resp = self.get_template(cluster.id)\n self.assertEqual(200, resp.status_code)\n self.assertEqual('test', resp.json_body.get('template'))\n\n def test_template_not_set(self):\n resp = self.get_template(1, expect_errors=True)\n self.assertEqual(404, resp.status_code)\n\n def test_delete_template(self):\n cluster = self.env.create_cluster(api=False)\n template = {'template': 'test'}\n resp = self.app.put(\n reverse(\n 'TemplateNetworkConfigurationHandler',\n kwargs={'cluster_id': cluster.id},\n ),\n jsonutils.dumps(template),\n headers=self.default_headers\n )\n self.assertEquals(200, resp.status_code)\n\n resp = self.app.delete(\n reverse(\n 'TemplateNetworkConfigurationHandler',\n kwargs={'cluster_id': cluster.id},\n ),\n headers=self.default_headers\n )\n self.assertEquals(204, resp.status_code)\n\n resp = self.get_template(cluster.id)\n self.assertEquals(None, resp.json_body)\n","sub_path":"nailgun/nailgun/test/unit/test_network_template_handler.py","file_name":"test_network_template_handler.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"643669437","text":"\nimport random\nimport math\n\np=0\nq=0\nt=0\nl3 = []\n\ndef question2(res):\n global p\n global t\n l1 = res.keys()\n l2 = res.values()\n q = 0\n d = 0\n l3 = []\n for x, y in zip(l1, l2):\n l3.append(max(x, y))\n d = 0\n for x in l3[:]:\n d = d + x\n d = d / 10\n p =p+d\n q = 0\n for x in l3[:]:\n q = q + ((x - d) ** 2)\n q=math.sqrt(q)\n q = q / 10\n t += q\n\n\n\nfor x in range(0,30):\n l1 = [random.uniform(0,1) for i in range(100)]\n l2 = [random.uniform(0,1) for i in range(100)]\n res = dict(zip(l1, l2))\n question2(res)\n\np = p / 300\nprint(\"E(X) is :-\", p)\nt = t / 300\nprint(\"SD(X) is:-\", (t))\n\n","sub_path":"112551276_Abhay_Goyal HW1/112551276_Abhay_Goyal/Question 2.2.py","file_name":"Question 2.2.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"546806646","text":"import os\n\nimport shutil\nfrom db.db_helper import SQLHelper\n\n\ndef get_data(name):\n result = SQLHelper.fetch_one(\n 'SELECT p.id id, p.name name, p.author author,p.pdf_file_name pdf_name,cover_image_name image_name, c.name c_name ,c1.name p_name FROM products p JOIN categories c ON c.id = p.category_id JOIN categories c1 ON c1.id = c.parent_id WHERE p.name = %s',\n [name])\n return result\n\n\nroot_dir = r'E:\\coasts\\all_file'\nc_dir = r'E:\\coasts\\图书'\n\nfor _file in os.listdir(root_dir):\n _file = _file.replace('.pdf', '')\n data = get_data(_file)\n if data is None:\n 
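 # no product row matched this file name: report it and break out of the copy loop\n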
print(_file, \"没有找到\")\n break\n p_name = data.get('p_name')\n c_name = data.get('c_name')\n dir_name = os.path.join(c_dir, p_name + '\\\\' + c_name)\n if os.path.exists(dir_name) is False:\n os.makedirs(dir_name)\n _file = _file + '.pdf'\n src = os.path.join(root_dir, _file)\n dst = os.path.join(dir_name, _file)\n shutil.copy(src, dst)\n print(dst)\n","sub_path":"excel/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"306466888","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport math\n\ndef mysum2(a):\n\treturn np.sum(a)\n\n\ndef plotcircle1(cx, cy, r, sample_size = 20): \n\tx = []\n\ty = []\n\tfor i in range(sample_size):\n\t\tx.append(cx + r * math.sin(math.radians(i * 360 / sample_size)))\n\t\ty.append(cy + r * math.cos(math.radians(i * 360 / sample_size)))\n\tplt.axes().set_aspect('equal')\n\tplt.scatter(x, y)\n\tplt.show()\n\n\ndef plotnorm1(mu, sigma):\n\ts = np.random.normal(mu, sigma, 1000) # gen 1000 points\n\tcount, bins, ignored = plt.hist(s, 20, density=True)\n\tplt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) * np.exp( - (bins - mu)**2 / (2 * sigma**2) ))\n\tplt.show()\n\n\nif __name__ == '__main__':\n\tprint(mysum2([1,5,7]))\n\tprint(mysum2([]))\n\tprint(mysum2([-5,3]))\n\n\tplotcircle1(0, 0, 1)\n\tplotcircle1(3, 4, 5, 36)\n\n\tplotnorm1(0, 0.1)","sub_path":"pa1/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"15365370","text":"############## Task1.1 - ArUco Detection ##############\r\n\r\nimport numpy as np\r\nimport cv2\r\nimport cv2.aruco as aruco\r\nimport sys\r\nimport math\r\nimport time\r\n\r\ndef detect_ArUco(img):\r\n ## function to detect ArUco markers in the image using ArUco library\r\n ## argument: img is the test image\r\n ## return: dictionary named Detected_ArUco_markers of the format {ArUco_id_no : corners}, where ArUco_id_no indicates ArUco id and corners indicates the four corner position of the aruco(numpy array)\r\n ## \t\t for instance, if there is an ArUco(0) in some orientation then, ArUco_list can be like\r\n ## \t\t\t\t{0: array([[315, 163],\r\n #\t\t\t\t\t\t\t[319, 263],\r\n #\t\t\t\t\t\t\t[219, 267],\r\n #\t\t\t\t\t\t\t[215,167]], dtype=float32)}\r\n\r\n Detected_ArUco_markers = {}\r\n ## enter your code here ##\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n \r\n aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)\r\n\r\n parameters = aruco.DetectorParameters_create()\r\n\r\n corners, ids, _ = aruco.detectMarkers(gray, aruco_dict, parameters = parameters)\r\n \r\n for i in xrange(len(corners)):\r\n Detected_ArUco_markers[ids[i][0]] = corners[i][0]\r\n\r\n return Detected_ArUco_markers\r\n\r\n\r\ndef Calculate_orientation_in_degree(Detected_ArUco_markers):\r\n ## function to calculate orientation of ArUco with respective to the scale mentioned in Problem_Statement.pdf\r\n ## argument: Detected_ArUco_markers is the dictionary returned by the function detect_ArUco(img)\r\n ## return : Dictionary named ArUco_marker_angles in which keys are ArUco ids and the values are angles (angles have to be calculated as mentioned in the ProblemStatement.pdf)\r\n ##\t\t\tfor instance, if there are two ArUco markers with id 1 and 2 with angles 120 and 164 respectively, the \r\n ##\t\t\tfunction should return: {1: 120 , 2: 164}\r\n\r\n ArUco_marker_angles = {}\r\n ## enter your code here 
##\r\n for i in Detected_ArUco_markers:\r\n points = Detected_ArUco_markers[i]\r\n top_left = points[0]\r\n top_right = points[1]\r\n bottom_right = points[2]\r\n origin = ((top_left[0]+bottom_right[0])/2.0, (top_left[1]+bottom_right[1])/2.0) #ORIGIN IS CENTER OF MARKER\r\n center = ((top_left[0]+top_right[0])/2.0,(top_left[1]+top_right[1])/2.0) #CENTER IS MID POINT OF TOP-LEFT POINT AND TOP-RIGHT POINT\r\n \r\n degrees = math.degrees(math.atan2(origin[1]-center[1], center[0]-origin[0]))\r\n\r\n ##correct the angle\r\n if degrees < 0:\r\n degrees = 360 + degrees\r\n ArUco_marker_angles[i] = int(round(degrees))\r\n \r\n\r\n return ArUco_marker_angles\t## returning the angles of the ArUco markers in degrees as a dictionary\r\n\r\n\r\ndef mark_ArUco(img,Detected_ArUco_markers,ArUco_marker_angles):\r\n ## function to mark ArUco in the test image as per the instructions given in problem_statement.pdf \r\n ## arguments: img is the test image \r\n ##\t\t\t Detected_ArUco_markers is the dictionary returned by function detect_ArUco(img)\r\n ##\t\t\t ArUco_marker_angles is the return value of Calculate_orientation_in_degree(Detected_ArUco_markers)\r\n ## return: image namely img after marking the aruco as per the instruction given in Problem_statement.pdf\r\n\r\n ## enter your code here ##\r\n for i in Detected_ArUco_markers:\r\n points = Detected_ArUco_markers[i]\r\n top_left = points[0]\r\n top_right = points[1]\r\n bottom_right = points[2]\r\n bottom_left = points[3]\r\n #color = [ Gray, Green, Pink, White, Red, Blue]\r\n color = [(125,125,125), (0,255,0), (180,105,255), (255,255,255), (0,0,255), (255,0,0)]\r\n origin = (int(top_left[0]+bottom_right[0])//2, int(top_left[1]+bottom_right[1])//2) #ORIGIN IS THE CENTER OF MARKER\r\n center = (int(top_left[0]+top_right[0])//2, int(top_left[1]+top_right[1])//2) #CENTER IS MID POINT OF TOP-LEFT POINT AND TOP-RIGHT POINT\r\n \r\n for j in xrange(4):\r\n cv2.circle(img, tuple(points[j]), 5, color[j], -1)\r\n\r\n cv2.circle(img, (origin), 5, color[4], -1)\r\n cv2.line(img, (origin), (center), color[5], 3)\r\n\r\n id_coord = (origin[0]+20, origin[1])\r\n angle_coord = (origin[0]-80, origin[1])\r\n\r\n font = cv2.FONT_HERSHEY_SIMPLEX \r\n cv2.putText(img, str(i), id_coord, font, 1, color[4], 2, cv2.LINE_AA)\r\n cv2.putText(img, str(ArUco_marker_angles[i]), angle_coord, font, 1, color[1], 2, cv2.LINE_AA)\r\n\r\n return img\r\n\r\n\r\n","sub_path":"Task4_ws/sample_indoor/ArUco_library.py","file_name":"ArUco_library.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"108047243","text":"\n\n#calss header\nclass _TRY():\n\tdef __init__(self,): \n\t\tself.name = \"TRY\"\n\t\tself.definitions = [u'to attempt to do something: ', u'to test something to see if it is suitable or useful or if it works: ', u'used by many people and proved to be effective: ', u'to examine a person accused of committing a crime in a law court by asking them questions and considering known facts, and then decide if they are guilty: ', u\"to worry or annoy someone or upset a person's patience with many, often slight, difficulties: \"]\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn 
self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_try.py","file_name":"_try.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"166859275","text":"from django.contrib.auth.forms import AuthenticationForm, UserCreationForm\nfrom django.core.exceptions import ValidationError\nfrom django.forms import ModelForm, TextInput\nfrom .models import Player\n\n\nclass LoginForm(AuthenticationForm):\n def __init__(self, *args, **kwargs):\n super(LoginForm, self).__init__(*args, **kwargs)\n self.fields['username'].widget.attrs.update({'autofocus': 'autofocus'})\n\n\nclass RegisterForm(UserCreationForm):\n def __init__(self, *args, **kwargs):\n super(RegisterForm, self).__init__(*args, **kwargs)\n self.fields['username'].widget.attrs.update({'autofocus': 'autofocus'})\n\n\nclass PlayerForm(ModelForm):\n class Meta:\n model = Player\n fields = ['name', 'position']\n widgets = {'name': TextInput(attrs={'autofocus': 'autofocus'})}\n\n def __init__(self, user, *args, **kwargs):\n super(PlayerForm, self).__init__(*args, **kwargs)\n self.user = user\n\n def validate_unique(self):\n self.instance.user = self.user\n try:\n self.instance.validate_unique()\n except ValidationError as e:\n self._update_errors(e)\n","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"395524287","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\ndef get_title_from_index(index):\r\n return df[df.index == index][\"title\"].values[0]\r\n\r\ndef get_index_from_title(title):\r\n return df[df.title == title][\"index\"].values[0]\r\n\r\n\r\ndf=pd.read_csv(\"movie_dataset.csv\") #loading the dataset\r\n\r\n\r\nfeature=[\"keywords\",\"cast\",\"crew\",\"director\"]\r\n\r\n\r\n\r\nfor i in feature:\r\n df[i]=df[i].fillna('')\r\n\r\n\r\ndf[\"features\"]=df[\"keywords\"]+df[\"cast\"]+df[\"crew\"]+df[\"director\"]\r\n\r\n\r\n\r\ncv=CountVectorizer()\r\n\r\ncount_matrix=cv.fit_transform(df[\"features\"])\r\n\r\n\r\n\r\nsimilarity_score=cosine_similarity(count_matrix)\r\n\r\n\r\nmovie_user_likes=\"Spectre\"\r\n\r\nmovie_index=get_index_from_title(movie_user_likes)\r\n\r\nsimilar_movies=list(enumerate(similarity_score[movie_index]))\r\n\r\nsorted_similar_movies=sorted(similar_movies,key=lambda x:x[1],reverse=True)\r\n\r\ni=0\r\nfor movie in sorted_similar_movies:\r\n if i<10:\r\n print(get_title_from_index(movie[0]))\r\n i=i+1\r\n else:\r\n break\r\n","sub_path":"movie_recommender.py","file_name":"movie_recommender.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"453228734","text":"import re\n\nfrom sqlalchemy import Column, Table, ForeignKey, Integer\nfrom sqlalchemy.orm import relationship\n\nimport zeeguu\ndb = zeeguu.db\n\nfrom zeeguu.model.exercise_source import ExerciseSource\nfrom zeeguu.model.exercise import Exercise\n\nfrom zeeguu.model.exercise_outcome import ExerciseOutcome\nfrom zeeguu.model.user_word import UserWord\nfrom zeeguu.model.ranked_word import RankedWord\nfrom datetime import datetime\n\n\nbookmark_translation_mapping = Table('bookmark_translation_mapping', db.Model.metadata,\n Column('bookmark_id', Integer, ForeignKey('bookmark.id')),\n Column('translation_id', 
Integer, ForeignKey('user_word.id'))\n)\n\nbookmark_exercise_mapping = Table('bookmark_exercise_mapping', db.Model.metadata,\n Column('bookmark_id', Integer, ForeignKey('bookmark.id')),\n Column('exercise_id', Integer, ForeignKey('exercise.id'))\n)\n\nWordAlias = db.aliased(UserWord, name=\"translated_word\")\n\n\nclass Bookmark(db.Model):\n __table_args__ = {'mysql_collate': 'utf8_bin'}\n\n id = db.Column(db.Integer, primary_key=True)\n origin_id = db.Column(db.Integer, db.ForeignKey('user_word.id'))\n origin = db.relationship(\"UserWord\", primaryjoin=origin_id == UserWord.id,\n backref=\"translations\")\n translations_list = relationship(\"UserWord\", secondary=\"bookmark_translation_mapping\")\n\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n user = db.relationship(\"User\", backref=\"bookmarks\")\n\n text_id = db.Column(db.Integer, db.ForeignKey('text.id'))\n text = db.relationship(\"Text\", backref=\"bookmarks\")\n\n time = db.Column(db.DateTime)\n\n exercise_log = relationship(\"Exercise\", secondary=\"bookmark_exercise_mapping\")\n\n def __init__(self, origin, translation, user, text, time):\n self.origin = origin\n self.translations_list.append(translation)\n self.user = user\n self.time = time\n self.text = text\n\n def add_new_exercise(self, exercise):\n self.exercise_log.append(exercise)\n\n def translation(self):\n return self.translations_list[0]\n\n def translations_rendered_as_text(self):\n return \", \".join(self.translation_words_list())\n\n def translation_words_list(self):\n translation_words=[]\n for translation in self.translations_list:\n translation_words.append(translation.word)\n return translation_words\n\n def add_new_translation(self, translation):\n self.translations_list.append(translation)\n\n def context_is_not_too_long(self):\n return len(self.text.content) < 60\n\n def events_prevent_further_study(self):\n from zeeguu.model.smartwatch.watch_interaction_event import WatchInteractionEvent\n events_for_self = WatchInteractionEvent.events_for_bookmark(self)\n return any([x.prevents_further_study() for x in events_for_self])\n\n def good_for_study(self):\n # ML TODO: Must replace call to check_is_latest_outcome... 
with has_been_learned!\n return not self.check_is_latest_outcome_too_easy() and not self.events_prevent_further_study()\n\n def remove_translation(self,translation):\n if translation in self.translations_list:\n self.translations_list.remove(translation)\n\n def add_exercise_outcome(self, exercise_source, exercise_outcome, exercise_solving_speed):\n new_source = ExerciseSource.query.filter_by(\n source = exercise_source\n ).first()\n new_outcome=ExerciseOutcome.query.filter_by(\n outcome=exercise_outcome\n ).first()\n exercise = Exercise(new_outcome,new_source,exercise_solving_speed, datetime.now())\n self.add_new_exercise(exercise)\n db.session.add(exercise)\n\n def split_words_from_context(self):\n words_of_bookmark_content = []\n bookmark_content_words = re.findall(r'(?u)\\w+', self.text.content)\n words_of_bookmark_content.extend(bookmark_content_words)\n return words_of_bookmark_content\n\n def context_words_with_rank(self):\n ranked_context_words = self.split_words_from_context()\n while self.origin.word in ranked_context_words: ranked_context_words.remove(self.origin.word)\n filtered_words_known_from_user = []\n for word_known in ranked_context_words:\n if RankedWord.exists(word_known.lower(), self.origin.language):\n filtered_words_known_from_user.append(word_known)\n return filtered_words_known_from_user\n\n def json_serializable_dict(self, with_context=True):\n result = dict(\n id=self.id,\n to=self.translation_words_list(),\n from_lang=self.origin.language_id,\n to_lang=self.translation().language.id,\n title=self.text.url.title,\n url=self.text.url.as_string(),\n origin_rank=self.origin.get_rank()\n )\n result[\"from\"] = self.origin.word\n if with_context:\n result['context'] = self.text.content\n return result\n\n def calculate_probabilities_after_adding_a_bookmark(self, user,language):\n a = datetime.now()\n\n from zeeguu.model.known_word_probability import KnownWordProbability\n from zeeguu.model.exercise_based_probability import ExerciseBasedProbability\n from zeeguu.model.encounter_based_probability import EncounterBasedProbability\n\n # TODO: This should take the DB as an argument!\n # TODO: Should be moved to the KnownWordProbability\n \"\"\"\n ML: This has to be refactored.\n It's a mess.\n\n\n The idea is: you've just added a bookmark.\n There are two things to do:\n\n 1. update the probabilities of the context words (they have been\n encountered, and not translated)\n\n 2. update the probabilities of the word itself\n\n -\n\n\n :param user:\n :param language:\n :return:\n \"\"\"\n\n # 1. 
computations for adding encounter based probability for the context words\n for word in self.context_words_with_rank():\n enc_prob = EncounterBasedProbability.find_or_create(word, user, language)\n zeeguu.db.session.add(enc_prob)\n # zeeguu.db.session.commit()\n user_word = None\n ranked_word = enc_prob.ranked_word\n if UserWord.exists(word,language):\n user_word = UserWord.find(word,language)\n if ExerciseBasedProbability.exists(user,user_word): #checks if exercise based probability exists for words in context\n ex_prob = ExerciseBasedProbability.find_or_create(user,user_word)\n known_word_prob = KnownWordProbability.find(user,user_word,ranked_word)\n known_word_prob.probability = known_word_prob.calculate_known_word_prob(ex_prob.probability, enc_prob.probability) #updates known word probability as exercise based probability already existed.\n else:\n if KnownWordProbability.exists(user, user_word,ranked_word):\n known_word_prob = KnownWordProbability.find(user,user_word,ranked_word)\n known_word_prob.probability = enc_prob.probability # updates known word probability as encounter based probability already existed\n else:\n known_word_prob = KnownWordProbability.find(user,user_word,ranked_word, enc_prob.probability) # new known word probability created as it did not exist\n zeeguu.db.session.add(known_word_prob)\n\n # 2. Update the probabilities of the word itself\n\n # 2.a) exercise based prob\n # ML: Should this thing change?\n # The ex based probability should probably not change after I add a bookmark\n # Commenting out the following lines: s\n # ex_prob = ExerciseBasedProbability.find_or_create(user, self.origin)\n # if ex_prob:\n # ex_prob.update_probability_after_adding_bookmark_with_same_word(self,user)\n # zeeguu.db.session.add(ex_prob)\n\n # 2.b) encounter based prob\n ranked_word = RankedWord.find(self.origin.word, language)\n if ranked_word: #checks if ranked_word exists for that looked up word\n if EncounterBasedProbability.exists(user, ranked_word): # checks if encounter based probability exists for that looked up word\n enc_prob = EncounterBasedProbability.find(user, ranked_word)\n enc_prob.word_has_just_beek_bookmarked()\n db.session.add(enc_prob)\n # db.session.commit()\n\n # 2.c) update known word probability if it exists\n if KnownWordProbability.exists(user, self.origin,ranked_word):\n known_word_prob = KnownWordProbability.find(user,self.origin,ranked_word)\n known_word_prob.word_has_just_beek_bookmarked()\n db.session.add(known_word_prob)\n # db.session.commit()\n\n db.session.commit()\n\n b = datetime.now()\n delta = b - a\n print (\"calculating proabilities for user {1} and bookmark {2} took {0}ms\".\n format(int(delta.total_seconds() * 1000),\n user.id,\n self.id))\n\n\n @classmethod\n def find_by_specific_user(cls, user):\n return cls.query.filter_by(\n user= user\n ).all()\n\n @classmethod\n def find_all(cls):\n return cls.query.filter().all()\n\n @classmethod\n def find_all_for_text(cls,text):\n return cls.query.filter(cls.text == text).all()\n\n @classmethod\n def find(cls, b_id):\n return cls.query.filter_by(\n id= b_id\n ).first()\n\n @classmethod\n def find_all_by_user_and_word(cls, user, word):\n return cls.query.filter_by(\n user = user,\n origin = word\n ).all()\n\n @classmethod\n def find_all_by_user_word_and_text(cls, user, word, text):\n return cls.query.filter_by(\n user = user,\n origin = word,\n text = text\n ).all()\n\n\n\n\n\n # @classmethod\n # def is_sorted_exercise_log_after_date_outcome(cls,outcome, bookmark):\n # 
sorted_exercise_log_after_date=sorted(bookmark.exercise_log, key=lambda x: x.time, reverse=True)\n # if sorted_exercise_log_after_date:\n # if sorted_exercise_log_after_date[0].outcome.outcome == outcome:\n # return True\n # return False\n\n def check_is_latest_outcome_too_easy(self, add_to_result_time=False):\n sorted_exercise_log_by_latest=sorted(self.exercise_log, key=lambda x: x.time, reverse=True)\n for exercise in sorted_exercise_log_by_latest:\n if exercise.outcome.outcome == ExerciseOutcome.TOO_EASY:\n if add_to_result_time:\n return True, exercise.time\n return True\n elif exercise.outcome.outcome == ExerciseOutcome.SHOW_SOLUTION or exercise.outcome.outcome == ExerciseOutcome.WRONG:\n if add_to_result_time:\n return False, None\n return False\n if add_to_result_time:\n return False, None\n return False\n\n def check_if_learned_based_on_exercise_outcomes (self, add_to_result_time=False):\n \"\"\"\n TODO: This should replace check_is_latest_outcome in the future...\n\n :param add_to_result_time:\n :return:\n \"\"\"\n sorted_exercise_log_by_latest=sorted(self.exercise_log, key=lambda x: x.time, reverse=True)\n\n if sorted_exercise_log_by_latest:\n last_exercise = sorted_exercise_log_by_latest[0]\n\n # If last outcome is TOO EASY we know it\n if last_exercise.outcome.outcome == ExerciseOutcome.TOO_EASY:\n if add_to_result_time:\n return True, last_exercise.time\n return True\n\n CORRECTS_IN_A_ROW = 5\n if len(sorted_exercise_log_by_latest) > CORRECTS_IN_A_ROW:\n\n # If we got it right for the last CORRECTS_IN_A_ROW times, we know it\n if all(exercise.outcome.outcome == ExerciseOutcome.CORRECT for exercise in sorted_exercise_log_by_latest[0:CORRECTS_IN_A_ROW-1]):\n return True, last_exercise.time\n\n if add_to_result_time:\n return False, None\n return False\n\n\n def events_indicate_its_learned(self):\n from zeeguu.model.smartwatch.watch_interaction_event import WatchInteractionEvent\n events_for_self = WatchInteractionEvent.events_for_bookmark(self)\n\n for event in events_for_self:\n if event.is_learned_event():\n return True, event.time\n\n return False, None\n\n\n def has_been_learned(self, also_return_time=False):\n # TODO: This must be stored in the DB together with the\n # bookmark... 
\n\n def has_been_learned(self, also_return_time=False):\n # TODO: This must be stored in the DB together with the\n # bookmark... once a bookmark has been learned, we should\n # not ever doubt it ...\n\n \"\"\"\n :param also_return_time: should the function return also the time when\n the bookmark has been learned?\n\n :return: boolean indicating whether the bookmark has already been learned,\n together with the time when it was learned if also_return_time is set\n \"\"\"\n\n # The first case is when we have an exercise outcome set to Too EASY\n learned, time = self.check_if_learned_based_on_exercise_outcomes(True)\n if learned:\n if also_return_time:\n return True, time\n else:\n return True\n\n # The second case is when we have an event in the smartwatch event log\n # that indicates that the word has been learned\n learned, time = self.events_indicate_its_learned()\n if learned:\n return learned, time\n\n if also_return_time:\n return False, None\n\n return False\n","sub_path":"zeeguu/model/bookmark.py","file_name":"bookmark.py","file_ext":"py","file_size_in_byte":13827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"247237230","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport sklearn\r\nimport scipy.stats as stats\r\nimport seaborn as sns\r\n\r\ndf = pd.read_csv('creditcard.csv')\r\n\r\nprint(df.shape[0], df.shape[1])\r\n\r\ndf.sample(5)\r\ndf.info()\r\n\r\ndf.loc[:, ['Time','Amount']].describe()\r\n\r\nsns.distplot(df.Time)\r\n\r\nplt.title('Distribution of Monetary Value Feature')\r\nsns.distplot(df.Amount)\r\n\r\n#fraud vs normal transactions\r\ncounts = df.Class.value_counts()\r\nnormal = counts[0]\r\nfraudulent = counts[1]\r\n\r\nnormal_perc = (normal/(normal+fraudulent))*100\r\nfraudulent_perc = (fraudulent/(normal+fraudulent))*100\r\nprint('There were {} non-fraudulent transactions ({:.3f}%) and {} fraudulent transactions ({:.3f}%).'.format(normal,normal_perc,fraudulent,fraudulent_perc))\r\n\r\nplt.xlabel('Class-> 0: Non-Fraudulent 1:Fraudulent')\r\nplt.ylabel('Count')\r\nsns.barplot(x = counts.index, y = counts)\r\n\r\n\r\ncorr = df.corr()\r\ncorr\r\nheat = sns.heatmap(data=corr)\r\nplt.show(heat)\r\n\r\n#skewness\r\nskew = df.skew()\r\nskew\r\n\r\n#Scale Amount and Time\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nscaler1 = StandardScaler()\r\nscaler2 = StandardScaler()\r\n\r\n#scale time\r\nscaled_time = scaler1.fit_transform(df[['Time']])\r\nflat_list1 = [item for sublist in scaled_time.tolist() for item in sublist]\r\nscaled_time = pd.Series(flat_list1)\r\n\r\n#scale amount\r\nscaled_amount = scaler2.fit_transform(df[['Amount']])\r\nflat_list2 = [item for sublist in scaled_amount.tolist() for item in sublist]\r\nscaled_amount = pd.Series(flat_list2)\r\n\r\ndf = pd.concat([df, scaled_amount.rename('scaled_amount'), scaled_time.rename('scaled_time')], axis=1)\r\n\r\ndf.head()\r\n\r\ndf.drop(['Amount','Time'], axis=1, inplace=True)\r\n\r\n\r\n#Splitting\r\n\r\nmask = np.random.rand(len(df)) < 0.9\r\ntrain = df[mask]\r\ntest = df[~mask]\r\n\r\ntrain.reset_index(drop=True, inplace=True)\r\ntest.reset_index(drop=True, inplace=True)\r\n\r\n\r\nno_of_frauds = train.Class.value_counts()[1]\r\nno_of_frauds\r\n\r\nnon_fraud = train.Class.value_counts()[0]\r\nnon_fraud\r\n\r\nnon_fraud = train[train['Class']==0]\r\nfraud = train[train['Class']==1]\r\n\r\nselected = non_fraud.sample(no_of_frauds)\r\nselected.head()\r\n\r\nselected.reset_index(drop=True, inplace=True)\r\nfraud.reset_index(drop=True, inplace=True)\r\n\r\nsub_sample = pd.concat([selected,fraud])\r\nsub_sample.shape
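\r\n\r\n# NOTE: 'selected' draws one legitimate transaction per fraud, so sub_sample\r\n# is a balanced 1:1 undersample; scores computed on it will not reflect the\r\n# heavy class imbalance of the full dataset.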
\r\n\r\n#shuffle the data\r\nsub_sample = sub_sample.sample(frac=1).reset_index(drop=True)\r\n\r\nnew_counts = sub_sample.Class.value_counts()\r\nplt.xlabel('Class: 0-Legitimate 1-Fraudulent')\r\nsns.barplot(x=new_counts.index, y=new_counts)\r\n\r\ncorr = sub_sample.corr()\r\ncorr = corr[['Class']]\r\ncorr\r\n\r\n#negative corr smaller than -0.5\r\ncorr[corr.Class<-0.5]\r\n\r\n#positive corr\r\ncorr[corr.Class>0.5]\r\n\r\n\r\n#visualizing the features with high negative corr\r\n\r\nf, axes = plt.subplots(nrows=2, ncols=4, figsize=(26,16))\r\nf.suptitle('features with high negative corr')\r\nsns.boxplot(x='Class',y='V3', data=sub_sample, ax=axes[0,0])\r\nsns.boxplot(x='Class',y='V9', data=sub_sample, ax=axes[0,1])\r\nsns.boxplot(x='Class',y='V10', data=sub_sample, ax=axes[0,2])\r\nsns.boxplot(x='Class',y='V12', data=sub_sample, ax=axes[0,3])\r\nsns.boxplot(x='Class',y='V14', data=sub_sample, ax=axes[1,0])\r\nsns.boxplot(x='Class',y='V16', data=sub_sample, ax=axes[1,1])\r\nsns.boxplot(x='Class',y='V17', data=sub_sample, ax=axes[1,2])\r\nf.delaxes(axes[1,3])\r\n\r\n\r\n#visualizing the features with high positive correlation\r\nf, axes = plt.subplots(nrows=1, ncols=2, figsize=(18,9))\r\n\r\nf.suptitle('Features With High Positive Correlation', size=20)\r\nsns.boxplot(x=\"Class\", y=\"V4\", data=sub_sample, ax=axes[0])\r\nsns.boxplot(x=\"Class\", y=\"V11\", data=sub_sample, ax=axes[1])\r\n\r\n\r\n#Only removing extreme outliers\r\nQ1 = sub_sample.quantile(0.25)\r\nQ1\r\nQ3 = sub_sample.quantile(0.75)\r\nIQR = Q3 - Q1\r\n\r\ndf2 = sub_sample[~((sub_sample < (Q1 - 2.5 * IQR)) |(sub_sample > (Q3 + 2.5 * IQR))).any(axis=1)]\r\n\r\n\r\nfrom sklearn.manifold import TSNE\r\nX = df2.drop('Class',axis=1)\r\ny = df2['Class']\r\n\r\n\r\n# t-SNE\r\nX_reduced_tsne = TSNE(n_components = 2, random_state=42).fit_transform(X.values)\r\n\r\n# t-SNE scatterplot\r\nimport matplotlib.patches as mpatches\r\n\r\nf, ax = plt.subplots(figsize=(24,16))\r\n\r\n\r\nblue_patch = mpatches.Patch(color='#0A0AFF', label='No Fraud')\r\nred_patch = mpatches.Patch(color='#AF0000', label='Fraud')\r\n\r\nax.scatter(X_reduced_tsne[:,0], X_reduced_tsne[:,1], c=(y == 0), cmap='coolwarm', label='No Fraud', linewidths=2)\r\nax.scatter(X_reduced_tsne[:,0], X_reduced_tsne[:,1], c=(y == 1), cmap='coolwarm', label='Fraud', linewidths=2)\r\nax.set_title('t-SNE', fontsize=14)\r\n\r\nax.grid(True)\r\n\r\nax.legend(handles=[blue_patch, red_patch])\r\n\r\n## CLASSIFICATION\r\n\r\ndef warn(*args, **kwargs):\r\n pass\r\nimport warnings\r\nwarnings.warn = warn\r\n\r\n#train test split\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\r\n\r\nX_train = X_train.values\r\nX_validation = X_test.values\r\ny_train = y_train.values\r\ny_validation = y_test.values\r\n\r\n\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.svm import SVC\r\nfrom xgboost import XGBClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\n\r\nmodels = []\r\n\r\nmodels.append(('LR', LogisticRegression()))\r\nmodels.append(('LDA', 
LinearDiscriminantAnalysis()))\r\nmodels.append(('KNN', KNeighborsClassifier()))\r\nmodels.append(('CART', DecisionTreeClassifier()))\r\nmodels.append(('SVM', SVC()))\r\nmodels.append(('XGB', XGBClassifier()))\r\nmodels.append(('RF', RandomForestClassifier()))\r\n\r\n# testing models\r\n\r\nresults = []\r\nnames = []\r\n\r\nfor name, model in models:\r\n kfold = KFold(n_splits=10, random_state=42)\r\n cv_results = cross_val_score(model, X_train, y_train, cv=kfold, scoring = 'roc_auc')\r\n results.append(cv_results)\r\n names.append(name)\r\n msg = '%s: %F (%F)' %(name, cv_results.mean(), cv_results.std())\r\n print(msg)\r\n\r\n#compare algorithms\r\n \r\n#Compare Algorithms\r\n\r\nfig = plt.figure(figsize=(12,10))\r\nplt.title('Comparison of Classification Algorithms')\r\nplt.xlabel('Algorithm')\r\nplt.ylabel('ROC-AUC Score')\r\nplt.boxplot(results)\r\nax = fig.add_subplot(111)\r\nax.set_xticklabels(names)\r\nplt.show()\r\n\r\n\r\n\r\n#from sklearn.metrics import average_precision_score, auc, roc_curve, precision_recall_curve\r\n#\r\n#model_LR = LogisticRegression()\r\n#model_LR.fit(X_train, y_train)\r\n#\r\n#predictions = model_LR.predict(X_validation)\r\n#score = model_LR.score(X_validation, y_validation)\r\n#score\r\n#\r\n#y_score_lr = model_LR.predict_proba(X_validation)[:,-1]\r\n#\r\n#avg_prec = average_precision_score(y_validation, y_score_lr)\r\n#format(avg_prec)\r\n#\r\n#precision, recall, _ = precision_recall_curve(y_validation, y_score_lr)\r\n#\r\n#plt.step(recall, precision, color='b', alpha=0.2,\r\n# where='post')\r\n#plt.fill_between(recall, precision, step='post', alpha=0.2,\r\n# color='b')\r\n#plt.xlabel('Recall')\r\n#plt.ylabel('Precision')\r\n#plt.ylim([0.0, 1.05])\r\n#plt.xlim([0.0, 1.0])\r\n#plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(\r\n# avg_prec))\r\n#\r\n#\r\n#fpr_rf, tpr_rf, _ = roc_curve(y_validation, y_score_lr)\r\n#roc_auc_rf = auc(fpr_rf, tpr_rf)\r\n#plt.figure(figsize=(8,8))\r\n#plt.xlim([-0.01, 1.00])\r\n#plt.ylim([-0.01, 1.01])\r\n#plt.plot(fpr_rf, tpr_rf, lw=1, label='{} curve (AUC = {:0.2f})'.format('RF',roc_auc_rf))\r\n#\r\n#\r\n#plt.xlabel('False Positive Rate', fontsize=16)\r\n#plt.ylabel('True Positive Rate', fontsize=16)\r\n#plt.title('ROC curve', fontsize=16)\r\n#plt.legend(loc='lower right', fontsize=13)\r\n#plt.plot([0, 1], [0, 1], color='navy', lw=1, linestyle='--')\r\n#plt.axes().set_aspect('equal')\r\n#plt.show()\r\n#\r\n#\r\n#predictions\r\n#y_validation\r\n#\r\n#print(classification_report(y_validation, predictions))\r\n\r\n\r\n\r\n","sub_path":"CreditCardFraud.py","file_name":"CreditCardFraud.py","file_ext":"py","file_size_in_byte":8011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"140795893","text":"from gtts import gTTS\nfrom playsound import playsound\nfrom pytchat import LiveChat\nimport os\nfrom google_trans_new import google_translator\ntranslator = google_translator()\nimport keyboard\nimport threading\nimport time\n\n\n\"\"\"\nimport pyttsx3\n\nengine = pyttsx3.init()\nengine.setProperty(\"rate\", 110)\nvoices = engine.getProperty('voices')\nfor v in voices:\n print(\"id %s \" %v.id)\nengine.setProperty('voice', voices[1].id)\n\"\"\"\nwaitt=0\nflag=0\npaused=0\ndef pause():\n global flag\n global th\n global waitt\n global paused\n while True:\n try:\n if keyboard.is_pressed('{'):\n flag=1\n print('Paused voice')\n text_val=\"Daksh Voice is paused\"\n text_val = translator.translate(text_val,lang_src='en',lang_tgt='en')\n language = 'hi'\n obj = 
gTTS(text=text_val,tld='co.in', lang=language, slow=False)\n while(waitt==1):\n yy=5\n waitt=1\n playsound(\"starting.mp3\")\n os.remove(\"test.mp3\")\n obj.save(\"test.mp3\")\n playsound(\"test.mp3\")\n waitt=0\n \n #time.sleep(2)\n\n if keyboard.is_pressed('('):\n paused=1\n print('BOT Paused')\n text_val=\"Daksh BOT is paused\"\n text_val = translator.translate(text_val,lang_src='en',lang_tgt='en')\n language = 'hi'\n obj = gTTS(text=text_val,tld='co.in', lang=language, slow=False)\n while(waitt==1):\n yy=5\n waitt=1\n playsound(\"starting.mp3\")\n os.remove(\"test.mp3\")\n obj.save(\"test.mp3\")\n playsound(\"test.mp3\")\n waitt=0\n\n if keyboard.is_pressed(')'):\n paused=0\n print('BOT Resumed')\n text_val=\"Daksh BOT is working\"\n text_val = translator.translate(text_val,lang_src='en',lang_tgt='en')\n language = 'hi'\n obj = gTTS(text=text_val,tld='co.in', lang=language, slow=False)\n while(waitt==1):\n yy=5\n waitt=1\n playsound(\"starting.mp3\")\n os.remove(\"test.mp3\")\n obj.save(\"test.mp3\")\n playsound(\"test.mp3\")\n waitt=0\n \n #time.sleep(2)\n \n if keyboard.is_pressed('}'):\n flag=0\n print(\"Resumed voice\")\n text_val=\"Daksh Voice is working\"\n text_val = translator.translate(text_val,lang_src='en',lang_tgt='en')\n language = 'hi'\n obj = gTTS(text=text_val,tld='co.in', lang=language, slow=False)\n while(waitt==1):\n yy=5\n waitt=1\n playsound(\"starting.mp3\")\n os.remove(\"test.mp3\")\n obj.save(\"test.mp3\")\n playsound(\"test.mp3\")\n waitt=0\n \n #time.sleep(2)\n except:\n yy=5\n finally:\n time.sleep(1)\n\nidd=str(input(\"Enter youtube ID: \"))\nlivechat=LiveChat(video_id=idd)\nth = threading.Thread(target=pause)\nth.start()\nbf=0\ndef fun():\n global bf\n global waitt\n try:\n while livechat.is_alive():\n try:\n while(paused==1):\n yy=5\n chatdata=livechat.get()\n for c in chatdata.items:\n bf=0\n block = open(\"Blocklist.txt\", \"r\")\n blocklist=block.readlines()\n blocklist = [xx.strip() for xx in blocklist] \n #print(blocklist)\n for linee in blocklist:\n if (linee==str(c.author.name)):\n print(\"Message ignored because of Blacklist\")\n chatdata.tick()\n bf=1\n break\n block.close()\n\n \n \n c.datetime\n text_val = f\"{c.author.name} said,{c.message}\"\n text_val2=f\"{c.datetime}: {c.author.name} !!said!!, {c.message}\"\n print(text_val2)\n if(bf==1):\n bf=0\n continue\n \n if(flag==0):\n text_val = translator.translate(text_val,lang_src='en',lang_tgt='en')\n #print('sss')\n language = 'hi'\n i=1\n \n # collapse repeated alphanumeric characters before synthesis\n while(i<len(text_val)):\n if((ord(text_val[i])>=48 and ord(text_val[i]) <=57) or (ord(text_val[i])>=65 and ord(text_val[i])<=90) or (ord(text_val[i])>=97 and ord(text_val[i])<=122)):\n if(text_val[i-1]==text_val[i]):\n text_val=text_val[:i-1]+text_val[i:]\n i=i-1\n i=i+1\n #print(text_val)\n obj = gTTS(text=text_val,tld='co.in', lang=language, slow=False)\n while(waitt==1):\n yy=5\n waitt=1\n \n playsound(\"starting.mp3\")\n os.remove(\"test.mp3\")\n \n obj.save(\"test.mp3\")\n playsound(\"test.mp3\")\n waitt=0\n if(str(c.message).lower()==\"hi\" or str(c.message).lower()==\"hello\"):\n text_val=f\"Hello {c.author.name}, Daksh welcomes you to stream\"\n print(text_val)\n text_val = translator.translate(text_val,lang_src='hi',lang_tgt='hi')\n obj = gTTS(text=text_val,tld='co.in', lang=language, slow=False)\n while(waitt==1):\n yy=5\n waitt=1\n playsound(\"starting.mp3\")\n os.remove(\"test.mp3\")\n obj.save(\"test.mp3\")\n playsound(\"test.mp3\")\n waitt=0\n \n ##engine.say(text_val)\n # play the speech\n ##engine.runAndWait()\n chatdata.tick()\n except Exception as e:\n print(e)\n
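 # The handler below restarts fun() recursively after any error; each\n # restart leaves a frame on the stack, so a very long session could hit\n # Python's recursion limit -- a loop-based retry would be safer.\n except Exception 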
as e:\n fun()\nfun()\n","sub_path":"dd.py","file_name":"dd.py","file_ext":"py","file_size_in_byte":6818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"252851929","text":"# -*- coding: utf-8 -*- \n# @Time : 2019/12/16 23:29\n# @Author : hangzhouwh \n# @Email: hangzhouwh@gmail.com\n# @File : simi_artist.py \n# @Software: PyCharm\nimport json\n\nimport scrapy\n\nfrom music163.tool import json_tool\n\n\nar_count = 0\n\nclass SimiArtistSpider(scrapy.Spider):\n\tname = 'simi_artist'\n\tallowed_domains = ['localhost:3000']\n\n\tdef start_requests(self):\n\t\tglobal ar_count\n\t\tartists = []\n\t\tcodes = [1001, 1002, 1003]\n\t\tfor code in codes:\n\t\t\tfilepath = 'D:\\\\WorkSpace\\\\Pycharm\\\\music163\\\\music163\\\\data\\\\artist\\\\artist_' + str(code) +'.json'\n\t\t\tars = json_tool.load_json(filepath)\n\t\t\tartists.extend(ars)\n\n\t\turl_head = 'http://localhost:3000/simi/artist?id='\n\n\t\tartists = sorted(artists, key=lambda e: e.__getitem__('music_size'), reverse=True)\n\n\t\tfor artist in artists:\n\t\t\tif ar_count == 500:\n\t\t\t\tbreak\n\t\t\tartist_id = artist['artist_id']\n\t\t\tartist_name = artist['artist_name']\n\t\t\turl = url_head + str(artist_id)\n\t\t\tyield scrapy.Request(url=url, meta={'artist_id': artist_id, 'artist_name': artist_name}, dont_filter=False)\n\t\t\tar_count += 1\n\n\tdef parse(self, response):\n\t\tres = json.loads(response.body)\n\n\t\tartist_id = response.meta['artist_id']\n\t\tartist_name = response.meta['artist_name']\n\n\t\tpass\n\t\t#\n\t\t# \tcsps_lyric_item = CSPSLyricItem()\n\t\t# \tcsps_lyric_item[\"song_name\"] = artist_name\n\t\t# \tcsps_lyric_item[\"artist_name\"] = song_name\n\t\t# \tcsps_lyric_item[\"lyric\"] = lyric\n\t\t#\n\t\t# \tsong_count += 1\n\t\t# \tprint(\"热门音乐: \", song_count)\n\t\t#\n\t\t# \tif song_count % 500 == 0:\n\t\t# \t\tsec = random.randint(3, 9)\n\t\t# \t\tprint('休眠 ', sec, 's')\n\t\t# \t\ttime.sleep(sec)\n\t\t#\n\t\t# \tyield csps_lyric_item\n\t\t# else:\n\t\t# \tpass\n","sub_path":"music163/spiders/simi_artist.py","file_name":"simi_artist.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"260206758","text":"dxy = [(0, 1), (1, 0)]\ndef move(idx, res):\n global min_sum\n if idx == (N - 1, N - 1):\n if res < min_sum:\n min_sum = res\n return True\n\n y, x = idx\n for b, a in dxy:\n yi = y + b\n xi = x + a\n if 0 <= yi < N and 0 <= xi < N:\n temp = res + board[yi][xi]\n if temp < min_sum:\n move((yi, xi), temp)\n\nfor case in range(1, int(input()) + 1):\n N = int(input())\n board = [list(map(int, input().split())) for _ in range(N)]\n min_sum = 999999999999\n move((0, 0), board[0][0])\n print(f'#{case} {min_sum}')\n","sub_path":"Python/SWEA/D3/5188_최소합.py","file_name":"5188_최소합.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"534040886","text":"import unittest\nimport models.Measurement as m\n\nclass TestMeasurement(unittest.TestCase):\n def setUp(self):\n self.m1 = m.Measurement(66, 1476351846113)\n self.m2 = m.Measurement(69, 1476351876762)\n self.m3 = m.Measurement(62, 1476351898901)\n self.m4 = m.Measurement(66, 1476351846113)\n\n def testEquality(self):\n self.assertNotEqual(self.m1, self.m2)\n self.assertNotEqual(self.m2, self.m3)\n self.assertNotEqual(self.m3, self.m4)\n self.assertEqual(self.m1, self.m4)\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"Phase_2/path_classifier/tests/TestMeasurement.py","file_name":"TestMeasurement.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"268032689","text":"import sys\n\nsys.path.append('keras-rnn/wrappers')\n\nfrom hLSTM import hLSTM\n\ndef trainTheThing(name_to_train):\n\n theThing = hLSTM()\n\n theThing.load(name_to_train)\n\n print('discretizing')\n theThing.discretizeSpace()\n\n print('training')\n theThing.train(num_iters=75)\n\n theThing.save('iter75.json','iter75.weights')\n\n\n\n\nif __name__ == '__main__':\n trainTheThing(sys.argv[1])\n","sub_path":"trainTheThing.py","file_name":"trainTheThing.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"143167992","text":"from io import BytesIO\nimport requests\nfrom PIL import Image\n\nclass ImageConvert:\n \"\"\"\n Convert images automatically.\n \"\"\"\n def __init__(self, bot):\n self.bot = bot\n print('Addon \"{}\" loaded'.format(self.__class__.__name__))\n\n async def on_message(self, message):\n # BMP conversion\n for f in message.attachments:\n if f[\"filename\"].lower().endswith('.bmp') and f[\"size\"] <= 600000: # 600kb\n img_request = requests.get(f[\"url\"])\n img_obj = Image.open(BytesIO(img_request.content))\n img_out = BytesIO()\n img_obj.save(img_out, 'png')\n img_out.seek(0)\n out_message = \"{} from {}\".format(self.bot.escape_name(f[\"filename\"]), message.author.mention)\n new_filename = f[\"filename\"][:-3] + \"png\"\n await self.bot.send_file(message.channel, img_out, filename=new_filename, content=out_message)\n\ndef setup(bot):\n bot.add_cog(ImageConvert(bot))\n","sub_path":"addons/imgconvert.py","file_name":"imgconvert.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"48183836","text":"#-------------------------------------------------------------------------------\r\n# Name: module2\r\n# Purpose:\r\n#\r\n# Author: 15Richardsonc\r\n#\r\n# Created: 23/11/2018\r\n# Copyright: (c) 15Richardsonc 2018\r\n# Licence: \r\n#-------------------------------------------------------------------------------\r\n\r\n# Using a list to represent a binary tree - numbers are indexes for ease of maintenance\r\n# Using an array would allow null (i.e. 
numbered) elements to be omitted\r\n# A more balanced tree produces a shorter, less sparsely populated list\r\ntree = [0,\"QIs it a mammal\",\"QDoes it spend all of its time on land\",\"QIs it a bird\",\"QIs it feline\",\"QDoes it ever leave the water\",\"QCan it fly\",\"QIs it an insect\",\"QIs it a domestic pet\",\"QCan it be milked\",\"Aseal\",\"QDoes it have conical teeth and a single blowhole\",\"Asparrow\",\"QDoes it swim\",\"QDoes it sting\",\"QHas it got eight limbs\",\"Acat\",\"QDoes it have stripes\",\"QDoes it produce wool\",\"QIs it usually ridden\",20,21,\"Adolphin\",\"Awhale\",24,25,\"Apenguin\",\"Aostrich\",\"QDoes it produce honey\",\"QDoes it have a narrow waist\",\"QDoes it live in water\",\"Asquid\",32,33,\"Atiger\",\"Alion\",\"Asheep\",\"Acow\",\"Ahorse\",\"QIs it farmed\",40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,\"Abee\",\"Awasp\",\"Aant\",\"Atermite\",\"Aoctopus\",\"Aspider\",62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,\"Apig\",\"Adog\"]\r\n\r\nagain=True\r\ncurrent = 1\r\nprint(\"Answer the questions to identify an animal from the following list: horse, cow, sheep, pig, dog, cat, lion, tiger, whale, dolphin, seal, penguin, ostrich, sparrow, spider, ant, bee, wasp, termite, octopus, squid\\n\")\r\nwhile tree[current][0] == \"Q\" and again==True:\r\n answer = input(tree[current][1:] + \"? [Y/N]\").lower()\r\n if answer in (\"y\",\"yes\",\"ye\",\"yeah\"):\r\n current = current * 2\r\n else:\r\n current = current * 2 + 1\r\nprint(\"My guess would be...\",tree[current][1:])\r\n\r\nguess=input(\"Did I get it right? [Y/N]\")\r\nif guess in (\"y\", \"yes\", \"yeah\",\"Y\", \"ye\"):\r\n print(\"I knew it!\")\r\nelse:\r\n print(\"Oh no.....Nobody's perfect!\")\r\n\r\nagain=input(\"Do you want to play again? [Y/N]\")\r\nif again.lower() in (\"y\", \"yes\", \"yeah\", \"ye\"):\r\n again=True\r\nelse:\r\n again=False\",\"sub_path\":\"python/20 questions.py\",\"file_name\":\"20 questions.py\",\"file_ext\":\"py\",\"file_size_in_byte\":2181,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"dataset\":\"code-starcoder2\",\"pt\":\"87\"} +{\"seq_id\":\"540562815\",\"text\":\"#!/usr/bin/env python\n# pylint: disable=C0103\n\n\"\"\"Twitter Bot. Listens for mentions and replies to them.\"\"\"\n\n#\n# IMPORTS\n#\n\n# Allow using print as a function with parenthesis: print()\nfrom __future__ import print_function\n# basic operating system interactions\nimport os\nimport sys\n# import the code that connects to Twitter\nfrom twython import Twython, TwythonError\n# import all functions from tweet_text.py\nfrom tweet_text import *\n# import all functions from helper.py\nfrom helper import *\n\nimport requests\nimport datetime\n\n# Try to import the variables defined in credentials.py\n# If that does not exist (e.g. on Heroku), fall back to environment variables\ntry:\n from credentials import APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET\nexcept ImportError as error:\n print('Info: {e}'.format(e=error))\n print('Info: Cannot load credentials.py. Will use environment variables.')\n try:\n APP_KEY = os.environ['APP_KEY']\n APP_SECRET = os.environ['APP_SECRET']\n OAUTH_TOKEN = os.environ['OAUTH_TOKEN']\n OAUTH_TOKEN_SECRET = os.environ['OAUTH_TOKEN_SECRET']\n except KeyError as error:\n print('Error: {e} not found in environment variables'.format(e=error))\n print('Error: Could not retrieve credentials from either credentials.py or environment variables. 
Make sure either is set.')\n # can't do anything without credentials, so quit\n sys.exit()\n\n\n#\n# BOT CODE\n#\n\ndef setup():\n # Login to Twitter\n account = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n\n # Check the supplied credentials, get some general info on the account\n # https://dev.twitter.com/rest/reference/get/account/verify_credentials\n info = account.verify_credentials(include_entities=False, skip_status=True, include_email=False)\n print('user:', info['screen_name'])\n print('tweet count:', info['statuses_count'])\n print('favourite count:', info['favourites_count'])\n print('friends count:', info['friends_count'])\n return account\n\ndef tweet(account):\n \"\"\"check for mentions and answer, otherwise tweet idle tweet\"\"\"\n replied = False\n mentions = account.get_mentions_timeline()\n rate_limit_remaining = account.get_lastfunction_header('x-rate-limit-remaining')\n print('rate limit remaining', rate_limit_remaining)\n\n repositories = requests.get('https://api.github.com/users/klauck/repos').json()\n updated_repositories = []\n for repository in repositories:\n updated_at = datetime.datetime.strptime(repository['pushed_at'][:10], '%Y-%m-%d').date()\n if updated_at == (datetime.datetime.now() - datetime.timedelta(days=1)).date():\n updated_repositories.append(repository['full_name'])\n\n if len(updated_repositories) > 0:\n text = 'New commits for: %s' % (', '.join(updated_repositories))\n tweet = account.update_status(status=text)\n print('https://twitter.com/statuses/{id}'.format(id=tweet['id']))\n\n\nif __name__ == \"__main__\":\n account = setup()\n tweet(account)\n","sub_path":"twitter_bot.py","file_name":"twitter_bot.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"281675643","text":"from copy import copy\n\nfrom classes import SnakeAndLadder, Player, Dice\n\ngame = SnakeAndLadder()\n\n# get the snakes\ns = int(input())\nsnakes = {}\nfor _ in range(s):\n start, end = map(int, input().split(\" \"))\n snakes[start] = end\n game.snakes[start] = end\n\n# get the ladders\nl = int(input())\nladders = {}\nfor _ in range(l):\n start, end = map(int, input().split(\" \"))\n ladders[start] = end\n game.ladders[start] = end\n\n# get the players\nn = int(input())\nplayers = []\nfor _ in range(n):\n player = str(input())\n players.append(player)\n\n p = Player(player)\n game.add_player(p)\n\n# Orchestrate\ndice = Dice()\nplayer_order = game.get_player_order()\n\ncurr_play = 0\ncount = 10\nwhile not game.is_game_end():\n dice_val = dice.roll()\n game.move(player_order[curr_play], dice_val)\n\n if dice_val != 6:\n curr_play = (curr_play + 1) % len(game.players)\n\n count -= 1\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"199683071","text":"\n\nfrom xai.brain.wordbase.nouns._smile import _SMILE\n\n#calss header\nclass _SMILED(_SMILE, ):\n\tdef __init__(self,): \n\t\t_SMILE.__init__(self)\n\t\tself.name = \"SMILED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"smile\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_smiled.py","file_name":"_smiled.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"378818149","text":"# Write a program to ask for a DNA sequence.\n# Translate the DNA into protein. 
(See next page for the codon table to use.) When the codon doesn’t code for anything (eg, stop codon), use “*”. Ignore the extra bases if the sequence length is not a multiple of 3. Decide how you want to handle ambiguous codes.\n# \n# Come up with your own test cases. Compare your\n# results with someone else or with a web site.\n\n\ntable = {\n 'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',\n 'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',\n 'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L',\n 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P',\n 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',\n 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I',\n 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T',\n 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K',\n 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',\n 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A',\n 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D',\n 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',\n 'GGG': 'G', }\n\noutfile = open(\"Chapter Specification/dna to protein.txt\", \"w\")\nfor seq in open(\"Chapter Specification/sequence.txt\"):\n seq = seq.rstrip()\n new_seq = \"\"\n i=0\n while 1:\n # stop only when fewer than 3 bases remain, so the last full codon is kept\n if i+3 > len(seq):\n break\n sub_str = seq[i] + seq[i+1] + seq[i+2]\n i=i+3\n\n if sub_str in table:\n tmp = table[sub_str]\n else:\n tmp = \"*\"\n new_seq = new_seq + tmp\n \n print(new_seq)\n outfile.write(new_seq + \"\\n\")\n\noutfile.close()\",\"sub_path\":\"3-2/CSE 3210 (Artificial Intelligence)/Lab Works by '15/Python All/27. Dictionaries - DNA to Protein.py\",\"file_name\":\"27. Dictionaries - DNA to Protein.py\",\"file_ext\":\"py\",\"file_size_in_byte\":1791,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"dataset\":\"code-starcoder2\",\"pt\":\"87\"} +{\"seq_id\":\"344155573\",\"text\":\"import os\n\nfrom django.http import JsonResponse\n\ndef healthcheck(request):\n \"\"\"App health check, return OK if the app is alive\"\"\"\n version = os.environ.get(\"GIT_HASH\")\n hostname = os.environ.get(\"HOSTNAME\")\n return JsonResponse({\n 'version': version,\n 'status': 'OK',\n 'hostname': hostname\n })\n\",\"sub_path\":\"app/healthcheck.py\",\"file_name\":\"healthcheck.py\",\"file_ext\":\"py\",\"file_size_in_byte\":334,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"dataset\":\"code-starcoder2\",\"pt\":\"87\"} +{\"seq_id\":\"237768620\",\"text\":\"\"\"\"MobileNetV2\n\nPaper: https://arxiv.org/pdf/1801.04381.pdf\nAdapted from: https://github.com/pytorch/vision/blob/master/torchvision/models/mobilenetv2.py\n\nCopyright 2021 | farabio\n\"\"\"\nimport torch\nfrom torch import nn\nfrom torch import Tensor\nfrom typing import Any, List, Optional, Callable\nfrom farabio.utils.helpers import get_num_parameters, _make_divisible\n\n__all__ = ['MobileNetV2', 'mobilenet_v2']\n\n\nclass ConvBNActivation(nn.Sequential):\n def __init__(\n self,\n in_planes: int,\n out_planes: int,\n kernel_size: int = 3,\n stride: int = 1,\n groups: int = 1,\n norm_layer: Optional[Callable[..., nn.Module]] = None,\n activation_layer: Optional[Callable[..., nn.Module]] = None,\n dilation: int = 1,\n ) -> None:\n padding = (kernel_size - 1) // 2 * dilation\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if activation_layer is None:\n activation_layer = nn.ReLU6\n\n super().__init__(\n nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, dilation=dilation, groups=groups,\n bias=False),\n norm_layer(out_planes),\n 
activation_layer(inplace=True)\n )\n self.out_channels = out_planes\n\n\nclass InvertedResidual(nn.Module):\n def __init__(\n self,\n inp: int,\n oup: int,\n stride: int,\n expand_ratio: int,\n ) -> None:\n super(InvertedResidual, self).__init__()\n self.stride = stride\n\n norm_layer = nn.BatchNorm2d\n\n hidden_dim = int(round(inp * expand_ratio))\n self.use_res_connect = self.stride == 1 and inp == oup\n\n layers: List[nn.Module] = []\n if expand_ratio != 1:\n layers.append(ConvBNActivation(inp, hidden_dim, kernel_size=1))\n layers.extend([\n ConvBNActivation(hidden_dim, hidden_dim,\n stride=stride, groups=hidden_dim),\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n norm_layer(oup),\n ])\n self.conv = nn.Sequential(*layers)\n self.out_channels = oup\n\n def forward(self, x: Tensor) -> Tensor:\n if self.use_res_connect:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\n\nclass MobileNetV2(nn.Module):\n def __init__(\n self,\n n_classes: int = 1000,\n width_mult: float = 1.0,\n round_nearest: int = 8,\n init_weights: bool = True\n ) -> None:\n super(MobileNetV2, self).__init__()\n\n block = InvertedResidual\n\n input_channel = 32\n last_channel = 1280\n\n inverted_residual_setting = [\n # t, c, n, s\n [1, 16, 1, 1],\n [6, 24, 2, 2],\n [6, 32, 3, 2],\n [6, 64, 4, 2],\n [6, 96, 3, 1],\n [6, 160, 3, 2],\n [6, 320, 1, 1],\n ]\n\n input_channel = _make_divisible(\n input_channel * width_mult, round_nearest)\n self.last_channel = _make_divisible(\n last_channel * max(1.0, width_mult), round_nearest)\n features: List[nn.Module] = [\n ConvBNActivation(3, input_channel, stride=2)]\n\n for t, c, n, s in inverted_residual_setting:\n output_channel = _make_divisible(c * width_mult, round_nearest)\n for i in range(n):\n stride = s if i == 0 else 1\n features.append(\n block(input_channel, output_channel, stride, expand_ratio=t))\n input_channel = output_channel\n\n features.append(ConvBNActivation(\n input_channel, self.last_channel, kernel_size=1))\n self.features = nn.Sequential(*features)\n\n self.classifier = nn.Sequential(\n nn.Dropout(0.2),\n nn.Linear(self.last_channel, n_classes),\n )\n\n if init_weights:\n self._initialize_weights()\n\n def forward(self, x: Tensor) -> Tensor:\n x = self.features(x)\n x = nn.functional.adaptive_avg_pool2d(x, (1, 1))\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n return x\n\n def _initialize_weights(self) -> None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.zeros_(m.bias)\n\n\ndef mobilenet_v2(**kwargs: Any) -> MobileNetV2:\n model = MobileNetV2(**kwargs)\n return model\n\n\ndef test():\n x = torch.randn(1, 3, 224, 224)\n\n model = mobilenet_v2()\n y = model(x)\n\n print(\"Trainable parameters: \", get_num_parameters(model))\n print(\"in shape: \", x.shape, \", out shape: \", y.shape)\n\n\n# test()","sub_path":"farabio/models/classification/mobilenetv2.py","file_name":"mobilenetv2.py","file_ext":"py","file_size_in_byte":5012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"144430650","text":"from django.db import models\nfrom django.forms import ModelForm\nfrom rest_framework import serializers\n\nclass Assessment(models.Model):\n assessment_id = models.AutoField(primary_key=True)\n section = 
models.ForeignKey('Section')\n class_roster = models.ForeignKey('ClassRoster')\n score = models.DecimalField(max_digits=9, decimal_places=2)\n teacher_comments = models.TextField(blank=True)\n bayyinah_comments = models.TextField(blank=True)\n created_date = models.DateTimeField(auto_now_add=True)\n modified_date = models.DateTimeField(auto_now=True)\n\n class Meta:\n app_label = \"sass\"\n db_table = \"sass_assessments\"\n\nclass AssessmentSerializer(serializers.ModelSerializer):\n section_name = serializers.Field(source='section.section_name')\n\n class Meta:\n model = Assessment\n fields = ['assessment_id', 'section', 'section_name', 'class_roster', 'score', 'teacher_comments', 'bayyinah_comments', 'created_date', 'modified_date',]\n\n\nclass AssessmentForm(ModelForm):\n class Meta:\n model = Assessment\n fields = ['assessment_id', 'section', 'class_roster', 'score', 'teacher_comments', 'bayyinah_comments', ]\n\n","sub_path":"sass/models/assessment.py","file_name":"assessment.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"82059750","text":"from selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nimport authorization.authorization as authorization\nimport action.submenu as submenu\nimport unittest\n\nbase_url = \"http://way2automation.com/way2auto_jquery\"\n\n\nclass Test (unittest.TestCase):\n def setUp(self):\n self.driver = webdriver.Chrome(\"chromedriver.exe\")\n self.driver.get(base_url)\n self.wait = WebDriverWait(self.driver, 10)\n authorization.authorize(self.wait)\n self.driver.refresh()\n self.driver.get(base_url + \"/menu.php\")\n\n def test_action_submenu(self):\n submenu_action = submenu.action(self.driver, self.wait)\n assert(submenu_action.is_displayed())\n\n def tearDown(self):\n self.driver.quit()\n\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"548250752","text":"import numpy as np\r\nfrom sklearn.model_selection import KFold\r\nimport sys\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.decomposition import PCA\r\n\r\n#hidden units\r\nl=10000\r\n\r\n\r\nnp.set_printoptions(threshold=sys.maxsize)\r\n\r\na=np.loadtxt('featurestest.txt')\r\n\r\n#extract 1,5 rows\r\ndef extract(m,n):\r\n k0=a.shape[0]\r\n b=[0,0,0]\r\n for row in list(range(k0)):\r\n if a[row,0]==m:\r\n b=np.vstack((b,a[row,:])) \r\n elif a[row,0]==n:\r\n b=np.vstack((b,a[row,:]))\r\n c=b[1:k0,:]\r\n with open('problem2 test.txt','w') as f:\r\n f.write('<%s>\\n'%c)\r\n return c\r\n\r\nc=extract(1,5)\r\ndataSet=c[:,1:2]\r\nhwlabels=c[:,0]\r\n\r\nclf=MLPClassifier(hidden_layer_sizes=(l,),activation='relu',\r\n solver='adam',learning_rate_init=0.0001,max_iter=2000)\r\n\r\n\r\n\r\n#3 fold cross validation\r\nkf=KFold(n_splits=3)\r\npipe_lr=Pipeline([('sc', StandardScaler()),\r\n ('pca', PCA(n_components=1)),\r\n ('clf', clf.fit(dataSet,hwlabels))\r\n ])\r\n\r\nscores = []\r\nfor k, (train, test) in enumerate(kf.split(c)):\r\n pipe_lr.fit(dataSet[train], hwlabels[train])\r\n score= pipe_lr.score(dataSet[test], hwlabels[test])\r\n scores.append(score)\r\n print('Fold: %s, Class dist.: %s, Acc: %.3f' % (k+1,np.bincount(np.array(hwlabels[train],dtype=int)), score))\r\n 
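\r\n# NOTE: KFold above is created without shuffle=True, so random_state has no\r\n# effect on the splits here (and recent scikit-learn versions raise an error\r\n# for random_state combined with shuffle=False).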
\r\nprint('\nCV accuracy: %.3f +/- %.3f' % (np.mean(scores), np.std(scores)))\r\nprint('test error:',1-np.mean(scores))\r\n\r\n#statistical errors\r\nres=clf.predict(dataSet)\r\nerror_num =0 \r\nnum =len(dataSet)\r\nfor i in range(num):\r\n if res[i]!=hwlabels[i]:\r\n error_num+=1\r\nprint(\"Total num:\",num,\"Wrong num:\",error_num,\" in-sample error:\",error_num/float(num))\r\n\r\n\",\"sub_path\":\"problem2 test.py\",\"file_name\":\"problem2 test.py\",\"file_ext\":\"py\",\"file_size_in_byte\":1816,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"dataset\":\"code-starcoder2\",\"pt\":\"87\"} +{\"seq_id\":\"347142085\",\"text\":\"try:\r\n import simplegui\r\nexcept:\r\n import SimpleGUICS2Pygame.simpleguics2pygame as simplegui\r\n\r\nclass PlatformItem:\r\n\r\n def __init__(self, pos, player):\r\n\r\n self.pos = pos\r\n self.player = player\r\n self.spriteSheet = simplegui.load_image('https://i.imgur.com/Unr26NL.png')\r\n self.spriteSheetWidth = 768\r\n self.spriteSheetHeight = 2048\r\n self.columns = 6\r\n self.rows = 16\r\n self.frameWidth = self.spriteSheetWidth // self.columns\r\n self.frameHeight = self.spriteSheetHeight // self.rows\r\n self.frameCentreX = self.frameWidth // 2\r\n self.frameCentreY = self.frameHeight // 2\r\n self.frameCount = 0\r\n\r\n # NOTE: imgUpdate() below relies on these attributes, but the original\r\n # file never initialised them; the values here are assumed defaults,\r\n # not taken from the original source.\r\n self.frameIndex = (0, 0)\r\n self.spritePlaySpeed = 4\r\n self.topFrameRow = 0\r\n self.lastFrameRow = self.rows - 1\r\n\r\n self.collided = False\r\n\r\n def collide(self):\r\n #collision code....\r\n # if collided = True:\r\n #PlatformItem.action()\r\n pass\r\n\r\n def imgUpdate(self):\r\n j = self.frameIndex[1]\r\n i = self.frameIndex[0]\r\n if self.frameCount % self.spritePlaySpeed == 0:\r\n i = (self.frameIndex[0] + 1) % self.columns\r\n if self.frameIndex[1] == self.lastFrameRow and self.frameIndex[0] == 5:\r\n j = self.topFrameRow\r\n elif i == 0:\r\n j = (self.frameIndex[1] + 1) % self.rows\r\n self.frameIndex = (i, j)\r\n\r\n def update(self, canvas):\r\n if self.collided == False:\r\n canvas.draw_image(self.spriteSheet, (self.frameWidth * self.frameIndex[0] + self.frameCentreX,\r\n self.frameHeight * self.frameIndex[1] + self.frameCentreY),\r\n (self.frameWidth, self.frameHeight), self.pos,\r\n (self.frameWidth, self.frameHeight))\r\n self.frameCount += 1\r\n self.imgUpdate()\",\"sub_path\":\"GameGit2.2/PlatformItem.py\",\"file_name\":\"PlatformItem.py\",\"file_ext\":\"py\",\"file_size_in_byte\":1800,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"dataset\":\"code-starcoder2\",\"pt\":\"87\"} +{\"seq_id\":\"58319138\",\"text\":\"# Copyright 2013 OpenStack Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\n/update endpoint for Daisy v1 API\n\"\"\"\n\nimport subprocess\nfrom oslo_log import log as logging\nfrom daisy import i18n\nimport daisy.api.backends.common as daisy_cmn\nimport daisy.api.backends.kolla.common as kolla_cmn\nfrom daisy.api.backends.kolla import config as kconfig\nimport daisy.registry.client.v1.api as registry\nfrom threading import Thread\nfrom daisy.common import exception\n\n\nLOG = logging.getLogger(__name__)\n_ = i18n._\n_LE = i18n._LE\n_LI = i18n._LI\n_LW = i18n._LW\n\nkolla_state = kolla_cmn.KOLLA_STATE\n\n\ndef update_all_host_progress_to_db(req, hosts_id_list, role_host_meta={}):\n for host_id in hosts_id_list:\n host_roles = registry.get_host_roles_by_host_id(req.context, host_id)\n for host_role_id in host_roles:\n if role_host_meta:\n daisy_cmn.update_role_host(req, host_role_id['id'],\n role_host_meta)\n\n\nclass KOLLAUpgradeTask(Thread):\n \"\"\"\n Class for kolla upgrade openstack.\n \"\"\"\n\n def __init__(self, req, cluster_id, version_id, update_file):\n super(KOLLAUpgradeTask, self).__init__()\n self.req = req\n self.cluster_id = cluster_id\n self.progress = 0\n self.version_id = version_id\n self.update_file = update_file\n self.message = \"\"\n self.kolla_file = \"/home/kolla_install\"\n self.log_file = \"/var/log/daisy/kolla_%s_upgrade.log\" % self.cluster_id\n\n def run(self):\n hosts = registry.get_cluster_hosts(self.req.context, self.cluster_id)\n hosts_id_list = [host['host_id'] for host in hosts]\n cluster_meta = registry.get_cluster_metadata(self.req.context,\n self.cluster_id)\n self.message = \"prechecking environment\"\n update_all_host_progress_to_db(self.req, hosts_id_list,\n {'progress': 0,\n 'status': kolla_state['UPDATING'],\n 'messages': self.message})\n kolla_version_pkg_file = kolla_cmn.check_and_get_kolla_version(\n kolla_cmn.daisy_kolla_ver_path, self.update_file)\n if not kolla_version_pkg_file:\n self.message = \"kolla version file not found in %s\"\\\n % kolla_cmn.daisy_kolla_path\n update_all_host_progress_to_db(self.req, hosts_id_list,\n {'progress': 0,\n 'status': kolla_state[\n 'UPDATE_FAILED'],\n 'messages': self.message})\n raise exception.NotFound(message=self.message)\n if cluster_meta['tecs_version_id']:\n version_data = registry.get_version_metadata(\n self.req.context, cluster_meta['tecs_version_id'])\n if version_data['name'] == self.update_file:\n LOG.error(_(\"kolla version %s does not need to upgrade!\"\n % version_data['name']))\n self.message = \"kolla version %s does not need to upgrade!\" \\\n % version_data['name']\n update_all_host_progress_to_db(self.req, hosts_id_list,\n {'progress': 0,\n 'status': kolla_state[\n 'UPDATE_FAILED'],\n 'messages': self.message})\n return\n # TODO: Is the hosts argument right?\n try:\n LOG.info(_(\"load kolla registry...\"))\n kolla_cmn.version_load(kolla_version_pkg_file, hosts)\n except exception.SubprocessCmdFailed as e:\n self.message = \"load kolla registry failed!\"\n LOG.error(self.message)\n raise exception.InstallException(self.message)\n\n update_all_host_progress_to_db(self.req, hosts_id_list,\n {'progress': 10,\n 'status': kolla_state[\n 'UPDATING'],\n 'messages': self.message})\n\n res = kolla_cmn.version_load_mcast(kolla_version_pkg_file,\n hosts)\n (kolla_config, self.mgt_ip_list, host_name_ip_list) = \\\n kolla_cmn.get_cluster_kolla_config(self.req, self.cluster_id)\n # generate_kolla_config_file() cannot be used here, as it not only\n # updates global.yml, but also updates passwd.yml and redoes some\n # ssh commands (causing failures) on target nodes. So do not be\n # misled by that bad function name; here we only want to\n # update global.yml's docker_registry and openstack_release value.\n if kolla_config:\n kconfig.update_docker_registry_url(kolla_config, res)\n kconfig.update_openstack_release(kolla_config)\n\n hosts_ip_set = set()\n for host in hosts:\n host_meta = daisy_cmn.get_host_detail(self.req, host[\"host_id\"])\n host_ip = daisy_cmn.get_management_ip(host_meta)\n hosts_ip_set.add(host_ip)\n unreached_hosts = daisy_cmn.check_ping_hosts(\n hosts_ip_set, 3)\n if unreached_hosts:\n self.message = \"hosts %s ping failed\" % unreached_hosts\n update_all_host_progress_to_db(self.req, hosts_id_list,\n {'progress': 10,\n 'status': kolla_state[\n 'UPDATE_FAILED'],\n 'messages': self.message})\n raise exception.NotFound(message=self.message)\n\n LOG.info(_(\"precheck environment successfully ...\"))\n self.message = \"openstack upgrading\"\n update_all_host_progress_to_db(self.req, hosts_id_list,\n {'progress': 20,\n 'status': kolla_state[\n 'UPDATING'],\n 'messages': self.message})\n with open(self.log_file, \"w+\") as fp:\n try:\n LOG.info(_(\"begin to kolla-ansible \"\n \"upgrade for all nodes...\"))\n exc_result = subprocess.check_output(\n 'cd %s/kolla-ansible && ./tools/kolla-ansible upgrade -i '\n '%s/kolla-ansible/ansible/inventory/multinode' %\n (self.kolla_file, self.kolla_file),\n shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n LOG.error(\"kolla-ansible upgrade failed!\")\n self.message = \"kolla-ansible upgrade failed!\"\n update_all_host_progress_to_db(self.req, hosts_id_list,\n {'progress': 20,\n 'status': kolla_state[\n 'UPDATE_FAILED'],\n 'messages': self.message})\n LOG.info(_(\"kolla-ansible upgrade failed!\"))\n fp.write(e.output.strip())\n exit()\n else:\n LOG.info(_(\"openstack upgraded successfully\"))\n fp.write(exc_result)\n self.message = \"openstack upgraded successfully\"\n update_all_host_progress_to_db(self.req, hosts_id_list,\n {'progress': 100,\n 'status': kolla_state[\n 'ACTIVE'],\n 'messages': self.message})\n for host_id in hosts_id_list:\n daisy_cmn.update_db_host_status(\n self.req, host_id, {'tecs_version_id': self.version_id,\n 'tecs_patch_id': ''})\n cluster_meta = {}\n cluster_meta['tecs_version_id'] = self.version_id\n cluster_meta = registry.update_cluster_metadata(\n self.req.context, self.cluster_id, cluster_meta)\n LOG.info(_(\"openstack upgraded for cluster %s successfully.\"\n % self.cluster_id))\n\",\"sub_path\":\"code/daisy/daisy/api/backends/kolla/upgrade.py\",\"file_name\":\"upgrade.py\",\"file_ext\":\"py\",\"file_size_in_byte\":9121,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"dataset\":\"code-starcoder2\",\"pt\":\"87\"} +{\"seq_id\":\"492630648\",\"text\":\"import queue\r\nimport sys\r\n\r\nimport cv2\r\nimport numpy as np\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom keras.models import load_model\r\n\r\nimport picture as pic\r\n\r\n\r\nclass Ui_MainWindow(QtWidgets.QWidget):\r\n\r\n def __init__(self, parent=None):\r\n super().__init__(parent) # call the parent class constructor\r\n\r\n self.timer_camera = QtCore.QTimer() # timer that controls the video display frame rate\r\n self.cap = cv2.VideoCapture() # video stream\r\n self.CAM_NUM = 0 # 0 means the stream comes from the laptop's built-in camera\r\n\r\n self.set_ui() # initialize the UI\r\n self.slot_init() # initialize the slot functions\r\n self.frame_num = 0\r\n self.frames = []\r\n self.inference_frames = []\r\n self.action_labels = ['step1', 'step2', 'step3', 'step4', 'step5', 'step6', 'step7']\r\n self.inference_model = load_model('myModel.h5')\r\n self.complete = {}\r\n\r\n self.x0 = 300\r\n self.y0 = 100\r\n self.width = 300\r\n self.height = 300\r\n self.actionQueue = queue.Queue()\r\n\r\n for step in self.action_labels:\r\n self.actionQueue.put(step)\r\n self.complete[step] = 5\r\n\r\n self.current_stage = 'step1'\r\n\r\n '''UI layout'''\r\n\r\n def set_ui(self):\r\n self.__layout_main = QtWidgets.QHBoxLayout() # overall layout\r\n self.__layout_fun_button = QtWidgets.QVBoxLayout() # button layout\r\n self.__layout_data_show = QtWidgets.QVBoxLayout() # data (video) display layout\r\n self.__layout_roi_res_show = QtWidgets.QVBoxLayout() # data (video) display layout\r\n self.button_open_camera = QtWidgets.QPushButton('Open Camera') # button for opening the camera\r\n self.button_close = QtWidgets.QPushButton('Exit') # button for exiting the program\r\n self.button_open_camera.setMinimumHeight(50) # set button size\r\n self.button_close.setMinimumHeight(50)\r\n self.button_close.move(10, 100) # move the button\r\n\r\n '''Info display'''\r\n self.label_show_camera = QtWidgets.QLabel() # Label that displays the video\r\n self.label_show_camera.setFixedSize(641, 481) # set the video Label size to 641x481\r\n self.__layout_data_show.addWidget(self.label_show_camera)\r\n\r\n self.label_roi_camera = QtWidgets.QLabel() # Label that displays the video ROI\r\n self.label_roi_camera.setFixedSize(201, 241) # set the ROI Label size to 300x240\r\n self.__layout_roi_res_show.addWidget(self.label_roi_camera)\r\n\r\n self.label_res_camera = QtWidgets.QLabel() # Label that displays the processed result (res)\r\n self.label_res_camera.setFixedSize(201, 241) # set the res Label size to 300x240\r\n self.__layout_roi_res_show.addWidget(self.label_res_camera)\r\n\r\n '''Add the buttons to the button layout'''\r\n self.__layout_fun_button.addWidget(self.button_open_camera) # put the open-camera button into the button layout\r\n self.__layout_fun_button.addWidget(self.button_close) # put the exit button into the button layout\r\n '''Add the widgets to the overall layout'''\r\n self.__layout_main.addLayout(self.__layout_fun_button) # add the button layout to the overall layout\r\n self.__layout_main.addLayout(self.__layout_data_show) # add the video display Label to the overall layout\r\n self.__layout_main.addLayout(self.__layout_roi_res_show)\r\n '''Once the overall layout is ready, pass it to the function below'''\r\n self.setLayout(self.__layout_main) # widgets only show up after this step\r\n\r\n '''Initialize all slot functions'''\r\n\r\n def slot_init(self):\r\n self.button_open_camera.clicked.connect(\r\n self.button_open_camera_clicked) # when this button is clicked, call button_open_camera_clicked()\r\n self.timer_camera.timeout.connect(self.show_camera) # when the timer fires, call show_camera()\r\n self.button_close.clicked.connect(self.close) # when clicked, call close(); note close() comes from the parent QtWidgets.QWidget and quits the program\r\n\r\n '''One of the slot functions'''\r\n\r\n def button_open_camera_clicked(self):\r\n if not self.timer_camera.isActive(): # if the timer is not running\r\n flag = self.cap.open(self.CAM_NUM) # 0 opens the laptop's built-in camera; a video file path opens that video\r\n if not flag: # flag says whether open() succeeded\r\n msg = QtWidgets.QMessageBox.warning(self, 'warning', \"please check your camera setting on your laptop\",\r\n buttons=QtWidgets.QMessageBox.Ok)\r\n else:\r\n self.timer_camera.start(30) # start the 30 ms timer, so a frame is grabbed and shown every 30 ms\r\n self.button_open_camera.setText('Close Camera')\r\n else:\r\n self.timer_camera.stop() # stop the timer\r\n self.cap.release() # release the video stream\r\n self.label_show_camera.clear() # clear the video display area\r\n self.label_res_camera.clear()\r\n self.label_roi_camera.clear()\r\n self.button_open_camera.setText('Open Camera')\r\n\r\n def show_camera(self):\r\n flag, image = self.cap.read() # read from the video stream\r\n image = cv2.flip(image, 2)\r\n\r\n text1 = \"Please start wish your hand\"\r\n text2 = \"You are in {} now ({})\".format(self.current_stage, self.complete[self.current_stage])\r\n\r\n roi, res = pic.binaryMask(image, self.x0, self.y0, self.width, self.height) # crop the gesture region and process it\r\n\r\n show = cv2.resize(image, (640, 480)) # resize the captured frame to 640x480\r\n roi = cv2.resize(roi, (300, 240))\r\n res = cv2.resize(res, (300, 240))\r\n\r\n show = cv2.cvtColor(show, cv2.COLOR_BGR2RGB) # convert colors back to RGB so they display correctly\r\n roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)\r\n res = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)\r\n\r\n if self.frame_num == 10:\r\n\r\n inference_video = np.array(self.frames)\r\n inference_video = np.array([inference_video]).transpose((0, 2, 3, 1))\r\n inference_video = np.array([inference_video]).reshape((inference_video.shape[0], 32, 32, 10, 1))\r\n\r\n prdict = self.inference_model.predict(inference_video)[0]\r\n proba = prdict[np.argmax(prdict)]\r\n\r\n result = self.action_labels[np.argmax(prdict)]\r\n\r\n threshold = 0.7\r\n if proba < threshold:\r\n result = None\r\n\r\n print(\"this is {} and the probability is {}\".format(result, proba))\r\n\r\n self.frame_num = 0\r\n self.frames = []\r\n\r\n if not result is None:\r\n if result == self.current_stage:\r\n self.complete[result] -= 1\r\n if self.complete[result] == 0:\r\n self.current_stage = self.actionQueue.get()\r\n if self.actionQueue.empty:\r\n text1 = \"You have finished\"\r\n text2 = \" \"\r\n\r\n else:\r\n self.frames.append(cv2.resize(cv2.cvtColor(res, cv2.COLOR_BGR2GRAY), (32, 32)))\r\n self.frame_num += 1\r\n\r\n show = cv2.putText(show, text1, (40, 50), cv2.FONT_HERSHEY_PLAIN, 2.0, (0, 0, 255), 2)\r\n show = cv2.putText(show, text2, (40, 65), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 255), 2)\r\n\r\n showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0],\r\n QtGui.QImage.Format_RGB888) # turn the frame data into a QImage\r\n\r\n showRoi = QtGui.QImage(roi.data, roi.shape[1], roi.shape[0],\r\n QtGui.QImage.Format_RGB888)\r\n showRes = QtGui.QImage(res.data, res.shape[1], res.shape[0],\r\n QtGui.QImage.Format_RGB888)\r\n\r\n self.label_show_camera.setPixmap(QtGui.QPixmap.fromImage(showImage)) # show the QImage in the video Label\r\n self.label_roi_camera.setPixmap(QtGui.QPixmap.fromImage(showRoi))\r\n self.label_res_camera.setPixmap(QtGui.QPixmap.fromImage(showRes))\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QtWidgets.QApplication(sys.argv) # boilerplate: the Qt application object\r\n ui = Ui_MainWindow() # instantiate Ui_MainWindow\r\n ui.show() # call ui.show() to display it; show() also comes from the parent QtWidgets.QWidget\r\n sys.exit(app.exec_())\r\n\",\"sub_path\":\"qt.py\",\"file_name\":\"qt.py\",\"file_ext\":\"py\",\"file_size_in_byte\":8457,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"dataset\":\"code-starcoder2\",\"pt\":\"87\"} +{\"seq_id\":\"83803247\",\"text\":\"from __future__ import absolute_import\nfrom distutils.core import Extension\nfrom astropy_helpers import setup_helpers\n\ndef get_extensions():\n exts = []\n\n # malloc\n mac_incl_path = \"/usr/include/malloc\"\n\n cfg = setup_helpers.DistutilsExtensionArgs()\n cfg['include_dirs'].append('numpy')\n cfg['include_dirs'].append(mac_incl_path)\n cfg['include_dirs'].append('twobody/')\n cfg['extra_compile_args'].append('--std=gnu99')\n cfg['sources'].append('twobody/wrap.pyx')\n cfg['sources'].append('twobody/src/twobody.c')\n exts.append(Extension('twobody.wrap', **cfg))\n\n return exts\n\ndef get_package_data():\n return {'twobody': ['data/*', 'src/*.h', 'src/*.c', '*.pyx', '*.pxd']}\n\",\"sub_path\":\"twobody/setup_package.py\",\"file_name\":\"setup_package.py\",\"file_ext\":\"py\",\"file_size_in_byte\":706,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"dataset\":\"code-starcoder2\",\"pt\":\"87\"} +{\"seq_id\":\"525052376\",\"text\":\"# labor.py\n#import mdl_base as mdl\nexecfile('mdl_base.py')\n\nfrom pylab import *\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\t\nfrom matplotlib.widgets import Slider
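\n\n# NOTE: execfile() above is Python 2 only (the commented-out \"import\n# mdl_base as mdl\" hints at the Python 3 style); production(), labor_supply(),\n# labor_demand() and the *_init / *_max constants are presumably supplied by\n# mdl_base.py.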
\n\ndef labor_plots():\n plt.close(\"all\")\n\n # Set up\n N = linspace(0, in_max, 61)\n Y = production(A_init, K_init, N, alpha)\n omega_s = labor_supply(N)\n omega_d = labor_demand(A_init, K_init, N, alpha)\n\n\n ## Set up figure\n fig, axs = plt.subplots(1, 2)\n fig.subplots_adjust(bottom=0.2, left=0.1)\n ax1 = axs[0]\n ax2 = axs[1]\n\n # Production plot\n prod, = ax1.plot(N, Y, 'r')\n ax1.set_xlabel('Labor (N)')\n ax1.set_ylabel('Output (Y)')\n ax1.set_autoscale_on(False)\n ax1.set_ylim([0, disp_max])\n ax1.spines['right'].set_color(\"none\")\n ax1.spines['top'].set_color(\"none\")\n ax1.tick_params(axis='x', which='both', top='off')\n ax1.tick_params(axis='y', which='both', right='off')\n ax1.yaxis.set_ticks(arange(0, disp_max, 1))\n\n\n # Labor Market Plot\n labor_d, = ax2.plot(N, omega_d, 'r')\n labor_s = ax2.plot(N, omega_s, 'r')\n\n ax2.set_xlabel('Labor (N)')\n ax2.set_ylabel('Wage ($\\omega$)')\n ax2.set_autoscale_on(False)\n ax2.set_ylim([0, 3.1])\n ax2.spines['right'].set_color(\"none\")\n ax2.spines['top'].set_color(\"none\")\n ax2.tick_params(axis='x', which='both', top='off')\n ax2.tick_params(axis='y', which='both', right='off')\n ax2.yaxis.set_ticks(arange(0, 3.1, 1))\n\n def on_capital_change(k_0):\n N_labor = solve_labor(N, omega_d, omega_s)\n labor_d.set_ydata(labor_demand(A_init, k_0, N, alpha))\n prod.set_ydata(production(A_init, k_0, N_labor, alpha))\n\n k_slider_ax = plt.axes([0.1, 0.1, 0.8, 0.02])\n k_slider = Slider(k_slider_ax, \"Capital (K)\", 0, in_max, valinit=1, color='#AAAAAA')\n k_slider.on_changed(on_capital_change)\n\n fig.patch.set_facecolor('white')\n fig.show()\n\",\"sub_path\":\"arc/labor.py\",\"file_name\":\"labor.py\",\"file_ext\":\"py\",\"file_size_in_byte\":1767,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"dataset\":\"code-starcoder2\",\"pt\":\"87\"} +{\"seq_id\":\"18221895\",\"text\":\"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 28 15:13:29 2018\n\n@author: Luc Deike\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport matplotlib.colors\nimport fluids2d.backlight as backlight\nimport fluids2d.bubble_pinchoff as pinchoff\nimport fluids2d.geometry\nimport comps\nimport pims\nimport pickle\nimport pandas as pd\nfrom scipy.signal import butter, lfilter, filtfilt, argrelextrema\nfrom scipy.optimize import curve_fit\nfrom copy import deepcopy\n\ncase_name = r'bubble_pinchoffNeedle_pumps8V_fps100k_viewD_v5'\n\n#meta = meta[meta['case_name']==case_name].to_dict('records')[0]\nmeta = pinchoff.get_meta_dict(case_name)\nfolder = comps.cf(meta['data_loc'])+str(int(meta['date_folder']))+r'\\\\'\nbu = pickle.load(open(folder+case_name+r'_breakupInfo.pkl'))\n#bu.dists_m = bu.other_params['dists_orig']*bu.dx\n\n\n#bu.other_params['L_c'] = bu.dists*7\nif ~np.isnan(meta['remove_neck_before_time']):\n for vec in [bu.dists,bu.dists_m,bu.other_params['L_c']]:\n vec[bu.t*-1>meta['remove_neck_before_time']] = np.nan\n #bu.points[np.argwhere(bu.t*-1>meta['remove_neck_before_time'])] = np.nan\n \nres = pinchoff.rolling_powerlaw_fits(bu.t*-1,bu.dists_m,window=10)\nplt.figure()\nplt.plot(res[:,2],res[:,0],color='gray',lw=0.5,alpha=0.5)\nplt.scatter(res[:,2],res[:,0],c=np.arange(len(res)))\nplt.figure()\nplt.loglog(bu.t*-1,bu.dists_m)\nplt.scatter(bu.t*-1,bu.dists_m,c=np.arange(len(bu.t)))\n\n \n# physical properties\nrho_g = 1.2\nrho_l = 1000.\nsigma = 0.0728\nmu_l = 8.9e-4\nmu_g = 1.845e-5\n\ndt = bu.dt\nr_b = 
3e-3\n\n#pinchoff.powerlaw_exponent_fit_to_range(bu.t*-1,bu.dists_m,x_range='click',viz=True,n_ranges=3)\n#\n#fig = plt.figure()\n#ax = fig.add_subplot(111)\n##ax.plot(bu.dists)\n##ax.plot(np.array(bu.points)[])\n#points = np.array(bu.points)\n#ax.plot(points[:,0,0],points[:,0,1],'x')\n#ax.plot(points[:,1,0],points[:,1,1],'x')\n#\n#order = np.argsort(points[:,:,1],axis=1)\n#\n#points_resort = points.copy()\n#for pi in range(len(points)):\n# if order[pi][0]==1:\n# points_resort[pi,:,:] = np.flipud(points_resort[pi,:,:])\n#ax.plot(points_resort[:,0,0],points_resort[:,0,1],'-')\n#ax.plot(points_resort[:,1,0],points_resort[:,1,1],'-')\n#stophere\n\n\n'''\nbetter estimate for the pinchoff time\n'''\nneck_before = bu.dists_m[0:4]\nt_before = bu.t[0:4]*-1\n#plt.figure()\n#plt.plot(t_before,neck_before,'-x')\nlog_t = np.log(t_before)\nlog_neck = np.log(neck_before)\ndef func_powerlaw_withoffset(x,m,c,offset):\n return c*(x+offset)**m\ntry:\n stophere\n popt,pcov = curve_fit(func_powerlaw_withoffset,t_before,neck_before,maxfev=2000,p0=np.asarray([0.5,1,-0.5*dt]))\n plt.plot(np.linspace(0,max(t_before),1001),func_powerlaw_withoffset(np.linspace(0,max(t_before),1001),popt[0],popt[1],popt[2]),'--')\n print(popt[2]/bu.dt)\n bu.t = bu.t-popt[2]\n print(popt)\nexcept:\n pass\n\n\n'''\nInstantaneous exponent\n'''\n\ndists_m_orig = bu.dists_m.copy()\n#fig = plt.figure()\n#ax_neck = fig.add_subplot(111)\n#ax_exp = ax_neck.twinx()\n#ax_neck.loglog(bu.t*-1,dists_m_orig,color='gray',alpha=0.5,lw=0.5)\n#ax_neck.loglog(bu.t*-1,bu.dists_m,color='k')\n#ax_neck.set_ylabel('$r_0$ [m]')\n#ax_neck.set_xlabel('$t_\\mathrm{b}-t$ [s]')\n\nlog_neck = np.log(dists_m_orig)\nlog_t = np.log(bu.t*-1)\ninst_slope = np.gradient(log_neck)/np.gradient(log_t)\n\n#ax_exp.semilogx(bu.t*-1,inst_slope,color='r',alpha=0.4)\n#ax_exp.semilogx(bu.t*-1,pd.Series(inst_slope).rolling(window=10,center=True,min_periods=0).mean(),color='r')\n#ax_exp.set_ylim([0,1])\n#ax_exp.set_ylabel('$m$') \n\n \n\n'''\npressure terms\n'''\n\nd_b = 5e-3\n#epsilon = 8000./100**2\nepsilon = meta['epsilon']\nif np.isnan(epsilon):\n epsilon = 8000./100**2\ndVdt_turb = 2.**(1./6) * d_b**(7./3) * epsilon**(1./3)\n\n\nwin = 2\ndf = pd.DataFrame(index=bu.t)\ndf['r_0'] = dists_m_orig/2.\ndf['r_0dot'] = np.gradient(df['r_0'])/np.gradient(df.index)*-1\n#df['r_0dot'].iloc[1:] = df['r_0dot'].iloc[1:]/np.diff(df.index)*-1\ndf['r_0dot'] = df['r_0dot'].rolling(center=True,window=win,min_periods=0).apply(np.nanmean)\n\ndf['r_0dotdot'] = np.gradient(df['r_0dot'])/np.gradient(df.index)\n#df['r_0dotdot'].iloc[1:] = df['r_0dotdot'].iloc[1:]/np.diff(df.index)\ndf['r_0dotdot'] = df['r_0dotdot'].rolling(center=True,window=win,min_periods=0).apply(np.nanmean)\n\ndf['L_c'] = bu.other_params['L_c']*bu.dx\nL_c_nans = pd.isna(df['L_c']).copy()\ndf['L_c'][L_c_nans] = np.inf\n\ndf['u_neck'] = dVdt_turb/(np.pi*df['r_0']**2)\n\ndf['p_accel'] = rho_l * df['r_0'] * df['r_0dotdot']\ndf['p_vel'] = 3./2. * rho_l*df['r_0dot']**2\ndf['p_sur'] = sigma*(1./df['r_0'] - 1./df['L_c']) # - 1./df['L_c']\ndf['p_vis'] = 4*mu_l * df['r_0dot']/df['r_0']\ndf['p_Poi'] = 4*mu_g*df['L_c']/(np.pi*df['r_0']**4) * dVdt_turb\ndf['p_Poi'][L_c_nans] = np.nan\ndf['p_Ber'] = 1./2. 
* rho_g * df['u_neck']**2\n\n'''\nmain plot\n'''\n\nfig = plt.figure(figsize=(12,7))\nax_neck = fig.add_subplot(221)\n#ax_exp = ax_neck.twinx()\nax_contour = fig.add_subplot(122)\nax_pressures = fig.add_subplot(223,sharex=ax_neck)\n\nbu.points = pinchoff.order_neck_points(bu.points,viz=True)\n\nt_show = np.geomspace(bu.dt,max(bu.t*-1),11)\nti_show = [np.argmin(np.abs(t+bu.t)) for t in t_show]\nneck_points = np.array([np.array(bu.points[ti]) for ti in range(max(ti_show)) if np.size(bu.points[ti])==4])\nmean_neck = np.nanmean(neck_points,axis=(0,1))\nfor i in [0,1]:\n ax_contour.plot((neck_points[:,i,1]-mean_neck[1])*1000*bu.dx,(neck_points[:,i,0]-mean_neck[0])*1000*bu.dx,'.',markerfacecolor='r',markeredgecolor='none',alpha=0.2,lw=0.5)\ncmap = cm.viridis\n#neck_points = []\nfor tii,ti in enumerate(ti_show):\n contour = bu.contours[ti]\n if contour is not None:\n color=cmap(float(tii)/len(ti_show))\n #neck_points.append(np.array(bu.points[ti])) \n ax_contour.plot((contour[:,1]-mean_neck[1])*bu.dx*1000,(contour[:,0]-mean_neck[0])*bu.dx*1000,alpha=0.5,color=color)\n ax_neck.axvline(bu.t[ti]*-1*1000,color=color,alpha=0.5)\n \nfor i,color in zip([0,1],['r','b']):\n ax_contour.plot(bu.points[:,i,1],bu.points[:,i,0],color=color)\n \n#stophere\n \nax_contour.invert_yaxis()\n#ax_neck.loglog(bu.t*-1*1000,dists_m_orig*1000,color='k',label='$d_0$') # ,alpha=0.5,lw=0.5\npopt_list,x_range_list,ax = pinchoff.powerlaw_exponent_fit_to_range(bu.t*-1,bu.dists_m,x_range='click',viz=ax_neck,n_ranges=3)\nax_neck.fill_between(bu.t*-1*1000,dists_m_orig*1000-bu.dx*1000,dists_m_orig*1000+bu.dx*1000,color='gray',alpha=0.5) # ,alpha=0.5,lw=0.5\nax_neck.loglog(bu.t*-1*1000,bu.other_params['L_c']*bu.dx*1000,color='gray',label='$L_\\mathrm{c}$')\n#ax_neck.loglog(bu.t*-1,bu.dists_m,color='k') \n#ax_exp.semilogx(bu.t*-1*1000,pd.Series(inst_slope).rolling(window=5,center=True,min_periods=0).mean(),color='r',alpha=0.5)\n\nax_neck.set_ylabel('$d_0 [mm]$')\n#ax_exp.set_ylabel('$m$')\n\n#ax_exp.set_ylim([0,2])\n\nax_contour.set_xlabel('$x$ [mm]')\nax_contour.set_ylabel('$z$ [mm]')\nax_contour.set_aspect('equal')\n\npressures = ['p_accel','p_vel','p_sur','p_vis','p_Poi','p_Ber']\ncolors = ['b','g','r','k','cyan','purple']\nfor p,c in zip(pressures,colors):\n df[p][df[p]<0] = np.nan\n ax_pressures.loglog(df.index*1000*-1,df[p]/1000.,label=p,color=c)\nax_pressures.legend()\nax_pressures.set_ylim([10**-3,10**2])\n\nax_pressures.set_xlabel('$t_\\mathrm{b} - t$ [ms]')\nax_pressures.set_ylabel('pressure terms [kPa]')\n\nax_contour.set_title(case_name)\n\nfig.tight_layout()\nfig.savefig(folder+case_name+'_contours_and_pressures.pdf')\n\n'''\nqualitatively check self-similarity of the profile\n'''\nfig = plt.figure()\nax = fig.add_subplot(111)\nfor tii,ti in enumerate(ti_show):\n contour = bu.contours[ti]\n color=cmap(float(tii)/len(ti_show))\n #neck_points.append(np.array(bu.points[ti]))\n if bu.points[ti] is not None:\n center_point = np.nanmean(bu.points[ti],axis=0)\n x = contour[:,0]\n y = contour[:,1]\n x[x>0.99*(np.nanmax(x)-np.nanmin(x))]=np.nan\n x[x<0.01*(np.nanmax(x)-np.nanmin(x))]=np.nan\n y[y>0.99*(np.nanmax(y)-np.nanmin(y))]=np.nan\n y[y<0.01*(np.nanmax(y)-np.nanmin(y))]=np.nan\n ax.plot((x-center_point[0])/bu.dists[ti],(y-center_point[1])/bu.dists[ti],alpha=0.5,color=color)\nax.set_aspect('equal')\nax.set_xlabel('$x/d_0$')\nax.set_ylabel('$y/d_0$')\nfig.tight_layout()\n\n \n'''\nfits right around tau=0\n'''\n\nn_show = 18\nfig,axs = plt.subplots(3,6,figsize=(18,9),sharex=True,sharey=True); axs=axs.flatten()\n\nfor axi in 
range(len(axs)):\n pinchoff.show_frame(bu,-1*axi-1,ax=axs[axi])\n axs[axi].set_axis_off()\n axs[axi].set_title('$t_\\mathrm{b}-t = $ '+'{:01.0f}'.format(bu.t[axi]*1e6*-1)+' $\\mu$s')\n \nif False:\n fig.tight_layout()\n fig.savefig(folder+case_name+'_neck_lines.pdf')","sub_path":"scripts/pinchoff_single_case_v2.py","file_name":"pinchoff_single_case_v2.py","file_ext":"py","file_size_in_byte":8564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"547743522","text":"\ndef triangle(p,a):\n r=[]\n s=p/2\n for x in range(1,p//2+1):\n for y in range(int(s-x+1),p//2+1):\n z=p-x-y\n if round((s*(s-x)*(s-y)*(s-z))**.5,5)==a:\n new=sorted((x,y,z))\n if new not in r:\n r.append(new)\n return sorted(r)\n\n","sub_path":"dSrisJKHB78aj2d7L_0.py","file_name":"dSrisJKHB78aj2d7L_0.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"244995977","text":"#-- coding:utf8 --\nimport togpio.gpioset\nimport RPi.GPIO as GPIO\nimport os\nimport time\n\n\nif __name__ == \"__main__\":\n # GPIO pin test\n IO = togpio.gpioset.BGpio()\n IO.Gpio_Init()\n Index = 1\n Mod = 1\n fval = 0\n while True:\n val = GPIO.input(IO.B6)\n try:\n if fval == 0 and val == 1:\n os.system(\"sudo reboot\")\n elif fval == 1 and val == 0:\n os.system(\"sudo pkill -f main.py\")\n fval = val # remember this reading so the edge detection above works on the next pass\n time.sleep(2)\n\n # var = IO.ioflash()\n # # print(var)\n # if IO.Bottom_state_Add_Index(var[\"B1\"],var[\"B2\"],Index) is True:\n # Index = IO.Index\n # print(Index)\n # if IO.Bottom_state_First(var[\"B1\"]) is True:\n # print(\"heihei\")\n # elif IO.Bottom_state_Add_Mods(var[\"B4\"],Mod) is True:\n # Mod = IO.Mods\n # print(Mod)\n # else:\n # pass\n except KeyboardInterrupt:\n GPIO.cleanup()","sub_path":"2.Software/RpiProgram/Bra1/literacy_machine/B6Control.py","file_name":"B6Control.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"365205527","text":"# Enter your code here. Read input from STDIN. 
Print output to STDOUT\n\n\nclass Cercle:\n def __init__(self,o,r):\n self.o=o\n self.r=r\n def inter(self,other):\n if self.o==other.o:\n if self.r==other.r:\n return((self.o[0]-self.r,self.o[1]))\n else:\n return(\"Impossible!\")\n else:\n d=(self.o[0]-other.o[0])**2+(self.o[1]-other.o[1])**2\n if d>(self.r+other.r)**2:\n return(\"Impossible!\")\n elif d==(self.r+other.r)**2:\n v=((other.o[0]-self.o[0])/d**0.5,(other.o[1]-self.o[1])/d**0.5)\n return((self.o[0]+self.r*v[0],self.o[1]+self.r*v[1]))\n else:\n a=d**0.5/2+(self.r**2-other.r**2)/(2*d**0.5)\n h=(self.r**2-a*a)**0.5\n v=((other.o[0]-self.o[0])/d**0.5,(other.o[1]-self.o[1])/d**0.5)\n w=(v[1],-v[0])\n return(min((self.o[0]+a*v[0]+h*w[0],self.o[1]+a*v[1]+h*w[1]),(self.o[0]+a*v[0]-h*w[0],self.o[1]+a*v[1]-h*w[1])))\n\ndef f(x):\n y=str(round(100*x))\n if len(y)<3:\n y=\"0\"*(3-len(y))+y\n return(y[:-2]+\".\"+y[-2:])\n\na,b=map(int,input().split())\nx1,y1=map(int,input().split())\nx0,y0=map(int,input().split())\nx3,y3=map(int,input().split())\nx2,y2=map(int,input().split())\nC=Cercle((x0-(x1-x0)/(a*a-1),y0-(y1-y0)/(a*a-1)),((x0-x1)**2+(y0-y1)**2)**0.5*a/(a*a-1))\nCC=Cercle((x2-(x3-x2)/(b*b-1),y2-(y3-y2)/(b*b-1)),((x2-x3)**2+(y2-y3)**2)**0.5*b/(b*b-1))\n\nz=C.inter(CC)\nif type(z)==tuple:\n z=f(z[0])+\" \"+f(z[1])\nprint(z)\n","sub_path":"Geometry/HoueseLocaction.py","file_name":"HoueseLocaction.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"433610339","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('joins', '0002_join_ip_addres'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='join',\n old_name='ip_addres',\n new_name='ip_address',\n ),\n ]\n","sub_path":"joins/migrations/0003_auto_20151117_1914.py","file_name":"0003_auto_20151117_1914.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"158765660","text":"#!/usr/bin/env python\n'''File name: Denver-Water_XWT.py\n Author: Andreas Prein\n E-mail: prein@ucar.edu\n Date created: 16.04.2018\n Date last modified: 16.04.2018\n\n ############################################################## \n Purpos:\n Contains the setup for extreme weather typing (XWT) for\n Denver Water watersheds\n\n'''\n\ndef HUC2_XWTs_apply(Season,\n Region):\n\n from pdb import set_trace as stop\n import numpy as np\n import os\n import pandas as pd\n import datetime\n \n # ###################################################\n \n FireObs='MODIS' # ['MODIS','Parks']\n REANAL='ERA5' # ['ERAI','ERA5']\n if FireObs == 'MODIS':\n dStartDayPR=datetime.datetime(2001, 1, 1,0) # (2000, 11, 01,0)\n dStopDayPR=datetime.datetime(2019, 11, 30,23)\n rgdTime = pd.date_range(dStartDayPR, end=dStopDayPR, freq='d')\n elif FireObs == 'Parks':\n dStartDayPR=datetime.datetime(2002, 1, 1,0) # (2000, 11, 01,0)\n dStopDayPR=datetime.datetime(2018, 12, 31,23)\n rgdTime = pd.date_range(dStartDayPR, end=dStopDayPR, freq='d')\n \n if Season == 'AMJJAS':\n iMonths=[4,5,6,7,8,9]\n elif Season == 'ONDJFM':\n iMonths=[1,2,3,10,11,12]\n elif Season == 'Annual':\n iMonths=[1,2,3,4,5,6,7,8,9,10,11,12]\n \n # ---------\n # Setup clustering algorithm\n ClusterMeth='hdbscan' # current options are ['HandK','hdbscan']\n ClusterBreakup = 0 # breakes up clusters that are unproportionally large (only for hdbscan)\n 
RelAnnom=1 # 1 - calculates daily relative anomalies\n NormalizeData='D' # normalize variables | options are - 'C' - climatology\n # - 'D' - daily (default)\n # - 'N' - none\n MinDistDD=7 # minimum nr of days between XWT events\n RemoveAnnualCycl=1 # remove annual cycle in varaiables with 21 day moving average filter\n # ---------\n\n # DENVER WATER REGIONS\n sPlotDir='/glade/u/home/prein/projects/2019_Janice-CA-Fire-WTs/plots/WT-Centroids/'# +str(iNrOfExtremes)+'_Events/'\n sDataDir='/glade/campaign/mmm/c3we/prein/Papers/2019_Janice-CA-Fire-WTs/data/AUC-APR/' # '/glade/work/jaye/fire/'\n DW_Regions=['Bay_Area','Central_Coast','LA','Modoc','Northeast','San_Diego','Siearas_East','Siearas_West']\n # sRegion=Regions.index(Region)\n # Region=Regions[sRegion]\n sSubregionPR='/glade/u/home/prein/projects/2019_Janice-CA-Fire-WTs/Shapefiles/' #+Regions[sRegion]\n \n Metrics=['PSS','MRD','MRR','APR','PEX']\n Dimensions=['Variables','Extreme Nr.','Domain Size','Annual Cycle','Smoothing','Split Sample','Metrics']\n\n\n if (Season == 'Annual') & (Region == 'Bay_Area'):\n VarsFullName=['RH2AVG','MF2AVG','V850'] #[\"RH2AVG\",\"RH500\",\"SLPAVG\"] \n rgrNrOfExtremes=4 #[6,10,15,30]\n WT_Domains='M' # ['S','M','L','XXL'] \n Annual_Cycle='1' # '1' means that the annual cycle gets removed before clustering; '0' nothing is done\n SpatialSmoothing=0.5 #[0,0.5,1]\n# VarsFullName=['RH2AVG','MF2AVG','V850'] #['MF850','MR500','RH2AVG'] #['RH2AVG','MF2AVG','V850']\n# rgrNrOfExtremes=4 #[6,10,15,30]\n# WT_Domains='M' # ['S','M','L','XXL'] \n# Annual_Cycle='1' # '1' means that the annual cycle gets removed before clustering; '0' nothing is done\n# SpatialSmoothing=0.5 #[0,0.5,1]\n if (Season == 'Annual') & (Region == 'Central_Coast'):\n VarsFullName=['RH2AVG','SLPAVG','WSPD10'] #['RH2AVG','T2MIN','WSPD10'] # ['T2AVG','T2MIN','RH2AVG'] #['RH500','SLPAVG','V10AVG']\n rgrNrOfExtremes=6 #[6,10,15,30]\n WT_Domains='M' # ['S','M','L','XXL'] \n Annual_Cycle='1' # '1' means that the annual cycle gets removed before clustering; '0' nothing is done\n SpatialSmoothing=0.5 #[0,0.5,1]\n# VarsFullName=['RH500','SLPAVG','V10AVG'] # ['T2AVG','T2MIN','RH2AVG'] #['RH500','SLPAVG','V10AVG']\n# rgrNrOfExtremes=4 #[6,10,15,30]\n# WT_Domains='M' # ['S','M','L','XXL'] \n# Annual_Cycle='1' # '1' means that the annual cycle gets removed before clustering; '0' nothing is done\n# SpatialSmoothing=0.5 #[0,0.5,1]\n if (Season == 'Annual') & (Region == 'LA'):\n VarsFullName=['MR2AVG','SLPAVG','WSPD10'] #['MR2AVG','RH2AVG','Z500']\n rgrNrOfExtremes=10 #8\n WT_Domains='M' # ['S','M','L','XXL'] \n Annual_Cycle='1' # '1' means that the annual cycle gets removed before clustering; '0' nothing is done\n SpatialSmoothing=0.5 #0\n# VarsFullName=['MR2AVG','SLPAVG','WSPD10'] # ['CAPE','MF500','RH2AVG'] #['V850','MF2AVG']\n# rgrNrOfExtremes=10 #10 #[6,10,15,30]\n# WT_Domains='M' # ['S','M','L','XXL'] \n# Annual_Cycle='1' # '1' means that the annual cycle gets removed before clustering; '0' nothing is done\n# SpatialSmoothing=0.5 #[0,0.5,1]\n if (Season == 'Annual') & (Region == 'Modoc'):\n VarsFullName=['T2MIN','WSPD10','Z500'] #['T2AVG','T2MAX','U10AVG']\n rgrNrOfExtremes=8 #[6,10,15,30]\n WT_Domains='M' # ['S','M','L','XXL'] \n Annual_Cycle='1' # '1' means that the annual cycle gets removed before clustering; '0' nothing is done\n SpatialSmoothing=0.5 #[0,0.5,1]\n# VarsFullName=['T2MIN','U10AVG','U500'] #['U500','T850','MR850']\n# rgrNrOfExtremes=8 #3 #[6,10,15,30]\n# WT_Domains='M' # ['S','M','L','XXL'] \n# Annual_Cycle='1' # '1' means that the annual 
cycle gets removed before clustering; '0' nothing is done\n# SpatialSmoothing=0.5 #[0,0.5,1]\n if (Season == 'Annual') & (Region == 'Northeast'):\n VarsFullName=['RH2AVG','RH500','U200'] #['MF850','RH500','T500']\n rgrNrOfExtremes=10 #[6,10,15,30]\n WT_Domains='M' # ['S','M','L','XXL'] \n Annual_Cycle='1' # '1' means that the annual cycle gets removed before clustering; '0' nothing is done\n SpatialSmoothing=0.5 #[0,0.5,1]\n# VarsFullName=['RH500','T2AVG','T500'] #['MF850','RH500','T500']\n# rgrNrOfExtremes=4 #[6,10,15,30]\n# WT_Domains='M' # ['S','M','L','XXL'] \n# Annual_Cycle='1' # '1' means that the annual cycle gets removed before clustering; '0' nothing is done\n# SpatialSmoothing=0.5 #[0,0.5,1]\n if (Season == 'Annual') & (Region == 'San_Diego'):\n if FireObs == 'MODIS':\n VarsFullName=['MF2AVG','PWAVG','V200'] # ['MF2AVG','MF500','SLPAVG']\n rgrNrOfExtremes=4 #6 #3 #[6,10,15,30]\n WT_Domains='M' # ['S','M','L','XXL'] \n Annual_Cycle='1' # '1' means that the annual cycle gets removed before clustering; '0' nothing is done\n SpatialSmoothing=0.5 #[0,0.5,1]\n# VarsFullName=['MF2AVG','PWAVG','V200']\n# rgrNrOfExtremes=4 #6 #3 #[6,10,15,30]\n# WT_Domains='M' # ['S','M','L','XXL'] \n# Annual_Cycle='1' # '1' means that the annual cycle gets removed before clustering; '0' nothing is done\n# SpatialSmoothing=0.5 #[0,0.5,1]\n elif FireObs == 'Parks':\n VarsFullName=['SLPAVG','UV200']\n rgrNrOfExtremes=6 #6 #3 #[6,10,15,30]\n WT_Domains='L' # ['S','M','L','XXL'] \n Annual_Cycle='1' # '1' means that the annual cycle gets removed before clustering; '0' nothing is done\n SpatialSmoothing=0.5 #[0,0.5,1]\n if (Season == 'Annual') & (Region == 'Siearas_East'):\n VarsFullName=['RH500','T2MIN','V200'] #['LFC','T2MIN','U10AVG']\n rgrNrOfExtremes=10 #[6,10,15,30]\n WT_Domains='M' # ['S','M','L','XXL'] \n Annual_Cycle='1' # '1' means that the annual cycle gets removed before clustering; '0' nothing is done\n SpatialSmoothing=0 #[0,0.5,1]\n# VarsFullName=['T2MIN','T500','WSPD500'] #['LFC','T2MIN','U10AVG']\n# rgrNrOfExtremes=15 #[6,10,15,30]\n# WT_Domains='M' # ['S','M','L','XXL'] \n# Annual_Cycle='1' # '1' means that the annual cycle gets removed before clustering; '0' nothing is done\n# SpatialSmoothing=0.5 #[0,0.5,1]\n if (Season == 'Annual') & (Region == 'Siearas_West'):\n VarsFullName=['RH2AVG','V500','WSPD200'] # ['PWAVG','RH2AVG','Z500'] \n rgrNrOfExtremes=8 #[6,10,15,30]\n WT_Domains='M' # ['S','M','L','XXL'] \n Annual_Cycle='1' # '1' means that the annual cycle gets removed before clustering; '0' nothing is done\n SpatialSmoothing=0.5 #[0,0.5,1]\n# VarsFullName=['RH2AVG','V500','WSPD200']\n# rgrNrOfExtremes=8 #[6,10,15,30]\n# WT_Domains='M' # ['S','M','L','XXL'] \n# Annual_Cycle='1' # '1' means that the annual cycle gets removed before clustering; '0' nothing is done\n# SpatialSmoothing=0.5 #[0,0.5,1]\n\n\n # ---------------\n # Full list of available variables\n VarsFullNameAll=['CAPE', 'CIN','LCL','LFC','MF2AVG','MF500','MF850','MR2AVG','MR500', 'MR850','PWAVG','RH2AVG','RH500','RH850','SLPAVG','T2AVG','T2MAX','T2MIN','T500','T850','U10AVG', 'U200','U500','U850','V10AVG','V200','V500','V850','WSPD10','WSPD200','WSPD500','WSPD850','Z500']\n rgsWTvarsAll =VarsFullNameAll\n iSelVariables=[VarsFullNameAll.index(VarsFullName[ii]) for ii in range(len(VarsFullName))]\n rgsWTvars=np.array(rgsWTvarsAll)[np.array(iSelVariables).astype('int')]\n rgsWTfolders=['/glade/campaign/mmm/c3we/prein/ERA5/'+str(VarsFullName[va])+'/' for va in range(len(VarsFullName))]\n # 
rgsWTfolders=np.array(rgsWTfoldersAll)[np.array(iSelVariables).astype('int')]\n\n    DomDegreeAdd=np.array([2, 5, 10, 20])[['S','M','L','XXL'].index(WT_Domains)]\n\n\n    return rgdTime, iMonths, sPlotDir, sDataDir, sSubregionPR, rgsWTvars, VarsFullName,rgsWTfolders, rgrNrOfExtremes, WT_Domains, DomDegreeAdd, Annual_Cycle, SpatialSmoothing, Metrics, Dimensions, FireObs, REANAL, ClusterMeth, ClusterBreakup, RelAnnom, NormalizeData, MinDistDD, RemoveAnnualCycl\n","sub_path":"IFS_XWTing/XWTs_apply_ERA5.py","file_name":"XWTs_apply_ERA5.py","file_ext":"py","file_size_in_byte":9943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"48149152","text":"# voorspelUitslag.py\n# Leonardo Losno Velozo \n\nimport os\nimport pickle\nimport re\nfrom collections import Counter\n\n\ndef getUitslag(hoogtepuntenList):\n\t\"\"\" Returns the predicted match result, based on the last peak(s) of\n\ttweets sent in the hoogtepuntenList. \"\"\"\n\tuitslagList = []\n\tscore = False\n\tfor lst in reversed(hoogtepuntenList):\n\t\tfor tweetData in lst:\n\t\t\ttweet = tweetData.split('\\t')[3]\n\t\t\tuitslag = re.findall(r'[0-9]-[0-9]', tweet, re.M|re.I)\n\t\t\tif uitslag:\n\t\t\t\tscore = True\n\t\t\t\tif len(uitslag) > 1:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tuitslagList.append(uitslag[0])\n\t\t\t\t\t\n\t\tcntScores = Counter(uitslagList)\n\t\tif score == True and len(cntScores) == 1:\n\t\t\tif cntScores.most_common(1)[0][1] > 5: # the single score must occur more than five times\n\t\t\t\tbreak\n\t\t\tscore = False\n\t\telif score == True and cntScores.most_common(2)[0][1] > 4 \\\n\t\t\tand cntScores.most_common(2)[0][1] != cntScores.most_common(2)[1][1] \\\n\t\t\tand (cntScores.most_common(2)[0][1] - cntScores.most_common(2)[1][1]) > 4:\n\t\t\tbreak\n\treturn cntScores.most_common()[0][0]\n\n","sub_path":"ranking/voorspelUitslag.py","file_name":"voorspelUitslag.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"104770027","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Mar 14 08:29:59 2021\r\n\r\n@author: Ounaye\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\nfrom sklearn.base import BaseEstimator, ClassifierMixin\r\nfrom sklearn.utils.validation import check_X_y, check_array, check_is_fitted\r\nfrom sklearn.utils.multiclass import unique_labels\r\nfrom sklearn.utils import shuffle\r\n\r\nclass mySVM(BaseEstimator, ClassifierMixin):\r\n \r\n \r\n def __init__(self,maxIteration_ = 2050,learningRate_ = 0.000001,\r\n reg_strenght_ = 10000):\r\n print(\"mySVM\")\r\n self.maxIteration_ = maxIteration_\r\n self.learningRate_ = learningRate_\r\n self.reg_strenght_ = reg_strenght_\r\n \r\n \r\n \r\n\r\n \r\n def costFunction(self,weight,x,y): # used to evaluate how the model is progressing\r\n N = x.shape[0]\r\n dist = 1 - y *( np.dot(x,weight))\r\n dist[dist < 0] = 0\r\n cost = 0.5* np.dot(weight,weight) + (self.reg_strenght_ * (np.sum(dist)/N))\r\n return cost\r\n\r\n \r\n def gradiantCost(self,weight,xBatch,yBatch):\r\n \r\n if(type(yBatch) == np.float64 or type(yBatch) == np.int32 or type(yBatch) == np.int64):\r\n xBatch = np.array([xBatch])\r\n yBatch = np.array([yBatch])\r\n \r\n dst = 1 - (yBatch *np.dot(xBatch,weight)) # hinge distance for each sample\r\n downWeight = np.zeros(len(weight))\r\n index = 0\r\n \r\n if(type(dst) == np.float64):\r\n dst = np.array([dst])\r\n for i in dst:\r\n downI = 0\r\n if(max(0,i) == 0): \r\n downI = weight\r\n else: \r\n downI = weight - (self.reg_strenght_ * 
yBatch[index] * xBatch[index] )\r\n downWeight += downI\r\n index += 1\r\n \r\n downWeight = downWeight/len(dst) # average over the batch\r\n return downWeight\r\n\r\n\r\n def fit(self,features, outputs):\r\n tmp = np.ones((len(features),1))\r\n features = np.concatenate((features,tmp),axis=1)\r\n weights = np.zeros(features.shape[1])\r\n \r\n \r\n for epoch in range(self.maxIteration_):\r\n X, Y = shuffle(features, outputs)\r\n for ind, x in enumerate(X):\r\n ascent = self.gradiantCost(weights, x, Y[ind])\r\n weights = weights - (self.learningRate_ * ascent)\r\n \r\n\r\n self.weightVect_ = weights\r\n return self\r\n \r\n def predict(self, X):\r\n\r\n # Check that fit has been called\r\n check_is_fitted(self)\r\n\r\n # Input validation\r\n X = check_array(X)\r\n \r\n tmp = np.ones((len(X),1))\r\n X = np.concatenate((X,tmp),axis=1)\r\n \r\n guess = np.zeros(len(X),dtype=int)\r\n index = 0\r\n for i in X:\r\n tmp = np.dot(self.weightVect_,i)\r\n if( tmp > 0):\r\n guess[index] = 1\r\n elif(tmp < 0):\r\n guess[index] = -1\r\n else:\r\n guess[index] = 0\r\n index += 1\r\n\r\n\r\n return guess\r\n\r\n \r\n","sub_path":"mySVM.py","file_name":"mySVM.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"126822442","text":"# IMPORTS\nimport curses\nimport random\n\n# window setup\ncurses.initscr()\nwin = curses.newwin(20, 60) # height and width\nwin.keypad(True)\ncurses.noecho() # do not echo keypresses\ncurses.curs_set(False)\nwin.border(0)\nwin.nodelay(True) # do not block waiting for the next keypress\n\n\n# the snake and its food, stored as tuples\n\nsnake = [(5, 10), (5, 9), (5, 8)] # starting coordinates of the snake\nfood = (10, 20) # first piece of food\n\nwin.addch(food[0], food[1], 'o') # draw the food at its (row, col) coordinates\n# snake logic\nscore = 3\n\nEXIT = 27 # 27 is the curses key code for Escape\nkey = curses.KEY_RIGHT\n\nwhile key != EXIT:\n win.addstr(0, 4, 'Sznek.exe')\n win.addstr(0, 50, 'Score ' + str(score) + ' ')\n win.timeout(150 - (len(snake)) // 5 + len(snake) // 10 % 120) # speed up as the snake grows\n\n prev_key = key\n event = win.getch()\n key = event if event != -1 else prev_key\n\n if key not in [curses.KEY_LEFT, curses.KEY_RIGHT, curses.KEY_UP, curses.KEY_DOWN, EXIT]: # ignore unsupported keys, keep the previous one\n key = prev_key\n\n # next coordinates, starting from the head tuple\n y = snake[0][0]\n x = snake[0][1]\n # y grows downward, x grows to the right\n if key == curses.KEY_DOWN:\n y += 1\n if key == curses.KEY_UP:\n y -= 1\n if key == curses.KEY_RIGHT:\n x += 1\n if key == curses.KEY_LEFT:\n x -= 1\n\n snake.insert(0, (y, x)) # new head; the rest of the body shifts back\n\n # collision with the borders\n\n if y == 0: break\n if y ==19: break\n if x == 0: break\n if x == 59: break\n\n # collision with itself\n if snake[0] in snake[1:]: break\n # eating the food\n if snake[0] == food:\n score += 1\n food = ()\n while food == ():\n food = (random.randint(1, 18), random.randint(1, 58))\n if food in snake:\n food = ()\n win.addch(food[0], food[1], 'o')\n else:\n # move the snake: drop the tail\n last = snake.pop()\n win.addch(last[0], last[1], ' ')\n\n\n\n win.addch(snake[0][0], snake[0][1], '#') # draw the new head\n\n\n\n\n\n\ncurses.endwin() # close the curses window\n\n\nprint(f'Final score = {score}')","sub_path":"snake/sznek_do_dłubania.py","file_name":"sznek_do_dłubania.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"411027295","text":"\nimport requests\nfrom datetime import datetime, timedelta\nimport json\n\n#Paste your slack Autho token here\ntoken = \"Your Token Goes here\"\n# Starting from how many days ago it should delete\ndaysAgo = 60\n\n\ndef getOldFileIDs(token,daysAgo):\n date_N_days_ago = datetime.now() - timedelta(days=daysAgo)\n timestamp = date_N_days_ago.timestamp()\n\n payload = {'token': token, 'count': 100000, 'ts_to':timestamp}\n r = requests.post(\"https://slack.com/api/files.list\", data=payload)\n\n data = json.loads(r.text)\n files = data['files']\n fileIds = []\n for file in files:\n fileIds.append(file['id'])\n\n return fileIds\n\n\ndef deleteFile(token,Id):\n\n payload = {'token': token, 'file': Id}\n r = requests.post(\"https://slack.com/api/files.delete\", data=payload)\n\n data = json.loads(r.text)\n return data\ncounter = 0\nwhile True:\n fileIds = getOldFileIDs(token, daysAgo)\n for fileId in fileIds:\n print(deleteFile(token,fileId))\n print(fileId)\n counter = counter + 1\n if getOldFileIDs(token, daysAgo) == []:\n print(\"All Finished \\n You deleted \" + str(counter) + \" files\")\n break\n","sub_path":"deleteOldFiles.py","file_name":"deleteOldFiles.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"443992719","text":"\"\"\"\nname: Hayam Abdalla\nvigenere.py\n\nproblem: write a program to implement the vignere cipher\ncertification of authenticity:\nI certify that this assignment is entirely my own work.\n\"\"\"\n\nfrom graphics import GraphWin, Text, Point, Entry, Rectangle\n\n\ndef main():\n win = GraphWin(\"vigenere\", 500, 375)\n win.setCoords(0, 0, 10, 10)\n Text(Point(2, 8), \"message to code\").draw(win)\n Text(Point(2.3, 7), \"enter keyword\").draw(win)\n enter_box_1 = Entry(Point(5.6, 8), 30)\n enter_box_2 = Entry(Point(4.5, 7), 15)\n enter_box_1.draw(win)\n enter_box_2.draw(win)\n # draw encode button\n draw_button(Point(4, 3), Point(5.5, 4), \"Encode\", win)\n message = enter_box_1.getText()\n key = enter_box_2.getText()\n\n encoded = code(message, key)\n\n result = Text(Point(4.5, 3.5), \"Resulting message\\n\" + encoded)\n result.draw(win)\n text = Text(Point(5, 1.5), \"click anywhere to close\")\n text.draw(win)\n win.getMouse()\n win.close()\n\n\n# draw encode button\ndef draw_button(point_1, point_2, button_text, win):\n outline = Rectangle(point_1, point_2)\n center = outline.getCenter()\n label = Text(center, button_text)\n outline.draw(win)\n label.draw(win)\n\n win.getMouse()\n outline.undraw()\n label.undraw()\n\n\ndef code(message, keyword):\n message = message.upper()\n message = message.split()\n message = \"\".join(message)\n\n keyword = keyword.upper()\n\n message = message\n key = keyword\n acc = \"\"\n for i in range(len(message)):\n character = message[i]\n key = keyword[i % len(keyword)]\n key = ord(key) - 65\n y = ord(character) + key\n if y > 90:\n y -= 26\n z = chr(y)\n acc += z\n return acc\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"assignments/hw6/vigenere.py","file_name":"vigenere.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"84383767","text":"# Ex 5.1\nfrom time import time\n\ndef formatted(timestamp):\n days, rem = divmod(timestamp, 24*60*60)\n hours, rem = divmod(rem, 60*60)\n mins, secs = divmod(rem, 60)\n return (days, hours, mins, secs)\n\nif __name__ == '__main__':\n 
print(formatted(time()))","sub_path":"Think Python/chapter5/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"461077068","text":"from django.shortcuts import render\nfrom .forms import ContactForm\nfrom django.core.mail import send_mail\nfrom django.http import HttpResponseRedirect\nfrom django.conf import settings\n# Create your views here.\n\ndef home(request):\n return render(request,'index.html')\n\ndef contact_us(request):\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n #nome = form.cleaned_data['nome']\n assunto = form.cleaned_data['assunto']\n msg = form.cleaned_data['msg']\n email = form.cleaned_data['email']\n cc_myself = form.cleaned_data['cc_myself']\n\n recipients = ['danilodalessandro08@gmail.com']\n if cc_myself:\n recipients.append(email)\n send_mail(assunto, msg,email, recipients)\n return HttpResponseRedirect('/thanks/')\n\n else:\n form = ContactForm()\n\n return render(request, 'index.html', {'form': form})\n","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"519462617","text":"#! /usr/bin/python\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom pylibmodbus import ModbusRtu\nimport serial\nfrom time import sleep\nimport time\nimport sys\nimport logging\nimport os\nimport argparse\nimport traceback\n\ntry:\n import queue\nexcept ImportError:\n import Queue as queue\n\nimport paho.mqtt.client as mqtt\nimport tables\n\nTIME_SLOT = 5\nWAITING_TIMEOUT = 0.5\n# Wait a maximum of 3 cycle SLAVE => MASTER => SLAVE\nMAXIMUM_LOOP = 1 + int(TIME_SLOT * 3 / WAITING_TIMEOUT)\nMAXIMUM_OPERATION = TIME_SLOT - WAITING_TIMEOUT\nSERVER_ID=10\nBCM_PIN_DE=17\nBCM_PIN_RE=27\nos.system (\"gpio mode 0 out\")\nos.system (\"gpio write 0 0\")\nser = serial.Serial(port='/dev/serial0',baudrate = 9600,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=1\n )\n \nser.timeout = WAITING_TIMEOUT\n\n#def writePidFile():\n# pid = str(os.getpid())\n# currentFile = open('/var/run/pollChaudiere.pid', 'w')\n# currentFile.write(pid)\n# currentFile.close()\n\n#writePidFile()\nparser = argparse.ArgumentParser()\nparser.add_argument(\"server\", help=\"MQtt server to connect to.\",default=\"localhost\")\nparser.add_argument(\"--user\", help=\"MQtt username.\")\nparser.add_argument(\"--password\", help=\"MQtt password.\")\nparser.add_argument(\"--interval\", help=\"Check interval default 60s.\", type=int, default=60)\nparser.add_argument(\"--cacert\", help=\"CA Certificate, default /etc/ssl/certs/ca-certificates.crt.\",\n default=\"/etc/ssl/certs/ca-certificates.crt\")\nparser.add_argument(\"--serial\", help=\"Serial interface, default /dev/ttyUSB0\",\n default=\"/dev/serial0\")\nparser.add_argument(\"--deviceid\", help=\"Modbus device id, default 10\",\n type=int, default=10)\nparser.add_argument(\"--log\", help=\"Logging level, default INFO\",\n default=\"INFO\")\nparser.add_argument(\"--model\", help=\"boiler model\",\n default=\"diematic3\")\n\n# handle no sll.PROTOCOL_TLSv1_2\ntry:\n import ssl\n parser.add_argument(\"--tls12\", help=\"use TLS 1.2\", dest=\"tls\",\n action=\"store_const\", const=ssl.PROTOCOL_TLSv1_2)\nexcept:\n pass\n\nargs = parser.parse_args()\n\n# Convert to upper 
case to allow the user to\n# specify --log=DEBUG or --log=debug\nnumeric_level = getattr(logging, args.log.upper(), None)\nif not isinstance(numeric_level, int):\n raise ValueError(\"Invalid log level: {0}\".format(args.log))\nlogging.basicConfig(level=numeric_level)\n\n_LOGGER = logging.getLogger(__name__)\n\n#writePidFile()\n\n(READ_TABLE, WRITE_TABLE, READ_ZONES) = tables.get_tables(args.model)\n\n\n# Initialisation of mqtt client\nbase_topic = \"heating/\"\n\nport_mqtt = 1883\nmqttclient = mqtt.Client()\n# client.on_log = on_log\nif args.user:\n _LOGGER.debug(\"Authenticate with user %s\", args.user)\n mqttclient.username_pw_set(args.user, args.password)\ntry:\n if args.tls:\n _LOGGER.debug(\"Set TLS mode.\")\n mqttclient.tls_set(args.cacert, tls_version=args.tls)\n port_mqtt = 8883\nexcept:\n pass\n\nmqttclient.will_set(base_topic + \"reading\", \"OFF\", 1, True)\n\nwrite_queue = queue.Queue()\n\ndef on_message(the_client, userdata, message):\n write_queue.put(message)\n\nmqttclient.on_message = on_message\n\nsubscribe_list = [(base_topic + name, 0) for name in WRITE_TABLE.keys()]\n\ndef on_connect(the_client, userdata, flags, rc):\n if rc == mqtt.CONNACK_ACCEPTED:\n the_client.subscribe(subscribe_list)\n mqttclient.publish(base_topic + \"reading\", \"ON\", 1, True)\n\nmqttclient.on_connect = on_connect\n\nmqttclient.connect(args.server, port_mqtt)\nmqttclient.loop_start()\nclient = ModbusRtu(device=\"/dev/serial0\", baud=9600, parity=\"N\", data_bit=8, stop_bit=1)\nclient.set_debug(0)\ndef read_zone(base_address, number_of_value):\n \"\"\" Read a MODBUS table zone and send the values to MQTT. \"\"\"\n mycounter=5\n while mycounter>0:\n try:\n ser.close()\n #Connect\n client.connect()\n #Set Slave ID number\n client.set_slave(SERVER_ID)\n #Enable RPi GPIO Functions\n client.enable_rpi(1)\n #Define pin numbers to be used as Read Enable (RE) and Drive Enable (DE)\n client.configure_rpi_bcm_pins(BCM_PIN_DE,BCM_PIN_RE)\n #Export pin direction (set as outputs)\n client.rpi_pin_export_direction()\n raw_values=(client.read_registers(base_address, number_of_value))\n mycounter=0\n except EnvironmentError:\n logging.exception(\"I/O error: %d, %d\", base_address, number_of_value)\n wait_time_slot()\n mycounter = mycounter - 1\n except ValueError:\n logging.exception(\"Value error: %d, %d\", base_address, number_of_value)\n wait_time_slot()\n mycounter = mycounter - 1\n except:\n continue\n else:\n for index in range(0, number_of_value):\n address = base_address + index\n tag_definition = READ_TABLE.get(address)\n if tag_definition:\n tag_definition.publish(mqttclient, base_topic, raw_values, index)\n\ndef write_value(message):\n \"\"\" Write a value received from MQTT to MODBUS \"\"\"\n # slice off the base_topic prefix (str.strip would remove characters, not the prefix)\n tag_definition = WRITE_TABLE.get(message.topic[len(base_topic):])\n if tag_definition:\n string_value = message.payload.decode(\"utf-8\")\n value = tag_definition.convertion(string_value)\n _LOGGER.debug(\"write value %s : %s => address : %s = %s\",\n message.topic[len(base_topic):], string_value,\n tag_definition.address, value)\n if value is not None:\n mycnt = 10\n while mycnt > 0:\n try:\n ser.close()\n #Connect\n client.connect()\n #Set Slave ID number\n client.set_slave(SERVER_ID)\n #Enable RPi GPIO Functions\n client.enable_rpi(1)\n #Define pin numbers to be used as Read Enable (RE) and Drive Enable (DE)\n client.configure_rpi_bcm_pins(BCM_PIN_DE,BCM_PIN_RE)\n #Export pin direction (set as outputs)\n client.rpi_pin_export_direction()\n client.write_registers(tag_definition.address,value)\n mycnt = 0\n 
except ValueError:\n logging.exception(\"Value error: %d, %d\", tag_definition.address, value)\n wait_time_slot()\n mycnt = mycnt - 1\n except KeyboardInterrupt:\n print('interrupted!')\n break\n except:\n logging.exception(\"Value error: %d, %d\", tag_definition.address, value)\n wait_time_slot()\n mycnt = mycnt - 1 \n\ndef wait_time_slot():\n os.system (\"gpio mode 0 out\")\n os.system (\"gpio write 0 0\")\n ser.close()\n ser.open()\n data = b''\n number_of_wait = 0\n # wait up to MAXIMUM_LOOP read timeouts for data from the boiler\n while len(data) == 0 and number_of_wait < MAXIMUM_LOOP:\n data = ser.read(1)\n number_of_wait += 1\n if number_of_wait >= MAXIMUM_LOOP:\n logging.exception(\"LOOP error: %d\", MAXIMUM_LOOP)\n # the boiler is the master; wait for the end of its data\n while len(data) != 0:\n data = ser.read(1)\n # we are master for a maximum of 4.6s (5s - 400ms) \n\nwait_time_slot()\n# Main loop\nwhile True:\n# read until the boiler is master \n mqttclient.publish(base_topic + \"reading\", \"ON\", 1, True)\n# The total read time must be under the time slot duration\n start_time = time.time()\n for zone in READ_ZONES:\n if zone[1] == 0:\n wait_time_slot()\n else:\n read_zone(zone[0], zone[1])\n\n duration = time.time() - start_time\n if duration > MAXIMUM_OPERATION:\n _LOGGER.warning(\"Read takes too long; a wait_time_slot must be added between read_zone calls.\")\n\n# Process all pending writes, or wait for the interval\n try:\n waittime = args.interval\n while True:\n writeelement = write_queue.get(timeout=waittime)\n#\n wait_time_slot()\n _LOGGER.debug(\"writing\")\n write_value(writeelement)\n waittime = 0\n except queue.Empty:\n # no more writes, continue reading.\n wait_time_slot()\n continue\n#Release pins and close connection\nclient.rpi_pin_unexport_direction()\nclient.close()\n","sub_path":"diematicmqtt.py","file_name":"diematicmqtt.py","file_ext":"py","file_size_in_byte":8553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"308204174","text":"import numpy as np\nfrom numpy import linalg\nfrom sklearn.cluster import KMeans\n\ndef init_params(data, num_clusters):\n kmeans_model = KMeans(n_clusters=num_clusters, n_init=5, max_iter=400, random_state=1)\n kmeans_model.fit(data)\n centroids, cluster_assignment = kmeans_model.cluster_centers_, kmeans_model.labels_\n in_labels = np.unique(cluster_assignment, return_index=True)[1]\n unsorted_labels = [cluster_assignment[index] for index in sorted(in_labels)]\n means = []\n for i in unsorted_labels:\n means.append([centroid for centroid in centroids][i])\n num_docs = data.shape[0]\n weights = []\n degs = []\n\n for i in unsorted_labels:\n # Compute the number of data points assigned to cluster i:\n num_assigned = cluster_assignment[cluster_assignment == i].shape[0]\n degs.append(num_assigned - 1)\n w = float(num_assigned) / num_docs\n weights.append(w)\n\n # initialize covs\n covs = []\n\n for i in unsorted_labels:\n # calculate covariance matrix for the ith component\n m_r = data[cluster_assignment == i] # member rows\n m_r = m_r - m_r.mean(0)\n cov1 = np.matmul(np.transpose(m_r), m_r) / m_r.shape[0]\n testcov1 = not (not np.allclose(cov1, cov1.T) or np.any(linalg.eigvalsh(cov1) <= 0))\n # covs.append(np.sqrt(cov))\n cov = np.cov(m_r.T)\n testcov = not (not np.allclose(cov, cov.T) or np.any(linalg.eigvalsh(cov) <= 0))\n cov[cov < 1e-6] = 1e-6\n covs.append(cov)\n\n lower_bound = []\n upper_bound = []\n for i in unsorted_labels:\n lower_bound.append(np.amin(data[cluster_assignment == i], axis=0))\n 
upper_bound.append(np.amax(data[cluster_assignment == i], axis=0))\n params = {'prior': np.array(weights), 'means': np.array(means), 'cov_mat': np.array(covs), 'degs': np.array(degs),\n 'lower_bound': np.array(lower_bound), 'upper_bound': np.array(upper_bound)}\n return params","sub_path":"kmeans_init.py","file_name":"kmeans_init.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"40124230","text":"import numpy as np\nimport scipy as sp\nimport sys\nimport os\nimport math\nfrom copy import deepcopy\n\ndef normData(data,usetimes,normtype):\n \"\"\"variance normalization\n Args:\n data:\n usetimes:\n normtype:\n Returns:\n moddata: \n \"\"\"\n assert normtype in [\"all\",\"pertime\"]\n moddata = [{} for rind in xrange(len(data))]\n if normtype == \"pertime\":\n for ttime in usetimes:\n arrcount = len(data[0][ttime])\n Mdict,Mvaldict = {}, {}\n for arrind in xrange(arrcount):\n Mdict[arrind] = np.mean([rowlist[ttime][arrind] for rowlist in data])\n Mvaldict[arrind] = [rowlist[ttime][arrind] for rowlist in data]\n M = np.mean(Mdict.values())\n Vdict = {}\n for arrind in xrange(arrcount):\n Vdict[arrind] = np.var(Mvaldict[arrind]) \n V = np.mean(Vdict.values()) \n for rind,rowlist in enumerate(data):\n outs = []\n for arrind,titem in enumerate(rowlist[ttime]):\n outs.append(float(titem-Mdict[arrind]+M)*math.sqrt(V)/math.sqrt(Vdict[arrind]))\n moddata[rind][ttime] = list(outs)\n elif normtype == \"all\":\n Mdict,Mvaldict = {}, {} \n for ttime in usetimes:\n arrcount = len(data[0][ttime])\n Mdict[ttime], Mvaldict[ttime] = {},{}\n arrcount = len(data[0][ttime])\n for arrind in xrange(arrcount):\n Mdict[ttime][arrind] = np.mean([rowlist[ttime][arrind] for rowlist in data])\n Mvaldict[ttime][arrind] = [rowlist[ttime][arrind] for rowlist in data]\n M = np.mean([meanval for ttime in Mdict.keys() for meanval in Mdict[ttime].values()])\n Vdict = {}\n for ttime in usetimes:\n arrcount = len(data[0][ttime])\n Vdict[ttime] = {}\n for arrind in xrange(arrcount):\n Vdict[ttime][arrind] = np.var(Mvaldict[ttime][arrind]) \n V = np.mean([varval for ttime in Vdict.keys() for varval in Vdict[ttime].values()]) \n for rind,rowlist in enumerate(data):\n for ttime in usetimes:\n outs = []\n for arrind,titem in enumerate(rowlist[ttime]):\n outs.append(float(titem-Mdict[ttime][arrind]+M)*math.sqrt(V)/math.sqrt(Vdict[ttime][arrind]))\n moddata[rind][ttime] = list(outs) \n return moddata\n\n\ndef writeMiRNAData(outfname,gene2data,allgenes,timekeys):\n \"\"\"writes mirna data\n Args:\n outfname,gene2data,allgenes:\n timekeys:\n Returns:\n \"\"\" \n with open(outfname,\"w\") as outfile:\n outfile.write(\"Gene\\t{0}\\n\".format(\"\\t\".join(timekeys)))\n for gind in xrange(len(allgenes)):\n outfile.write(\"{0}\\t{1}\\n\".format(allgenes[gind],\"\\t\".join([str(titem) for titem in gene2data[gind]])))\n\n \ndef logTransform(mydata,gene2data,usetimes):\n \"\"\"log transform data\n Args:\n mydata:\n gene2data:\n usetimes:\n Returns:\n loggene2data:\n logmydata: \n \"\"\"\n loggene2data,logmydata = [],[]\n for rowlist in mydata:\n newoutdict = {}\n baseval = np.median(rowlist[usetimes[0]])\n newoutdict[usetimes[0]] = [0.0 for tind in xrange(len(rowlist[usetimes[0]]))]\n for ttime in usetimes[1:]:\n newoutdict[ttime] = [math.log(titem/baseval,2.0) for titem in rowlist[ttime]]\n logmydata.append(deepcopy(newoutdict))\n for rowlist in logmydata:\n loggene2data.append([titem for ttime in usetimes for titem in rowlist[ttime]])\n return 
logmydata,loggene2data\n \n \ndef genData(fname,expfname,outfname):\n \"\"\"generate data\n Args:\n fname:\n expfname:\n outfname:\n Returns:\n gene2data,timekeys:\n \"\"\"\n gene2data = []\n timekeys = []\n allgenes = []\n count = 0\n emptyind = None\n with open(fname,\"r\") as infile:\n for line in infile:\n line = line.rstrip()\n splitted = line.split(\",\")\n if count == 0:\n timekeys = splitted[4:]\n emptyind = timekeys.index(\"\")\n timekeys = timekeys[0:emptyind]\n count += 1\n else:\n assert splitted[0] not in allgenes\n genename = splitted[0]\n pref = genename.split(\"-\")[0]\n if pref not in [\"mghv\",\"mcmv\",\"mmu\"]:\n continue\n allgenes.append(genename)\n gene2data.append([float(titem) for titem in splitted[4:emptyind+4]])\n key2time = {keystr: float(keystr.replace(\"P\",\"\").replace(\"p\",\"\")) for keystr in timekeys}\n usetimes = sorted(set(key2time.values()))\n\n writeMiRNAData(outfname,gene2data,allgenes,timekeys)\n \n with open(expfname,\"w\") as outfile:\n outfile.write(\"Experiment Names\\ttime inclusive\\n\")\n for keystr,tval in key2time.items():\n outfile.write(\"{0}\\t{1}\\n\".format(keystr,tval))\n\n mydata = []\n for rowlist in gene2data:\n outdict = {}\n assert len(rowlist) == len(timekeys)\n for tind,titem in enumerate(rowlist):\n ttime = key2time[timekeys[tind]]\n outdict.setdefault(ttime,[])\n outdict[ttime].append(titem)\n mydata.append(deepcopy(outdict)) \n return gene2data,mydata,allgenes,timekeys,key2time,usetimes\n\n\nfname = \"LCM-mir.csv\"\nexpfname = \"expdes.txt\"\noutfname = \"mirnadata.txt\"\nnormoutfname = \"norm_mirnadata.txt\"\nlogoutfname = \"logmirnadata.txt\"\nlognormoutfname = \"lognorm_mirnadata.txt\"\ngene2data,data,allgenes,timekeys,key2time,usetimes = genData(fname,expfname,outfname)\n#data time dict format, gene2data plain list\nlogdata,loggene2data = logTransform(data,gene2data,usetimes)\nwriteMiRNAData(logoutfname,loggene2data,allgenes,timekeys)\nusetimes = sorted(data[0].keys())\nfor rowlist in data:\n assert len(set(rowlist.keys()) ^ set(usetimes)) == 0\nnormtype = \"all\" #\"pertime\" \nnormdata = normData(data,usetimes,normtype)\nnormgene2data = [[] for tind in xrange(len(normdata))]\nfor rind,rowlist in enumerate(normdata):\n for ttime in usetimes:\n for titem in rowlist[ttime]:\n normgene2data[rind].append(titem) \nwriteMiRNAData(normoutfname,normgene2data,allgenes,timekeys)\nexit(1)\n\nnormdata = normData(logdata,usetimes,normtype)\nnormgene2data = [[] for tind in xrange(len(normdata))]\nfor rind,rowlist in enumerate(normdata):\n for ttime in usetimes:\n for titem in rowlist[ttime]:\n normgene2data[rind].append(titem) \nwriteMiRNAData(lognormoutfname,normgene2data,allgenes,timekeys)\n\n\n","sub_path":"mirnadata/normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":6612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"635738668","text":"\"\"\"\nAuthor - Benjamin Townsend\n\nUnless explicitly stated in the Doc-string all code that follows was written\nby the Author.\n\"\"\"\nimport codecs\n\n__TAGS = [\"subject\", \"content\", \"maincat\"]\n\n\ndef XMLEater(filename, tags=__TAGS):\n \"\"\"\n Takes in an XML file and splits it up about tags begining \"\")[1].split(\"\")[0] for t in tags]\n\n yield \"\\\"\" + line[0] + \" \" + line[1] + \"\\\",\\\"\" + line[2] + \"\\\"\\n\"\n except(IndexError):\n pass\n #initial header stuff.\n buff = \"\"\n f.close()\n\n\ndef xml2csv(XML_file, training_set, testing_set,\n max_train=1000000, max_test=10000):\n 
\"\"\"\n Parses the given XML file into the format required for Yahoo_data_prep.py\n\n Args:\n XML_file: A path to the input XML file. eg \"Webscope_L6-1/FullOct2007.xml\"\n training_set: A path for the output training csv.\n testing_set: A path for the output testing csv.\n max_train : the maximum xml records to be included in the training set.\n max_test : the maximum xml records to be included in the testing set.\n \"\"\"\n training_csv = codecs.open(training_set, \"w\", \"utf-8\")\n testing_csv = codecs.open(testing_set, \"w\", \"utf-8\")\n count = 0\n\n for l in XMLEater(XML_file):\n print(\".\")\n count += 1\n if(count <= max_train):\n training_csv.write(l)\n elif(count <= max_train + max_test):\n testing_csv.write(l)\n training_csv.close()\n testing_csv.close()\n\n\nif (__name__ == \"__main__\"):\n xml2csv(\n \"./data/Webscope_L6-1/FullOct2007.xml\",\n \"./data/train.csv\",\n \"./data/test,csv\")\n","sub_path":"XMLEater.py","file_name":"XMLEater.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"528330324","text":"#encoding=UTF-8\n#\n#\n#\n\n#from subprocess import Popen,PIPE\nimport cx_Oracle\n\ntns_name = cx_Oracle.makedsn('10.169.8.59','1521','orcl')\ndb = cx_Oracle.connect('v3xuser','Www123456',tns_name)\ncur_src = db.cursor()\n\ntables = [\t'col_summary order by create_date',\n\t\t'form_definition order by create_time',\n\t\t'form_resource',\n\t\t'formmain order by start_date',\n\t\t'ctp_content_all order by create_date',\n\t\t'ctp_attachment order by createdate',\n\t\t'coll_360_detail order by send_time',\n\t\t'coll_cube_detail order by start_time',\n\t\t'coll_cube_index order by id',\n\t\t'coll_cube_index_set order by id',\n\t\t'coll_cube_data order by update_date']\n\nwhile 1:\n\n\tfor t in tables:\n\t\t#\n\t\tprint(\"[ %s ]:\" % t)\n\t\tprint(\"-------------------------------------------------------\")\n\t\trec = cur_src.execute('select * from '+t)\n\t\twhile 1:\n\t\t\tone = rec.fetchone()\n\t\t\tif one==None:\n\t\t\t\tbreak;\n\t\t\tprint(one)\n\n\tbreak\n\n#process = Popen('mysql -ushenwei -psw64419 v50', stdin=PIPE, shell=True) \n#output = process.communicate('source '+t+'.sql') \n\ncur_src.close()\ndb.close()\n\n#\n# Eof\n#\n\n","sub_path":"getform.py","file_name":"getform.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"539917299","text":"import pytest\n\nfrom ethereum import abi\nfrom ethereum import utils\nfrom ethereum.tester import (\n TransactionFailed,\n accounts,\n encode_hex,\n)\n\n\ndeploy_contracts = [\n \"CallLib\",\n \"TestCallExecution\",\n]\n\n\ndef test_cannot_execute_if_claimed_by_other(deploy_client, deployed_contracts,\n deploy_coinbase,\n deploy_future_block_call):\n client_contract = deployed_contracts.TestCallExecution\n\n target_block = deploy_client.get_block_number() + 300\n\n call = deploy_future_block_call(\n client_contract.setBool,\n target_block=target_block,\n )\n\n deploy_client.wait_for_block(target_block - 10 - 255)\n\n # claim it\n claim_txn_h = call.claim(value=2 * call.basePayment())\n claim_txn_r = deploy_client.wait_for_transaction(claim_txn_h)\n\n assert call.claimer() == deploy_coinbase\n\n deploy_client.wait_for_block(call.targetBlock())\n\n assert call.wasCalled() is False\n\n not_allowed_txn_h = call.execute(_from=encode_hex(accounts[1]))\n not_allowed_txn_r = deploy_client.wait_for_transaction(not_allowed_txn_h)\n\n assert 
call.wasCalled() is False\n\n deploy_client.wait_for_block(call.targetBlock() + 64)\n\n ffa_txn_h = call.execute(_from=encode_hex(accounts[1]))\n ffa_txn_r = deploy_client.wait_for_transaction(ffa_txn_h)\n\n assert call.wasCalled() is True\n","sub_path":"tests/execution/test_allowed_to_execute_if_claimer_does_not_call.py","file_name":"test_allowed_to_execute_if_claimer_does_not_call.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"372029426","text":"def DAFScriptMain(config, parameter, returnpacket):\r\n # config: ISysConfig object\r\n # parameter: TPClassUIDataPacket\r\n # returnpacket: TPClassUIDataPacket (undefined structure)\r\n\r\n strSQL = '\\\r\n select AccountInstance_ID, \\\r\n branch_code, \\\r\n Currency_Code \\\r\n from AccountInstance \\\r\n where Account_Code = \\'%s\\'' \\\r\n % (parameter.FirstRecord.account_code)\r\n resSQL = config.CreateSQL(strSQL).RawResult\r\n\r\n resSQL.First()\r\n if resSQL.Eof:\r\n raise 'Account Instance is not found.'\r\n\r\n returnpacket.CreateValues(\\\r\n ['AccountInstance_ID',resSQL.AccountInstance_ID],\\\r\n ['branch_code',resSQL.branch_code],\\\r\n ['Currency_Code',resSQL.Currency_Code]\\\r\n )\r\n\r\n return 1\r\n","sub_path":"scripts/accounting/getdefaccinst.py","file_name":"getdefaccinst.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"402775496","text":"import sys\nsys.stdin = open('input.txt', 'r')\nsys.stdout = open('output.txt', 'w') \n\nfor _ in range(int(input())):\n\tn=input()\n\n\tl = n\n\tl+=n[0]\n\tl=l[1:]\n\n\tr = n[len(n)-1]\n\tr+=n\n\t# print(r)\n\tr=r[0:len(r)-1]\n\t# print(r)\n\n\t# print(l,r)\n\n\tif(l==r):\n\t\tprint(\"YES\")\n\telse:\n\t\tprint(\"NO\")","sub_path":"OLD/chefandstring.py","file_name":"chefandstring.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"448089905","text":"from django.urls import path\r\nfrom member import views\r\n\r\nurlpatterns = [\r\n # path('', views.Join.as_view(), name='join'),\r\n path('', views.join),\r\n path('insert', views.insert),\r\n path('login', views.login),\r\n path('logout', views.logout),\r\n path('check', views.check),\r\n path('member_info/', views.member_info),\r\n path('update', views.update),\r\n path('overlapcheck', views.overlapcheck)\r\n]","sub_path":"bsj/member/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"78883884","text":"import requests\nfrom bs4 import BeautifulSoup\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36'}\n\nrub_url = 'https://www.google.com/search?rlz=1C1CHZL_enRU818RU818&sxsrf=ALeKk00iyVBpYvmyWkVSdaAOV96yukI9Bw%3A1604236848046&ei=MLaeX9W1Au7HrgT1l5HIDA&q=1+rub+to+usd&oq=1+rub&gs_lcp=CgZwc3ktYWIQAxgAMgUIABCRAjIECAAQQzIECAAQQzIECAAQQzICCAAyAggAMgIIADIFCAAQywEyAggAMgIIADoHCAAQRxCwAzoGCAAQBxAeOgQIABAeOgYIABAKEB46BggAEAgQHlD7LljIPGDbRmgAcAB4AIABkwGIAYEDkgEDMC4zmAEAoAEBqgEHZ3dzLXdpesgBCMABAQ&sclient=psy-ab'\nusd_url = 'https://www.google.com/search?q=%241+to+rub&rlz=1C1CHZL_enRU818RU818&oq=%241+to+rub&aqs=chrome.0.69i59j0i457j0i10i22i30j0i22i30l5.6318j0j4&sourceid=chrome&ie=UTF-8'\n\n\ndef get_cur(page):\n full_page = requests.get(page, 
headers=headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n cur = soup.findAll(\n \"span\", {\"class\": \"SwHCTb\"})[0].text\n return cur\n\n\ndef usd_to_rub(inp):\n # convert a dollar amount to rubles; get_cur needs a URL and returns a string like '97,50'\n return inp * float(get_cur(usd_url).replace(',', '.'))\n\n\ndef rub_to_usd(inp):\n # convert a ruble amount to dollars\n return inp * float(get_cur(rub_url).replace(',', '.'))\n\n\ndef ask_value():\n user_inp = int(input('Provide a number: '))\n return user_inp\n\n\ndef converter():\n req = input('Do you want to convert usd or rub? Choose one: ')\n if req.lower() == 'usd':\n try:\n user_inp = ask_value()\n except:\n print('Must be an integer')\n converter()\n else:\n res = user_inp * float(get_cur(usd_url).replace(',', '.'))\n print('$' + str(user_inp) + ' is ' + str(res) + ' rub')\n elif req.lower() == 'rub':\n try:\n user_inp = ask_value()\n except:\n print('Must be an integer')\n converter()\n else:\n res = user_inp * float(get_cur(rub_url).replace(',', '.'))\n print(str(user_inp) + ' rub is ' + '$' + str(res))\n else:\n print('Please, provide \"usd\" or \"rub\"')\n converter()\n\n\nconverter()\n","sub_path":"converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"501264433","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, _\nfrom datetime import datetime, timedelta\nimport pytz\nfrom odoo.exceptions import UserError\nfrom odoo.addons.base.ir import ir_sequence\n\nclass IrSequence(models.Model):\n _inherit = 'ir.sequence'\n\n \n def _get_prefix_suffix(self, prefix=False, suffix=False):\n def _interpolate(s, d):\n return (s % d) if s else ''\n\n def _interpolation_dict():\n now = range_date = effective_date = datetime.now(pytz.timezone(self._context.get('tz') or 'UTC'))\n if self._context.get('ir_sequence_date'):\n effective_date = datetime.strptime(self._context.get('ir_sequence_date'), '%Y-%m-%d')\n if self._context.get('ir_sequence_date_range'):\n range_date = datetime.strptime(self._context.get('ir_sequence_date_range'), '%Y-%m-%d')\n\n sequences = {\n 'year': '%Y', 'month': '%m', 'day': '%d', 'y': '%y', 'doy': '%j', 'woy': '%W',\n 'weekday': '%w', 'h24': '%H', 'h12': '%I', 'min': '%M', 'sec': '%S'\n }\n res = {}\n for key, format in sequences.items():\n res[key] = effective_date.strftime(format)\n res['range_' + key] = range_date.strftime(format)\n res['current_' + key] = now.strftime(format)\n \n res.update({'prefix':prefix if prefix else \"\",'suffix':suffix if suffix else \"\"})\n return res\n\n d = _interpolation_dict()\n try:\n interpolated_prefix = _interpolate(self.prefix, d)\n interpolated_suffix = _interpolate(self.suffix, d)\n except ValueError:\n raise UserError(_('Invalid prefix or suffix for sequence \\'%s\\'') % (self.get('name')))\n return interpolated_prefix, interpolated_suffix\n \n def get_next_char(self, number_next, prefix=False, suffix=False):\n interpolated_prefix, interpolated_suffix = self._get_prefix_suffix(prefix, suffix)\n return interpolated_prefix + '%%0%sd' % self.padding % number_next + interpolated_suffix\n\nclass IrSequenceDateRange(models.Model):\n _inherit = 'ir.sequence.date_range'\n \n prefix = fields.Char(help=\"Prefix value of the record for the sequence\")\n suffix = fields.Char(help=\"Suffix value of the record for the sequence\")\n \n def _next(self):\n if self.sequence_id.implementation == 'standard':\n number_next = ir_sequence._select_nextval(self._cr, 'ir_sequence_%03d_%03d' % (self.sequence_id.id, self.id))\n else:\n number_next = ir_sequence._update_nogap(self, 
self.sequence_id.number_increment)\n \n return self.sequence_id.get_next_char(number_next, self.prefix, self.suffix)\n \n","sub_path":"fiscal_year_sequence_extensible/models/ir_sequence.py","file_name":"ir_sequence.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"268325375","text":"# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# \"\"\"Converts MNIST data to TFRecords file format with Example protos.\"\"\"\n# from __future__ import absolute_import\n# from __future__ import division\n# from __future__ import print_function\n\nimport os\nimport glob\nimport numpy as np\nimport tensorflow as tf\nfrom scipy import misc\n\n\ntf.app.flags.DEFINE_string('directory', '../data/data_train/',\n 'Directory to download data files and write the '\n 'converted result')\ntf.app.flags.DEFINE_string('directory_valid', '../data/data_valid/',\n 'Directory to download data files and write the '\n 'converted result')\nFLAGS = tf.app.flags.FLAGS\n\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\ndef _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n# only get serial number\ndef get_filenumber(mypath):\n # mypath = './data/data1/'\n data_lidar = sorted(glob.glob(mypath + '*txt'), key=os.path.basename)\n print (mypath)\n\n file_numbers = []\n for i in range(len(data_lidar)):\n number = data_lidar[i]\n number = number.replace(mypath,\"\")\n number = number.replace(\".txt\",\"\")\n file_numbers.append(number)\n print (\"number of sample: \", len(file_numbers))\n return file_numbers\n\ndef get_filename_list(file_numbers, data_dir):\n # reconstruce filename for three source\n left_image_filename_list = []\n right_image_filename_list = []\n lidar_filename_list = []\n\n for i in range(len(file_numbers)):\n left_image_filename = data_dir + file_numbers[i] + \"left.png\"\n right_image_filename = data_dir + file_numbers[i] + \"right.png\"\n lidar_filename = data_dir + file_numbers[i] + \".txt\"\n left_image_filename_list.append(left_image_filename)\n right_image_filename_list.append(right_image_filename)\n lidar_filename_list.append(lidar_filename)\n\n return [left_image_filename_list, right_image_filename_list, lidar_filename_list]\n\ndef image_filename_list_to_nparray(filename_list):\n image_nparray = []\n for k in filename_list:\n image_nparray.append(misc.imread(k))\n image_nparray = np.asarray(image_nparray)\n# print(image_nparray.shape)\n return image_nparray\n\ndef lidar_filename_list_to_nparray(filename_list):\n lidar_nparray = []\n for k in filename_list:\n lidar_nparray.append(np.loadtxt(k, delimiter=','))\n lidar_nparray = np.asarray(lidar_nparray)\n # 
print(lidar_nparray.shape)\n return lidar_nparray\n\ndef convert_to(left_images, right_images, lidar_labels, name):\n num_examples = lidar_labels.shape[0]\n if left_images.shape[0] != num_examples:\n raise ValueError(\"Images size %d does not match label size %d.\" %\n (left_images.shape[0], num_examples))\n\n rows_left = left_images.shape[1]\n cols_left = left_images.shape[2]\n depth_left = left_images.shape[3]\n\n rows_right = left_images.shape[1]\n cols_right = left_images.shape[2]\n depth_right = left_images.shape[3]\n\n filename = os.path.join(FLAGS.directory, name + '.tfrecords')\n print('Writing', filename)\n writer = tf.python_io.TFRecordWriter(filename)\n for index in range(num_examples):\n image_raw_left = left_images[index].tostring()\n image_raw_right = right_images[index].tostring()\n lidar_raw = lidar_labels[index].tostring()\n example = tf.train.Example(features=tf.train.Features(feature={\n 'height_left': _int64_feature(rows_left),\n 'width_left': _int64_feature(cols_left),\n 'depth_left': _int64_feature(depth_left),\n 'height_right': _int64_feature(rows_right),\n 'width_right': _int64_feature(cols_right),\n 'depth_right': _int64_feature(depth_right),\n 'image_raw_left': _bytes_feature(image_raw_left),\n 'image_raw_right': _bytes_feature(image_raw_right),\n 'lidar_label': _bytes_feature(lidar_raw)\n }))\n writer.write(example.SerializeToString())\n\n\ndef main(argv):\n print('start')\n # Get first half of training data.\n file_numbers = get_filenumber(FLAGS.directory)\n quater = int(len(file_numbers)/4)\n\n file_numbers1 = file_numbers[:quater]\n print('train1 size: ', len(file_numbers1))\n left_image_filename_list1, right_image_filename_list1, lidar_filename_list1 = get_filename_list(file_numbers1, FLAGS.directory)\n # Extract it into numpy arrays.\n left_images = image_filename_list_to_nparray(left_image_filename_list1)\n right_images = image_filename_list_to_nparray(right_image_filename_list1)\n lidar_labels = lidar_filename_list_to_nparray(lidar_filename_list1)\n convert_to(left_images, right_images, lidar_labels, 'train1')\n print('train1 finished')\n del(left_images, right_images, lidar_labels)\n\n # Get second half of training data.\n file_numbers2 = file_numbers[quater:2*quater]\n print('train2 size: ', len(file_numbers2))\n left_image_filename_list2, right_image_filename_list2, lidar_filename_list2 = get_filename_list(file_numbers2, FLAGS.directory)\n # Extract it into numpy arrays.\n left_images = image_filename_list_to_nparray(left_image_filename_list2)\n right_images = image_filename_list_to_nparray(right_image_filename_list2)\n lidar_labels = lidar_filename_list_to_nparray(lidar_filename_list2)\n convert_to(left_images, right_images, lidar_labels, 'train2')\n print('train2 finished')\n del(left_images, right_images, lidar_labels)\n\n # Get second half of training data.\n file_numbers2 = file_numbers[2*quater:3*quater]\n print('train3 size: ', len(file_numbers2))\n left_image_filename_list2, right_image_filename_list2, lidar_filename_list2 = get_filename_list(file_numbers2, FLAGS.directory)\n # Extract it into numpy arrays.\n left_images = image_filename_list_to_nparray(left_image_filename_list2)\n right_images = image_filename_list_to_nparray(right_image_filename_list2)\n lidar_labels = lidar_filename_list_to_nparray(lidar_filename_list2)\n convert_to(left_images, right_images, lidar_labels, 'train3')\n print('train3 finished')\n del(left_images, right_images, lidar_labels)\n\n # Get second half of training data.\n file_numbers2 = file_numbers[3*quater:]\n 
print('train4 size: ', len(file_numbers2))\n left_image_filename_list2, right_image_filename_list2, lidar_filename_list2 = get_filename_list(file_numbers2, FLAGS.directory)\n # Extract it into numpy arrays.\n left_images = image_filename_list_to_nparray(left_image_filename_list2)\n right_images = image_filename_list_to_nparray(right_image_filename_list2)\n lidar_labels = lidar_filename_list_to_nparray(lidar_filename_list2)\n convert_to(left_images, right_images, lidar_labels, 'train4')\n print('train4 finished')\n del(left_images, right_images, lidar_labels)\n\n # Get validate data.\n file_numbers_valid = get_filenumber(FLAGS.directory_valid)\n left_image_filename_list_valid, right_image_filename_list_valid, lidar_filename_list_valid = get_filename_list(file_numbers_valid, FLAGS.directory_valid)\n left_images_valid = image_filename_list_to_nparray(left_image_filename_list_valid)\n right_images_valid = image_filename_list_to_nparray(right_image_filename_list_valid)\n lidar_labels_valid = lidar_filename_list_to_nparray(lidar_filename_list_valid)\n convert_to(left_images_valid, right_images_valid, lidar_labels_valid, 'validate')\n print('validate finished')\n\nif __name__ == '__main__':\n tf.app.run()","sub_path":"lidarnet_fei/convert_to_tfrecords.py","file_name":"convert_to_tfrecords.py","file_ext":"py","file_size_in_byte":8110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"294917712","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 2 23:34:37 2019\r\n\r\n@author: ale57\r\n\"\"\"\r\n\r\nfrom Trace import Trace\r\n\r\nfrom shutil import copyfile\r\nimport os\r\n\r\n \r\nclass Trasher():\r\n \r\n def __init__(self, raw_path,junk_path,interesting_path,copy_junk = True):\r\n self.raw_path = raw_path\r\n self.junk_path = junk_path\r\n self.interesting_path = interesting_path\r\n self.copy_junk = copy_junk\r\n self.junk_count = 0\r\n self.interesting_count = 0\r\n \r\n self.white_perc_thr = 3\r\n self.eccentricity_thr = 30\r\n \r\n \r\n def move_file(self, filename,good):\r\n if good:\r\n copyfile(self.raw_path + filename,self.interesting_path + filename)\r\n print(filename, ' is interesting')\r\n elif self.copy_junk:\r\n copyfile(self.raw_path + filename,self.junk_path + filename)\r\n print(filename, ' is junk')\r\n \r\n def is_good(self, filename):\r\n t = Trace(self.raw_path,filename)\r\n \r\n if t.white_perc > self.white_perc_thr:\r\n print('too crowded: ',t.white_perc)\r\n return False\r\n t.compute_inertia()\r\n if t.eccentricity_from_inertia < self.eccentricity_thr:\r\n print('too spherical ',t.eccentricity_from_inertia)\r\n return False\r\n \r\n self.interesting_count += 1\r\n return True\r\n \r\n def trash(self):\r\n total_count = 0\r\n self.interesting_count = 0\r\n for filename in os.listdir(self.raw_path):\r\n if not filename.endswith('.png'):\r\n continue\r\n good = self.is_good(filename)\r\n self.move_file(filename,good)\r\n total_count += 1\r\n \r\n self.junk_count = total_count - self.interesting_count\r\n \r\n print(self.junk_count,' junk frames')\r\n print(self.interesting_count,' interesting frames')\r\n \r\n def clear_directories(self):\r\n for filename in os.listdir(self.junk_path):\r\n if not filename.endswith('.png'):\r\n continue\r\n os.remove(self.junk_path + filename)\r\n for filename in os.listdir(self.interesting_path):\r\n if not filename.endswith('.png'):\r\n continue\r\n os.remove(self.interesting_path + filename)\r\n \r\ndef Trigger(t,white_perc_thr=3,eccentricity_thr=30,verbose=False):\r\n if 
t.white_perc > white_perc_thr:\r\n if verbose:\r\n print('\\n'+t.filename+': too crowded: '+str(t.white_perc))\r\n return False\r\n t.compute_inertia()\r\n if t.eccentricity_from_inertia < eccentricity_thr:\r\n if verbose:\r\n print('\\n'+t.filename+': too spherical '+str(t.eccentricity_from_inertia))\r\n return False\r\n \r\n if verbose:\r\n print('\\n'+t.filename+' is INTERESTING')\r\n return True\r\n ","sub_path":"analysis/Trasher.py","file_name":"Trasher.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"597730304","text":"#%%\nimport json\nimport string\nimport matplotlib.pyplot as plt \nimport json_lines\nimport itertools\nclass pageRankRecommender():\n def __init__(self, stories, users):\n with open('PRresult.json') as f:\n PRresults = json.load(f)\n\n userLinkToIdDict = {}\n IdToUserDict = {}\n\n lastUserId = 0\n for user in users:\n userLinkToIdDict[user['name']] = lastUserId\n IdToUserDict[lastUserId] = user\n lastUserId += 1\n\n storyLinkToIdDict = {}\n IdToStoryDict = {}\n\n #create a dict between storied and their id\n lastStoryId = 0\n for story in stories:\n storyLinkToIdDict[story['storyLink']] = lastStoryId\n IdToStoryDict[lastStoryId] = story\n lastStoryId += 1\n \n self.storyLinkToScores = {}\n\n minScore = 0\n maxScore = 0\n\n for res in PRresults:\n score = res['score']\n minScore = min(score, minScore)\n maxScore = max(score, maxScore)\n print(minScore, maxScore)\n delta = maxScore - minScore\n scale = 1 / delta \n\n for res in PRresults:\n score = res['score'] / delta\n link = res['link']\n\n if(link in userLinkToIdDict):\n user = IdToUserDict[userLinkToIdDict[link]]\n for story in user['stories']:\n if(story in storyLinkToIdDict):\n self.storyLinkToScores[story] = score\n\n def predict(self):\n return self.storyLinkToScores\n\n\n#%%\nPRRec = pageRankRecommender(stories, users)\nscores = [ (link, score)for link, score in PRRec.predict().items()]\n\n#print(scores[:10])\nprint(sorted(scores, key=lambda tup: tup[1],reverse=True)[:10])\n\n#%%\n\nwith open('result.jl', 'rb') as f:\n entities = [x for x in json_lines.reader(f)]\n stories = [x for x in entities if str(x['pageType']) == \"story\" and str(x['storyType']) == '/book/Harry-Potter/']\n users = [x for x in entities if str(x['pageType']) == \"user\"]","sub_path":"pageRankRecommender.py","file_name":"pageRankRecommender.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"310790497","text":"__author__ = 'zeemi'\n# -*- coding: utf-8 -*-\nfrom drivers.driver import Driver\n\n\nclass ReminderDriver(Driver):\n \"\"\"Program odpowiedzialny za wysylanie maili\"\"\"\n def __init__(self,id, type, name, master_agent):\n super(ReminderDriver, self).__init__(id, type, name, master_agent)\n print(\"Remider\")\n\n","sub_path":"drivers/reminderDriver.py","file_name":"reminderDriver.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"606573776","text":"import MapReduce\nimport sys\n\nmr = MapReduce.MapReduce()\nsize = 5\n\ndef mapper(record):\n\tmatrix = record[0]\n\ti = record[1]\n\tj = record[2]\n\tv = record[3]\n\tif matrix == \"a\":\n\t\tfor n in xrange(0,size):\n\t\t\tkey = (i,n)\n\t\t\tvalue = (matrix, j, v)\n\t\t\tmr.emit_intermediate(key, value)\n\telse:\n\t\tfor n in xrange(0,size):\n\t\t\tkey = (n,j)\n\t\t\tvalue = (matrix, i, 
v)\t\t\t\n\t\t\tmr.emit_intermediate(key, value)\n\n \n\ndef reducer(key, list_of_values):\n matrix_a_values = filter(lambda s: s[0] == 'a', list_of_values)\n matrix_b_values = filter(lambda s: s[0] == 'b', list_of_values)\n total = 0\n for n in xrange(0,size):\n \ta_value = filter(lambda s: s[1] == n, matrix_a_values) or [(0,0,0)]\n \tb_value = filter(lambda s: s[1] == n, matrix_b_values) or [(0,0,0)]\n \ttotal += a_value[0][2] * b_value[0][2]\n\n mr.emit((key[0],key[1],total))\n\nif __name__ == '__main__':\n inputdata = open(sys.argv[1])\n mr.execute(inputdata, mapper, reducer)\n\n# C = A x B \n# C_i_j = sum( A_i_k * B_k_j ) ","sub_path":"assignment3/multiply.py","file_name":"multiply.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"512003689","text":"\n\ndef build_menu(buttons, n_cols, header_buttons=None, footer_buttons=None):\n menu = [buttons[i:i + n_cols] for i in range(0, len(buttons), n_cols)]\n if header_buttons:\n menu.insert(0, header_buttons)\n if footer_buttons:\n menu.append(footer_buttons)\n return menu\n\n\ndef is_next(current, last):\n int_current = int(current)\n int_last = int(last)\n if int_current + 1 <= int_last:\n return str(int_current + 1)\n else:\n return str(int_last)\n\n\ndef is_previous(current, first):\n int_current = int(current)\n int_first = int(first)\n if int_current - 1 >= int_first:\n return str(int_current - 1)\n else:\n return str(int_first)\n","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"595898999","text":"#!/usr/bin/env python\nfrom gimpfu import *\n\n# Gimp Pixel Outliner\n# MIT / Mophs 2017\n\ndef outline(image, layer, colour):\n\n\t# Register for undo\n\tpdb.gimp_image_undo_group_start(image)\n\t\n\tgimp.progress_init(\"Outlining...\")\n\n\t# Creates a blank layer for outlining\n\toutline_layer = image.new_layer(\"Outline\", pos=1, fill_mode=3)\n\n\ttry:\n\t\tfor x in range(layer.width):\n\t\t\tgimp.progress_update(float(x) / float(layer.width))\n\n\t\t\tfor y in range(layer.height):\n\t\t\t\tpixel = layer.get_pixel(x, y)\n\t\t\t\tif(len(pixel) >= 3):\n\t\t\t\t\tif(pixel[3] != 0):\n\t\t\t\t\t\tmark(outline_layer, x, y, colour)\n\n\t\tlayer.update(0, 0, layer.width, layer.height)\n\n\texcept Exception as err:\n\t\tgimp.message(\"Failed to outline the image, error is: \" + str(err))\n\n\tpdb.gimp_image_undo_group_end(image)\n\n\treturn\n\ndef mark(layer, x, y, col):\n\t#up\n\tlayer.set_pixel(x, clamp_pixel(layer.height,y - 1), col)\n\t#down\n\tlayer.set_pixel(x, clamp_pixel(layer.height,y + 1), col)\n\t#left\n\tlayer.set_pixel(clamp_pixel(layer.width,x - 1), y, col)\n\t#right\n\tlayer.set_pixel(clamp_pixel(layer.width,x + 1), y, col)\ndef clamp_pixel(value, max_value):\n\treturn max(0, min(value, max_value))\n\nregister(\n\t\"pixel_outliner\",\n\t\"Add Pixel Outline\",\n\t\"Outlines pixel art\",\n\t\"Mophs\",\n\t\"MIT\",\n\t\"2017\",\n\t\"/Filters/Mophs/Pixel Outliner\",\n\t\"*\",\n\t[\n\t\t(PF_COLOR, \"outlineColor\", \"Outline Colour\", (255, 0, 0)),\n\t],\n\t[],\n\toutline)\n\nmain()\n","sub_path":"pixel_outliner.py","file_name":"pixel_outliner.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"352373369","text":"from mydns import *\nfrom gal_dnstest import *\nimport pywt\nfrom ellipse import *\nimport 
numpy as np\n\ne1arr = []\ne2arr = []\nfor i in range(200):\n\te1,e2 = elli(galarray0[i])\n\te1arr.append(e1)\n\te2arr.append(e2)\ne1arr = np.array(e1arr)\ne2arr = np.array(e2arr)\n\nsige1 = []\nsige2 = []\nfor i in range(200):\n\te1,e2 = elli(galarray[i])\n\tsige1.append(e1)\n\tsige2.append(e2)\nsige1 = np.array(sige1)\nsige2 = np.array(sige2)\n\"\"\"\nvarsig = 0\nfor i in range(200):\n\ts1 = (sige[i][0]-orge[i][0])/orge[i][0]\n\ts2 = (sige[i][1]-orge[i][1])/orge[i][1]\n\tvar = s1**2 + s2**2\n\tvarsig += var\n\"\"\"\n\n\n\n\"\"\"\nprint(\"This is the variance of the signal\")\nprint(varsig)\n#testsnr for lpdn\nmin_var = 0\nelp = []\nfor j in range(40):\n\tvarlp = 0\n\tfor i in range(200):\n\t\tlpimage = lpdn(galarray[i],r=j+3)\n\t\t(e1,e2) = elli(lpimage)\n\"\"\"\t\t\t\n#testsnr for wndn\nwne1 = []\nwne2 = []\nfor i in range(200):\n\twnimage = wndn(galarray[i])\n\t(e1,e2) = elli(wnimage)\n\tif (e1**2 < 1):\n\t\t{\n\t\twne1.append(e1)\n\t\t}\n\telse:\n\t\t{\n\t\twne1.append(0)\n\t\t}\nwne1 = np.array(wne1)\nwne2 = np.array(wne2)\n\nplt.figure()\nplt.scatter(e1arr,wne1)\nplt.show()\n\t\n\"\"\"\n#testsnr for mvdn\nmin_var = 0\nfor k in range(4):\n\tvarmv = 0\n\tfor i in range(200):\n\t\tmvimage = mvdn(galarray[i],rg=k+1)\n\t\t(e1,e2) = elli(mvimage)\n\t\ts1 = (e1-orge[i][0])/orge[i][0]\n\t\ts2 = (e2-orge[i][1])/orge[i][1]\n\t\ts = s1**2 + s2**2\n\t\tvarmv += s\n\tif (min_var == 0):\n\t\tmin_var = varmv\n\t\tprint(\"This is the variance for the mvdn\")\n\t\tprint(varmv);print(k+1)\n\telse:\n\t\tif (varmv <= min_var):\n\t\t\tmin_var = varmv\n\t\t\tprint(\"This is the variance for the mvdn\")\n\t\t\tprint(varmv);print(k+1)\n\n#testsnr for wldn1\nmin_var = 0\nfor j in range(5):\n\tvarwl1 = 0\n\tfor i in range(200):\n\t\twlimage1 = wldn1(galarray[i],k=j)\n\t\t(e1,e2) = elli(wlimage1)\n\t\ts1 = (e1-orge[i][0])/orge[i][0]\n\t\ts2 = (e2-orge[i][1])/orge[i][1]\n\t\ts = s1**2 + s2**2\n\t\tvarwl1 += s\n\tif (min_var == 0):\n\t\tmin_var = varwl1\n\t\tprint(\"This is the variance for the wldn1\")\n\t\tprint(varwl1);print(j+1)\n\telse:\n\t\tif (varwl1 <= min_var):\n\t\t\tmin_var = varwl1\n\t\t\tprint(\"This is the variance for the wldn1\")\n\t\t\tprint(varwl1);print(j+1)\n#testsnr for wldn2\nwls = pywt.wavelist()[0:15]+pywt.wavelist()[24:80]+[pywt.wavelist()[89],]\nwls = wls + pywt.wavelist()[92:107]+pywt.wavelist()[108:]\nmin_var = 0\nfor j in range(len(wls)):\n\tfor k in range(5):\n\t\tvarwl2 = 0\n\t\tfor i in range(200):\n\t\t\twlimage2 = wldn2(galarray[i],rat=k/10.,wav=wls[j])\n\t\t\t(e1,e2) = elli(wlimage2)\n\t\t\ts1 = (e1-orge[i][0])/orge[i][0]\n\t\t\ts2 = (e2-orge[i][1])/orge[i][1]\n\t\t\ts = s1**2 + s2**2\n\t\t\tvarwl2 += s\n\t\tif (min_var == 0):\n\t\t\tmin_var =varwl2\n\t\t\tprint(\"This is the variance for the\");print(wls[j]);print(varwl2)\n\t\t\tprint(k/10.)\n\t\telse:\n\t\t\tif (varwl2 <= min_var):\n\t\t\t\tmin_var =varwl2\n\t\t\t\tprint(\"This is the variance for the\");print(wls[j])\n\t\t\t\tprint(varwl2);print(k/10.)\n\"\"\"\n","sub_path":"e1e1.py","file_name":"e1e1.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"210501793","text":"import abc\nimport numpy as np\nimport tensorflow as tf\n\n\nclass Controller(abc.ABC): \n def __init__(self,\n size_input,\n size_output,\n size_memory_vector,\n size_conv_shift,\n num_read_heads,\n num_write_heads=1,\n batch_size=1\n ):\n self.size_input = size_input\n self.size_output = size_output\n \n # size_memory_vector is 'M' from the paper\n 
self.size_memory_vector = size_memory_vector\n \n self.size_conv_shift = size_conv_shift\n \n # number of read heads of controller\n self.num_read_heads = num_read_heads\n \n # the number of write_heads = 1 by default\n self.num_write_heads = num_write_heads\n \n self.batch_size = batch_size\n \n # size of input vector of shape (batch_size, size_input)\n # concatenated with vectors read by controller\n # each memory vector read by controller is of \n # shape (batch_size, size_memory_vector)\n self.size_concatenated_input = self.size_memory_vector * self.num_read_heads + self.size_input\n \n \"\"\"\n read_interface_size = size_of_key_vector + \n size_of_key_strength (beta_t) +\n size_of_interpolation_gate (g_t) + \n size_of_gamma (gamma_t) + \n size_of_conv_shift_vector\n Here: \n size_of_conv_shift_vector=5 if size_conv_shift=2,\n i.e., if size_conv_shift=2, \n then conv_shift_vector = [p_shift(-2), p_shift(-1), p_shift(0), p_shift(1), p_shift(2)]\n and size_conv_shift_vector=2*2+1=5\n \"\"\"\n size_read_interface = self.size_memory_vector * self.num_read_heads + \\\n 1 * self.num_read_heads + \\\n 1 * self.num_read_heads + \\\n 1 * self.num_read_heads + \\\n (2 * self.size_conv_shift + 1) * self.num_read_heads\n size_write_interface = self.size_memory_vector * self.num_write_heads + \\\n 1 * self.num_write_heads + \\\n 1 * self.num_write_heads + \\\n 1 * self.num_write_heads + \\\n (2 * self.size_conv_shift + 1) * self.num_write_heads\n size_erase_vector = self.size_memory_vector\n size_add_vector = self.size_memory_vector\n self.size_interface_vector = size_read_interface + size_write_interface + size_erase_vector + size_add_vector\n \n \n ###############################\n # get variables for #\n # network and #\n # network_output #\n ###############################\n \n self.variables_for_network()\n self.variables_for_network_output()\n \n \n def parse_interface_vector(self, interface_vector):\n \"\"\"Parse interface vector into components\n \n Parameters:\n -----------\n interface_vector: tf.Tensor (batch_size, size_interface_vector)\n \n returns: dict\n a dictionary with the parsed components\n --------\n \"\"\"\n \n read_key_vector = self.size_memory_vector * self.num_read_heads\n read_key_strength = read_key_vector + 1 * self.num_read_heads\n read_interpolation_gate = read_key_strength + 1 * self.num_read_heads\n read_gamma = read_interpolation_gate + 1 * self.num_read_heads\n read_conv_shift_vector = read_gamma + (2 * self.size_conv_shift + 1) * self.num_read_heads\n \n write_key_vector = read_conv_shift_vector + self.size_memory_vector * self.num_write_heads\n write_key_strength = write_key_vector + 1 * self.num_write_heads\n write_interpolation_gate = write_key_strength + 1 * self.num_write_heads\n write_gamma = write_interpolation_gate + 1 * self.num_write_heads\n write_conv_shift_vector = write_gamma + (2 * self.size_conv_shift + 1) * self.num_write_heads\n \n erase_vector = write_conv_shift_vector + self.size_memory_vector\n add_vector = erase_vector + self.size_memory_vector\n \n shape_read_key_vector = (self.batch_size, self.size_memory_vector, self.num_read_heads)\n shape_read_key_strength = (self.batch_size, self.num_read_heads)\n shape_read_interpolation_gate = (self.batch_size, self.num_read_heads)\n shape_read_gamma = (self.batch_size, self.num_read_heads)\n shape_read_conv_shift_vector = (self.batch_size, 2 * self.size_conv_shift + 1, self.num_read_heads)\n \n shape_write_key_vector = (self.batch_size, self.size_memory_vector, self.num_write_heads)\n 
shape_write_key_strength = (self.batch_size, self.num_write_heads)\n shape_write_interpolation_gate = (self.batch_size, self.num_write_heads)\n shape_write_gamma = (self.batch_size, self.num_write_heads)\n shape_write_conv_shift_vector = (self.batch_size, 2 * self.size_conv_shift + 1, self.num_write_heads)\n \n shape_erase_vector = (self.batch_size, self.size_memory_vector)\n shape_add_vector = (self.batch_size, self.size_memory_vector)\n \n # the parsing begins... \n parsed = {}\n \n parsed['read_keys'] = tf.reshape(interface_vector[:, :read_key_vector], shape_read_key_vector)\n \n # the key_strength should be >= 0\n # hence, we apply softplus\n parsed['read_strengths'] = tf.nn.softplus(tf.reshape(interface_vector[:, read_key_vector:read_key_strength], shape_read_key_strength))\n \n # the interpolation_gate lies between [0, 1]\n # hence, we apply sigmoid\n parsed['read_gates'] = tf.nn.sigmoid(tf.reshape(interface_vector[:, read_key_strength:read_interpolation_gate], shape_read_interpolation_gate))\n \n # gamma_t >= 1 always\n # hence, we apply (softplus + 1)\n parsed['read_gammas'] = 1 + tf.nn.softplus(tf.reshape(interface_vector[:, read_interpolation_gate:read_gamma], shape_read_gamma))\n \n # conv_shift vector is a vector of probabilities\n # hence, we apply softmax\n parsed['read_shifts'] = tf.nn.softmax(tf.reshape(interface_vector[:, read_gamma:read_conv_shift_vector], shape_read_conv_shift_vector))\n \n # similar shapes and wrapping for the write head \n parsed['write_keys'] = tf.reshape(interface_vector[:, read_conv_shift_vector:write_key_vector], shape_write_key_vector)\n parsed['write_strengths'] = tf.nn.softplus(tf.reshape(interface_vector[:, write_key_vector:write_key_strength], shape_write_key_strength))\n parsed['write_gates'] = tf.nn.sigmoid(tf.reshape(interface_vector[:, write_key_strength:write_interpolation_gate], shape_write_interpolation_gate))\n parsed['write_gammas'] = 1 + tf.nn.softplus(tf.reshape(interface_vector[:, write_interpolation_gate: write_gamma], shape_write_gamma))\n parsed['write_shifts'] = tf.nn.softmax(tf.reshape(interface_vector[:, write_gamma:write_conv_shift_vector], shape_write_conv_shift_vector))\n \n # each element of erase_vector lies between [0, 1]\n # hence, we apply sigmoid\n parsed['erase_vector'] = tf.nn.sigmoid(tf.reshape(interface_vector[:, write_conv_shift_vector:erase_vector], shape_erase_vector))\n parsed['add_vector'] = tf.reshape(interface_vector[:, erase_vector:add_vector], shape_add_vector)\n \n return parsed\n \n \n \n @abc.abstractmethod\n def variables_for_network(self):\n \"\"\"Defines the variables of the neural network model\n inside the controller\n \"\"\"\n \n \n @abc.abstractmethod\n def variables_for_network_output(self):\n \"\"\"Defines the initial weights for the controller\n \"\"\"\n \n \n @abc.abstractmethod\n def network_operation(self, concatenated_input):\n \"\"\"Defines the internal operation of the neural \n network model inside the controller\n \"\"\"\n \n \n @abc.abstractmethod\n def network_output(self):\n \"\"\"Pushes through the input_data to get out the\n parsed interface vector and the pre_output\n \"\"\"\n","sub_path":"neural_turing_machine/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":8363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"343443598","text":"list_of_info = []\n\n# import re\n# id_find = '\\#[0-9]+?'\n# location_find = '[0-9]+?\\,[0-9]{1,3}'\n# area_find = '[0-9]+?x[0-9]{1,3}'\n\n# with 
open('..\\d3-inputs') as txtfile:\n# for line in txtfile:\n# # print(line)\n# line_info = []\n# id_match = re.compile(id_find)\n# i_find = id_match.findall(line)\n# if i_find:\n# # print(id_find)\n# line_info.append(int(i_find[0].replace('#', '')))\n#\n# loc_match = re.compile(location_find)\n# loc_find = loc_match.findall(line)\n# if loc_find:\n# # print(loc_find)\n# loc_list = loc_find[0].split(',')\n# line_info.append((int(loc_list[0]), int(loc_list[1])))\n#\n# area_match = re.compile(area_find)\n# a_find = area_match.findall(line)\n# if a_find:\n# # print(a_find)\n# area_list = a_find[0].split('x')\n# line_info.append((int(area_list[0]), int(area_list[1])))\n#\n# list_of_info.append(line_info)\n# # print(line_info)\n\n\n\nwith open('d3-inputs') as txtfile:\n for line in txtfile:\n line_info = []\n line = line.replace('#', '')\n split_line = line.split(' @ ')\n number = split_line[0]\n\n split_line = split_line[1].split(': ')\n coords_list = split_line[0].split(',')\n coords = []\n for i in coords_list:\n coords.append(int(i))\n\n\n area_list = split_line[1].split('x')\n area_list[1] = area_list[1].replace('\\n', '')\n area = []\n for a in area_list:\n area.append(int(a))\n\n list_of_info.append([int(number), coords, area])\n\n\nprint(list_of_info)\n\nboard = []\nfor i in range(2000):\n row = []\n for k in range(2000):\n row.append(0)\n board.append(row)\n\nfor info in list_of_info:\n x_coord = info[1][0] # First coord to pass\n y_coord = info[1][1] # Second coord to pass\n\n x_width = info[2][0]\n y_height = info[2][1]\n for height in range(y_height):\n for width in range(x_width):\n location = board[y_coord + height][x_coord + width]\n\n board[y_coord + height][x_coord + width] += 1\n\ncounter = 0\nfor rw in board:\n for col in rw:\n if col >= 2:\n counter += 1\n\nprint(counter)\n","sub_path":"Day 3/Puzzle 1.py","file_name":"Puzzle 1.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"553738244","text":"#!/Users/anthonyquivers/anaconda3/bin/python\n#Date Started: 190601\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the queensAttack function below.\ndef queensAttack(n, k, r_q, c_q, obstacles):\n #n: width and height of board\n #k: number of obstacles\n\n def make_board(S, obstacles):\n def get_row(obstacle):\n return obstacle[0]\n\n def get_col(obstacle):\n return obstacle[1]\n\n for obj in obstacles:\n S[get_row(obj) - 1] |= 1 << (n - get_col(obj))\n\n return S\n\n def show_board(S):\n width = len(S)\n for row in reversed(S):\n ## Fixed Width Binary number\n print(('{0:0' + str(width) + 'b}').format(row))\n\n S = [0] * n #an number on the list for each row\n S = make_board(S, obstacles)\n\n show_board(S)\n return 0\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n nk = input().split()\n\n n = int(nk[0])\n\n k = int(nk[1])\n\n r_qC_q = input().split()\n\n r_q = int(r_qC_q[0])\n\n c_q = int(r_qC_q[1])\n\n obstacles = []\n\n for _ in range(k):\n obstacles.append(list(map(int, input().rstrip().split())))\n\n result = queensAttack(n, k, r_q, c_q, obstacles)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n\n","sub_path":"queensAttachII.py","file_name":"queensAttachII.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"621068201","text":"\nfrom __future__ import division\nimport numpy as np # linear algebra\nimport pandas as pd # 
data processing, CSV file I/O (e.g. pd.read_csv)\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n \n \nimport nltk\nnltk.download('stopwords')\n\nfrom nltk.corpus import stopwords\nimport string\n#import xgboost as xgb\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn import ensemble, metrics, model_selection, naive_bayes\n \n\neng_stopwords = set(stopwords.words(\"english\"))\npd.options.mode.chained_assignment = None\n## Read the train and test dataset and check the top few lines ##\ntrain_df = pd.read_csv(\"train.csv\")\ntest_df = pd.read_csv(\"test.csv\")\nprint(\"Number of rows in train dataset : \",train_df.shape[0])\nprint(\"Number of rows in test dataset : \",test_df.shape[0])\n\n\ndef remove(txt):\n result = ''.join([i for i in txt if not i.isdigit()])\n return result\ntrain_df=train_df.fillna('kinetic')\ntest_df=test_df.fillna('kinetic')\ndef kinetic(row):\n probs=np.unique(row,return_counts=True)[1]/len(row)\n kinetic=np.sum(probs**2)\n return kinetic\n\n# def kinetic_letters(text):\n# text = text.lower()\n# letterRepartition = np.zeros(26)\n# i = 0\n# for letter in text:\n# if ord(letter) in range(97, 123) :\n# letterRepartition[ord(letter)-97] +=1 \n# probs = letterRepartition/len(text)\n# kinetic = np.sum(probs**2)\n# return kinetic\n \ndef kinetic_letters(text):\n \n letterRepartition = np.zeros(26)\n for letter in text:\n if ord(letter) in range(97, 123) :\n letterRepartition[ord(letter)-97] +=1\n letterRepartition = letterRepartition / len(text)\n return kinetic(letterRepartition)\n\ndef kinetic_voals(text):\n \n letterRepartition = np.zeros(26)\n for letter in text:\n if ord(letter) in range(97, 123) :\n letterRepartition[ord(letter)-97] +=1 \n \n letterRepartition = letterRepartition / len(text) \n return kinetic(letterRepartition[[0, 4, 8, 14, 20, 24]])\n\ndef kinetic_cons(text):\n \n letterRepartition = np.zeros(26)\n for letter in text:\n if ord(letter) in range(97, 123) :\n letterRepartition[ord(letter)-97] +=1 \n letterRepartition = letterRepartition / len(text)\n return kinetic(letterRepartition[[1, 2, 3 , 5, 6, 7, 9, 10, 11, 12, 13, 15, 16, 17, 18 ,19 , 21, 22, \n 23, 25]])\n\ndef kinetic_ponct(text):\n \n ponct_list = list(['.', ',', ';', '?', '!'])\n ponct_repart = np.zeros(5)\n for letter in text:\n if letter in ponct_list:\n ponct_repart[ponct_list.index(letter)] += 1\n ponct_repart = ponct_repart / len(text)\n return kinetic(ponct_repart)\n\ndef kinetic_average_words(text):\n \n ponct_list = list(['.', ',', ';', '?', '!'])\n for ponct in ponct_list:\n text = text.replace(ponct, '')\n text = text.split(' ')\n avg_kin = 0\n for word in text:\n avg_kin += kinetic_letters(word)\n return avg_kin/len(text)\n \n\nprint(train_df[\"comment_text\"].apply(kinetic_average_words))\n\n\n## kinetic in letters\ntrain_df[\"kinetic_letters\"] = train_df[\"comment_text\"].apply(kinetic_letters)\ntest_df[\"kinetic_letters\"] = test_df[\"comment_text\"].apply(kinetic_letters)\n\n## kinetic in voals\ntrain_df[\"kinetic_voals\"] = train_df[\"comment_text\"].apply(kinetic_voals)\ntest_df[\"kinetic_voals\"] = test_df[\"comment_text\"].apply(kinetic_voals)\n\n## kinetic in cons\ntrain_df[\"kinetic_cons\"] = train_df[\"comment_text\"].apply(kinetic_cons)\ntest_df[\"kinetic_cons\"] = test_df[\"comment_text\"].apply(kinetic_cons)\n\n## kinetic in ponct\ntrain_df[\"kinetic_ponct\"] = 
train_df[\"comment_text\"].apply(kinetic_ponct)\ntest_df[\"kinetic_ponct\"] = test_df[\"comment_text\"].apply(kinetic_ponct)\n\n## kinetic in ponct\ntrain_df[\"kinetic_avg_words\"] = train_df[\"comment_text\"].apply(kinetic_average_words)\ntest_df[\"kinetic_avg_words\"] = test_df[\"comment_text\"].apply(kinetic_average_words)\n\n## Number of words in the text ##\ntrain_df[\"num_words\"] = train_df[\"comment_text\"].apply(lambda x: len(str(x).split()))\ntest_df[\"num_words\"] = test_df[\"comment_text\"].apply(lambda x: len(str(x).split()))\n\n## Number of unique words in the text ##\ntrain_df[\"num_unique_words\"] = train_df[\"comment_text\"].apply(lambda x: len(set(str(x).split())))\ntest_df[\"num_unique_words\"] = test_df[\"comment_text\"].apply(lambda x: len(set(str(x).split())))\n\n\n## Number of characters in the text ##\ntrain_df[\"num_chars\"] = train_df[\"comment_text\"].apply(lambda x: len(str(x)))\ntest_df[\"num_chars\"] = test_df[\"comment_text\"].apply(lambda x: len(str(x)))\n\n## Number of stopwords in the text ##\ntrain_df[\"num_stopwords\"] = train_df[\"comment_text\"].apply(lambda x: len([w for w in str(x).lower().split() if w in eng_stopwords]))\ntest_df[\"num_stopwords\"] = test_df[\"comment_text\"].apply(lambda x: len([w for w in str(x).lower().split() if w in eng_stopwords]))\n\n## Number of punctuations in the text ##\ntrain_df[\"num_punctuations\"] =train_df['comment_text'].apply(lambda x: len([c for c in str(x) if c in string.punctuation]) )\ntest_df[\"num_punctuations\"] =test_df['comment_text'].apply(lambda x: len([c for c in str(x) if c in string.punctuation]) )\n\n# Number of conconnes in the text ##\n\n\n## Number of title case words in the text ##\ntrain_df[\"num_words_upper\"] = train_df[\"comment_text\"].apply(lambda x: len([w for w in str(x).split() if w.isupper()]))\ntest_df[\"num_words_upper\"] = test_df[\"comment_text\"].apply(lambda x: len([w for w in str(x).split() if w.isupper()]))\n\n## Number of title case words in the text ##\ntrain_df[\"num_words_title\"] = train_df[\"comment_text\"].apply(lambda x: len([w for w in str(x).split() if w.istitle()]))\ntest_df[\"num_words_title\"] = test_df[\"comment_text\"].apply(lambda x: len([w for w in str(x).split() if w.istitle()]))\n\n## Average length of the words in the text ##\ntrain_df[\"mean_word_len\"] = train_df[\"comment_text\"].apply(lambda x: np.mean([len(w) for w in str(x).split()]))\ntest_df[\"mean_word_len\"] = test_df[\"comment_text\"].apply(lambda x: np.mean([len(w) for w in str(x).split()]))\n\nfeatures=test_df.columns[1:]\nfeatures\nfrom sklearn.model_selection import train_test_split\ntrain_mes, valid_mes, train_l,valid_l = train_test_split(train_df[features],train_df[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']], test_size=0.1, random_state=2)\ndef text_process(comment):\n nopunc = [char for char in comment if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]\n\ntransform_com = TfidfVectorizer().fit(pd.concat\n ([train_df['comment_text'],test_df['comment_text']],axis=0))\n\n\ncomments_train = transform_com.transform(train_mes['comment_text'])\ncomments_valid = transform_com.transform(valid_mes['comment_text'])\ncomments_test = transform_com.transform(test_df['comment_text'])\n\n\nimport 
scipy\ncomments_train=scipy.sparse.hstack([comments_train,train_mes[features[1:]]])\n\n\ncomments_valid=scipy.sparse.hstack([comments_valid,valid_mes[features[1:]]])\n\n\n\n\ncomments_test = scipy.sparse.hstack([comments_test,test_df[features[1:]]])\n\n\n \n\ncol = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']\npreds = np.zeros((test_df.shape[0], len(col)))\n\nimport gc\n#for i, j in enumerate(col):\n# \n# print('fit '+j)\n# model = runXGB(comments_train, train_l[j], comments_valid,valid_l[j])\n# preds[:,i] = model.predict(xgb.DMatrix(comments_test))\n# gc.collect()\nnrow=comments_train.shape[0]\n\ncoly = [c for c in train.columns if c not in ['id','comment_text']]\ny = train[coly]\ntest_id = test['id'].values\nfrom sklearn.ensemble import ExtraTreesClassifier\nmodel = ensemble.ExtraTreesClassifier(n_jobs=-1, random_state=3)\n\n\n\nmodel = ensemble.ExtraTreesClassifier(n_jobs=-1, random_state=3)\nmodel.fit(comments_train,train_l)\npreds=model.predict(comments_test)\n\n\n\n\nsubm = pd.read_csv('sample_submission.csv') \nsubmid = pd.DataFrame({'id': subm[\"id\"]})\nsubmission = pd.concat([submid, pd.DataFrame(preds, columns = col)], axis=1)\nsubmission.to_csv('sub_kinetic_forest.csv', index=False)\n\n\n\n\n\n\n\n","sub_path":"toxic1.py","file_name":"toxic1.py","file_ext":"py","file_size_in_byte":8238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"402916510","text":"# -*- coding: utf-8 -*-\n\nfrom Embedded16_Server import app\n\nimport flask\nimport sqlite3\n\ndef get_db():\n db = getattr(flask.g, '_database', None)\n if not db:\n db = flask.g._database = sqlite3.connect(app.config['DATABASE'])\n return db\n\ndef init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql') as f:\n db.cursor().executescript(f.read().decode('utf-8'))\n db.commit()\n\ndef execute_db(query, args = ()):\n print('Execute DB: {} / args: {}'.format(query, repr(args)))\n get_db().execute(query, args)\n\ndef query_db(query, args = (), one = False):\n print('Query DB: {} / args: {}'.format(query, repr(args)))\n cur = get_db().execute(query, args)\n rv = cur.fetchall()\n print('Query Result: {}'.format(repr(rv)))\n cur.close()\n return (rv[0] if rv else None) if one else rv\n\n@app.teardown_appcontext\ndef close_connection(ex):\n db = getattr(flask.g, '_database', None)\n if db:\n db.close()\n","sub_path":"Embedded16_Server/dbutils.py","file_name":"dbutils.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"119105309","text":"# 🚨 Don't change the code below 👇\nage = input(\"What is your current age?\")\n# 🚨 Don't change the code above 👆\n\n#Write your code below this line 👇\nage_as_int = int(age)\nyears_remaining = 90 - age_as_int\ndays = 365\nweeks = 52\nmonths = 12\n\nd = days * years_remaining\nw = weeks * years_remaining\nm = months * years_remaining\n\nmessage = f\"You have {d} days, {w} weeks, and {m} months remaining\"\nprint(message)\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"197155691","text":"import re\r\n\r\nregex = r\"data-id=(\\\"\\d*\\\")\"\r\n\r\nfile_log = open(\"D:/test_py/data_base.txt\", \"r\")\r\ntest_str = str(file_log.read())\r\nmatches = re.finditer(regex, test_str, re.MULTILINE)\r\nresult = ''\r\nfor matchNum, match in 
enumerate(matches, start=1):\r\n #print (\"Match {matchNum} was found at {start}-{end}: {match}\".format(matchNum = matchNum, start = match.start(), end = match.end(), match = match.group()))\r\n for groupNum in range(0, len(match.groups())):\r\n result = result + str(match.groups())\r\n #groupNum = groupNum + 1\r\n #print (\"Group {groupNum} found at {start}-{end}: {group}\".format(groupNum = groupNum, start = match.start(groupNum), end = match.end(groupNum), group = match.group(groupNum)))\r\n\r\n# Note: for Python 2.7 compatibility, use ur\"\" to prefix the regex and u\"\" to prefix the test string and substitution.\r\ndef delete(string):\r\n a = string.replace('\\'','')\r\n b = a.replace('\\\"','')\r\n c = b.replace('(','')\r\n d = c.replace(')','')\r\n return(d[0:-1])\r\nresult = delete(result)\r\nresult = result.split(',')\r\nfor i in range (len(result)):\r\n #https://www.facebook.com/stories/1493136314137727\r\n if '1493136314137727' in result[i]:\r\n print('1493136314137727: Nguyen')\r\n print(result[i])","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"317698226","text":"#!/usr/bin/env python\n#\n# PyQt API:\n# http://www.riverbankcomputing.co.uk/static/Docs/PyQt4/html/classes.html\n# Tutorials:\n# http://zetcode.com/tutorials/pyqt4/\n\nimport os\nimport sys\nimport optparse\nimport warnings\n\nfrom PyQt4 import QtGui, QtCore\nfrom PyQt4.QtCore import QEvent, Qt\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg\nfrom matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg\n\nfrom obspy.signal import pazToFreqResp, cornFreq2Paz\n\nclass MyMainWindow(QtGui.QMainWindow):\n \"\"\"\n Main Window docstring...\n \"\"\"\n def __init__(self, options):\n # make all commandline options available for later use\n # e.g. 
in update() methods\n self.options = options\n # for convenience set some instance wide attributes\n self.file = options.file\n\n # read in the calibration data and set the nfft/max freq\n tmp = np.loadtxt(self.file).T\n self.freq = tmp[0]\n self.ampl = tmp[1]\n self.phase = tmp[2]\n self.nfft = (len(self.freq) - 1) *2\n self.spr = 1.0 / (2 * self.freq[-1])\n\n # setup initial poles/zeros\n self.paz = {}\n self.paz['poles'] = parse_paz_string(options.poles)\n self.paz['zeros'] = parse_paz_string(options.zeros)\n self.paz['gain'] = options.normalization_factor\n\n # check if corner frequencies should be used\n # override paz if corner frequencies used!\n self.corn_freqs = options.corner_frequencies\n if self.corn_freqs != 0:\n self.paz = {}\n self.paz['poles'] = [0j, 0j] * self.corn_freqs\n self.paz['zeros'] = [0j, 0j]\n self.paz['gain'] = 1.0\n \n # setup GUI\n QtGui.QMainWindow.__init__(self)\n self.__setup_GUI()\n self.__connect_signals()\n\n # make initial plot and show it\n if self.corn_freqs != 0:\n self.on_anyButton_editingFinished()\n self.update()\n self.canv.show()\n self.show()\n\n def __add_doublespinboxes(self, layout, complex, label, number):\n \"\"\"\n Add a new set of real/imag QDoubleSpinBox'es to given layout.\n Initial settings given by complex.\n Label should be a String for the label in front of the two boxes.\n \"\"\"\n box_real = QtGui.QDoubleSpinBox()\n box_real.setMaximum(1e3)\n box_real.setMinimum(-1e3)\n box_real.setSingleStep(self.options.step)\n box_real.setDecimals(6)\n box_real.setValue(complex.real)\n layout.addWidget(QtGui.QLabel(\"%s %i real\" % (label, number)))\n layout.addWidget(box_real)\n box_imag = QtGui.QDoubleSpinBox()\n box_imag.setMaximum(1e3)\n box_imag.setMinimum(-1e3)\n box_imag.setDecimals(6)\n box_imag.setSingleStep(self.options.step)\n box_imag.setValue(complex.imag)\n layout.addWidget(QtGui.QLabel(\"imag\"))\n layout.addWidget(box_imag)\n return box_real, box_imag\n\n def __add_doublespinboxes_cornfreq(self, layout, freq, damping):\n \"\"\"\n Add a new set of corner frequency / damping QDoubleSpinBox'es to given\n layout.\n \"\"\"\n box_cornfreq = QtGui.QDoubleSpinBox()\n box_cornfreq.setMaximum(1e3)\n box_cornfreq.setMinimum(0)\n box_cornfreq.setSingleStep(self.options.step)\n box_cornfreq.setDecimals(6)\n box_cornfreq.setValue(freq)\n layout.addWidget(QtGui.QLabel(\"corn. freq.\"))\n layout.addWidget(box_cornfreq)\n box_damping = QtGui.QDoubleSpinBox()\n box_damping.setMaximum(1e3)\n box_damping.setMinimum(-1e3)\n box_damping.setSingleStep(self.options.step)\n box_damping.setDecimals(6)\n box_damping.setValue(damping)\n layout.addWidget(QtGui.QLabel(\"damping\"))\n layout.addWidget(box_damping)\n return box_cornfreq, box_damping\n\n def __add_doublespinboxes_fit(self, layout, lfreq, hfreq):\n \"\"\"\n Add a new set of corner frequency / damping QDoubleSpinBox'es to given\n layout.\n \"\"\"\n box_lfreq = QtGui.QDoubleSpinBox()\n box_lfreq.setMaximum(1e3)\n box_lfreq.setMinimum(0)\n box_lfreq.setSingleStep(self.options.step)\n box_lfreq.setDecimals(6)\n box_lfreq.setValue(lfreq)\n layout.addWidget(QtGui.QLabel(\"lower. 
freq.\"))\n layout.addWidget(box_lfreq)\n box_hfreq = QtGui.QDoubleSpinBox()\n box_hfreq.setMaximum(1e3)\n box_hfreq.setMinimum(-1e3)\n box_hfreq.setSingleStep(self.options.step)\n box_hfreq.setDecimals(6)\n box_hfreq.setValue(hfreq)\n layout.addWidget(QtGui.QLabel(\"upper freq.\"))\n layout.addWidget(box_hfreq)\n return box_lfreq, box_hfreq\n\n def __setup_GUI(self):\n \"\"\"\n Add matplotlib canvas, some boxs and stuff...\n \"\"\"\n self.setWindowTitle(\"FitResp\")\n self.setGeometry(300, 300, 500, 500)\n main = QtGui.QWidget()\n self.setCentralWidget(main)\n # add matplotlib canvas and setup layouts to put boxes in\n vlayout = QtGui.QVBoxLayout()\n vlayout.addStretch(1)\n main.setLayout(vlayout)\n canv = QMplCanvas()\n vlayout.addWidget(canv)\n hlayout_poles = QtGui.QHBoxLayout()\n hlayout_poles.addStretch(1)\n vlayout.addLayout(hlayout_poles)\n hlayout_zeros = QtGui.QHBoxLayout()\n hlayout_zeros.addStretch(1)\n vlayout.addLayout(hlayout_zeros)\n hlayout_normfac = QtGui.QHBoxLayout()\n hlayout_normfac.addStretch(1)\n vlayout.addLayout(hlayout_normfac)\n\n # add boxes for corner frequencies\n if self.corn_freqs != 0:\n # add layout\n hlayout_cf = QtGui.QHBoxLayout()\n hlayout_cf.addStretch(1)\n vlayout.addLayout(hlayout_cf)\n # add boxes\n self.boxes_corn_freqs = []\n self.boxes_dampings = []\n for _i in xrange(self.corn_freqs):\n if _i == 0:\n freq, damping = 1.0, 0.707\n elif _i == 1:\n freq, damping = 10.0, 0.707\n box_cornfreq, box_damping = \\\n self.__add_doublespinboxes_cornfreq(hlayout_cf, freq,\n damping)\n self.boxes_corn_freqs.append(box_cornfreq)\n self.boxes_dampings.append(box_damping)\n\n # add some boxes\n self.boxes_poles_real = []\n self.boxes_poles_imag = []\n for i, pole in enumerate(self.paz['poles']):\n box_real, box_imag = self.__add_doublespinboxes(hlayout_poles,\n pole, \"Pole\", i+1)\n self.boxes_poles_real.append(box_real)\n self.boxes_poles_imag.append(box_imag)\n self.boxes_zeros_real = []\n self.boxes_zeros_imag = []\n for i, zero in enumerate(self.paz['zeros']):\n box_real, box_imag = self.__add_doublespinboxes(hlayout_zeros,\n zero, \"Zero\", i+1)\n self.boxes_zeros_real.append(box_real)\n self.boxes_zeros_imag.append(box_imag)\n # add box for normalization factor\n box_norm = QtGui.QDoubleSpinBox()\n box_norm.setMaximum(1e10)\n box_norm.setMinimum(-1e10)\n box_norm.setSingleStep(self.options.step)\n box_norm.setValue(self.paz['gain'])\n hlayout_normfac.addWidget(QtGui.QLabel(\"Norm.Fac.\"))\n hlayout_normfac.addWidget(box_norm)\n self.box_norm = box_norm\n\n lfreq = 0.1\n hfreq = 10.\n res = 0.0\n hlayout_fit = QtGui.QHBoxLayout()\n hlayout_fit.addStretch(1)\n vlayout.addLayout(hlayout_fit)\n\n box_lfreq, box_hfreq = self.__add_doublespinboxes_fit(hlayout_fit,lfreq,hfreq)\n self.box_lfreq = box_lfreq\n self.box_hfreq = box_hfreq\n\n\n qToolBar = QtGui.QToolBar()\n self.toolbar = NavigationToolbar2QTAgg(canv, qToolBar)\n qToolBar.addWidget(self.toolbar)\n qToolBar.setMovable(False)\n qToolBar.setFloatable(False)\n self.addToolBar(Qt.BottomToolBarArea, qToolBar)\n\n # make matplotlib stuff available\n self.canv = canv\n self.fig = canv.figure\n self.ax1 = self.fig.add_subplot(121)\n self.ax2 = self.fig.add_subplot(122)\n\n def __connect_signals(self):\n \"\"\"\n Connect box signals to methods...\n \"\"\"\n connect = QtCore.QObject.connect\n all_boxes = self.boxes_poles_real + self.boxes_poles_imag + \\\n self.boxes_zeros_real + self.boxes_zeros_imag + \\\n [self.box_lfreq] + [self.box_hfreq] + \\\n [self.box_norm]\n if self.corn_freqs != 0:\n all_boxes += 
self.boxes_corn_freqs + self.boxes_dampings\n for box in all_boxes:\n connect(box, QtCore.SIGNAL(\"editingFinished()\"),\n self.on_anyButton_editingFinished)\n\n def update(self):\n \"\"\"\n This method should do everything to update the plot.\n \"\"\"\n try:\n # clear axes before anything else\n ax1 = self.ax1\n ax1.clear()\n ax2 = self.ax2\n ax2.clear()\n\n # plot theoretical responses from paz here\n paz = self.paz\n h, f = pazToFreqResp(paz['poles'], paz['zeros'], paz['gain'],\n self.spr, self.nfft, freq=True)\n ampl = abs(h)\n #compute the residuum\n resid = 0\n \n ampl1 = self.ampl[(f>self.box_lfreq.value()) & (fself.box_lfreq.value()) & (f 0]\n \n d_moves = [Move(p.pos, _m.pos1, _m.code) for _m in moves_p]\n \n moves.extend(d_moves)\n \n return moves\n\ndef get_possible_check(pieces, board, player):\n \n moves = []\n for p in pieces:\n if p.white == player:\n \n b_check = p.get_available_moves(board\n ,move_type_flag = True\n ,check_flag = True)\n \n if b_check:\n return True\n \n return False #no checks found\n\n\ndef check_endgame(moves, pieces, board, player):\n \n check_code = 0\n outcome = None\n\n if len(moves) == 0:\n \n if board.b_in_check(player):\n outcome = (player, 'LOSS', 'CHECKMATE')\n check_code = -1\n else: \n outcome = (player, 'STALEMATE', 'NOMOVES')\n check_code = -2\n \n elif board.player_only_king_moves[1 - int(player)] == 50: #TODO - player_i(player)\n outcome = (player, 'WIN', '50MOVES')\n check_code = -3\n\n elif len(pieces) <= 3:\n \n if len(pieces) == 2:\n \n outcome = (player, 'STALEMATE', 'KINGVKING')\n check_code = -4\n\n else:\n \n white_pieces = [p for p in pieces if p.white]\n black_pieces = [p for p in pieces if not(p.white)]\n \n b_white_more = len(white_pieces) > len(black_pieces)\n\n more_pieces = white_pieces if b_white_more else black_pieces\n \n more_piece_names = [p.__class__.__name__ for p in more_pieces]\n\n alive_pieces = (\"Pawn\", \"Queen\", \"Rook\")\n \n alives_in_more = [ap in more_piece_names for ap in alive_pieces]\n\n if not(any(alives_in_more)):\n outcome = (player, 'STALEMATE', 'NOQUEENPAWNROOK')\n\n # king versus king\n # king and bishop versus king\n # king and knight versus king\n \n # TODO - # king and bishop versus king and bishop with the bishops on the same colour.\n \n \n return check_code, outcome\n \n\n\ndef apply_move(move, board, pieces, _player):\n \n '''This mutates board and pieces based on move. \n Mutator Class can handle half-turn ahead board/piece mutations on \n regular move codes, but this function can handle the exotic moves:\n enpassant, castling, promotion. 
And also sets/clears the permission\n properties to allow these moves.'''\n\n move_code = move.code\n move = (move.pos0, move.pos1)\n\n b_enpassant = (move_code == MOVE_CODE['en_passant'])\n b_castling = (move_code == MOVE_CODE['castling'])\n b_promotion = (move_code == MOVE_CODE['promotion'])\n \n pos0, pos1 = move[0], move[1]\n\n #TODO - add helper func: piece_from_pos( index_=True)\n #TODO - py3 : make this a list comprehension\n piece_i = list(filter(lambda _p: _p[1].pos == pos0, enumerate(pieces)))[0][0]\n #TODO - piece = pieces[piece_i]\n\n kill_flag = False # before the move, check if opp's piece is there\n if (board.get_data_pos(pos1) != 0 or b_enpassant) and not(b_castling):\n kill_flag = True\n\n #Turn-Reset: clear previous before the move is applied\n board.clear_enpassant_vulnerability(_player)\n\n if not(b_castling):\n \n board.old_player_pos(pos0)\n \n b_two_advances = board.two_advances(pos0,pos1) #bool: will it be enpassant_vuln?\n \n board.new_player_pos(_player, pos1, pieces[piece_i], b_two_advances)\n \n pieces[piece_i].pos = pos1\n \n else:\n\n # is it a left castle or a right castle, from POV of white\n castle_absolute_left = True if (KING_COL > pos1[1]) else False\n \n r_pos0, r_pos1 = board.get_rook_castle_move(_player \n ,left_side = castle_absolute_left)\n\n k_pos0, k_pos1 = board.get_king_castle_move(_player\n ,left_side = castle_absolute_left)\n \n #TODO - helper func\n rook_i = list(filter(lambda _p: _p[1].pos == r_pos0, enumerate(pieces)))[0][0]\n\n pieces[rook_i].pos = r_pos1 \n pieces[piece_i].pos = k_pos1 #piece_i already king\n \n board.new_player_pos(_player, k_pos1, pieces[piece_i])\n board.new_player_pos(_player, r_pos1, pieces[rook_i])\n\n board.old_player_pos(k_pos0)\n board.old_player_pos(r_pos0)\n\n\n #Fallout from Move\n pieces[piece_i].modify_castling_property()\n board.modify_castling_property( _player, pieces[piece_i], pos0)\n\n if kill_flag:\n \n kill_pos = pos1 if not(b_enpassant) else board.en_passant_pos(pos1, _player)\n \n #TODO - helper func\n killed_piece_i = [_p for _p in enumerate(pieces) if (_p[1].pos == kill_pos) and \n not(_p[1].white == _player)]\n killed_piece_i = killed_piece_i[0][0]\n\n #TODO - don't pop piece in hypothetical_pieces=True\n # pieces[killed_piece_i].alive = False\n killed_piece = pieces.pop(killed_piece_i)\n board.modify_castling_property( not(_player), killed_piece, killed_piece.pos)\n \n if (b_enpassant):\n board.old_player_pos(kill_pos) \n #otherwise, you already overwrote it's position on board\n \n #TODO - any promotions here \n if b_promotion:\n \n pro_piece_i = list(filter(lambda _p: _p[1].pos == pos1, enumerate(pieces)))[0][0]\n pro_piece = pieces.pop(pro_piece_i)\n\n #new piece\n new_queen = Queen(b_white = _player, pos = pos1)\n #if rook, need to turn off castling\n\n pieces.append(new_queen)\n\n\n # Unnec as it sets and acted upon at beginning of turn, \n # still nice to have it always being reset here.\n board.set_player_not_in_check(_player) #based on previous validations\n\n return board, pieces\n\n \nclass Mirror():\n\n '''This handles the data and calculation of check from \n superking.available_moves. It builds an increasingly large\n and informative tuples within a list entered. 
\n run_calc() answers the question is the piece at init_pos\n threatened by capture of any other piece '''\n\n def __init__(self):\n self.white = None\n self.init_pos = None\n self.moves = None\n self.pieces = None\n\n self.move_types = None\n self.piece_classes = None \n self.move_spaces = None\n self.class_move_types = None\n\n self.outcome = None\n\n def set_white(self, white):\n self.white = white #adding for pawn forward-diag\n \n def set_init_pos(self, init_pos):\n self.init_pos = init_pos\n\n def set_moves(self, moves):\n self.moves = moves\n # move_code enpassant and castling not applicable here, only regular captures\n\n def set_pieces(self, pieces):\n self.pieces = pieces\n # does this create a problem with byref for pieces list?\n\n @staticmethod\n def get_piece_class(pieces, pos):\n piece = list(filter(lambda piece: piece.pos == pos, pieces))[0]\n return piece.__class__.__name__\n\n def infer_move_type(self, move):\n # none of the three atomic move types overlap, thus deduce the\n # move-type from the (pos0, pos1).\n\n white = self.white\n\n pos0 = self.init_pos\n pos1 = move\n \n row0, row1 = pos0[0], pos1[0]\n col0, col1 = pos0[1], pos1[1]\n \n if (row0 == row1) or (col0 == col1):\n return MOVE_TYPE['upacross']\n \n elif abs(row0 - row1) == abs(col0 - col1):\n \n if not( white ^ ((row0 - row1) > 0) ):\n return MOVE_TYPE['forward-diagonal']\n else:\n return MOVE_TYPE['diagonal']\n \n else:\n return MOVE_TYPE['twobyone']\n\n @staticmethod\n def chess_squares(pos0, pos1):\n return max(abs(pos0[0] - pos1[0]), abs(pos0[1] - pos1[1]))\n # meaningless but relevant to knight downstream\n\n @staticmethod\n def class_movements(_class):\n #or make this reflective?\n #TODO - remove hard coded 8's\n if _class == \"Pawn\":\n return [(MOVE_TYPE['forward-diagonal'], 1)]\n if _class == \"King\":\n return [(MOVE_TYPE['diagonal'], 1), (MOVE_TYPE['upacross'], 1)]\n if _class == \"Queen\":\n return [(MOVE_TYPE['diagonal'], 8), (MOVE_TYPE['upacross'], 8)]\n if _class == \"Bishop\":\n return [(MOVE_TYPE['diagonal'], 8)]\n if _class == \"Rook\":\n return [(MOVE_TYPE['upacross'], 8)]\n if _class == \"Knight\":\n return [(MOVE_TYPE['twobyone'], 2)] #2 needed to satisfy max_spaces in match()\n\n def calc_move_type(self):\n self.move_types = [self.infer_move_type(x) for x in self.moves]\n\n def calc_classes(self):\n #using pos to find piece\n #right now it uses pieces, later it may have to use board\n self.piece_classes = [self.get_piece_class(self.pieces, x) for x in self.moves]\n\n def calc_move_spaces(self):\n self.move_spaces = [self.chess_squares(self.init_pos, x) for x in self.moves]\n\n def calc_class_move_types(self):\n self.class_move_types = [self.class_movements(x) for x in self.piece_classes]\n\n \n @staticmethod\n def match(class_move_type, move_type, move_space):\n \n temp_class_move_type = [x[0] for x in class_move_type]\n\n if move_type in temp_class_move_type:\n\n max_spaces_ind = temp_class_move_type.index(move_type)\n \n max_spaces = class_move_type[max_spaces_ind][1]\n \n if move_space <= max_spaces: \n return True\n\n # need this below if max_spaces for knight is not hard-coded to 2\n # if move_type == MOVE_TYPE['twobyone']:\n # return True\n \n if (move_type == MOVE_TYPE['forward-diagonal']) \\\n and (MOVE_TYPE['diagonal'] in temp_class_move_type):\n\n max_spaces_ind = temp_class_move_type.index(MOVE_TYPE['diagonal'])\n \n max_spaces = class_move_type[max_spaces_ind][1]\n \n if move_space <= max_spaces:\n return True\n\n\n\n return False\n \n\n def calc_match(self):\n self.outcome 
= [self.match(self.class_move_types[i]\n ,self.move_types[i]\n ,self.move_spaces[i]\n )\n for i in range(len(self.moves))\n ]\n\n def run_calc(self):\n\n self.calc_move_spaces()\n self.calc_classes()\n self.calc_class_move_types()\n self.calc_move_type() \n\n self.calc_match()\n return any(self.outcome)\n\n\n\ndef get_possible_check_optimal(pieces, board, move, player):\n \n ''' An optimized substitute for get_possible_check() (the naive func).\n It uses SuperKing.get_available_moves(mirror_flag = True) \n to see if any other piece could possibly move in a way to capture it.\n Then the Mirror class is used to see if any of those threats is capable.'''\n\n #Helper func: piece_by_class_player\n player_king = [p for p in pieces if p.white == player and p.__class__.__name__ == \"King\"]\n \n player_king_pos = player_king[0].pos \n\n if move is not None: #when calling at beginning of turn\n if player_king_pos == move.pos0:\n player_king_pos = move.pos1\n\n player_king_code = 3 if player else -3\n\n hypo_king = SuperKing(b_white = player,pos = player_king_pos )\n\n opp_kill_moves = hypo_king.get_available_moves(board\n ,move_type_flag=True\n ,check_flag=False\n ,mirror_flag=True\n )\n \n if len(opp_kill_moves) == 0:\n return False #optimization, bypass next section\n\n mirror = Mirror() \n \n mirror.set_white(player)\n mirror.set_init_pos(player_king_pos)\n mirror.set_moves(opp_kill_moves)\n mirror.set_pieces(pieces)\n \n b_check = mirror.run_calc()\n\n return b_check\n\n\n\nclass Mutator():\n \n '''Helper Class for preserving board state without deepcopying.'''\n\n # Do not use this for move_code = enpassant, castling.\n # move_code = promotion should be fine b/c your new piece class is irrelevant\n\n # mutate() consists of \n # (pos0, pos0-val) - always going to be 0 as new value\n # (pos1, pos1-val) - either 0 or the opponent's enum\n \n def __init__(self):\n self.old_mutation = None\n self.new_mutation = None\n self.mutation_king_piece = None\n\n def mutate_board(self, board, move):\n\n '''apply new board state and save the changes to class data'''\n\n #can use methods on .data_by_player ?\n #this might need adjusting under promotion\n #no need to account for enpassant\n \n r,c = move.pos0[0], move.pos0[1]\n old_piece_enum = board.data_by_player[r][c] \n \n #mutation\n self.old_mutation = ((r,c), old_piece_enum)\n \n #set new\n board.data_by_player[r][c] = 0 #always leaving\n\n new_val = old_piece_enum #save this before we overwrite it\n r,c = move.pos1[0], move.pos1[1]\n old_piece_enum = board.data_by_player[r][c]\n\n #mutation\n self.new_mutation = ((r,c), old_piece_enum)\n \n #set_new\n board.data_by_player[r][c] = new_val #use saved value from leaving square\n\n return board\n\n def demutate_board(self,board):\n \n '''pull changes from class data, and apply them to board state'''\n \n pos = self.old_mutation[0]\n r, c = pos[0], pos[1]\n val = self.old_mutation[1]\n board.data_by_player[r][c] = val\n\n pos = self.new_mutation[0]\n r, c = pos[0], pos[1]\n val = self.new_mutation[1]\n board.data_by_player[r][c] = val\n\n return board\n\n def mutate_pieces(self, pieces, player):\n \n ''' [possibly] apply spot changes to property of a piece[s] in pieces\n and save those changes to class data. 
'''\n \n #get_possible_check_optimal(): uses King's POS, otherwise does\n # not use pieces.\n #get_possible_check_naive(): uses opponent's pieces, \n # so you must eliminate captured piece.\n \n self.mutation_king_piece = None\n \n moving_piece_enum = self.old_mutation[1]\n if (moving_piece_enum) == 3:\n \n #TODO - py3\n player_king = list(filter(lambda p: p.white == player and p.__class__.__name__ == \"King\" , pieces))[0]\n old_pos = player_king.pos\n new_pos = self.new_mutation[0]\n\n self.mutation_king_piece = (old_pos, new_pos)\n \n player_king.pos = new_pos\n\n #TODO - have not set alive = False anywhere here\n\n return pieces\n\n def demutate_pieces(self, pieces, player):\n \n ''' [possibly] pull saved piece-change-data from class data and apply those\n to correct piece in pieces; resetting pieces to original state. '''\n \n if self.mutation_king_piece is None:\n return pieces\n else:\n #TODO - helper func\n player_king = list(filter(lambda p: p.white == player and p.__class__.__name__ == \"King\" , pieces))[0]\n old_pos = self.mutation_king_piece[0]\n player_king.pos = old_pos\n return pieces\n\n\ndef filter_check_naive(board, pieces, moves, player, b_bypass):\n\n ''' Naive method to remove any individual _move\n that would put the current player in check. Runs in\n N^2 time, where N = num_available_moves.\n b_bypass filters all non-regular move-coded moves.'''\n \n out = []\n \n for _move in moves:\n\n if b_bypass and (_move.code != MOVE_CODE['regular']):\n continue\n \n _board = copy.deepcopy(board)\n _pieces = copy.deepcopy(pieces)\n\n board2, pieces2 = apply_move(_move, _board, _pieces, player)\n\n player2 = not(player)\n\n b_check = get_possible_check(pieces2, board2, player2)\n \n if not(b_check):\n out.append(_move)\n\n return out\n\n\ndef filter_check_opt(board, pieces, moves, player, b_bypass):\n \n ''' Fully optimized filter_check(). Uses Mutator and \n get_possible_check_optimal to run in ~3.5N time instead of N^2,\n where N = num_available_moves and is typically between 18 and 30.\n b_bypass filters all non-regular move-coded moves.'''\n\n out = []\n\n mutator = Mutator()\n \n for _move in moves:\n\n b_regular = (_move.code == MOVE_CODE['regular'])\n\n if b_regular:\n _board = mutator.mutate_board(board, _move)\n _pieces = mutator.mutate_pieces(pieces, player)\n else:\n if b_bypass:\n continue\n\n #Non-Standard Board/Piece Mutation\n _board = copy.deepcopy(board)\n _pieces = copy.deepcopy(pieces)\n _board, _pieces = apply_move(_move, _board, _pieces, player)\n\n b_check = get_possible_check_optimal(_pieces, _board, _move, player)\n \n if not(b_check):\n out.append(_move)\n\n if b_regular:\n board = mutator.demutate_board(_board)\n pieces = mutator.demutate_pieces(_pieces, player)\n\n return out\n\n\ndef filter_check_test_copy(board, pieces, moves, player):\n \n ''' A perf-test function: to analyze the computational cost of \n deepcopying board, pieces. It does not call apply_move or\n get_possible_check. Use this to figure out how much of the whole\n function's cost comes from copying alone. '''\n\n out = []\n \n for _move in moves:\n\n _board = copy.deepcopy(board) # .copy?\n _pieces = copy.deepcopy(pieces)\n\n #board2, pieces2 = apply_move(_move, _board, _pieces, player) \n\n out.append(_move)\n\n return out\n\ndef filter_check_test_copy_apply(board, pieces, moves, player):\n\n ''' A perf-test function: to analyze the computational cost of \n deepcopying board, pieces and running apply_move(). 
The only thing\n it doesn't do is call get_possible_check allowing us to interpret \n the difference between this and filter_check_naive. '''\n \n out = []\n \n for _move in moves:\n\n _board = copy.deepcopy(board) # .copy?\n _pieces = copy.deepcopy(pieces)\n\n board2, pieces2 = apply_move(_move, _board, _pieces, player) \n\n out.append(_move)\n\n return out\n\n\ndef filter_check_test_copy_apply_2(board, pieces, moves, player):\n \n ''' A perf-test function: to analyze the computational cost of \n mutating board instead of deepcopying it. Does not call any\n get_possible_check function.'''\n \n #We'll need to set this as the default and run pytest to see if\n # it's working\n\n out = []\n\n mutator = Mutator()\n \n for _move in moves:\n\n b_regular = (_move.code == MOVE_CODE['regular'])\n\n if b_regular:\n _board = mutator.mutate_board(board, _move)\n else:\n #continue #dont process these for computational testing\n _board = copy.deepcopy(board)\n \n\n _pieces = copy.deepcopy(pieces)\n\n if not(b_regular):\n board2, pieces2 = apply_move(_move, _board, _pieces, player) \n\n #Call Here: get_possible_check_optimal()\n\n if b_regular:\n board = mutator.demutate_board(board)\n \n out.append(_move)\n\n return out\n\n\ndef filter_check_test_copy_apply_3(board, pieces, moves, player):\n \n ''' A perf-test function: to analyze the computational cost of \n mutating board and pieces instead of deepcopying them. Does not\n call any get_possible_check function.'''\n \n #We'll need to set this as the default and run pytest to see if\n # it's working\n\n out = []\n\n mutator = Mutator()\n \n for _move in moves:\n\n b_regular = (_move.code == MOVE_CODE['regular'])\n\n if b_regular:\n _board = mutator.mutate_board(board, _move)\n #TODO - in other routines, account for piece.alive\n # this only includes get_possible_check, \n # also get_possible_optimal need king_pos altered\n _pieces = mutator.mutate_pieces(pieces, player)\n else:\n # continue #dont process these for computational testing\n _board = copy.deepcopy(board)\n _pieces = copy.deepcopy(pieces)\n\n\n if not(b_regular):\n board2, pieces2 = apply_move(_move, _board, _pieces, player) \n\n #Call Here: get_possible_check_optimal()\n\n if b_regular:\n board = mutator.demutate_board(_board)\n pieces = mutator.demutate_pieces(_pieces, player)\n \n out.append(_move)\n\n return out\n\n\ndef filter_check_test_copy_opt(board, pieces, moves, player):\n \n ''' A perf-test function: uses deepcopy instead of mutator, but does\n use possible_check_optimal, so it's faster than naive.'''\n \n out = []\n \n for _move in moves:\n\n #cant these just move outside the loop?\n #The problem is apply_move mutates state piece, right?\n _board = copy.deepcopy(board) \n _pieces = copy.deepcopy(pieces)\n\n board2, pieces2 = apply_move(_move, _board, _pieces, player)\n\n b_check = get_possible_check_optimal(pieces2, board2, _move, player)\n \n if not(b_check):\n out.append(_move)\n\n return out\n\n\ndef is_king_in_check(board, pieces, player):\n\n '''return a boolean for if current player is in check'''\n\n # It's ultimately O(n+1) not O(n*2) , because it's not called\n # for each available_move once but for all of them, at start of the turn.\n\n #TODO - here construct cache_pos0_king_check_calc_needed list\n # for downstream consumption by filter_king_check()\n\n # 1st, if opp_kill_move == [], then only need to check king_moves\n # 2nd , if 1st doesnt apply then check cache to see if calc_needed\n\n return get_possible_check_optimal(pieces, board, None, player)\n\n \n 
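\n\n# Illustrative sketch: the perf-test functions above compare deepcopy-per-move\n# against Mutator's mutate-and-restore. A minimal standalone version of that\n# comparison, using an 8x8 integer grid as a stand-in for board.data_by_player\n# (an assumption, not the engine's real Board type), looks like this; the\n# timings it prints are machine-dependent.\nimport copy as _copy\nimport timeit as _timeit\n\ndef _filter_by_copy(demo_board, demo_moves):\n    #deepcopy the whole board for every candidate move (the naive pattern)\n    for pos0, pos1 in demo_moves:\n        b = _copy.deepcopy(demo_board)\n        b[pos1[0]][pos1[1]] = b[pos0[0]][pos0[1]]\n        b[pos0[0]][pos0[1]] = 0\n\ndef _filter_by_mutation(demo_board, demo_moves):\n    #apply each move in place, then undo it (the Mutator pattern)\n    for pos0, pos1 in demo_moves:\n        saved = ((pos0, demo_board[pos0[0]][pos0[1]]), (pos1, demo_board[pos1[0]][pos1[1]]))\n        demo_board[pos1[0]][pos1[1]] = demo_board[pos0[0]][pos0[1]]\n        demo_board[pos0[0]][pos0[1]] = 0\n        for (r, c), val in saved:\n            demo_board[r][c] = val\n\nif __name__ == '__main__':\n    _board = [[0] * 8 for _ in range(8)]\n    _moves = [((1, c), (2, c)) for c in range(8)]\n    for _fn in (_filter_by_copy, _filter_by_mutation):\n        print(_fn.__name__, _timeit.timeit(lambda _fn=_fn: _fn(_board, _moves), number=2000))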
\n","sub_path":"basic_engine/src/TurnStage.py","file_name":"TurnStage.py","file_ext":"py","file_size_in_byte":23032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"153175348","text":"#!/usr/bin/env python\n\nimport os\nimport sys\n_i = os.path.dirname(os.path.abspath(__file__))\nsys.path.extend([os.path.abspath(os.path.join(_i, os.pardir))])\n\nprint('Python %s on %s' % (sys.version, sys.platform))\nprint('Argument List:', str(sys.argv))\n\nimport websocket\n\ntry:\n import thread\nexcept ImportError:\n import _thread as thread\n\nfrom onyx.OnyxGame import OnyxGame\nfrom onyx.pytorch.NNet import NNetWrapper as NNet\nfrom MCTS import MCTS\n\nimport numpy as np\nfrom utils import *\nimport time\n\nwebsocket.enableTrace(False)\n\n\nclass GameClient:\n def __init__(self, url):\n self.ws = websocket.WebSocketApp(url,\n on_message=self.on_message,\n on_error=self.on_error,\n on_close=self.on_close)\n self.ws.on_open = self.on_open\n self.turn = False\n self.started = False\n\n self.game = OnyxGame()\n self.color = 1\n self.board = self.game.getInitBoard()\n\n n = NNet(self.game)\n n.load_checkpoint('./pretrained_models/onyx/pytorch/', '6x6_120.pth.tar')\n args = dotdict({'numMCTSSims': 50, 'cpuct': 1.0})\n mcts = MCTS(self.game, n, args)\n self.ai_player = lambda x: np.argmax(mcts.getActionProb(x, temp=0))\n\n def on_message(self, message):\n # COMMAND\n if message[0] == '$':\n if message[1:] == \"AWAITING\":\n self.turn = False\n print(\"En attente de l'autre joueur\")\n if message[1:] == \"START\":\n self.started = True\n if message[1:] == \"READY\":\n self.turn = True\n if message[1:] == \"WIN\":\n print(\"Vous avez gagné !\")\n if message[1:] == \"LOOSE\":\n print(\"Vous avez perdu !\")\n if message[1:] == \"DRAW\":\n print(\"Match nul !\")\n self.ws.close()\n if message[1:] == \"END\":\n print(\"Partie terminée.\")\n self.ws.close()\n # ERROR\n if message[0] == '!':\n print(\"Erreur : \", message[1:])\n\n # INFO\n if message[0] == '#':\n if \"OPPONENT\" in message[1:]:\n print(\"L'aversaire à joué : \", message.split(\" \")[1])\n action = self.game.convert_action_to_int(self.board, message.split(\" \")[1])\n x, y = self.game.convert_action_to_coord(self.board, action)\n self.board[y, x] = self.color * -1\n elif message[1:] == \"Room created\":\n print(\"Partie créée\")\n self.color = -1\n elif message[1:] == \"Room joined\":\n print(\"Partie rejointe\")\n self.color = 1\n else:\n print(\"Information : \", message[1:])\n\n # RESULT\n if message[0] == '=':\n self.turn = False\n print(\"Capturé(s) : \", message[1:].split(\" \"))\n for captured in message[1:].split(\" \"):\n action = self.game.convert_action_to_int(self.board, captured)\n x, y = self.game.convert_action_to_coord(self.board, action)\n self.board[y, x] = 0\n\n if self.started and self.turn:\n self.play()\n\n def on_error(self, error):\n print(error)\n\n def on_close(self):\n print(\"### closed ###\")\n\n def on_open(self):\n def run(*args):\n print(\"Connecté.\")\n\n thread.start_new_thread(run, ())\n\n def run(self):\n self.ws.run_forever()\n\n def play(self):\n time.sleep(0.2)\n action = self.ai_player(self.game.getCanonicalForm(self.board, self.color))\n coord = self.game.convert_action_to_str(self.board, action)\n self.board, next_player = self.game.getNextState(self.board, self.color, action)\n print(\"put \" + str(action) + \" for player \" + str(self.color) + \"\\nboard : \" + str(\n self.game.base_board.with_np_pieces(self.board).string_test_js()) + \"\\n\")\n 
self.ws.send(coord)\n\n\nclient = GameClient(\"ws://localhost:8889/room/\" + str(sys.argv[1]))\nclient.run()\n","sub_path":"onyx/ai_player.py","file_name":"ai_player.py","file_ext":"py","file_size_in_byte":4133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"649783414","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 26 13:26:41 2020\n\n@author: Oliver\n\"\"\"\nfrom dicompylercore import dicomparser\nfrom PIL import Image\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pydicom\nimport scipy.ndimage\nimport collections\nimport random\nimport decimal\nimport math\n\nfrom dicompylercore import util\nfrom numbers import Number\n\nimport Data_Dictionary\nimport Transformation\nfrom time import gmtime\nimport time\n\n\n########################## OUTPUT FILE PATH OF INDIVIDUAL PATIENT##############################\ndef getOutputPath(structureFile):\n slashcount=0\n output_path =\"\"\n for i, c in enumerate(structureFile):\n if(c =='\\\\'or c =='/'):\n slashcount+=1\n \n if(slashcount== 2):\n output_path =structureFile[:i+2]\n return output_path\n\ndef getImagePath(structureFile):\n slashcount=0\n output_path =\"\"\n for i, c in enumerate(structureFile):\n if(c =='\\\\'or c =='/'):\n slashcount+=1\n \n if(slashcount== 3):\n output_path =structureFile[:i+2]\n return output_path\n########################## LOAD FILES FOR PREPROCESSING ##############################\ndef load_Saved_Data(pathIndex,Organ,structureFiles,PrintInfo=False):\n ##Load\n output_path = getOutputPath(structureFiles[pathIndex]) \n imagesFolders = np.load('RT Simulation CT Image Folder Paths.npy')\n \n #Added this line so that the correct contour is loaded\n k = pathIndex\n print(getImagePath(structureFiles[pathIndex]))\n while(getImagePath(structureFiles[pathIndex]) != getImagePath(imagesFolders[k])):\n k +=1 #match the right contour with the right image\n print(getImagePath(imagesFolders[k]))\n ##CT Slices\n DicomImageSet = os.listdir(imagesFolders[k])\n \n #Output Checks\n if(PrintInfo): print(\"\\n\"+output_path)\n #print(structureFiles[pathIndex])\n #print(imagesFolders[k])\n Organ_Data = []\n ##Load Parotids Contours\n if (Organ.find(\"Right_Parotid\")!=-1):\n Organ_Data = np.load(output_path+'Right_Contour_Parotids.npy', allow_pickle=True)\n elif (Organ.find(\"Left_Parotid\")!=-1):\n Organ_Data = np.load(output_path+'Left_Contour_Parotids.npy', allow_pickle=True)\n elif (Organ.find(\"Brainstem\")!=-1):\n Organ_Data = np.load(output_path+'Brainstem_Contour.npy', allow_pickle=True)\n elif (Organ.find(\"Right_Cochlea\")!=-1):\n Organ_Data = np.load(output_path+'Right_Contour_Cochleas.npy', allow_pickle=True)\n elif (Organ.find(\"Left_Cochlea\")!=-1):\n Organ_Data = np.load(output_path+'Left_Contour_Cochleas.npy', allow_pickle=True)\n return imagesFolders,k,DicomImageSet,Organ_Data\n\ndef get_First_Slice_Height(pathIndex,structureFiles):\n imagesFolders_Brainstem,imageFolderIndex_Brainstem,DicomImageSet_Brainstem,Brainstem_Data = load_Saved_Data(pathIndex,\"Brainstem\",structureFiles)\n zValues = []\n maxValue = -100000\n for z in range(0,len(DicomImageSet_Brainstem)-1): \n ds_Brainstem = pydicom.read_file(imagesFolders_Brainstem[imageFolderIndex_Brainstem]+DicomImageSet_Brainstem[z])\n for i in range(0,len(Brainstem_Data)-1): \n for j in range(0,len(Brainstem_Data[i])-1):\n num = str(ds_Brainstem.SliceLocation)\n num = decimal.Decimal(num)\n num = abs(num.as_tuple().exponent)\n loc = round(Brainstem_Data[i][j][2],num) \n \n if (loc == 
ds_Brainstem.SliceLocation or Brainstem_Data[i][j][2] == ds_Brainstem.ImagePositionPatient[2]):\n zValues.append(ds_Brainstem.SliceLocation)\n if (Brainstem_Data[i][j][2]>maxValue):\n maxValue =Brainstem_Data[i][j][2]\n return maxValue,zValues\n########################## MANUAL EXTRACTION ##############################\ndef GetDefaultImageWindowLevel(dcm,rescaled_image,intercept,slope, window=0,level=0): \n \n \n if ('WindowWidth' in dcm.ds) and ('WindowCenter' in dcm.ds):\n if isinstance(dcm.ds.WindowWidth, float):\n window = dcm.ds.WindowWidth\n elif isinstance(dcm.ds.WindowWidth, str):\n try:\n window = dcm.ds.WindowWidth\n except:\n print(\"image conversion error\")\n elif isinstance(dcm.ds.WindowWidth, list):\n if (len(dcm.ds.WindowWidth) > 1):\n window = dcm.ds.WindowWidth[1]\n \n if isinstance(dcm.ds.WindowCenter, float):\n level = dcm.ds.WindowCenter\n elif isinstance(dcm.ds.WindowCenter, str):\n try:\n level = dcm.ds.WindowCenter\n except:\n print(\"image conversion error\")\n elif isinstance(dcm.ds.WindowCenter, list):\n if (len(dcm.ds.WindowCenter) > 1):\n level = dcm.ds.WindowCenter[1] \n \n if ((window, level) == (0, 0)):\n wmax = 0\n wmin = 0\n if (rescaled_image.max() > wmax):\n wmax = rescaled_image.max()\n if (rescaled_image.min() < wmin):\n wmin = rescaled_image.min()\n # Default window is the range of the data array\n window = int(wmax - wmin)\n # Default level is the range midpoint minus the window minimum\n level = int(window / 2 - abs(wmin))\n return window, level\n\ndef GetLUTValue(data, window, level):\n \"\"\"Apply the RGB Look-Up Table for the data and window/level value.\"\"\"\n\n lutvalue = util.piecewise(data,\n [data <= (level - 0.5 - (window - 1) / 2),\n data > (level - 0.5 + (window - 1) / 2)],\n [0, 255, lambda data:\n ((data - (level - 0.5)) / (window-1) + 0.5) *\n (255 - 0)])\n # Convert the resultant array to an unsigned 8-bit array to create\n # an 8-bit grayscale LUT since the range is only from 0 to 255\n return np.array(lutvalue, dtype=np.uint8)\n\ndef manualextration(ds,dcm,pixel_array,window,level):\n #rescale\n intercept, slope = 0, 1\n if ('RescaleIntercept' in ds and 'RescaleSlope' in ds):\n intercept = ds.RescaleIntercept if \\\n isinstance(ds.RescaleIntercept, Number) else 0\n slope = ds.RescaleSlope if \\\n isinstance(ds.RescaleSlope, Number) else 1 \n rescaled_image = pixel_array * slope + intercept\n \n #finish\n if(window == 0 and level == 0 ):\n window, level = GetDefaultImageWindowLevel(dcm, rescaled_image,intercept,slope)\n image = GetLUTValue(rescaled_image, window, level) ##converted to uint8\n #im = Image.fromarray(image).convert('L')\n \n return image\n\n\n########################## OBTAIN CONTOURED CT IMAGES ##############################\ndef get_contoured_organ(pathIndex,Organ,key_Dict,no_Classes,structureFiles,flip = 0):\n ##Load Data\n imagesFolders,imageFolderIndex,DicomImageSet,Organ_Data = load_Saved_Data(pathIndex,Organ,structureFiles)\n TotalImageDictionary = {}\n maxValue, zValues = get_First_Slice_Height(pathIndex,structureFiles)\n DicomPatient,thickness = get_Patient(pathIndex,Organ,structureFiles)\n\n #Dimensions of Image\n imageDimensions = np.load(\"D:\\HNSCC/ImageDimensions.npy\", allow_pickle=True)\n #set orgin\n ds = pydicom.read_file(imagesFolders[imageFolderIndex]+DicomImageSet[0])\n\n #Cropping Image\n if(key_Dict.find(\"Uncropped\")!=-1):\n height = 512 \n width = 512 \n else:\n padding =0\n height = imageDimensions[3]- imageDimensions[2]\n width = imageDimensions[1]- imageDimensions[0]\n \n 
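    # Crop-box note: save_Final_Image_Dimensions() persists the body contour's\n    # global bounding box as [min1, max1, min2, max2] in pixel indices, so every\n    # cropped patient array shares one rectangle instead of keeping the full\n    # 512x512 slice used by the "Uncropped" variant.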
if(Organ.find(\"Shift\")!=-1): \n x_Translation= random.randint(5,20)\n x_Translation= x_Translation*(-1)**random.randint(1,2)\n \n y_Translation= random.randint(5,20)\n y_Translation= y_Translation*(-1)**random.randint(1,2)\n \n zlowlimit = int(10/thickness)\n zhighlimit = int(30/thickness)\n zshift =random.randint(zlowlimit,zhighlimit)\n zshift= zshift*(-1)**random.randint(1,2)\n zshift *= thickness\n else:\n zshift = 0\n if(Organ.find(\"Bright\")!=-1):\n xbright =random.randint(0,width)\n ybright =random.randint(0,height)\n isOrgan = False \n #Image manipulation \n for z in range(0,len(DicomImageSet)-1): \n #Get CT Image and info\n ds = pydicom.read_file(imagesFolders[imageFolderIndex]+DicomImageSet[z])\n dcm = dicomparser.DicomParser(imagesFolders[imageFolderIndex]+DicomImageSet[z])\n #Only uses CT Images below the max height of the brainstem\n if(ds.ImagePositionPatient[2]requiredNoHigh):\n OrderedImagesArray.pop()\n while(len(OrderedImagesArray) > requiredNo and len(OrderedImagesArray) !=requiredNoHigh):\n OrderedImagesArray.pop()\n \n print(\"Required:%2i\"%requiredNo)\n else:\n if(len(keys)!=0):\n top = keys[-1]\n #Extract only the relevant slices \n for key, value in OrderedImagesDictionary.items():\n if (value[1] == 1):\n ImageArray =value[0] \n OrderedImagesArray.append(ImageArray) \n check=True\n \n #Get the middle of the organ\n numberOfSlices = len(OrderedImagesArray)\n middleOrganIndex = round(numberOfSlices/2)\n \n #Makes sure outputs are homogenous in dimensions\n if (key_Dict.find(\"11\")!=-1):\n top=middleOrganIndex+6\n bottom =middleOrganIndex-5\n else:\n top=middleOrganIndex+2\n bottom =middleOrganIndex-1\n requiredNo = top-bottom\n OrderedImagesArray = OrderedImagesArray[bottom:top]\n\n #Automate Labelling process\n if(key_Dict.find(\"Aug\")!=-1):\n Organs = [\"Right_Parotid\",\"Right_Parotid_Shift\",\"Right_Parotid_Aug\",\"Right_Parotid_Translate\"]\n elif(key_Dict.find(\"Bright\")!=-1):\n Organs = [\"Right_Parotid_Bright\",\"Left_Parotid_Bright\",\"Brainstem_Bright\"]\n else:\n Organs = [\"Right_Parotid\",\"Left_Parotid\",\"Brainstem\",\"Right_Cochlea\",\"Left_Cochlea\"]\n label = []\n \n for i in range(no_Classes):\n label.append(0)\n if (Organ==Organs[i]):\n label[i] =1\n \n label = np.array(label)\n \n numberOfSlices = len(OrderedImagesArray) \n if(numberOfSlices slices[0].SliceThickness and thickness-0.250):\n temp-=0.25\n count+=1\n check = count*0.25\n\n if(thickness-check>0.125):\n thickness =check+0.25\n elif(thickness-check<-0.125):\n thickness =check-0.25\n else:\n thickness =check\n \n if(thickness%0.25):\n thickness = math.trunc(thickness)\n \n if(PrintInfo): print(\"Thickness: %3.3f\"%thickness)\n return DicomPatient,thickness\ndef interpolateArray(pathIndex,Organ,key_Dict,no_Classes,structureFiles):\n \n try:\n DicomPatient,thickness =get_Patient(pathIndex,Organ,structureFiles)\n except:\n print(\"Error when obtaining patient\")\n tempArray, tempLabel =get_contoured_organ(pathIndex,Organ,key_Dict,no_Classes,structureFiles)\n if (tempLabel!=\"False\"):\n tempArray = np.array(tempArray, \"uint8\")\n \n \"\"\"\n # Determine current pixel spacing\n new_spacing=[1,1,1]\n spacing_list = [thickness]\n spacing_list.extend(DicomPatient.PixelSpacing)\n spacing = np.array(spacing_list, dtype=np.float32)\n \n #Calculate resize factor for interpolation\n resize_factor = spacing / new_spacing\n new_real_shape = tempArray[...,0].shape * resize_factor\n new_shape = np.round(new_real_shape)\n real_resize_factor = new_shape / tempArray[...,0].shape\n new_spacing = 
spacing / real_resize_factor\n \"\"\"\n if(key_Dict.find(\"Mask\")!=-1):\n outputArray= scipy.ndimage.interpolation.zoom(tempArray,(thickness/3,1,1) , order=0, mode='nearest')\n else:\n #Interpolation of the medical image real_resize_factor\n newArray = []\n newArray = scipy.ndimage.interpolation.zoom(tempArray[...,0],(thickness/3,1,1) , order=0, mode='nearest')\n shape = [s for s in np.array(newArray).shape]\n shape.append(2)\n \n outputArray = np.zeros(shape, \"uint8\")\n outputArray[...,0] = newArray\n \n newArray = scipy.ndimage.interpolation.zoom(tempArray[...,1], (thickness/3,1,1), order=0, mode='nearest')\n outputArray[...,1] = newArray\n \n return np.array(outputArray, \"uint8\"),tempLabel\n else:\n return tempArray, tempLabel\n########################## IMAGE PREPROCESSING ##########################\ndef image_preprocessing_2d(start, end,key_Dict,no_Classes,structureFiles):\n #Initialise arrays\n Organs = [\"Right_Parotid\",\"Left_Parotid\",\"Brainstem\",\"Right_Cochlea\",\"Left_Cochlea\"] \n neuralNetArray = []\n countDict = {}\n for organ in Organs:\n countDict[organ] = 0\n \n #Call Preprocessing Functions\n for pathIndex in range(start,end):\n for j in range(no_Classes):\n tempArray, tempLabel =get_contoured_organ(pathIndex,Organs[j],key_Dict,no_Classes,structureFiles)\n if(tempLabel!=\"False\"):\n for i in range(0,len(tempArray)):\n neuralNetArray.append([tempArray[i],tempLabel])\n countDict[Organs[j]] +=1\n print(np.array(tempArray).shape) \n \n print(\"Pre Shuffle\") \n random.shuffle (neuralNetArray)\n print(\"Processing Complete\")\n \n organ_count =\"\"\n for organ in Organs:\n organ_count += organ+\": %2i, \"%countDict[organ] \n print(organ_count)\n \n return neuralNetArray,organ_count\ndef image_preprocessing_3d(filename, start, end,key_Dict,no_Classes,structureFiles): \n #Initialise arrays\n if(key_Dict.find(\"Aug\")!=-1):\n Organs = [\"Right_Parotid\",\"Right_Parotid_Shift\",\"Right_Parotid_Aug\",\"Right_Parotid_Translate\"]\n elif(key_Dict.find(\"Bright\")!=-1):\n Organs = [\"Right_Parotid_Bright\",\"Left_Parotid_Bright\",\"Brainstem_Bright\"]\n else:\n Organs = [\"Right_Parotid\",\"Left_Parotid\",\"Brainstem\",\"Right_Cochlea\",\"Left_Cochlea\"]\n \n countDict = {}\n directories =[]\n for organ in Organs:\n countDict[organ] = 0\n if(key_Dict.find(\"Uncropped\")!=-1):\n requiredNo = 58\n else:\n requiredNo = 45\n #Call Preprocessing Functions\n for pathIndex in range(start,end):\n for j in range(no_Classes):\n DicomPatient,thickness = get_Patient(pathIndex,Organs[j],structureFiles,True)\n print(thickness)\n if(thickness== 3):\n tempArray, tempLabel =get_contoured_organ(pathIndex,Organs[j],key_Dict,no_Classes,structureFiles)\n else:\n tempArray, tempLabel =interpolateArray(pathIndex,Organs[j],key_Dict,no_Classes,structureFiles)\n \n if(tempLabel!=\"False\"and len(tempArray) ==requiredNo):\n patient_name = structureFiles[pathIndex][9:22]\n directory_Features,directory_Labels = saveArray_3d( filename,tempArray,tempLabel,Organs[j],key_Dict,patient_name)\n directories.append([directory_Features,directory_Labels])\n countDict[Organs[j]] +=1\n else:\n print(\"Skipped: Only has %2i slices\"%len(tempArray))\n \n print(\"Pre Shuffle\") \n random.shuffle (directories)\n print(\"Processing Complete\")\n \n organ_count =\"\"\n for organ in Organs:\n organ_count += organ+\": %2i, \"%countDict[organ] \n print(organ_count)\n \n \n TrainingFeaturesDict,TrainingLabelsDict = Data_Dictionary.get_Training_Dictionary()\n TestingFeaturesDict,TestingLabelsDict = 
Data_Dictionary.get_Testing_Dictionary()\n \n featuresDirect = []\n labelsDirect = []\n \n for feature, label in directories:\n featuresDirect.append(feature)\n labelsDirect.append(label)\n \n current_Time = time.strftime(\"%a, %d %b %Y %I:%M:%S %p %Z\",time.gmtime())\n if(filename==\"Training\"):\n stringTrainingDirectory = \"D:/Image Preprocessing/Training\"\n f = open(stringTrainingDirectory+\"/TrainingCount.txt\", \"a\")\n \n np.save(TrainingFeaturesDict[key_Dict]+\"Patient Directories\",featuresDirect)\n np.save(TrainingLabelsDict[key_Dict]+\"Patient Directories\",labelsDirect)\n elif(filename==\"Testing\"):\n stringTestingDirectory = \"D:/Image Preprocessing/Testing\"\n f = open(stringTestingDirectory+\"/TestingCount.txt\", \"a\")\n \n np.save(TestingFeaturesDict[key_Dict]+\"Patient Directories\",featuresDirect)\n np.save(TestingLabelsDict[key_Dict]+\"Patient Directories\",labelsDirect)\n f.write(current_Time+\", \"+key_Dict+\", \"+organ_count+\"\\n\")\n f.close()\n\n print(\"SUCCESSS!\")\n \n########################## OBTAIN PAROTID CT IMAGES #############################\n\ndef isolate_Brainstem_Images(structureFiles,pathIndex,k):\n ds = pydicom.dcmread(structureFiles[pathIndex])\n imagesFolder = np.load('RT Simulation CT Image Folder Paths.npy')\n arrayFiles = os.listdir(imagesFolder[k]) \n\n #Finds correct contour id\n referenceNumbersFound =[]\n\n for i in range(0,len(ds.StructureSetROISequence)):\n #Gets name of Structure\n contourName = ds.StructureSetROISequence[i].ROIName\n contourName = contourName.lower()\n\n if((contourName.find(\"brainstem\")!=-1 or contourName.find(\"brain stem\")!=-1 )and contourName.find(\"ex\")==-1 \n and contourName.find(\"2\")==-1 and contourName.find(\"cm\")==-1 and contourName.find(\"mm\")==-1 and contourName.find(\"pv\")==-1):\n #parotids references structure\n referenceNumbersFound.append(ds.StructureSetROISequence[i].ROINumber) \n\n\n #Extracts Parotid CT scans from data set according to found contour ID\n ParotidCTImageFiles = []\n \n for i in range(0,len(ds.ROIContourSequence)):\n for referenceNumber in referenceNumbersFound:\n\n if (ds.ROIContourSequence[i].ReferencedROINumber == referenceNumber):\n contourSequences = ds.ROIContourSequence[i].ContourSequence\n\n for contourSequence in contourSequences:\n contourslice = contourSequence.ContourImageSequence[0].ReferencedSOPInstanceUID\n\n #Search through image files \n for z in range(0,len(arrayFiles)):\n sliceDataset = pydicom.dcmread(imagesFolder[k]+arrayFiles[z])\n\n if (sliceDataset.SOPInstanceUID ==contourslice):\n \n ParotidCTImageFiles.append(arrayFiles[z])\n \n return list(ParotidCTImageFiles)\n########################## DETECT IMAGE SIZE FOR PREPROCESSING ##############################\ndef save_Image_Dimensions(structureFiles,Organs =[\"Brainstem\"]):\n #Load files\n for Organ in Organs:\n print(Organ)\n for pathIndex in range(0,len(structureFiles)): \n #Set maximal/minimal values\n minimumExternalIndex1 = 10000000\n maximumExternalIndex1 = -10000000\n minimumExternalIndex2 = 10000000\n maximumExternalIndex2 = -10000000\n \n #Load in Files:\n output_path = getOutputPath(structureFiles[pathIndex])\n imagesFolders,imageFolderIndex,DicomImageSet,Organ_Data = load_Saved_Data(pathIndex,Organ,structureFiles) \n ds = pydicom.read_file(imagesFolders[imageFolderIndex]+DicomImageSet[0])\n \n Organ_Data = np.load(output_path+\"External_Boundary_Contour.npy\", allow_pickle=True)\n if(len(Organ_Data) == 0):\n print(\"Loading Ring Contour\")\n Organ_Data = 
np.load(output_path+\"Extended_Ring_Boundary_Contour.npy\", allow_pickle=True)\n\n #Center_Data = np.load(output_path+\"Isocenter_Contour.npy\", allow_pickle=True)\n try:\n DicomImageSet = isolate_Brainstem_Images(structureFiles,pathIndex,imageFolderIndex)\n \n #Search through found datasets\n for z in range(0,len(DicomImageSet)-1):\n ds = pydicom.read_file(imagesFolders[imageFolderIndex]+DicomImageSet[z])\n dcm = dicomparser.DicomParser(imagesFolders[imageFolderIndex]+DicomImageSet[z])\n \n image = dcm.GetImage(300,40)\n image = image.convert(mode ='RGB')\n ImageArray = np.asarray(image)\n ImageArray=ImageArray.copy()\n ImageArray.flags.writeable = 1 \n for i in range(0,len(Organ_Data)-1): \n for j in range(0,len(Organ_Data[i])-1):\n \n if (Organ_Data[i][j][2] == ds.ImagePositionPatient[2]):\n #Get Relevant Coordinates\n ExternalIndex2 = int((Organ_Data[i][j][0]-ds.ImagePositionPatient[0])/ds.PixelSpacing[0])\n ExternalIndex1 = int((Organ_Data[i][j][1]-ds.ImagePositionPatient[1])/ds.PixelSpacing[1])\n \n #Update maximal/minimal values\n if(ExternalIndex1maximumExternalIndex1):\n maximumExternalIndex1 = ExternalIndex1\n \n if(ExternalIndex2maximumExternalIndex2):\n maximumExternalIndex2 = ExternalIndex2\n \n except:\n print(\"Error at index %2i\"%pathIndex) \n \n #Save Values\n dimensions = [minimumExternalIndex1,maximumExternalIndex1,minimumExternalIndex2,maximumExternalIndex2]\n print(\"Centre Orgin: Left: %2.3f, Right: %2.3f, Bottom: %2.3f, Top: %2.3f\"%(dimensions[0],dimensions[1],dimensions[2],dimensions[3]))\n np.save(output_path+\"ImageDimensions_\"+Organ, dimensions) \n \n save_Final_Image_Dimensions(structureFiles,Organs)\ndef save_Final_Image_Dimensions(structureFiles,Organs):\n #Load files\n minimumExternalIndex1 = 10000000\n maximumExternalIndex1 = -10000000\n minimumExternalIndex2 = 10000000\n maximumExternalIndex2 = -10000000\n for Organ in Organs:\n for pathIndex in range(0,len(structureFiles)): \n #Dimensions of Image\n output_path = getOutputPath(structureFiles[pathIndex])\n print(output_path)\n imageDimensions = np.load(output_path+\"ImageDimensions_\"+Organ+\".npy\", allow_pickle=True)\n \n if(imageDimensions[0]maximumExternalIndex1):\n maximumExternalIndex1 = imageDimensions[1]\n if(imageDimensions[2]maximumExternalIndex2):\n maximumExternalIndex2 = imageDimensions[3]\n \n print(imageDimensions)\n \n dimensions = [minimumExternalIndex1,maximumExternalIndex1,minimumExternalIndex2,maximumExternalIndex2] \n print(\"Left: %2.3f, Right: %2.3f, Bottom: %2.3f, Top: %2.3f\"%(dimensions[0],dimensions[1],dimensions[2],dimensions[3]))\n np.save(\"D:\\HNSCC/ImageDimensions\", dimensions)","sub_path":"Image_Preprocessing.py","file_name":"Image_Preprocessing.py","file_ext":"py","file_size_in_byte":36167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"340063596","text":"#! 
/usr/bin/python3\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom lxml import html\r\nimport requests\r\n\r\nAPIKEY = 'VLJn8VU0yBV4yf18zXbs' # 이 곳에 네이버 API 키를 입력 (http://developer.naver.com/wiki/pages/OpenAPI)\r\nMAPAPI = 'http://openapi.map.naver.com/api/geocode.php?key=%s&encoding=utf-8&coord=LatLng&query=%s'\r\n\r\n\r\ndef get_latlon(query):\r\n root = html.parse(MAPAPI % (APIKEY, query))\r\n lon, lat = root.xpath('//point/x/text()')[0], root.xpath('//point/y/text()')[0]\r\n return (lat, lon)\r\n\r\n\r\ndef prep(item):\r\n n, name = item[0].split(' ', 1)\r\n lat, lon = get_latlon(item[3])\r\n return {\r\n 'num': n, 'name': name,\r\n 'lat': lat, 'lon': lon,\r\n 'description': item[1],\r\n 'phone': item[2],\r\n 'addr': item[3]\r\n }\r\n\r\n\r\n# get data from article\r\nr = requests.get('http://m.wikitree.co.kr/main/news_view.php?id=217101')\r\nroot = html.document_fromstring(r.text)\r\nstring = '\\n'.join(root.xpath('//div[@id=\"ct_size\"]/div//text()'))\r\n\r\nitems = []\r\nfor i in range(1, 21):\r\n tmp = string.split('%s.' % i, 1)\r\n string = tmp[1]\r\n items.append([j.strip() for j in tmp[0].split('\\n') if j and j != '\\xa0'])\r\n\r\ndata = [prep(i[:4]) for i in items[1:]]\r\n\r\n# save data to file\r\nwith open('places.csv', 'w') as f:\r\n f.write('name,lat,lon\\n')\r\n for d in data:\r\n f.write('%(name)s,%(lat)s,%(lon)s\\n' % d)\r\n","sub_path":"01.gourmetMap/egTest.py","file_name":"egTest.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"314130057","text":"#import socket module\nfrom socket import *\n\nHOST = '127.0.0.1'\nPORT = 12345\n\n#Create socket\nserverSocket = socket(AF_INET, SOCK_STREAM)\n\n#Try to bind socket to a port\ntry:\n serverSocket.bind((HOST, PORT))\n serverSocket.listen(5)\nexcept Exception as msg:\n print('Bind failed. 
Error: ' + str(msg))\n raise SystemExit(1) #sys was never imported; SystemExit needs no import\n \nwhile True:\n #Establish the connection\n print('Ready to serve...')\n \n connectionSocket, addr = serverSocket.accept()\n \n try:\n message = connectionSocket.recv(1024)\n \n #Get the filename\n filename = message.split()[1]\n \n #Open file\n f = open(filename[1:]).read()\n \n #Send one HTTP header line into socket\n outputdata = 'HTTP/1.1 200 OK\\r\\n'\n outputdata += \"Content-Type: text/html\\r\\n\\r\\n\"\n \n outputdata += f\n\n #Send the content of the requested file to the client\n for i in range(0, len(outputdata)):\n connectionSocket.send(outputdata[i].encode('utf-8'))\n \n #close client socket\n connectionSocket.close()\n \n except IOError:\n \n #Send response message for file not found\n outputdata = 'HTTP/1.1 404 Not Found\\r\\n'\n outputdata += \"Content-Type: text/html\\r\\n\\r\\n\"\n \n #Send the content of the requested file to the client\n for i in range(0, len(outputdata)):\n connectionSocket.send(outputdata[i].encode('utf-8'))\n \n #Close client socket\n connectionSocket.close()\n\n#Close server socket\nserverSocket.close()\n\n \n","sub_path":"WebServer/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"601272667","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n#Suggested command line to execute this file: python2 __init__.py\n#Or, for normal execution, switch json.dump to json.dumps\n\nfrom b2w import B2W\nimport json\n\ncodes = B2W.Get_Prod_Codes_By_Cat(\"https://www.submarino.com.br/categoria/informatica/tablet-e-ipad/tablet\", 5)\nprods = B2W.Get_Properties_By_Cods(codes)\n\nfile = open(\"products_sub.txt\", \"w\")\n\n\nfor i in prods:\n json.dump(i, file, ensure_ascii=False, indent=4)\n file.write(\"\\n\\n\")\n\nfile.close()\n","sub_path":"1-Anotacao/ws_tablet/submarino/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"462393957","text":"\"\"\"Module to test the local module.\r\n\r\n\"\"\"\r\nimport unittest\r\nimport numpy as np\r\nfrom polymers import physics\r\nfrom ..test import Parameters\r\n\r\nparameters = Parameters()\r\nLENNARDJONESFJC = physics.single_chain.ufjc.lennard_jones.thermodynamics.\\\r\n isometric.asymptotic.LENNARDJONESFJC\r\n\r\n\r\nclass Base(unittest.TestCase):\r\n \"\"\"Class for basic tests.\r\n\r\n \"\"\"\r\n def test_init(self):\r\n \"\"\"Function to test instantiation.\r\n\r\n \"\"\"\r\n for _ in range(parameters.number_of_loops):\r\n _ = LENNARDJONESFJC(\r\n parameters.number_of_links_minimum,\r\n parameters.link_length_reference,\r\n parameters.hinge_mass_reference,\r\n parameters.link_stiffness_reference\r\n )\r\n\r\n def test_number_of_links(self):\r\n \"\"\"Function to test the number of links during instantiation.\r\n\r\n \"\"\"\r\n for _ in range(parameters.number_of_loops):\r\n number_of_links = \\\r\n np.random.randint(\r\n parameters.number_of_links_minimum,\r\n high=parameters.number_of_links_maximum\r\n )\r\n self.assertEqual(\r\n number_of_links,\r\n LENNARDJONESFJC(\r\n number_of_links,\r\n parameters.link_length_reference,\r\n parameters.hinge_mass_reference,\r\n parameters.link_stiffness_reference\r\n ).number_of_links\r\n )\r\n\r\n def test_link_length(self):\r\n \"\"\"Function to test the link length during instantiation.\r\n\r\n \"\"\"\r\n for _ in range(parameters.number_of_loops):\r\n 
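            # Sample a link length uniformly within +/- half the configured scale\r\n            # around the reference value, then check the constructor stores it unchanged.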
link_length = \\\r\n parameters.link_length_reference + \\\r\n parameters.link_length_scale*(0.5 - np.random.rand())\r\n self.assertEqual(\r\n link_length,\r\n LENNARDJONESFJC(\r\n parameters.number_of_links_minimum,\r\n link_length,\r\n parameters.hinge_mass_reference,\r\n parameters.link_stiffness_reference\r\n ).link_length\r\n )\r\n\r\n def test_hinge_mass(self):\r\n \"\"\"Function to test the hinge mass during instantiation.\r\n\r\n \"\"\"\r\n for _ in range(parameters.number_of_loops):\r\n hinge_mass = \\\r\n parameters.hinge_mass_reference + \\\r\n parameters.hinge_mass_scale*(0.5 - np.random.rand())\r\n self.assertEqual(\r\n hinge_mass,\r\n LENNARDJONESFJC(\r\n parameters.number_of_links_minimum,\r\n parameters.link_length_reference,\r\n hinge_mass,\r\n parameters.link_stiffness_reference\r\n ).hinge_mass\r\n )\r\n\r\n def test_link_stiffness(self):\r\n \"\"\"Function to test the link stiffness during instantiation.\r\n\r\n \"\"\"\r\n for _ in range(parameters.number_of_loops):\r\n link_stiffness = \\\r\n parameters.link_stiffness_reference + \\\r\n parameters.link_stiffness_scale*(0.5 - np.random.rand())\r\n self.assertEqual(\r\n link_stiffness,\r\n LENNARDJONESFJC(\r\n parameters.number_of_links_minimum,\r\n parameters.link_length_reference,\r\n parameters.hinge_mass_reference,\r\n link_stiffness\r\n ).link_stiffness\r\n )\r\n\r\n def test_all_parameters(self):\r\n \"\"\"Function to test all parameters during instantiation.\r\n\r\n \"\"\"\r\n for _ in range(parameters.number_of_loops):\r\n number_of_links = \\\r\n np.random.randint(\r\n parameters.number_of_links_minimum,\r\n high=parameters.number_of_links_maximum\r\n )\r\n link_length = \\\r\n parameters.link_length_reference + \\\r\n parameters.link_length_scale*(0.5 - np.random.rand())\r\n hinge_mass = \\\r\n parameters.hinge_mass_reference + \\\r\n parameters.hinge_mass_scale*(0.5 - np.random.rand())\r\n link_stiffness = \\\r\n parameters.link_stiffness_reference + \\\r\n parameters.link_stiffness_scale*(0.5 - np.random.rand())\r\n model = LENNARDJONESFJC(\r\n number_of_links,\r\n link_length,\r\n hinge_mass,\r\n link_stiffness\r\n )\r\n self.assertEqual(\r\n number_of_links,\r\n model.number_of_links\r\n )\r\n self.assertEqual(\r\n link_length,\r\n model.link_length\r\n )\r\n self.assertEqual(\r\n hinge_mass,\r\n model.hinge_mass\r\n )\r\n self.assertEqual(\r\n link_stiffness,\r\n model.link_stiffness\r\n )\r\n","sub_path":"src/physics/single_chain/ufjc/lennard_jones/thermodynamics/isometric/asymptotic/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"557926399","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport json\nimport csv\nimport time\n\nfrom datetime import date\nfrom multiprocessing import Pool\n\n\nimport requests\n\n\ndef Crawler(targets):\n '''Request to Market Information System'''\n # def __init__(self, targets,timestamp):\n try:\n timestamp = int(time.time() * 1000 + 1000000)\n endpoint = 'http://mis.twse.com.tw/stock/api/getStockInfo.jsp'\n channels = 'tse_{}.tw'.format(targets)\n s = '{}?ex_ch={}&json=1&delay=0&_={}'.format(endpoint, channels, timestamp)\n\n req = requests.session()\n req.get('http://mis.twse.com.tw/stock/index.jsp',\n headers={'Accept-Language': 'zh-TW'})\n\n response = req.get(s)\n content = json.loads(response.text)\n except Exception as err:\n print(err)\n data = []\n else:\n data = content['msgArray']\n return data\n\ndef Recorder(data):\n '''Record data to 
csv'''\n\n folder_path = '{}/{}'.format('data', date.today().strftime('%Y%m%d'))\n\n if not os.path.isdir(folder_path):\n os.mkdir(folder_path)\n\n\n for row in data:\n try:\n file_path = '{}/{}.csv'.format(folder_path, row['c'])\n with open(file_path, 'a') as output_file:\n writer = csv.writer(output_file, delimiter=',')\n writer.writerow([\n row['t'], # 資料時間\n row['c'], # 最近成交價\n row['tv'], # 當盤成交量\n row['v'], # 當日累計成交量\n row['a'], # 最佳五檔賣出價格\n row['f'], # 最價五檔賣出數量\n row['b'], # 最佳五檔買入價格\n row['g'] # 最佳五檔買入數量\n\n ])\n\n except Exception as err:\n print(err)\n print(\"@@\")\n\n\nif __name__ == '__main__':\n target = [_.strip() for _ in open('stocknumber.csv', 'r')]\n with Pool(processes=50) as pool:\n contents = pool.map(Crawler, target)\n pool.map(Recorder, contents)\n #print(contents)","sub_path":"crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"652218066","text":"import signal\nimport time\n\n\ndef signal_handler(signum, frame):\n print(\"got signal\", signum)\n if signum == signal.SIGINT:\n print(\"got Ctrl+C, will do exit(1)\")\n exit(1)\n\n\nsignal.signal(signal.SIGINT, signal_handler)\n\n\nif __name__ == '__main__':\n while True:\n time.sleep(2)\n","sub_path":"my_project/tests/base/signal_test.py","file_name":"signal_test.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"36125213","text":"import turtle as tur\nimport colorsys as cs\n\ntur.setup(600,600)\ntur.speed(0)\ntur.width(2)\ntur.bgcolor(\"black\")\nfor j in range(15):\n for i in range(10):\n tur.pensize(j)\n tur.color(cs.hsv_to_rgb(i/15,j/15,1))\n tur.right(90)\n tur.circle(200-j*4,90)\n tur.left(90)\n tur.circle(200-j*4,90)\n tur.right(360)\n tur.circle(50,24)\ntur.circle(50,24)\ntur.hideturtle()\ntur.done\n","sub_path":"Designs/Rangoli_design.py","file_name":"Rangoli_design.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"497911413","text":"\"\"\"\nAdvent of Code 2015 Day 5\n\nProblem description: santa is given a list of strings nad needs to decide\nwhich are good and which are bad based on arbitary conditions. 
We need to\ncount the number of nice strings in the list.\n\nhttps://regex101.com/ or www.regexr.com\n\n\"\"\"\nimport sys\nimport re\nimport time\n\n\ndef conditions_A(single_string):\n vowel = bool(re.compile(r'(.*[aeiou]){3,}').search(single_string))\n duplicate = bool(re.compile(r'(.)\\1').search(single_string))\n banned = not(bool(re.compile(r'(ab|cd|pq|xy)').search(single_string))) # Note use of not as the Regex found matches that had the banned strings\n result_final = vowel and duplicate and banned\n return result_final\n\n\ndef conditions_B(single_string):\n repeated_pair = bool(re.compile(r'(..).*\\1').search(single_string))\n repeat_with_gap = bool(re.compile(r'.*(.).\\1.*').search(single_string))\n result_final = repeated_pair and repeat_with_gap\n return result_final\n\n\ndef main(filename, option):\n with open(filename, \"r\") as myfile:\n aoc_input_strings = myfile.readlines()\n\n number_nice = 0 # initilise count of nice strings at 0\n for count, single_string in enumerate(aoc_input_strings):\n if option == 'A':\n single_string_outcome = conditions_A(single_string)\n elif option == 'B':\n single_string_outcome = conditions_B(single_string)\n else:\n single_string_outcome = 0 # An error occured but we still need a value here\n if count == 0:\n print('Error! The function argument for option must be \\'A\\' or \\'B\\'')\n number_nice += single_string_outcome # we can add booleans as int\n return number_nice\n\n\nif __name__ == \"__main__\":\n start_time = time.time()\n \"\"\"\n if len(sys.argv) != 3:\n print('The correct syntax to run this code from the command line\\\n is python [file.py] [textfile.txt] [option] where option should\\\n be A or B')\n else:\n answer = main(sys.argv[1], sys.argv[2])\n if sys.argv[2] == 'A' or sys.argv[2] == 'B':\n print(f'There are {answer} nice strings')\n \n \"\"\"\n # For running from spyder\n option ='B'\n answer = main('Day5_input.txt', option)\n if option == 'A' or option == 'B':\n print(f'There are {answer} nice strings')\n \n \n end_time = time.time()\n duration = end_time - start_time\n print('The code took {:.2f} milliseconds to execute'.format(1000*duration))\n","sub_path":"2015/Day05_V2.py","file_name":"Day05_V2.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"209523287","text":"from collections import Counter, defaultdict\nimport itertools\nimport sys\n\ndef main():\n n = int(input())\n spec = 3\n poss = True\n for _ in range(n):\n winner = int(input())\n if winner != spec:\n spec = 6 - winner - spec\n else:\n poss = False\n print('YES' if poss else 'NO')\n\n\n\nmain()\n","sub_path":"codeforces/893/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"196795696","text":"# -*- coding: ISO-8859-1 -*-\r\nfrom tsm.core.base.core_base_test import Testsbase\r\nfrom tsm.oportunidade.models.receita import Receita\r\n\r\nclass TestsReceitas(Testsbase):\r\n modelStandard = Receita\r\n modelMommy = 'oportunidade.Receita'\r\n urlBase = '/oportunidade/receita/'\r\n fieldTestRender = 'nome'\r\n dataInsert = {\r\n 'nome' : 'PAR_01'\r\n }","sub_path":"tsm/oportunidade/tests/test_receita.py","file_name":"test_receita.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"93177501","text":"#!/bin/env python3\nimport numpy as np\nimport 
math\nfrom collections import Counter\n\n# with open('text123.txt', 'w') as f123:\n# f123.write(str2)\n\ndef actions( alph, fstr ):\n\tsum54 = 0\n\ta = {}\n\tfstr = fstr.lower()\n\tstr2 = ''\n\tfor i in fstr:\n\t # if i==len(fstr): break\n\t\tif i == ' ' and str2[-1] == ' ':\n\t\t\tcontinue\n\t\tif i in alph:\n\t\t\tstr2 += i\n\t\telif i == 'ё':\n\t\t str2 += 'е'\n\t\telif i == 'ъ':\n\t\t\tstr2+='ь'\n\t\t\n\tprint('----------------------------------------------------')\n\tc1 = 0\n\tfor i in range(int((len(str2)/2))):\n\t if str2[c1:c1 + 2] in a.keys():\n\t a[str2[c1:c1 + 2]] += 1\n\t else:\n\t a[str2[c1:c1 + 2]] = 1\n\t c1 += 2\n\t \n\tsum4 = sum(a.values())\n#\tbigrams_b = []\n#\tfor i in range(0,(len(str2)-2), 2):\t\n#\t\tbigrams_b.append(str2[i:i+2])\n#\tbigrams_b1,cnt = np.unique(bigrams_b, return_counts=True)\n#\tp2 = cnt/np.sum(cnt)\n#\tH_b = -np.sum(p2 * np.log2(p2))/2\n\tres = Counter(str2[idx: idx + 2] for idx in range(len(str2) - 1))\n\tsum1 = sum(res.values())\n\tcnt = 0\n\t\n\tsum2 = 0\n\t\n\tfor i in list(res.keys()):\n\t sum2 = sum2 + (res[i]/sum1)*math.log2(res[i]/sum1)\n\t res[i] = round((res[i]) / sum1, 3)\n\t\n\tsum3 = 0\n\t\n\tfor i in list(a.keys()):\n\t sum3 = sum3 + (a[i]/sum4)*math.log2(a[i]/sum4)\n\t a[i] = round((a[i]) / sum4, 3)\n\t\n\tlst_nakl = list(res.items())\n\t\n\tlst_nakl.sort(key=lambda i: i[1], reverse=1)\n\tlst_wo_nakl = list(a.items())\n\t\n\tlst_wo_nakl.sort(key=lambda i: i[1], reverse=1)\n\tsum1 = 0\n\tfor i in alph:\n\t print('symbol \"{0}\" ---- {1}'.format(i, round((str2.count(i) / len(str2)), 3)))\n\t sum54 = sum54 + ((str2.count(i) / len(str2)) * math.log2(str2.count(i) / len(str2)))\n\t\n#\tfor i in range(len(lst_wo_nakl)):# lst_nakl, lst_wo_nakl:\n#\t print('{0} {1}'.format(str(lst_nakl[i]),str(lst_wo_nakl[i])),i)\n\tprint(lst_nakl)\n\tprint(lst_wo_nakl)\n\t\n\tprint('----------------------------------------------------')\n\tprint('entropy simv- ' + str(sum54 * (-1)))\n\tprint('entropy bigr z povt- ' + str(sum2/2 * (-1)))\n\tprint('entropy bigr bez povt- ' + str(sum3/2 * (-1)))\n\tprint('----------------------------------------------------')\n\t\n\treturn\n\n\nwith open('text1', 'r') as f:\n fstr1 = f.read()\n\nalph1 = \"абвгдежзийклмнопрстуфхцчшщьюяыэ \"\nalph2 = \"абвгдежзийклмнопрстуфхцчшщьюяыэ\"\nactions(alph1, fstr1)\nactions(alph2, fstr1)\n\n# for i in lst_nakl:\n# if cnt % 2 == 0:\n# print(i)\n# cnt += 1\n#print(str2[:])\n","sub_path":"cp_1/Moroz_fb84_Yalovchuk_fb84_cp1/b00bs3.py","file_name":"b00bs3.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"401423272","text":"import numpy as np\n\ndef centroid_histogram(clt):\n # クラスターの数からbinの決定\n numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)\n (hist, _) = np.histogram(clt.labels_, bins=numLabels)\n\n # 割合\n hist = hist.astype(\"float\")\n hist /= hist.sum()\n\n # return the histogram\n return hist\n","sub_path":"scripts/utils/hist.py","file_name":"hist.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"70232097","text":"from django.contrib import admin\nfrom jobs.models import Teachers, Emailjobs\n# Register your models here.\nfrom core.base.baseadmin import BaseModelAdmin\n\n\nclass TeacherAmdin(admin.ModelAdmin):\n fields = (\n 'current_url', 'name', 'professional_title', 'ttype', 'major', 'email', 'lab_name', 'lab_tel',\n 'office_name', 'office_tel', 'remark', 'is_old_customer', 
'is_lab_leader', 'is_write_back', 'status'\n )\n\n\nadmin.site.register(Teachers, TeacherAmdin)\nadmin.site.register(Emailjobs, BaseModelAdmin)","sub_path":"jobs/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"622244606","text":"import RPi.GPIO as GPIO\nfrom thermostat.enums import StateEnum\nfrom thermostat import config, logger\n\n\nclass ThermalHeating(object):\n \"\"\" Set thermal heating on/off\n This class should not be directly instantiated.\n Import thermal_heating object instead like this:\n >> from thermostat.heating import thermal_heating\n \"\"\"\n\n def __init__(self):\n GPIO.setmode(GPIO.BCM)\n status = GPIO.gpio_function(config.RELAY_OUT_CHANNEL)\n self._status = StateEnum(status) if status else StateEnum.OFF\n logger.log('Thermal heating initialized')\n\n def get_state(self):\n \"\"\" Get heating state (on|off)\n \"\"\"\n return self._status\n\n def set_state(self, state: StateEnum):\n \"\"\" Set heating state (on|off)\n \"\"\"\n if isinstance(state, StateEnum) and self._status != state:\n GPIO.setup(config.RELAY_OUT_CHANNEL, state.value)\n self._status = state\n logger.log(self.__unicode__())\n\n state = property(fget=get_state, fset=set_state)\n\n def __unicode__(self):\n return 'Heating is {0}'.format(self._status.name)\n\n def __del__(self):\n self._status = StateEnum.OFF\n try:\n GPIO.cleanup(config.RELAY_OUT_CHANNEL)\n except RuntimeWarning:\n pass\n\nthermal_heating = ThermalHeating()\n","sub_path":"thermostat/heating.py","file_name":"heating.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"116029224","text":"import yaml\n\n\ndef add_cfg(cfg, yaml_file):\n # Read YAML file\n try:\n cfg.update(yaml.load(open(yaml_file, 'r')))\n except Exception:\n print('Error: cannot parse cfg', yaml_file)\n raise Exception\n\n\ndef load_cfg_yamls(yaml_files):\n cfg = dict()\n for yf in yaml_files:\n add_cfg(cfg, yf)\n return cfg\n","sub_path":"cfgs/config_v2.py","file_name":"config_v2.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"484556188","text":"import fractions\r\n \r\nminnum,maxnum,div1,div2 = map(int, input().split(\" \"))\r\n \r\nnum1 = (minnum - 1) // div1\r\nnum2 = maxnum // div1\r\nnum3 = (minnum - 1) // div2\r\nnum4 = maxnum // div2\r\nlcm = div1 * div2 // fractions.gcd(div1, div2)\r\nnum5 = (minnum - 1)// lcm\r\nnum6 = (maxnum // lcm)\r\n \r\n#print(num1)\r\n#print(num2)\r\n#print(num3)\r\n#print(num4)\r\n#print(num5)\r\n#print(num6)\r\nans = (num4 - num3) + (num2 - num1)\r\n \r\nprint((maxnum - minnum) + 1 - ans + (num6 - num5))","sub_path":"131c.py","file_name":"131c.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"509080740","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------\n\nfrom typing import Any, Optional, TYPE_CHECKING\n\nfrom azure.mgmt.core import AsyncARMPipelineClient\nfrom msrest import Deserializer, Serializer\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import,ungrouped-imports\n from azure.core.credentials_async import AsyncTokenCredential\n\nfrom ._configuration_async import SitesOneNoteConfiguration\nfrom .operations_async import SiteOperations\nfrom .operations_async import SiteOnenoteOperations\nfrom .operations_async import SiteOnenoteNotebookOperations\nfrom .operations_async import SiteOnenoteNotebookSectionGroupOperations\nfrom .operations_async import SiteOnenoteNotebookSectionGroupSectionOperations\nfrom .operations_async import SiteOnenoteNotebookSectionGroupSectionPageOperations\nfrom .operations_async import SiteOnenoteNotebookSectionOperations\nfrom .operations_async import SiteOnenoteNotebookSectionPageOperations\nfrom .operations_async import SiteOnenoteNotebookSectionParentSectionGroupOperations\nfrom .operations_async import SiteOnenotePageOperations\nfrom .operations_async import SiteOnenotePageParentNotebookOperations\nfrom .operations_async import SiteOnenotePageParentNotebookSectionGroupOperations\nfrom .operations_async import SiteOnenotePageParentNotebookSectionGroupSectionOperations\nfrom .operations_async import SiteOnenotePageParentNotebookSectionOperations\nfrom .operations_async import SiteOnenotePageParentNotebookSectionParentSectionGroupOperations\nfrom .operations_async import SiteOnenotePageParentSectionOperations\nfrom .operations_async import SiteOnenotePageParentSectionParentNotebookOperations\nfrom .operations_async import SiteOnenotePageParentSectionParentNotebookSectionGroupOperations\nfrom .operations_async import SiteOnenotePageParentSectionGroupOperations\nfrom .operations_async import SiteOnenotePageParentSectionGroupParentNotebookOperations\nfrom .operations_async import SiteOnenoteSectionGroupOperations\nfrom .operations_async import SiteOnenoteSectionGroupParentNotebookOperations\nfrom .operations_async import SiteOnenoteSectionGroupParentNotebookSectionOperations\nfrom .operations_async import SiteOnenoteSectionGroupParentNotebookSectionPageOperations\nfrom .operations_async import SiteOnenoteSectionGroupSectionOperations\nfrom .operations_async import SiteOnenoteSectionGroupSectionPageOperations\nfrom .operations_async import SiteOnenoteSectionGroupSectionPageParentNotebookOperations\nfrom .operations_async import SiteOnenoteSectionGroupSectionParentNotebookOperations\nfrom .operations_async import SiteOnenoteSectionOperations\nfrom .operations_async import SiteOnenoteSectionPageOperations\nfrom .operations_async import SiteOnenoteSectionPageParentNotebookOperations\nfrom .operations_async import SiteOnenoteSectionPageParentNotebookSectionGroupOperations\nfrom .operations_async import SiteOnenoteSectionParentNotebookOperations\nfrom .operations_async import SiteOnenoteSectionParentNotebookSectionGroupOperations\nfrom .operations_async import SiteOnenoteSectionParentSectionGroupOperations\nfrom .operations_async import SiteOnenoteSectionParentSectionGroupParentNotebookOperations\nfrom .. 
import models\n\n\nclass SitesOneNote(object):\n \"\"\"SitesOneNote.\n\n :ivar site: SiteOperations operations\n :vartype site: sites_one_note.aio.operations_async.SiteOperations\n :ivar site_onenote: SiteOnenoteOperations operations\n :vartype site_onenote: sites_one_note.aio.operations_async.SiteOnenoteOperations\n :ivar site_onenote_notebook: SiteOnenoteNotebookOperations operations\n :vartype site_onenote_notebook: sites_one_note.aio.operations_async.SiteOnenoteNotebookOperations\n :ivar site_onenote_notebook_section_group: SiteOnenoteNotebookSectionGroupOperations operations\n :vartype site_onenote_notebook_section_group: sites_one_note.aio.operations_async.SiteOnenoteNotebookSectionGroupOperations\n :ivar site_onenote_notebook_section_group_section: SiteOnenoteNotebookSectionGroupSectionOperations operations\n :vartype site_onenote_notebook_section_group_section: sites_one_note.aio.operations_async.SiteOnenoteNotebookSectionGroupSectionOperations\n :ivar site_onenote_notebook_section_group_section_page: SiteOnenoteNotebookSectionGroupSectionPageOperations operations\n :vartype site_onenote_notebook_section_group_section_page: sites_one_note.aio.operations_async.SiteOnenoteNotebookSectionGroupSectionPageOperations\n :ivar site_onenote_notebook_section: SiteOnenoteNotebookSectionOperations operations\n :vartype site_onenote_notebook_section: sites_one_note.aio.operations_async.SiteOnenoteNotebookSectionOperations\n :ivar site_onenote_notebook_section_page: SiteOnenoteNotebookSectionPageOperations operations\n :vartype site_onenote_notebook_section_page: sites_one_note.aio.operations_async.SiteOnenoteNotebookSectionPageOperations\n :ivar site_onenote_notebook_section_parent_section_group: SiteOnenoteNotebookSectionParentSectionGroupOperations operations\n :vartype site_onenote_notebook_section_parent_section_group: sites_one_note.aio.operations_async.SiteOnenoteNotebookSectionParentSectionGroupOperations\n :ivar site_onenote_page: SiteOnenotePageOperations operations\n :vartype site_onenote_page: sites_one_note.aio.operations_async.SiteOnenotePageOperations\n :ivar site_onenote_page_parent_notebook: SiteOnenotePageParentNotebookOperations operations\n :vartype site_onenote_page_parent_notebook: sites_one_note.aio.operations_async.SiteOnenotePageParentNotebookOperations\n :ivar site_onenote_page_parent_notebook_section_group: SiteOnenotePageParentNotebookSectionGroupOperations operations\n :vartype site_onenote_page_parent_notebook_section_group: sites_one_note.aio.operations_async.SiteOnenotePageParentNotebookSectionGroupOperations\n :ivar site_onenote_page_parent_notebook_section_group_section: SiteOnenotePageParentNotebookSectionGroupSectionOperations operations\n :vartype site_onenote_page_parent_notebook_section_group_section: sites_one_note.aio.operations_async.SiteOnenotePageParentNotebookSectionGroupSectionOperations\n :ivar site_onenote_page_parent_notebook_section: SiteOnenotePageParentNotebookSectionOperations operations\n :vartype site_onenote_page_parent_notebook_section: sites_one_note.aio.operations_async.SiteOnenotePageParentNotebookSectionOperations\n :ivar site_onenote_page_parent_notebook_section_parent_section_group: SiteOnenotePageParentNotebookSectionParentSectionGroupOperations operations\n :vartype site_onenote_page_parent_notebook_section_parent_section_group: sites_one_note.aio.operations_async.SiteOnenotePageParentNotebookSectionParentSectionGroupOperations\n :ivar site_onenote_page_parent_section: SiteOnenotePageParentSectionOperations operations\n :vartype 
site_onenote_page_parent_section: sites_one_note.aio.operations_async.SiteOnenotePageParentSectionOperations\n :ivar site_onenote_page_parent_section_parent_notebook: SiteOnenotePageParentSectionParentNotebookOperations operations\n :vartype site_onenote_page_parent_section_parent_notebook: sites_one_note.aio.operations_async.SiteOnenotePageParentSectionParentNotebookOperations\n :ivar site_onenote_page_parent_section_parent_notebook_section_group: SiteOnenotePageParentSectionParentNotebookSectionGroupOperations operations\n :vartype site_onenote_page_parent_section_parent_notebook_section_group: sites_one_note.aio.operations_async.SiteOnenotePageParentSectionParentNotebookSectionGroupOperations\n :ivar site_onenote_page_parent_section_group: SiteOnenotePageParentSectionGroupOperations operations\n :vartype site_onenote_page_parent_section_group: sites_one_note.aio.operations_async.SiteOnenotePageParentSectionGroupOperations\n :ivar site_onenote_page_parent_section_group_parent_notebook: SiteOnenotePageParentSectionGroupParentNotebookOperations operations\n :vartype site_onenote_page_parent_section_group_parent_notebook: sites_one_note.aio.operations_async.SiteOnenotePageParentSectionGroupParentNotebookOperations\n :ivar site_onenote_section_group: SiteOnenoteSectionGroupOperations operations\n :vartype site_onenote_section_group: sites_one_note.aio.operations_async.SiteOnenoteSectionGroupOperations\n :ivar site_onenote_section_group_parent_notebook: SiteOnenoteSectionGroupParentNotebookOperations operations\n :vartype site_onenote_section_group_parent_notebook: sites_one_note.aio.operations_async.SiteOnenoteSectionGroupParentNotebookOperations\n :ivar site_onenote_section_group_parent_notebook_section: SiteOnenoteSectionGroupParentNotebookSectionOperations operations\n :vartype site_onenote_section_group_parent_notebook_section: sites_one_note.aio.operations_async.SiteOnenoteSectionGroupParentNotebookSectionOperations\n :ivar site_onenote_section_group_parent_notebook_section_page: SiteOnenoteSectionGroupParentNotebookSectionPageOperations operations\n :vartype site_onenote_section_group_parent_notebook_section_page: sites_one_note.aio.operations_async.SiteOnenoteSectionGroupParentNotebookSectionPageOperations\n :ivar site_onenote_section_group_section: SiteOnenoteSectionGroupSectionOperations operations\n :vartype site_onenote_section_group_section: sites_one_note.aio.operations_async.SiteOnenoteSectionGroupSectionOperations\n :ivar site_onenote_section_group_section_page: SiteOnenoteSectionGroupSectionPageOperations operations\n :vartype site_onenote_section_group_section_page: sites_one_note.aio.operations_async.SiteOnenoteSectionGroupSectionPageOperations\n :ivar site_onenote_section_group_section_page_parent_notebook: SiteOnenoteSectionGroupSectionPageParentNotebookOperations operations\n :vartype site_onenote_section_group_section_page_parent_notebook: sites_one_note.aio.operations_async.SiteOnenoteSectionGroupSectionPageParentNotebookOperations\n :ivar site_onenote_section_group_section_parent_notebook: SiteOnenoteSectionGroupSectionParentNotebookOperations operations\n :vartype site_onenote_section_group_section_parent_notebook: sites_one_note.aio.operations_async.SiteOnenoteSectionGroupSectionParentNotebookOperations\n :ivar site_onenote_section: SiteOnenoteSectionOperations operations\n :vartype site_onenote_section: sites_one_note.aio.operations_async.SiteOnenoteSectionOperations\n :ivar site_onenote_section_page: SiteOnenoteSectionPageOperations operations\n :vartype 
site_onenote_section_page: sites_one_note.aio.operations_async.SiteOnenoteSectionPageOperations\n :ivar site_onenote_section_page_parent_notebook: SiteOnenoteSectionPageParentNotebookOperations operations\n :vartype site_onenote_section_page_parent_notebook: sites_one_note.aio.operations_async.SiteOnenoteSectionPageParentNotebookOperations\n :ivar site_onenote_section_page_parent_notebook_section_group: SiteOnenoteSectionPageParentNotebookSectionGroupOperations operations\n :vartype site_onenote_section_page_parent_notebook_section_group: sites_one_note.aio.operations_async.SiteOnenoteSectionPageParentNotebookSectionGroupOperations\n :ivar site_onenote_section_parent_notebook: SiteOnenoteSectionParentNotebookOperations operations\n :vartype site_onenote_section_parent_notebook: sites_one_note.aio.operations_async.SiteOnenoteSectionParentNotebookOperations\n :ivar site_onenote_section_parent_notebook_section_group: SiteOnenoteSectionParentNotebookSectionGroupOperations operations\n :vartype site_onenote_section_parent_notebook_section_group: sites_one_note.aio.operations_async.SiteOnenoteSectionParentNotebookSectionGroupOperations\n :ivar site_onenote_section_parent_section_group: SiteOnenoteSectionParentSectionGroupOperations operations\n :vartype site_onenote_section_parent_section_group: sites_one_note.aio.operations_async.SiteOnenoteSectionParentSectionGroupOperations\n :ivar site_onenote_section_parent_section_group_parent_notebook: SiteOnenoteSectionParentSectionGroupParentNotebookOperations operations\n :vartype site_onenote_section_parent_section_group_parent_notebook: sites_one_note.aio.operations_async.SiteOnenoteSectionParentSectionGroupParentNotebookOperations\n :param credential: Credential needed for the client to connect to Azure.\n :type credential: ~azure.core.credentials_async.AsyncTokenCredential\n :param top: Show only the first n items.\n :type top: int\n :param skip: Skip the first n items.\n :type skip: int\n :param search: Search items by search phrases.\n :type search: str\n :param filter: Filter items by property values.\n :type filter: str\n :param count: Include count of items.\n :type count: bool\n :param str base_url: Service URL\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n \"\"\"\n\n def __init__(\n self,\n credential: \"AsyncTokenCredential\",\n top: Optional[int] = None,\n skip: Optional[int] = None,\n search: Optional[str] = None,\n filter: Optional[str] = None,\n count: Optional[bool] = None,\n base_url: Optional[str] = None,\n **kwargs: Any\n ) -> None:\n if not base_url:\n base_url = 'https://graph.microsoft.com/beta'\n self._config = SitesOneNoteConfiguration(credential, top, skip, search, filter, count, **kwargs)\n self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)\n\n client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}\n self._serialize = Serializer(client_models)\n self._deserialize = Deserializer(client_models)\n\n self.site = SiteOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote = SiteOnenoteOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_notebook = SiteOnenoteNotebookOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_notebook_section_group = SiteOnenoteNotebookSectionGroupOperations(\n self._client, self._config, self._serialize, self._deserialize)\n 
self.site_onenote_notebook_section_group_section = SiteOnenoteNotebookSectionGroupSectionOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_notebook_section_group_section_page = SiteOnenoteNotebookSectionGroupSectionPageOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_notebook_section = SiteOnenoteNotebookSectionOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_notebook_section_page = SiteOnenoteNotebookSectionPageOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_notebook_section_parent_section_group = SiteOnenoteNotebookSectionParentSectionGroupOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_page = SiteOnenotePageOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_page_parent_notebook = SiteOnenotePageParentNotebookOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_page_parent_notebook_section_group = SiteOnenotePageParentNotebookSectionGroupOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_page_parent_notebook_section_group_section = SiteOnenotePageParentNotebookSectionGroupSectionOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_page_parent_notebook_section = SiteOnenotePageParentNotebookSectionOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_page_parent_notebook_section_parent_section_group = SiteOnenotePageParentNotebookSectionParentSectionGroupOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_page_parent_section = SiteOnenotePageParentSectionOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_page_parent_section_parent_notebook = SiteOnenotePageParentSectionParentNotebookOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_page_parent_section_parent_notebook_section_group = SiteOnenotePageParentSectionParentNotebookSectionGroupOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_page_parent_section_group = SiteOnenotePageParentSectionGroupOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_page_parent_section_group_parent_notebook = SiteOnenotePageParentSectionGroupParentNotebookOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_section_group = SiteOnenoteSectionGroupOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_section_group_parent_notebook = SiteOnenoteSectionGroupParentNotebookOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_section_group_parent_notebook_section = SiteOnenoteSectionGroupParentNotebookSectionOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_section_group_parent_notebook_section_page = SiteOnenoteSectionGroupParentNotebookSectionPageOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_section_group_section = SiteOnenoteSectionGroupSectionOperations(\n self._client, self._config, self._serialize, self._deserialize)\n 
self.site_onenote_section_group_section_page = SiteOnenoteSectionGroupSectionPageOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_section_group_section_page_parent_notebook = SiteOnenoteSectionGroupSectionPageParentNotebookOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_section_group_section_parent_notebook = SiteOnenoteSectionGroupSectionParentNotebookOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_section = SiteOnenoteSectionOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_section_page = SiteOnenoteSectionPageOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_section_page_parent_notebook = SiteOnenoteSectionPageParentNotebookOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_section_page_parent_notebook_section_group = SiteOnenoteSectionPageParentNotebookSectionGroupOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_section_parent_notebook = SiteOnenoteSectionParentNotebookOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_section_parent_notebook_section_group = SiteOnenoteSectionParentNotebookSectionGroupOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_section_parent_section_group = SiteOnenoteSectionParentSectionGroupOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.site_onenote_section_parent_section_group_parent_notebook = SiteOnenoteSectionParentSectionGroupParentNotebookOperations(\n self._client, self._config, self._serialize, self._deserialize)\n\n async def close(self) -> None:\n await self._client.close()\n\n async def __aenter__(self) -> \"SitesOneNote\":\n await self._client.__aenter__()\n return self\n\n async def __aexit__(self, *exc_details) -> None:\n await self._client.__aexit__(*exc_details)\n","sub_path":"msgraph-cli-extensions/src/sitesonenote/azext_sitesonenote/vendored_sdks/sitesonenote/aio/_sites_one_note_async.py","file_name":"_sites_one_note_async.py","file_ext":"py","file_size_in_byte":20720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"32500728","text":"# -*- encoding: utf-8 -*-\n\"\"\" CONSTANTS.PY \"\"\"\n\nimport win32gui\n\n\nDOFUS_AUTH_IP = [\"34.251.172.139\"]\nDOFUS_GAME_IP = [\"52.19.56.159\"]\n\nis_CMSG = lambda p: p[\"IP\"].dst in DOFUS_AUTH_IP + DOFUS_GAME_IP\nis_SMSG = lambda p: p[\"IP\"].src in DOFUS_AUTH_IP + DOFUS_GAME_IP\n\nversion = '1.30.14'\nwin_title_fmt = '{} - Dofus Retro v%s' % version\n\nCMSG = {\n}\n\nSMSG = {\n 'GDF': 'Game_onFrameObject2',\n 'GA': 'GameActions_onActions',\n 'GC': 'Game_onCreate',\n 'GD': 'Game_GD',\n 'Im': 'Infos_onMessage',\n 'EHP': 'Exchange_onItemMiddlePriceInBigStore',\n 'EHl': 'Exchange_onBigStoreItemsList'\n}\n\nNETWORK_INTERFACE = 'Intel(R) Dual Band Wireless-AC 3165'\n\nRESOLUTION = {\n 'min_x': 270,\n 'max_x': 1650,\n 'min_y': 20,\n 'max_y': 850\n}\n\nSCAN_ON_LOADING_MAP = False\n\nACTIONS = [\n 'nothing',\n 'wait',\n 'move_random',\n 'move_center'\n]\n\nRESOURCE_IDS = [\n # Bûcheron\n 3404, # Frêne\n 3405, # Chataignier\n 3406, # Noyer\n 3407, # Chêne\n 3408, # Erable\n 3445, # Bombu\n # Mineur\n 3426, # Cuivre\n 3427, # Bronze\n 3428, # Manganèse\n 3429 # Kobalte\n]\n\n# PLAYER_ID: 
PLAYER_NAME\nENTITY_PLAYER = {\n}\n\n# PLAYER_ID: [MAP_IDS]\nPLAYER_MAPS = {\n}\n\n# MAP_ID : {\n# 'bot': MAP_ID,\n# 'top': MAP_ID,\n# 'left': MAP_ID,\n# 'right': MAP_ID\n# }\nMAP_PATH = {\n}","sub_path":"dr/snf/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"594020579","text":"import sqlite3\nimport argparse\n\ndef create_db(conn):\n pp = conn.cursor()\n pp.executescript('drop table if exists uv;')\n\n try:\n pp.executescript(\"begin\")\n pp.executescript(\"\"\"\n CREATE TABLE uv\n (id INTEGER PRIMARY KEY, variant VARCHAR(100), time INTEGER , uvlevel INTEGER, type VARCHAR(100));\n \"\"\")\n pp.executescript(\"commit\")\n return True\n except conn.Error as e:\n pp.executescript(\"rollback\")\n print(e.args[0])\n return False\n\n\n\ndef main():\n parser = argparse.ArgumentParser(description='creates db tables required for primer program')\n parser.add_argument('--db')\n\n args = parser.parse_args()\n\n db = args.db\n print(db)\n\n conn_main = sqlite3.connect(db)\n complete = create_db(conn_main)\n\n\nif __name__ == '__main__':\n main()","sub_path":"setup_db.py","file_name":"setup_db.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"441697137","text":"# Verify that we can open and read the election results CSV correctly\n# Showing a \"test-driven\" style\n\nfrom electiondata import ElectionResults\nimport unittest\n\nclass ElectionResultsTest(unittest.TestCase):\n\n def setUp(self):\n self.results = ElectionResults('electiondata.py')\n\n def testVotes(self):\n self.results.load()\n votes = self.results.votes()\n #Check that the first state is correct\n #Check with teacher why this syntax is incorrect (and object oriented.py)\n assert votes[0] == 212930\n \n\n\n# if this file is run directly, run the tests\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"a1-test.py","file_name":"a1-test.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"535088075","text":"from flask import current_app, request\nfrom flask_jwt_extended import jwt_required\nfrom flask_restful import fields, marshal, reqparse\n\nfrom api.models.database import BaseModel\nfrom api.models.tray import Tray\nfrom api.resources.base_resource import BaseResource\nfrom api.resources.decorators.user_role_decorators import is_theme_admin\nfrom api.resources.rack_resource import RackResource\nfrom api.utils import format_and_lower_str, log_create, log_duplicate, log_update, log_delete, \\\n has_required_request_params, standard_non_empty_string, log_304, get_query_params, fake, get_boxes\n\n\nclass TrayResource(BaseResource):\n fields = {\n 'number': fields.String,\n 'rack.number': fields.String,\n 'code': fields.String\n }\n\n def get(self):\n query_strings = get_query_params()\n if query_strings is not None:\n for query_string in query_strings:\n query, total = Tray.search(query_string, 1, 15)\n\n # query tray to check for boxes\n box = TrayResource.get_tray(query_string)\n\n if box is not None:\n data = get_boxes(box.id)\n return BaseResource.send_json_message(data, 200)\n else:\n trays = query.all()\n\n data = marshal(trays, self.fields)\n return BaseResource.send_json_message(data, 200)\n\n elif request.headers.get('code') is not None:\n code = 
format_and_lower_str(request.headers['code'])\n tray = TrayResource.get_tray(code)\n if tray is None:\n return BaseResource.send_json_message(\"Tray not found\", 404)\n data = marshal(tray, self.fields)\n return BaseResource.send_json_message(data, 200)\n else:\n trays = Tray.query.all()\n if trays is None:\n return BaseResource.send_json_message(\"Trays not found\", 404)\n data = marshal(trays, self.fields)\n return BaseResource.send_json_message(data, 200)\n\n @jwt_required\n @is_theme_admin\n def post(self):\n args = TrayResource.tray_parser()\n rack = RackResource.get_rack(args['rack']).id\n number = int(args['number'])\n code = fake.ean(length=8)\n\n if not Tray.tray_exists(code):\n try:\n tray = Tray(rack=rack, num=number, code=code)\n BaseModel.db.session.add(tray)\n BaseModel.db.session.commit()\n log_create(tray)\n return BaseResource.send_json_message(\"Tray successfully created\", 201)\n\n except Exception as e:\n current_app.logger.error(e)\n BaseModel.db.session.rollback()\n return BaseResource.send_json_message(\"Error while adding tray\", 500)\n log_duplicate(Tray.query.filter(Tray.code == code).first())\n return BaseResource.send_json_message(\"Tray already exists\", 409)\n\n @jwt_required\n @is_theme_admin\n @has_required_request_params\n def put(self):\n code = format_and_lower_str(request.headers['code'])\n tray = TrayResource.get_tray(code)\n\n if tray is not None:\n args = TrayResource.tray_parser()\n rack = RackResource.get_rack(args['rack']).id\n number = int(args['number'])\n code = args['code']\n\n if rack != tray.rack_id or number != tray.number or code != tray.code:\n old_info = str(tray)\n try:\n tray.rack_id = rack\n tray.number = number\n tray.code = code\n BaseModel.db.session.commit()\n log_update(old_info, tray)\n return BaseResource.send_json_message(\"Tray successfully updated\", 202)\n\n except Exception as e:\n current_app.logger.error(e)\n BaseModel.db.session.rollback()\n return BaseResource.send_json_message(\"Error while adding tray. Another tray has that number\", 500)\n log_304(tray)\n return BaseResource.send_json_message(\"No changes made\", 304)\n return BaseResource.send_json_message(\"Tray not found\", 404)\n\n @jwt_required\n @is_theme_admin\n @has_required_request_params\n def delete(self):\n code = format_and_lower_str(request.headers['code'])\n tray = TrayResource.get_tray(code)\n\n if not tray:\n return BaseResource.send_json_message(\"Tray not found\", 404)\n\n BaseModel.db.session.delete(tray)\n BaseModel.db.session.commit()\n log_delete(tray)\n return BaseResource.send_json_message(\"Tray deleted\", 200)\n\n @staticmethod\n def tray_parser():\n parser = reqparse.RequestParser()\n parser.add_argument('rack', required=True)\n parser.add_argument('number', required=True)\n parser.add_argument('code', required=True, type=standard_non_empty_string)\n\n args = parser.parse_args()\n return args\n\n @staticmethod\n def get_tray(code):\n return BaseModel.db.session.query(Tray).filter_by(code=code).first()\n","sub_path":"api/resources/tray_resource.py","file_name":"tray_resource.py","file_ext":"py","file_size_in_byte":5152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"492315759","text":"import a1\nimport unittest\n\n\nclass TestSwapK(unittest.TestCase):\n \"\"\" Test class for function a1.swap_k. 
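Each test calls a1.swap_k(nums, k), which should swap the first k items of nums with the last k items in place. 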
\"\"\"\n\n def test_empty_case(self):\n \"\"\"\n Test for when empty list is provided\n \"\"\"\n actual = []\n expected = []\n\n a1.swap_k(actual, 0)\n self.assertEqual(actual, expected)\n\n \n\n\n def test_too_small(self):\n \"\"\"\n Test for a one element list\n \"\"\"\n actual = [5]\n expected = [5]\n\n a1.swap_k(actual, 0)\n self.assertEqual(actual, expected)\n\n def test_small_even(self):\n \"\"\"\n Test for the smallest possible even-numbered long list\n \"\"\"\n actual = [1, 2]\n expected = [2, 1]\n\n a1.swap_k(actual, 1)\n self.assertEqual(actual, expected)\n\n def test_small_odd(self):\n \"\"\"\n Test for the smallest possible odd-numbered long list\n \"\"\"\n actual = [\"one\", \"two\", \"three\"]\n expected = [\"three\", \"two\", \"one\"]\n\n a1.swap_k(actual, 1)\n self.assertEqual(actual, expected)\n\n def general_case(self):\n \"\"\"\n Test when k is not close to the middle of the list\n \"\"\"\n\n actual = [1, 2, 3, 4, 5, 6]\n expected = [5, 6, 3, 4, 1, 2]\n\n a1.swap_k(actual, 2)\n self.assertEqual(actual, expected)\n\n\n\n def test_longer_at_middle(self):\n \"\"\"\n Test for a longer even-length list where k is close to the middle of the list\n \"\"\"\n\n actual = [1, 2, 3, 4, 5, 6]\n expected = [4, 5, 6, 1, 2, 3]\n\n a1.swap_k(actual, 3)\n self.assertEqual(actual, expected)\n\n\n \n\n\nif __name__ == '__main__':\n unittest.main(exit=False)\n","sub_path":"ltp2/assignment-1/test_swap_k.py","file_name":"test_swap_k.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"67581329","text":"import pytest\n\nfrom main import app, change_for_tests\nfrom starlette.testclient import TestClient\n\nclient = TestClient(app)\n\n\nclass TestMain:\n\n @pytest.fixture(scope=\"session\", autouse=True)\n def set_up(self):\n change_for_tests()\n\n def test_happy(self):\n response = client.get(\"/interview/5/1\")\n assert response.status_code == 200\n result = response.json()\n assert result[\"admin\"] == \"/interview/e1177e54-d903-4e91-a299-ddc56606785b/9ff2faff-dc3f-40b0-8c02-53a67750281e\"\n assert result[\"job_seeker\"] == \"/interview/e1177e54-d903-4e91-a299-ddc56606785b\"\n\n def test_older_interview(self):\n response = client.get(\"/interview/5/1?which=1\")\n assert response.status_code == 200\n result = response.json()\n assert result[\"admin\"] == \"/interview/cbb6b884-888a-4ec4-9446-0a25ba2f2e9e/9ff2faff-dc3f-40b0-8c02-53a67750281e\"\n assert result[\"job_seeker\"] == \"/interview/cbb6b884-888a-4ec4-9446-0a25ba2f2e9e\"\n\n def test_hrpartner_not_exists(self):\n response = client.get(\"/interview/1/1\")\n result = response.json()\n assert response.status_code == 404\n assert result['detail'] == \"This hrpartner doesn't exists\"\n\n def test_interviews_not_exists(self):\n response = client.get(\"/interview/5/5\")\n result = response.json()\n assert response.status_code == 404\n assert result['detail'] == \"This application doesn't have got interviews\"\n\n def test_not_enough_interviews(self):\n response = client.get(\"/interview/5/1?which=5\")\n result = response.json()\n assert response.status_code == 404\n assert result['detail'] == f\"This application doesn't have got 6 interviews\"\n\n def test_jobseeker_not_proper_uuid(self):\n response = client.get(\"/interview/jobseeker/zasdafasdfsacasdfsfadfasd\")\n result = response.json()\n assert response.status_code == 404\n assert result['detail'] == \"This string is not valid uuid\"\n\n def test_jobseeker_interview_not_exists(self):\n uuid = 
\"a1177e54-d903-4e91-a299-ddc56606785b\"\n response = client.get(f\"/interview/jobseeker/{uuid}\")\n result = response.json()\n assert response.status_code == 404\n assert result['detail'] == f\"Interview with id: {uuid} doesn't exist\"\n\n def test_jobseeker_happy(self):\n response = client.get(\"/interview/jobseeker/e1177e54-d903-4e91-a299-ddc56606785b\")\n assert response.status_code == 200\n result = response.json()\n assert result['first_name'] == \"John\"\n assert result['last_name'] == \"Adams\"\n","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"64711960","text":"import pickle\nimport json\nimport os\n\nseparator_task = \"*\"*100\n\ndef separete(something,separator):\n print()\n print(separator,something,separator)\n print()\n\ndef check_file(file_name):\n f_path = os.path.join(os.getcwd(),file_name)\n if os.path.exists(f_path) and os.path.isfile(f_path):\n return True\n print(\"file {} not found\".format(f_path))\n return False\n\ndef read_pickle(file_name):\n if not check_file(file_name):\n return None\n with open(file_name,\"rb\") as f:\n return pickle.load(f)\n\ndef read_json(file_name):\n if not check_file(file_name):\n return None\n with open(file_name,\"r\",encoding=\"utf-8\") as f:\n return json.load(f)\n\n\nseparete(\"task №2\",separator_task)\n\nfor key,value in {\"group.pickle\":read_pickle,\"group.json\":read_json}.items():\n print(\"object from {}: \".format(key))\n data = value(key)\n if data:\n print(data)\n","sub_path":"python/base/hw_6/music_deserialize.py","file_name":"music_deserialize.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"299521319","text":"\"\"\"\n闭包:\n1,函数内部嵌套函数\n2,内部函数访问外部函数局部变量\n3,外部函数将内部函数返回\n\n因为外部函数返回的为内部函数的引用\n外部函数每执行一次都要返回内部函数的引用(该引用保存了外部函数的局部变量)\n\n\"\"\"\n\ndef fun1():\n a = 1\n def fun2(b):\n return a+b\n return fun2\n\n# print(fun1, type(fun1))\n# print(fun1(), type(fun1()))\n# print(fun1()(2))\n\nr1 = fun1()\nprint(r1(5))\nprint(r1(5))\nprint(r1(5))\nr2 = fun1()\nprint(r1(6))\n\n","sub_path":"PythonAdvance/day0213/demo07.py","file_name":"demo07.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"544542731","text":"\n\n#calss header\nclass _PICCALILLI():\n\tdef __init__(self,): \n\t\tself.name = \"PICCALILLI\"\n\t\tself.definitions = [u'small pieces of different vegetables preserved in a mustard sauce, usually eaten with cold meat']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_piccalilli.py","file_name":"_piccalilli.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"628672987","text":"from kafka import KafkaConsumer\nimport json\n\ntopic = \"texts\"\ncontactpoint= ['127.0.0.1']\n\nconsumer = KafkaConsumer(\n topic,\n bootstrap_servers='localhost:9092',\n auto_offset_reset='latest',\n enable_auto_commit=True)\n\nfor message in consumer:\n message = json.loads(message.value)\n msgstr = str(message)\n 
print(msgstr)\n\n","sub_path":"test_consumer.py","file_name":"test_consumer.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"173792063","text":"from helpers import Container\n\n\"\"\"\nRouteTableCreator Class:\nCreates the route table for every action element in the html page\n\"\"\"\n\n\nclass RouteTableCreator:\n def createTable(root):\n dictionary = {}\n for tag in root.getTags():\n if isinstance(tag, Container):\n dictionary =\\\n RouteTableCreator.createTableContainer(tag,\n dictionary,\n root.getTag() + \".\")\n else:\n dictionary[root.getTag()+\".\"+tag.getTag()] = tag\n return dictionary\n\n def createTableContainer(container, dictionary, route):\n for tag in container.getTags():\n newRoute = route + container.getTag()\n if isinstance(tag, Container):\n dictionary =\\\n RouteTableCreator.createTableContainer(tag,\n dictionary,\n newRoute + \".\")\n else:\n tempRoute = newRoute + \".\" + tag.getTag() + \"_\" + tag.getId()\n dictionary[tempRoute] = tag\n return dictionary\n","sub_path":"automationtool/routetablecreator.py","file_name":"routetablecreator.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"108658858","text":"\n# vs \nfrom starter2 import *\nimport data_locations as dl\nfrom collections import defaultdict\nimport davetools\nreload(davetools)\n\nimport testing.early_mask as em #testing old method\nreload(em) #testing old method\n\nimport scipy\nplt.close('all')\n\n\nclass magfield_density_tool(): \n def __init__(self,this_looper):\n self.this_looper=this_looper\n \n self.mean_rho=defaultdict(list)\n self.mean_rho_py=defaultdict(list) \n \n self.mean_field_comps=defaultdict(list) \n self.mean_field_comps_py=defaultdict(list) \n \n self.mean_fieldOverRho = defaultdict(list) \n self.mean_fieldOverRhoCheck = defaultdict(list) \n self.mean_fieldOverRhoLogged = defaultdict(list)\n \n self.angle_mean = defaultdict(list)\n self.mean_cv=defaultdict(list)\n self.alphaSum = defaultdict(list)\n self.alphaProd = defaultdict(list)\n\n self.cores_used=[] \n\n self.alpharr1 = np.empty([0],dtype=float)\n self.alpharr2 = np.empty([0],dtype=float)\n self.alpharr3 = np.empty([0],dtype=float)\n self.alpharr1_ad = np.empty([0],dtype=float)\n self.alpharr2_ad = np.empty([0],dtype=float)\n self.alpharr3_ad = np.empty([0],dtype=float)\n\n self.pearr1 = np.empty([0],dtype=float)\n self.pearr2 = np.empty([0],dtype=float)\n self.pearr3 = np.empty([0],dtype=float)\n\n \n def labelled(ax,xscale=None,yscale=None,xlabel=None,ylabel=None,\\\n xlim=None,ylim=None,title=None,linthreshx=0.1,linthreshy=0.1): \n if xscale and yscale != None:\n ax.set_xscale(xscale)\n ax.set_yscale(yscale)\n if xscale == 'symlog':\n ax.set_xscale(xscale,linthreshx=linthreshx)\n if yscale == 'symlog':\n ax.set_yscale(yscale,linthreshy=linthreshy)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel) \n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n ax.set_title(title)\n\n def run(self,core_list=None):\n dx=1./2048\n nx = 1./dx\n thtr = self.this_looper.tr\n all_cores = np.unique(thtr.core_ids)\n if core_list is None:\n core_list = all_cores\n\n print('core_list',len(core_list))\n thtr.sort_time()\n tsorted = thtr.times\n self.core_list=core_list #initiated?\n \n for core_id in core_list:\n ms = trackage.mini_scrubber(thtr,core_id)\n self.ms = ms #initiated?\n\n #print('go ', core_id)\n self.cores_used.append(core_id)\n self.times = thtr.times\n 
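# note: thtr.sort_time() was already called above in run(), so frames iterate in time order; asort keeps an explicit index\n 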
asort = np.argsort(self.times) #added\n\n for nf,frame in enumerate(thtr.frames): \n mask = ms.compute_unique_mask(core_id, dx=1./2048,frame=nf)\n\n density = thtr.c([core_id],'density')[mask,nf]\n cell_volume = thtr.c([core_id],'cell_volume')[mask,nf]\n\n #magfield = thtr.c([core_id],'magnetic_field_stregnth')[mask,nf] #ERROR, CHECK[ip]\n bx = thtr.c([core_id],'magnetic_field_x')[mask,nf]\n by = thtr.c([core_id],'magnetic_field_y')[mask,nf]\n bz = thtr.c([core_id],'magnetic_field_z')[mask,nf] \n bb = np.sqrt(bx*bx+by*by+bz*bz) \n\n # \n bx_o = thtr.c([core_id],'magnetic_field_x')[mask,0]\n by_o = thtr.c([core_id],'magnetic_field_y')[mask,0]\n bz_o = thtr.c([core_id],'magnetic_field_z')[mask,0]\n bb_o = np.sqrt(bx_o*bx_o + by_o*by_o + bz_o*bz_o)\n \n BtdotBo = bx*bx_o + by*by_o + bz*bz_o \n costheta = BtdotBo/(bb*bb_o)\n mean_cos_theta = (density*cell_volume*costheta).sum()/(density*cell_volume).sum()\n\n # EXPLORING THE ADDITIVE VS MULTIPLICATIVE INTEGRALS \n bbsum = bb.sum()\n rhosum = density.sum()\n bblog = np.log(bb)\n rholog = np.log(density)\n\n fig,ax=plt.subplots(1,1)\n alpha1 = np.log(bbsum)/np.log(rhosum) \n alpha2 = bblog.sum()/rholog.sum()\n \n \n # EXPLORING OTHER B over RHO COMBINATIONS\n BRho = bb/density \n density_log = np.log10(density)\n field_log = np.log10(bb)\n BRho_log = np.log10(BRho) \n BovRho = density_log/field_log #for lnB/lnRho\n\n self.angle_mean[core_id].append(mean_cos_theta) \n self.mean_field_comps[core_id].append((bb * cell_volume).sum()/cell_volume.sum())\n #self.mean_field_comps_py[core_id].append(bb.mean()) \n self.mean_rho[core_id].append((density * cell_volume).sum()/(cell_volume.sum())) \n #self.mean_rho_py[core_id].append(density.mean()) \n self.mean_cv[core_id].append((cell_volume * cell_volume).sum()/(cell_volume.sum())) #DOES THIS MAKE SENSE TO DO... 
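(this is the volume-weighted mean cell volume, sum(V*V)/sum(V); harmless, but it tracks local resolution rather than a physical field)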
\n\n self.mean_fieldOverRho[core_id].append((BRho * cell_volume).sum()/(cell_volume.sum()))\n self.mean_fieldOverRhoCheck[core_id].append((BRho_log * cell_volume).sum()/(cell_volume.sum()))\n self.mean_fieldOverRhoLogged[core_id].append((BovRho * cell_volume).sum()/(cell_volume.sum()))\n\n self.alphaSum[core_id].append(alpha1)\n self.alphaProd[core_id].append(alpha2)\n\n\n def profiles(self,name=None):\n thtr = self.this_looper.tr \n all_cores = np.unique(thtr.core_ids)\n core_list = all_cores\n\n for nf,theframe in enumerate(thtr.frames): \n print('name ',name)\n fig,ax=plt.subplots(1,1)\n\n ds = self.this_looper.load(frame=theframe) #derived=[em.add_tracer_density]), was testing old method\n #em.add_tracer_density(ds)\n ad = ds.all_data()\n #deposit_tuple = (\"deposit\",\"target_particle_volume\") \n\n #all_target_indices = np.concatenate([self.this_looper.target_indices[core_id] for core_id in core_list])\n #ad.set_field_parameter('target_indices',all_target_indices)\n #ad.set_field_parameter('mask_to_get',np.zeros_like(all_target_indices,dtype='int32')) \n\n if 0: #ALL DATA\n prof_ad = yt.create_profile(ad,bin_fields=['density'],fields=['magnetic_field_strength'],weight_field='cell_volume', override_bins=None)\n\n xbins = prof_ad.x_bins\n bin_center = 0.5*(xbins[1:]+xbins[:-1])\n pdf = prof_ad['magnetic_field_strength']\n\n the_xx = bin_center\n the_x = np.log10(ad['density'].v) \n the_yy = pdf\n the_y = np.log10(ad['magnetic_field_strength'].v)\n\n ax.plot(the_xx,the_yy,c='k',linewidth=1.0)\n \n # PLOT THE POWER LAW \n pfit = np.polyfit(the_x,the_y,1)\n alpha = pfit[0]\n if name == 'u401':\n self.alpharr1_ad = np.append(self.alpharr1_ad,alpha)\n if name == 'u402':\n self.alpharr2_ad = np.append(self.alpharr2_ad,alpha)\n if name == 'u403':\n self.alpharr3_ad = np.append(self.alpharr3_ad,alpha) \n Bavg_o = pfit[1]\n\n X = np.linspace(the_x.min(),the_x.max(),num=len(the_x)) #short: -2, +3 \n XX = 10 ** X\n Y = 10 ** (pfit[0]*X + pfit[1]) \n #ax.plot(XX,Y,c='b',linewidth=1.0)\n #ax.set_title(r'$\\alpha = %.3f$'%alpha) \n \n\n if 1: #ALL CORE DATA \n # the YT way:\n #prof_cores = yt.create_profile(ad,bin_fields=['density'],fields=['magnetic_field_strength'],weight_field=deposit_tuple, override_bins=None)\n #xbins = prof_cores.x_bins\n #bin_center = 0.5*(xbins[1:]+xbins[:-1])\n #pdf = prof_cores['magnetic_field_strength']\n #the_xx = bin_center\n #the_yy = pdf\n #ax.plot(the_xx,the_yy,c='k',linewidth=1.0,linestyle='dashed')\n\n # fields:\n Cellvolume_all_cores_all_time = thtr.track_dict['cell_volume'] # shape: (particles,14) for u401\n CellVolume = Cellvolume_all_cores_all_time[:,nf] # shape: (particles,) for u401 \n\n Rho_all_cores_all_time = thtr.track_dict['density'] \n Density = Rho_all_cores_all_time[:,nf]\n\n Magfield_all_cores_all_time = thtr.track_dict['magnetic_field_strength']\n Magfield = Magfield_all_cores_all_time[:,nf]\n \n #BovRho_all_cores_all_time = Magfield_all_cores_all_time/Rho_all_cores_all_time \n #BovRho = BovRho_all_cores_all_time[:,nf]\n BovRho = Magfield/Density #SIMPLY\n\n # 2D histograms:\n cellvolume_bins = np.geomspace(1e-11,1e-6) \n density_bins = np.geomspace(1e-3,1e8) #pdfs on overleaf\n magfield_bins = np.geomspace(1e-1,1e4)\n \n\n# maybe there's a better way to do this, but perhaps this is the way to weight all our fields with cell volume\n# these pdfs would correspond to alpha fit then average since it involves the particles of all cores at the same time\n# (the black dot which essentially corresponds to the circled blue dot)\n #theArray, binsX, binsY = 
np.histogram2d(Density, CellVolume, bins=[density_bins,cellvolume_bins],density=True)\n theArray, binsX = np.histogram(BovRho,bins=density_bins, weights=CellVolume, density =True)\n \n # BinsX are the edges, center it by averaging:\n bin_centers = 0.5*(binsX[1:]+binsX[:-1])\n bin_widths = binsX[1:]-binsX[:-1]\n #why do we need these next two lines again?\n bin_centers.shape = bin_centers.size, 1 \n bin_widths.shape = bin_widths.size, 1 \n\n the_cxx = bin_centers \n the_cyy = theArray\n \n # attempts for 2D histograms:\n #the_cyy = theArray.sum(axis=1) \n #the_cyy = (bin_centers * bin_widths * theArray).sum(axis=1) \n #the_cyy = (bin_widths * theArray).sum(axis=1) \n\n # it should make sense:\n #whatsthis = the_cxx * the_cyy * bin_widths\n #print('one or not',whatsthis)\n #isitone = (the_cyy * bin_widths).sum() #THINK!! or ask.. :/\n #print('one or not',isitone)\n\n ax.plot(the_cxx,the_cyy,c='k',linewidth=1.0)#, linestyle='dashed') \n #ax.set_xlim(1e-3,1e7) #write in axbonk\n ax.set_ylim(1e-10,1e1) #write in axbonk \n \n if 0:\n # FOR HEAT MAP PURPOSES\n #fig2,ax2 = plt.subplots(1,1) \n xbins = 0.5*(binsX[1:]+binsX[:-1])\n ybins = 0.5*(binsY[1:]+binsY[:-1])\n nx = len(xbins) ; ny=len(ybins)\n # 2d array of xbins\n TheX = np.r_[(ny)*[xbins]].transpose()\n TheY = np.r_[(nx)*[ybins]]\n \n\n # all other heat maps have been for time in the x axis, something like this has been done:\n # but here have our theArray for one time frame...and we want all time frames, let's see...\n\n #hist = np.zeros([xbins.size,ybins.size])\n #for ntime, time in enumerate(these_times):\n # thishist,bins = np.histogram(masses[ntime,:],bins=mass_bins_edge)\n # hist[ntime,:]=thishist\n #print('len_hist ',hist.shape) #49,49\n #print('len_hist[nf,:] ',hist[nf,:].shape) #49\n #print('len_theArray ',theArray.shape) #49,49\n\n cmap = copy.copy(mpl.cm.get_cmap(\"viridis\"))\n cmap.set_under('w')\n minmin = theArray[theArray>0].min()\n norm = mpl.colors.LogNorm(vmin=minmin,vmax=theArray.max())\n ploot=ax.pcolormesh(TheX, TheY, theArray, cmap=cmap,norm=norm,shading='nearest') # interpolation ~ shading\n\n axbonk(ax,xscale='log',yscale='log') \n #ax.imshow(theArray,origin='lower',interpolation='nearest') # imshow ~ plotting in Python\n outname = 'PDF_BovRho_%s_%s'%(theframe,name) \n fig.savefig(outname)\n print('save ',outname)\n plt.close(fig)\n \n #axbonk(ax,xlabel=r'$\\rho/\\rho_{o}$',ylabel=r'$|B|(\\mu g)$',xscale='log',yscale='log') \n #outname = 'Brho_profs_%s_%s'%(theframe,name) \n #fig.savefig(outname)\n #print(outname)\n #plt.close(fig)\n\n\n\n#import three_loopers_mountain_top as TLM\nimport three_loopers_tenfour as TLTF\nif 'clobber' not in dir():\n clobber=True\n\nif 'mag_den1' not in dir() or clobber:\n mag_den1=magfield_density_tool(TLTF.loops['u401'])\n simname1 = 'u401'\n mag_den1.run()\n #mag_den1.profiles(simname1)\n\nif 'mag_den2' not in dir() or clobber:\n mag_den2=magfield_density_tool(TLTF.loops['u402'])\n simname2 = 'u402'\n mag_den2.run()\n #mag_den2.profiles(simname2)\n\nif 'mag_den3' not in dir() or clobber:\n mag_den3=magfield_density_tool(TLTF.loops['u403'])\n simname3 = 'u403'\n mag_den3.run()\n #mag_den3.profiles(simname3)\nsimnames = [simname1, simname2, simname3]\n\n\n# UNCOMMENT IF OVERLAYING ALL PLOTS\n#import alphaFieldDensity as afd\nif 1:\n # TO PLOT FIGUERS OF ALL THREE SIMS AT ONCE\n #fig0, ax0=plt.subplots(1,1) \n\n for nt,tool in enumerate([mag_den1,mag_den2,mag_den3]):\n # SET UP THE VARIABLE\n G=1620./(4*np.pi)\n tff_global = np.sqrt(3*np.pi/(32*G*1))\n ncores = len(tool.cores_used)\n 
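# the arrays below are shaped (ntimes, ncores): one row per tracked frame, one column per core\n 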
ntimes = tool.times.size\n these_times = tool.times/tff_global \n #print('tffs!',these_times) #check the TIME \n\n rhos = np.zeros([ntimes,ncores]) \n fields = np.zeros([ntimes,ncores])\n fieldOvers = np.zeros([ntimes,ncores])\n fieldOversCheck = np.zeros([ntimes,ncores])\n fieldOversLog = np.zeros([ntimes,ncores])\n\n angles = np.zeros([ntimes,ncores])\n cvs = np.zeros([ntimes,ncores])\n alphaS = np.zeros([ntimes,ncores])\n alphaP = np.zeros([ntimes,ncores])\n\n\n # OPEN UP ALL THE FIGURES AND AXIS \n # SINGLE PANEL \n if 0:\n fig, ax1=plt.subplots(1,1) \n\n # OR MULTIPLE PANEL\n if 0:\n fig = plt.figure()\n #fig.text(0.365,0.03,r'$\\rho/\\rho_{o}$')\n\n ax1 = plt.subplot(331)\n ax2 = plt.subplot(332)\n ax3 = plt.subplot(333) #ADDED \n ax4 = plt.subplot(334)\n ax5 = plt.subplot(335)\n ax6 = plt.subplot(336) #ADDED \n ax7 = plt.subplot(337)\n ax8 = plt.subplot(338)\n ax9 = plt.subplot(339) #ADDED \n\n fig.subplots_adjust(wspace=0, hspace=0)\n\n if nt == 0:\n frames = [1,2,3,5,6,7,8,10,12]\n lplots = [0,ax1,ax2,ax3,0,ax4,ax5,ax6,ax7,0,ax8,0,ax9,0] \n if nt == 1:\n frames = [1,2,3,4,5,6,7,9,11]\n lplots = [0,ax1,ax2,ax3,ax4,ax5,ax6,ax7,0,ax8,0,ax9,0] \n if nt == 2:\n frames = [1,2,3,4,5,6,7,9,10]\n lplots = [0,ax1,ax2,ax3,ax4,ax5,ax6,ax7,0,ax8,ax9,0] \n\n\n if 0: # IF DOING vs BY FRAME, SINGLE PANEL\n fig2,ax2 = plt.subplots(1,1) \n fig3,ax3 = plt.subplots(1,1) \n fig4,ax4 = plt.subplots(1,1) \n fig5,ax5 = plt.subplots(1,1) \n fig6,ax6 = plt.subplots(1,1) \n fig7,ax7 = plt.subplots(1,1) \n fig8,ax8 = plt.subplots(1,1) \n fig9,ax9 = plt.subplots(1,1) \n fig10,ax10 = plt.subplots(1,1) \n fig11,ax11 = plt.subplots(1,1) \n fig12,ax12 = plt.subplots(1,1) \n fig13,ax13 = plt.subplots(1,1)\n fig14,ax14 = plt.subplots(1,1)\n fig15,ax15 = plt.subplots(1,1)\n \n if nt == 0:\n axplts = [ax2,ax3,ax4,ax5,ax6,ax7,ax8,ax9,ax10,ax11,ax12,ax13,ax14,ax15]\n figs = [fig2,fig3,fig4,fig5,fig6,fig7,fig8,fig9,fig10,fig11,fig12,fig13,fig14,fig15]\n if nt == 1: \n axplts = [ax2,ax3,ax4,ax5,ax6,ax7,ax8,ax9,ax10,ax11,ax12,ax13,ax14]\n figs = [fig2,fig3,fig4,fig5,fig6,fig7,fig8,fig9,fig10,fig11,fig12,fig13,fig14]\n if nt == 2: \n axplts = [ax2,ax3,ax4,ax5,ax6,ax7,ax8,ax9,ax10,ax11,ax12,ax13]\n figs = [fig2,fig3,fig4,fig5,fig6,fig7,fig8,fig9,fig10,fig11,fig12,fig13]\n \n\n the_v = np.empty([0],dtype=float)\n the_w = np.empty([0],dtype=float)\n the_x = np.empty([0],dtype=float)\n the_sxx = np.empty([0],dtype=float)\n the_a = np.empty([0],dtype=float)\n the_y = np.empty([0],dtype=float)\n the_syy = np.empty([0],dtype=float)\n the_b = np.empty([0],dtype=float)\n the_z = np.empty([0],dtype=float) \n the_cv = np.empty([0],dtype=float) \n\n # MAKE THE FIELDS INTO A 2D ARRAY WE CAN PLOT\n for ncore,core_id in enumerate(tool.cores_used):\n this_rho = tool.mean_rho[core_id] \n this_field = tool.mean_field_comps[core_id] \n this_BRho = tool.mean_fieldOverRho[core_id]\n this_BRhoCheck = tool.mean_fieldOverRhoCheck[core_id]\n this_BRhoLog = tool.mean_fieldOverRhoLogged[core_id] \n this_ang = tool.angle_mean[core_id]\n this_cv = tool.mean_cv[core_id]\n \n this_alphaS = tool.alphaSum[core_id]\n this_alphaP = tool.alphaProd[core_id]\n\n rhos[:,ncore]= this_rho # was np.log10()\n fields[:,ncore]= this_field\n fieldOvers[:,ncore]= this_BRho\n fieldOversCheck[:,ncore]= this_BRhoCheck\n fieldOversLog[:,ncore] = this_BRhoLog\n angles[:,ncore]= np.arccos(this_ang)*180/np.pi\n cvs[:,ncore]= this_cv\n\n # is this redundant?\n alphaS[:,ncore] = this_alphaS\n alphaP[:,ncore] = this_alphaP\n this_alphaS = alphaS[:,ncore]\n this_alphaP = 
alphaP[:,ncore]\n\n this_rho = rhos[:,ncore] \n the_xx = np.log10(this_rho)\n the_aa = this_rho\n the_x= np.append(the_x,the_xx) \n the_a= np.append(the_a,the_aa) \n \n this_field = fields[:,ncore]\n the_yy = np.log10(this_field)\n the_bb = this_field\n the_y= np.append(the_y,the_yy) \n the_b= np.append(the_b,the_bb) \n \n the_fieldOvRho = this_field/this_rho\n the_zz = np.log10(the_fieldOvRho)\n the_z = np.append(the_z,the_zz)\n \n # other combinations...\n this_BRho = fieldOvers[:,ncore]\n the_vv = np.log10(this_BRho)\n the_v= np.append(the_w,the_vv) \n this_BRhoCheck = fieldOversCheck[:,ncore]\n this_BRhoLog = fieldOversLog[:,ncore]\n\n this_ang=angles[:,ncore]\n # TRY\n this_cv = cvs[:,ncore]\n the_cv=np.append(the_cv,this_cv)\n \n\n if 0: #for all extents I should combine these next ones with the extents in main definition\n rho_extents=davetools.extents()\n rho_extents(the_a)\n magfield_extents = davetools.extents()\n magfield_extents(the_b)\n\n if 0: # FOR ALL TIME\n tmap = rainbow_map(len(this_rho[:-1]))\n ctr = [tmap(n) for n in range(len(this_rho[:-1]))]\n ax1.scatter(this_rho[:-1], this_field[:-1],c=ctr,marker='*') \n #ax1.scatter(this_rho[0], this_field[0],c='b',marker='*') \n #ax1.scatter(this_rho[-1], this_field[-1],c='r',marker='*') \n #ax1.plot(this_rho,this_field,c=[0.5]*4)\n\n if 0: # FOR ONE FRAME PER TIME; SINGLE PANEL \n xlims = 0.4,1.8\n ylims = 0.0,5.0\n for i in range(len(axplts)):\n if 1:\n if nt == 0:\n color = 'r'\n if nt == 1:\n color = 'b'\n if nt == 2:\n color = 'g'\n axplts[i].scatter(this_alphaS[i],this_alphaP[i],c=color,marker='*')\n outname_frame='AlphaSP_%s_%d'%(simnames[nt],i)\n magfield_density_tool.labelled(axplts[i],xscale=None,yscale=None,xlabel='Sum',ylabel='Product',\\\n title=None, xlim=xlims,ylim=ylims)\n if 0:\n axplts[i].scatter(this_rho[i],this_field[i],c='g',marker='*')\n axplts[i].scatter(this_rho[i],the_zz[i],c='g',alpha=0.2)\n magfield_density_tool.labelled(axplts[i],xscale=None,yscale=None,xlabel=r'$\\rho$',ylabel=r'$log(B/ \\rho)$',\\\n title=None, xlim=None,ylim=None)\n outname_frame='Scatter_LogBRhovsRho_%s_%d'%(simnames[nt],i)\n figs[i].savefig(outname_frame)\n #print(\"saved\")\n \n if 0: # FOR ONE FRAME PER TIME; MULTIPLE PANEL ..in progress: I think it will be best if I end up moving this to the next if statement.\n tmap2 = rainbow_map(len(this_rho))\n c2 = [tmap2(n) for n in range(len(this_rho))] \n for i in range(len(this_rho)):\n if i in frames: \n lplots[i].scatter(this_rho[i],this_field[i],c=c2[i],marker='*') #C2 gives me lots of notes, but it works... 
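(likely matplotlib warnings about the per-point RGBA color list being ambiguous; the scatter still renders correctly)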
\n #lplots[n_time].plot(xx2,yy,c='k',linewidth=1.0) #SEE BELOW\n magfield_density_tool.labelled(lplots[i],xscale='log',yscale='log',xlabel=None,ylabel=None,\\\n title=None, xlim=rho_extents.minmax,ylim=magfield_extents.minmax)\n ax2.tick_params(axis='y',labelleft=False)\n ax3.tick_params(axis='y',labelleft=False)\n ax4.set_ylabel(r'$\\left\\langle\\mid B \\mid\\right\\rangle (\\mu G)$')\n ax5.tick_params(axis='y',labelleft=False)\n ax6.tick_params(axis='y',labelleft=False)\n ax8.tick_params(axis='y',labelleft=False)\n ax8.set_xlabel(r'$\\left\\langle \\rho/\\rho_{o} \\right\\rangle$')\n ax9.tick_params(axis='y',labelleft=False)\n if 0:\n plt.close(figs[i])\n\n the_w = the_y/the_x\n if 1:\n # PLOT THE POWER LAW: per frame \n numcores = len(the_x)/ncores \n coreint = int(numcores)\n for i in range(coreint): \n #print('i in range(coreint)',i) \n #print('coreint',coreint) \n the_sx = the_x[i::coreint]\n the_sy = the_y[i::coreint]\n the_sz = the_z[i::coreint] \n the_sw = the_w[i::coreint] \n the_cvs = the_cv[i::coreint]\n\n # for power law for all time minus the last frame \n minusone = coreint-1\n if i < minusone: \n the_sxx= np.append(the_sxx,the_sx) \n the_syy= np.append(the_syy,the_sy) \n\n sX = np.linspace(the_sx.min(),the_sx.max(),num=len(the_sx)) #short: -2, +3 \n\n spfit = np.polyfit(the_sx,the_sy,1)\n salpha = spfit[0]\n sBavg_o = spfit[1]\n \n # pearsonR\n xs = np.std(the_sx)\n ys = np.std(the_sy)\n if xs != 0 and ys != 0:\n pearX,pearY = scipy.stats.pearsonr(the_sx,the_sy)\n #print('pearX',pearX)\n else:\n print(\"A zero encountered!!\",xs,ys)\n \n if nt == 0:\n mag_den1.alpharr1 = np.append(mag_den1.alpharr1,salpha) \n mag_den1.pearr1 = np.append(mag_den1.pearr1,pearX) \n if nt == 1:\n mag_den2.alpharr2 = np.append(mag_den2.alpharr2,salpha) \n mag_den2.pearr2 = np.append(mag_den2.pearr2,pearX) \n if nt == 2:\n mag_den3.alpharr3 = np.append(mag_den3.alpharr3,salpha) \n mag_den3.pearr3 = np.append(mag_den3.pearr3,pearX) \n \n sXX = 10 ** sX \n sY = 10 ** (spfit[0]*sX + spfit[1]) \n \n # PER PANEL\n # need x,y limits\n if 0:\n axplts[i].plot(sXX,sY,c='k',linewidth=1.0)\n magfield_density_tool.labelled(axplts[i],xscale='log',yscale='log',xlabel=r'$<\\rho>$',ylabel=r'$$',\\\n xlim=xlims, ylim=ylims,title=r'$\\alpha = %.3f$'%salpha) \n \n outname_frame='BnFrameTracks_pl_%s_%d'%(simnames[nt],i)\n #figs[i].savefig(outname_frame)\n print(\"saved \",i)\n plt.close('all')\n\n # MULTIPLE PANELS:\n if 0: \n if i in frames:\n lplots[i].plot(sXX,sY,c='k',linewidth=1.0)\n\n # FOR HISTOGRAMS\n if 0: \n if nt == 0:\n color = 'r'\n if nt == 1:\n color = 'b'\n if nt == 2:\n color = 'g'\n\n # the_zmin -3.3661703693691116 the_zmax 1.0872747818424593\n # the_wmin -73.79359150405679 the_wmax 82.79924734574196\n\n the_bins = np.linspace(-10,10)\n the_weights = None #the_cvs \n\n # THE LN(Y/X)\n the_lnArray, xbins = np.histogram(the_sz,bins=the_bins,weights=the_weights,density=True)\n bin_lncenters = 0.5*(xbins[1:]+xbins[:-1])\n the_lnX= bin_lncenters\n the_lnY= the_lnArray\n # THE LNY/LNX\n the_lnlnArray, xxbins = np.histogram(the_sw,bins=the_bins,weights=the_weights,density=True)\n bin_lnlncenters = 0.5*(xxbins[1:]+xxbins[:-1])\n the_lnlnX= bin_lnlncenters\n the_lnlnY= the_lnlnArray\n\n axplts[i].plot(the_lnX,the_lnY,c=color,linewidth=1.0)\n axplts[i].plot(the_lnlnX,the_lnlnY,c=color,linewidth=1.0,linestyle='dashed')\n \n #axplts[i].hist(the_sz, 50, density=False, histtype='step', color=color) #EDIT color to reflect the small dcc py file\n #axplts[i].hist(the_szz, 50, density=False, histtype='step', 
color=color,ls='--') #EDIT color to reflect the small dcc py file\n \n xlims = None\n ylims = None #check for limits\n magfield_density_tool.labelled(axplts[i],xscale=None,yscale=None,xlabel=r'$log B/ \\rho$, $logB/log\\rho$',ylabel=r'$PDF$',\\\n title=None, xlim=xlims, ylim=ylims)\n # ensuring percentage for the y axis \n y_vals = axplts[i].get_yticks()\n axplts[i].set_yticklabels(['{:.3f}'.format(x/len(the_lnX)) for x in y_vals])\n\n outname_frame='TwoHistograms_AvgQty%s_%d'%(simnames[nt],i)\n figs[i].savefig(outname_frame)\n print(\"saved\")\n \n\n # SAVE THE MULTIPLE PANELS WITH THEIR LEAST SQUARE FIT\n if 0: \n outname = 'brhotffpanels_avgs_%s'%(simnames[nt])\n plt.savefig(outname)\n print(\"saved\")\n\n if 0:\n alphaFile = open(\"alphaRecords.txt\",'a') \n #alphaFile.write(\"Sim %s Pears %f \\n\"%(simnames[nt],mag_den1.pearr1)) #how to write an array?\n print(mag_den1.pearr1)\n print(mag_den2.pearr2)\n print(mag_den3.pearr3)\n #alphaFile.write(\"Sim %s Alpha %f \\n\"%(simnames[nt],salpha))\n alphaFile.close()\n\n if 1:\n # PLOT THE POWER LAW: all frames \n pfit = np.polyfit(the_sxx,the_syy,1)\n alpha = pfit[0]\n Bavg_o = pfit[1]\n # AND THE PEARSON R\n allxs = np.std(the_sxx)\n allys = np.std(the_syy)\n if xs != 0 and ys != 0:\n pearX,pearY = scipy.stats.pearsonr(the_sx,the_sy)\n else:\n print(\"A zero encountered!!\",xs,ys)\n print('pearX_%s'%simnames[nt])\n print(pearX)\n\n X = np.linspace(the_sxx.min(),the_sxx.max(),num=len(the_sxx)) #short: -2, +3 \n XX = 10 ** X\n Y = 10 ** (pfit[0]*X + pfit[1]) \n \n if 0:\n ax1.plot(XX,Y,c='k',linewidth=1.0)\n xlabels = r'$\\left\\langle \\rho/\\rho_{o} \\right\\rangle$'\n ylabels = r'$\\left\\langle\\mid B \\mid\\right\\rangle$'\n xlims = 1e-1,1e8\n ylims = 1e0,1e4\n magfield_density_tool.labelled(ax1,xscale='log',yscale='log',xlabel=xlabels,ylabel=ylabels,\\\n xlim=xlims, ylim=ylims)#,title=r'$\\alpha = %.3f$'%alpha)\n\n outname_all='BnTracks_pl_mone_%s'%simnames[nt]\n fig.savefig(outname_all)\n print(\"saved\")\n\n\n # !!FOR LATER... #a tool for colors (tools/colors.py)!!! 
also look at make_core_cmap\n if 0: \n outname = 'alphaPerFrame' #swap order with below if for allCores\n outname = 'alphaPerFrame_allData' #and comment next one out\n outname = 'alphaRecords'\n outname = 'pearRecords'\n if nt == 0:\n therange = np.arange(0,1,0.075)\n #ax0.plot(therange, mag_den1.alpharr1_ad, c='g') \n #ax0.plot(therange, mag_den1.alpharr1, c='g',linestyle='dashed') \n ax0.plot(therange, mag_den1.pearr1, c='g',linestyle='dashed') \n if nt == 1:\n therange = np.arange(0,1,0.075) \n #mag_den2.alpharr2 = np.append(mag_den2.alpharr2,[np.nan]*1) \n #mag_den2.alpharr2_ad = np.append(mag_den2.alpharr2_ad,[np.nan]*1) \n mag_den2.pearr2 = np.append(mag_den2.pearr2,[np.nan]*1) \n #ax0.plot(therange, mag_den2.alpharr2_ad, c='b')\n #ax0.plot(therange, mag_den2.alpharr2, c='b',linestyle='dashed')\n ax0.plot(therange, mag_den2.pearr2, c='b',linestyle='dashed')\n if nt == 2:\n therange = np.arange(0,1,0.075) \n #mag_den3.alpharr3 = np.append(mag_den3.alpharr3,[np.nan]*2) \n #mag_den3.alpharr3_ad = np.append(mag_den3.alpharr3_ad,[np.nan]*2) \n mag_den3.pearr3 = np.append(mag_den3.pearr3,[np.nan]*2) \n #ax0.plot(therange, mag_den3.alpharr3_ad, c='m')\n #ax0.plot(therange, mag_den3.alpharr3, c='m',linestyle='dashed')\n ax0.plot(therange, mag_den3.pearr3, c='m',linestyle='dashed')\n \n ax0.set_xlabel(r'$t_{\\rm{ff}}$')\n #ax0.set_ylabel(r'$\\alpha_{B\\rho}$')\n ax0.set_ylabel(r'$R_{B\\rho}$')\n ax0.set_ylim(-.1,1.0)\n #mag_den3.labelled(ax0,xlabel=r'$t_{\\rm{ff}}$',ylabel=r'$R_{B\\rho}$',ylim=ylim,title=None) \n\n if 0:\n print(\"calling alphaFieldDensity.py\")\n afd.axisforbox(ax0) #plots the boxplot and returns\n\n fig0.savefig(outname) #saves all three plots \n print(outname,\" saved\")\n plt.close(fig0)\n\n\n","sub_path":"p66_brho/explorefigs/meanFieldDensityTracks_400.py","file_name":"meanFieldDensityTracks_400.py","file_ext":"py","file_size_in_byte":32800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"10929427","text":"import json\n\n\nclass Note:\n\n def __init__(self, **kwargs):\n # create an unknown set of private fields\n for key in kwargs.keys():\n k = \"_\" + key\n kwargs[k] = kwargs[key]\n del kwargs[key]\n\n self.__dict__.update(kwargs)\n\n @property\n def json(self):\n return json.dumps(self.data)\n\n @property\n def data(self):\n return {\n 'type': self.tipe,\n 'tag': self.tag,\n 'description': self.description,\n 'bug': self.bug,\n 'first_in': self.first_in,\n 'fixed_in_version': self.fixed_in_version,\n 'fixed_in_channel': self.fixed_in_channel\n }\n\n # tipe b/c type is taken\n @property\n def tipe(self):\n if (hasattr(self, '_tipe')):\n return self._tipe\n\n @property\n def description(self):\n if (hasattr(self, '_description')):\n return self._description\n\n @property\n def tag(self):\n if (hasattr(self, '_tag')):\n return self._tag\n else:\n return self.tipe.upper()\n\n @property\n def bug(self):\n if (hasattr(self, '_bug')):\n return self._bug\n\n @property\n def first_in(self):\n if (hasattr(self, '_first_in')):\n return self._first_in\n\n @property\n def fixed_in_version(self):\n if (hasattr(self, '_fixed_in_version')):\n return self._fixed_in_version\n\n @property\n def fixed_in_channel(self):\n if (hasattr(self, '_fixed_in_channel')):\n return self._fixed_in_channel\n","sub_path":"models/note.py","file_name":"note.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"61731132","text":"# -*- coding: utf-8 
-*-\r\n\"\"\"\r\nCreated on Thu Jan 28 00:44:25 2021\r\n\r\n@author: chakati\r\n\"\"\"\r\nimport cv2\r\nimport os\r\nimport tensorflow as tf\r\nimport frameextractor as fe\r\nimport handshape_feature_extractor as hfe\r\nimport csv\r\nimport re as regex\r\n\r\n\r\nclass GestureDetail:\r\n def __init__(self, gesture_key, gesture_name, output_label):\r\n self.gesture_key = gesture_key\r\n self.gesture_name = gesture_name\r\n self.output_label = output_label\r\n\r\n\r\nclass GestureFeature:\r\n def __init__(self, gesture_detail: GestureDetail, extracted_feature):\r\n self.gesture_detail = gesture_detail\r\n self.extracted_feature = extracted_feature\r\n\r\n\r\ndef extract_feature(location, input_file, mid_frame_counter):\r\n middle_image = cv2.imread(fe.frameExtractor(location + input_file, location + \"frames/\", mid_frame_counter),\r\n cv2.IMREAD_GRAYSCALE)\r\n response = hfe.HandShapeFeatureExtractor.extract_feature(hfe.HandShapeFeatureExtractor.get_instance(),\r\n middle_image)\r\n return response\r\n\r\n\r\ndef decide_gesture_by_file_name(gesture_file_name):\r\n for x in gesture_details:\r\n if x.gesture_key == gesture_file_name.split('_')[0]:\r\n return x\r\n return None\r\n\r\n\r\ndef decide_gesture_by_name(lookup_gesture_name):\r\n for x in gesture_details:\r\n if x.gesture_name.replace(\" \", \"\").lower() == lookup_gesture_name.lower():\r\n return x\r\n return None\r\n\r\n\r\ndef validate_mutate_recognition(gesture_file_name, extracted_feature_vector, calc_gesture_detail: GestureDetail):\r\n actual_gesture = regex.search('-H-(.*?).mp4', gesture_file_name)\r\n\r\n if actual_gesture is None:\r\n actual_gesture = gesture_file_name.split('_')[0]\r\n add_to_vector = False\r\n else:\r\n actual_gesture = actual_gesture.group(1)\r\n add_to_vector = True\r\n\r\n if calc_gesture_detail.gesture_name == actual_gesture or calc_gesture_detail.gesture_key == actual_gesture:\r\n if add_to_vector:\r\n featureVectorList.append(GestureFeature(calc_gesture_detail, extracted_feature_vector))\r\n else:\r\n print(\"mutating vector set for gesture: \" + actual_gesture + \" for gesture file: \" + gesture_file_name)\r\n actual_gesture_detail = decide_gesture_by_name(actual_gesture)\r\n if actual_gesture_detail is not None:\r\n featureVectorList.append(GestureFeature(actual_gesture_detail, extracted_feature_vector))\r\n else:\r\n print(\r\n \"Gesture detail not decoded for gesture: \" + actual_gesture + \" for gesture file: \" + gesture_file_name)\r\n return True\r\n return False\r\n\r\n\r\n# =============================================================================\r\n# Recognize the gesture (use cosine similarity for comparing the vectors)\r\n# =============================================================================\r\ndef determine_gesture(gesture_location, gesture_file_name, mid_frame_counter):\r\n video_feature = extract_feature(gesture_location, gesture_file_name, mid_frame_counter)\r\n\r\n re_run = True\r\n max_mutations = 0\r\n gesture_detail: GestureDetail = GestureDetail(\"\", \"\", \"\")\r\n while re_run and max_mutations < 5:\r\n cos_sin = 1\r\n position = 0\r\n cursor = 0\r\n for featureVector in featureVectorList:\r\n calc_cos_sin = tf.keras.losses.cosine_similarity(\r\n video_feature,\r\n featureVector.extracted_feature,\r\n axis=-1\r\n )\r\n if calc_cos_sin < cos_sin:\r\n cos_sin = calc_cos_sin\r\n position = cursor\r\n cursor = cursor + 1\r\n\r\n gesture_detail = featureVectorList[position].gesture_detail\r\n print(gesture_file_name + \" calculated gesture \" + 
gesture_detail.gesture_name)\r\n # re_run = validate_mutate_recognition(gesture_file_name, video_feature, gesture_detail)\r\n re_run = False\r\n if re_run:\r\n max_mutations = max_mutations + 1\r\n return gesture_detail\r\n\r\n\r\ngesture_details = [GestureDetail(\"Num0\", \"0\", \"0\"), GestureDetail(\"Num1\", \"1\", \"1\"),\r\n GestureDetail(\"Num2\", \"2\", \"2\"), GestureDetail(\"Num3\", \"3\", \"3\"),\r\n GestureDetail(\"Num4\", \"4\", \"4\"), GestureDetail(\"Num5\", \"5\", \"5\"),\r\n GestureDetail(\"Num6\", \"6\", \"6\"), GestureDetail(\"Num7\", \"7\", \"7\"),\r\n GestureDetail(\"Num8\", \"8\", \"8\"), GestureDetail(\"Num9\", \"9\", \"9\"),\r\n GestureDetail(\"FanDown\", \"Decrease Fan Speed\", \"10\"),\r\n GestureDetail(\"FanOn\", \"FanOn\", \"11\"), GestureDetail(\"FanOff\", \"FanOff\", \"12\"),\r\n GestureDetail(\"FanUp\", \"Increase Fan Speed\", \"13\"),\r\n GestureDetail(\"LightOff\", \"LightOff\", \"14\"), GestureDetail(\"LightOn\", \"LightOn\", \"15\"),\r\n GestureDetail(\"SetThermo\", \"SetThermo\", \"16\")\r\n ]\r\n\r\n# =============================================================================\r\n# Get the penultimate layer for training data\r\n# =============================================================================\r\n\r\nfeatureVectorList = []\r\npath_to_train_data = \"traindata/\"\r\ncount = 0\r\nfor file in os.listdir(path_to_train_data):\r\n if not file.startswith('.') and not file.startswith('frames') and not file.startswith('results'):\r\n featureVectorList.append(GestureFeature(decide_gesture_by_file_name(file),\r\n extract_feature(path_to_train_data, file, count)))\r\n count = count + 1\r\n\r\n# =============================================================================\r\n# Get the penultimate layer for test data\r\n# =============================================================================\r\nvideo_locations = [\"test/\"]\r\ntest_count = 0\r\nfor video_location in video_locations:\r\n with open('results.csv', 'w', newline='') as results_file:\r\n fieldnames = [\r\n 'Gesture_Video_File_Name', 'Gesture_Name',\r\n 'Output_Label']\r\n train_data_writer = csv.DictWriter(results_file, fieldnames=fieldnames)\r\n train_data_writer.writeheader()\r\n\r\n for test_file in os.listdir(video_location):\r\n if not test_file.startswith('.') and not test_file.startswith('frames') \\\r\n and not test_file.startswith('results'):\r\n recognized_gesture_detail = determine_gesture(video_location, test_file, test_count)\r\n test_count = test_count + 1\r\n\r\n train_data_writer.writerow({\r\n 'Gesture_Video_File_Name': test_file,\r\n 'Gesture_Name': recognized_gesture_detail.gesture_name,\r\n 'Output_Label': recognized_gesture_detail.output_label})\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"441042368","text":"import sys\n# using sys.argv here is equivalent to \"from sys import argv\".\nscript, encoding, error = sys.argv\n\n# this function takes 3 parameters.\ndef main(language_file, encoding, errors):\n # line is initialized by the file object's readline method, which reads one line of the text file.\n line = language_file.readline()\n # if there is something in line, run the following code.\n if line:\n # Run this function (defined outside main) if line is truthy.\n print_line(line, encoding, errors)\n # call the function recursively and return the result.\n return main(language_file, encoding, errors)\n\n# this function takes 3 parameters.\ndef print_line(line, encoding, errors):\n # strips any new lines\n new_lang = line.strip()\n # encoding strings into bytes.\n raw_bytes = new_lang.encode(encoding, errors=errors)\n # decoding bytes into strings\n cooked_string = raw_bytes.decode(encoding, errors=errors)\n # each variable's value is printed out.\n print(raw_bytes, \"<===>\", cooked_string)\n\n# the variable is initialized with a text file and the encoding it should use.\nlanguages = open(\"languages.txt\", encoding=\"utf-8\")\n# function is called.\nmain(languages, encoding, error)\n","sub_path":"ex23.py","file_name":"ex23.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"463667671","text":"\n\nimport sys, os\nsys.path.append(os.pardir)\nsys.path.append('../../')\nimport numpy as np\nfrom dataset.mnist import load_mnist\nfrom layers import *\nimport matplotlib.pylab as plt\n\n\n\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)\nnetwork = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)\n\niters_num = 1000\n#iters_num = 5\n\n# iters_num = 100\n\ntrain_size = x_train.shape[0]\nbatch_size = 100\n#batch_size = 1\nlearning_rate = 0.1\n\ntrain_loss_list = []\ntrain_accuracy_list = []\ntest_accuracy_list = []\n\niter_per_epoch = max(train_size / batch_size, 1)\n\nfig, ax = plt.subplots(1, 1)\nlines, = ax.plot(0, 0)\n\n\nfor i in range(iters_num):\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n\n grad = network.gradient(x_batch, t_batch)\n\n for key in ('W1', 'W2', 'b1', 'b2'):\n network.params[key] -= learning_rate * grad[key]\n \n loss = network.loss(x_batch, t_batch)\n train_loss_list.append(loss)\n\n if True: # i % iter_per_epoch == 0:\n train_accuracy = network.accuracy(x_train, t_train)\n test_accuracy = network.accuracy(x_test, t_test)\n train_accuracy_list.append(train_accuracy)\n test_accuracy_list.append(test_accuracy)\n print(train_accuracy, test_accuracy)\n # plt.xlabel(\"i\")\n # plt.ylabel(\"accuracy\")\n # # import pdb; pdb.set_trace()\n # plt.plot(np.arange(i+1), test_accuracy_list)\n # plt.show()\n\n\n lines.set_data(np.arange(i+1), test_accuracy_list)\n # when using set_data(), the axes etc. do not seem to be set automatically,\n # so in this example the sin curve would quickly disappear from the plotting range.\n # Therefore the x-axis range needs to be adjusted as appropriate.\n ax.set_xlim((0, i))\n ax.set_ylim((0, 1))\n # The key point:\n # - plt.show() blocks, so it cannot draw in real time\n # - plt.ion() + plt.draw() is unusable because the graph window freezes and the program stalls\n # ----> plt.pause(interval) use this!!! The argument is the sleep time\n    plt.pause(.01)\n \nplt.savefig('result.png') \n","sub_path":"ch05/mine/trainlayer.py","file_name":"trainlayer.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"52643052","text":"from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport sys\n\nimport echolib\n\nfrom time import time\n\nif __name__ == \"__main__\":\n\n channel_in = sys.argv[1]\n channel_out = sys.argv[2]\n\n counter = 0\n\n def __callback(message):\n \n global counter\n counter += 1\n\n print(\"Msg %d: %s \" % (counter, echolib.MessageReader(message).readString()))\n \n\n loop = echolib.IOLoop()\n client = echolib.Client()\n loop.add_handler(client)\n\n\n subscriber = echolib.Subscriber(client, channel_in, u\"string\", __callback)\n publisher = echolib.Publisher(client, channel_out, u\"string\") \n\n t = time()\n\n while loop.wait(100):\n \n writer = echolib.MessageWriter()\n writer.writeString(\"Hello there\")\n\n print(\"Send\")\n\n publisher.send(writer)\n\n t = time()\n","sub_path":"tests/python2/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"133709876","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 11 20:24:46 2018\n\n@author: soubhikdeb\n\nThis code shows the exploration vs. exploitation trade-off\n\"\"\"\n\n\n\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import bernoulli\nimport random\n\n\nK = 10 # number of bandits\ntime_steps = 2000 # number of time steps\nbandit_problems = 2000 # number of k-armed bandit problems\n\nmu = 1\nsigma = 1\nreward_sigma = 2\n\n\n'''\ngreedy policy\n'''\navg_reward_greedy = np.zeros(time_steps) # necessary for making the plot\npercent_optimal_action_greedy = np.zeros(time_steps) # percentage of optimal actions \n\nfor problem_num in range(1, bandit_problems + 1):\n \n \n # determine actual mean for each bandit in each problem\n q_star = np.random.normal(mu,sigma,K)\n sample_q_star = np.zeros(10) # for storing the sample means\n occurance_bandit_selected = np.zeros(10) # to keep a tab on how many times each bandit has been selected\n \n optimal_action = np.argmax(q_star) # determining the optimal action for this particular bandit\n \n \n # now we start each bandit problem\n for t in range(1, time_steps + 1):\n A_t = np.argmax(sample_q_star) # the greedy selection of best bandit\n R_t = np.random.normal(q_star[A_t],reward_sigma) # reward obtained on pulling the greedily selected bandit\n sample_q_star[A_t] = (sample_q_star[A_t]*occurance_bandit_selected[A_t] + R_t)/(occurance_bandit_selected[A_t] + 1) # new sample average \n occurance_bandit_selected[A_t] = occurance_bandit_selected[A_t] + 1 # update the number of occurrences\n \n \n # update the avg_reward\n avg_reward_greedy[t-1] = (((problem_num - 1) * avg_reward_greedy[t-1]) + R_t)/problem_num \n \n # update of optimal selection tally\n if A_t == optimal_action:\n percent_optimal_action_greedy[t-1] = percent_optimal_action_greedy[t-1] + 1\n \n \npercent_optimal_action_greedy = (percent_optimal_action_greedy/bandit_problems)*100 \n \n\n\n\n\n\n\n\n\n'''\nepsilon greedy policy\n'''\navg_reward_epsilon_greedy = np.zeros([2,time_steps])\npercent_optimal_action_epsilon_greedy = np.zeros([2, time_steps])\nepsilon = [0.1, 0.01] # for 
exploration\n\n\nfor num in [0,1]:\n for problem_num in range(1, bandit_problems + 1):\n \n \n # determine actual mean for each bandit in each problem\n q_star = np.random.normal(mu,sigma,K)\n sample_q_star = np.zeros(10) # for storing the sample means\n occurance_bandit_selected = np.zeros(10) # to keep a tab on how many times each bandit has been selected\n \n optimal_action = np.argmax(q_star)\n \n # now we start each bandit problem\n temp = bernoulli.rvs(1 - epsilon[num], size = time_steps)\n for t in range(1, time_steps + 1):\n \n if temp[t-1] == 1:\n A_t = np.argmax(sample_q_star) # the greedy selection of best bandit\n else: \n A_t = random.sample(range(0,10),1)[0] # uniform selection (take the int itself, not the one-element list, so the comparison with optimal_action below works)\n \n R_t = np.random.normal(q_star[A_t],reward_sigma) # reward obtained on pulling the greedily selected bandit\n sample_q_star[A_t] = (sample_q_star[A_t]*occurance_bandit_selected[A_t] + R_t)/(occurance_bandit_selected[A_t] + 1) # new sample average \n occurance_bandit_selected[A_t] = occurance_bandit_selected[A_t] + 1 # update the number of occurrences\n \n \n # update the avg_reward\n avg_reward_epsilon_greedy[num][t-1] = (((problem_num - 1) * avg_reward_epsilon_greedy[num][t-1]) + R_t)/problem_num \n \n \n \n # update of optimal selection tally\n if A_t == optimal_action:\n percent_optimal_action_epsilon_greedy[num][t-1] = percent_optimal_action_epsilon_greedy[num][t-1] + 1\n \n \n\npercent_optimal_action_epsilon_greedy = (percent_optimal_action_epsilon_greedy/bandit_problems)*100 \n\n \n \n\nplt.plot(avg_reward_greedy, label = 'greedy') \nplt.plot(avg_reward_epsilon_greedy[0], label = 'epsilon = 0.1') \nplt.plot(avg_reward_epsilon_greedy[1], label = 'epsilon = 0.01') \nplt.xlabel('Steps')\nplt.ylabel('Average Reward')\n\nplt.legend()\nplt.show() \n\n\n\nplt.plot(percent_optimal_action_greedy, label = 'greedy') \nplt.plot(percent_optimal_action_epsilon_greedy[0], label = 'epsilon = 0.1') \nplt.plot(percent_optimal_action_epsilon_greedy[1], label = 'epsilon = 0.01') \nplt.xlabel('Steps')\nplt.ylabel('% optimal action')\n\nplt.legend()\nplt.show()\n\n","sub_path":"bandit_example.py","file_name":"bandit_example.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"562409762","text":"import difflib\nimport os\nimport pathlib\nimport shutil\nimport sys\n\nimport pytest\n\nimport abjad\n\ntest_path = pathlib.Path(__file__).parent\ntravis_build_dir = os.environ.get(\"TRAVIS_BUILD_DIR\")\nassert isinstance(travis_build_dir, str)\nwrapper = pathlib.Path(travis_build_dir)\nsegments_dir = wrapper / wrapper.name / \"Segments\"\nsegments = []\nfor path in sorted(segments_dir.iterdir()):\n if not path.is_dir():\n continue\n if path.name == \"__pycache__\":\n continue\n segments.append(path)\n\n\n@pytest.mark.parametrize(\"segment\", segments)\ndef test_segments_01(segment):\n ly = f\"{segment}/illustration.ly\"\n ly_old = f\"{segment}/illustration.old.ly\"\n if os.path.exists(ly):\n shutil.copyfile(ly, ly_old)\n exit_code = os.system(f\"python {segment}/definition.py\")\n if exit_code != 0:\n sys.exit(exit_code)\n if not os.path.exists(ly_old):\n return\n assert os.path.exists(ly)\n assert os.path.exists(ly_old)\n if not abjad.TestManager.compare_files(ly_old, ly):\n ly_old_text = open(ly_old, \"r\")\n ly_old_text = ly_old_text.read()\n ly_text = open(ly, \"r\")\n ly_text = ly_text.read()\n print(\"\".join(difflib.ndiff(ly_old_text, ly_text)))\n 
sys.exit(1)\n","sub_path":"test_segments.py","file_name":"test_segments.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"356029795","text":"'''As the problems get harder my code seems to start to be more\n\"fill in the problem\"-ish, so I'm restarting this one, and\nin future problems I'll stop when I realize my code is going in the wrong\ndirection\n\nPurpose: rotate letters in string by certain amount in alphabet. Ex:\n\nrotate a by 5, gets you f. Z by 1 gets you A'''\n\nfrom math import floor\nlength = int(input())\nstring = input()\nshiftAmount = int(input())\n\ndef shiftLetters(string, shiftAmount):\n '''rotates all the letters in a string by the shiftAmount\n Note: cycle means iteration of the alphabet.\n '''\n alphabetCycle = floor(shiftAmount/26)\n realShift = shiftAmount - alphabetCycle*26 #shift accounting for numbers above 26\n alphabet = 'abcdefghijklmnopqrstuvwxyz'\n upperAlphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' #was truncated at 'T', which made uppercase U-Z raise a ValueError below\n newString = ''\n for letter in string:\n #if the letter is placed so that shifting it will keep it in the same alphabet cycle:\n if letter.isalpha(): #double if: filtering out dashes and stuff\n if letter in upperAlphabet:\n letter = letter.lower()\n upperCase = True\n else:\n upperCase = False\n letterIndex = alphabet.index(letter)\n distanceFromEnd = 25 - letterIndex\n if distanceFromEnd > realShift:\n #shift the letter to the same cycle and append it to the new word\n if upperCase == True:\n newString += alphabet[letterIndex + realShift].upper()\n else:\n newString += alphabet[letterIndex + realShift]\n elif shiftAmount % 26 == 0:\n if upperCase == True:\n newString += letter.upper()\n else:\n newString += letter\n elif distanceFromEnd == 0:\n if upperCase == True:\n newString += alphabet[realShift - 1].upper()\n else:\n newString += alphabet[realShift - 1]\n else: #Otherwise, wrap around past 'z' and continue counting from 'a'\n if upperCase == True:\n newString += alphabet[letterIndex + realShift - 26].upper()\n else:\n newString += alphabet[letterIndex + realShift - 26]\n else:\n #non-alphabetic characters pass through unchanged\n newString += letter\n return newString\n\nprint(shiftLetters(string, shiftAmount))\n","sub_path":"Python/Algorithms/Implementation/#9 Caesar Cipher Try_One_Scrapping.py","file_name":"#9 Caesar Cipher Try_One_Scrapping.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"178326603","text":"import unittest\n\nfrom kv_config_reader.filler import *\nfrom kv_config_reader.predef import *\n\nCONFIG_SCHEMA = {\n 'FUCK_STR': ConfigSchemaItem(description='fuck you', for_short='fy', default='fuck', type=str),\n 'SUCK_STR': ConfigSchemaItem(description='it sucks', for_short='is', default='suck', type=str),\n 'DAMN_STR': ConfigSchemaItem(description='damn it', for_short='di', default='damn', type=str),\n 'SHIT_STR': ConfigSchemaItem(description='oh shit', for_short='os', default=None, type=str),\n\n 'FUCK_INT': ConfigSchemaItem(description='fuck you', for_short='fy', default=1, type=int),\n 'SUCK_INT': ConfigSchemaItem(description='it sucks', for_short='is', default=2, type=int),\n 'DAMN_INT': ConfigSchemaItem(description='damn it', for_short='di', default=3, type=int),\n 'SHIT_INT': ConfigSchemaItem(description='oh shit', for_short='os', default=4, type=int),\n\n 'FUCK_BOOL': 
ConfigSchemaItem(description='fuck you', for_short='fy', default=False, type=bool),\n 'SUCK_BOOL': ConfigSchemaItem(description='it sucks', for_short='is', default=False, type=bool),\n 'DAMN_BOOL': ConfigSchemaItem(description='damn it', for_short='di', default=False, type=bool),\n 'SHIT_BOOL': ConfigSchemaItem(description='oh shit', for_short='os', default=False, type=bool),\n}\n\nDEFAULT_CONFIG = {\n 'FUCK_STR': 'fuck',\n 'SUCK_STR': 'suck',\n 'DAMN_STR': 'damn',\n 'SHIT_STR': None,\n\n 'FUCK_INT': 1,\n 'SUCK_INT': 2,\n 'DAMN_INT': 3,\n 'SHIT_INT': 4,\n\n 'FUCK_BOOL': False,\n 'SUCK_BOOL': False,\n 'DAMN_BOOL': False,\n 'SHIT_BOOL': False,\n}\n\nPERFECT_ENV = {\n 'FUCK_STR': 'a',\n 'SUCK_STR': 'b',\n 'DAMN_STR': 'c',\n 'SHIT_STR': 'd',\n\n 'FUCK_INT': '11',\n 'SUCK_INT': '22',\n 'DAMN_INT': '33',\n 'SHIT_INT': '44',\n\n 'FUCK_BOOL': '+',\n 'SUCK_BOOL': '-',\n 'DAMN_BOOL': '*',\n 'SHIT_BOOL': '/',\n}\n\n\nclass TestEnvFilter(unittest.TestCase):\n\n def test_normal(self):\n \"\"\"Normal case\"\"\"\n\n filler = EnvFiller(env=PERFECT_ENV)\n config = DEFAULT_CONFIG.copy()\n filler.fill(config, CONFIG_SCHEMA)\n\n self.assertEqual(config, {\n 'FUCK_STR': 'a',\n 'SUCK_STR': 'b',\n 'DAMN_STR': 'c',\n 'SHIT_STR': 'd',\n\n 'FUCK_INT': 11,\n 'SUCK_INT': 22,\n 'DAMN_INT': 33,\n 'SHIT_INT': 44,\n\n 'FUCK_BOOL': True,\n 'SUCK_BOOL': True,\n 'DAMN_BOOL': True,\n 'SHIT_BOOL': True,\n })\n\n def test_invalid_values(self):\n \"\"\"\n - An empty string (with no whitespace) does not overwrite the original value\n - A wrong type does not overwrite the original value\n - A missing entry does not overwrite the original value\n \"\"\"\n filler = EnvFiller(env={\n 'FUCK_STR': 'a',\n 'SUCK_STR': '',\n\n 'FUCK_INT': '233',\n 'SUCK_INT': 'abc',\n 'DAMN_INT': '',\n\n 'FUCK_BOOL': '123',\n 'SUCK_BOOL': '*',\n 'DAMN_BOOL': '',\n })\n config = DEFAULT_CONFIG.copy()\n filler.fill(config, CONFIG_SCHEMA)\n\n self.assertEqual(config, {\n 'FUCK_STR': 'a',\n 'SUCK_STR': 'suck',\n 'DAMN_STR': 'damn',\n 'SHIT_STR': None,\n\n 'FUCK_INT': 233,\n 'SUCK_INT': 2,\n 'DAMN_INT': 3,\n 'SHIT_INT': 4,\n\n 'FUCK_BOOL': True,\n 'SUCK_BOOL': True,\n 'DAMN_BOOL': False,\n 'SHIT_BOOL': False,\n })\n\n def test_strange_schema(self):\n \"\"\"\n An empty schema does not change config\n A schema completely inconsistent with config does not change config\n \"\"\"\n\n filler = EnvFiller(env=PERFECT_ENV)\n config = DEFAULT_CONFIG.copy()\n\n filler.fill(config, {})\n self.assertEqual(config, DEFAULT_CONFIG)\n\n filler.fill(config, {\n 'STRANGE_SCHEMA': ConfigSchemaItem(\n description='strange schema',\n for_short='ss',\n default=None,\n type=int,\n ),\n })\n self.assertEqual(config, DEFAULT_CONFIG)\n","sub_path":"kv_config_reader/_test/test_env_filler.py","file_name":"test_env_filler.py","file_ext":"py","file_size_in_byte":4065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"281495600","text":"import random\r\n\r\nfrom dcs.vehicles import AirDefence\r\n\r\nfrom gen.sam.airdefensegroupgenerator import (\r\n AirDefenseRange,\r\n AirDefenseGroupGenerator,\r\n)\r\n\r\n\r\nclass ZU23InsurgentGenerator(AirDefenseGroupGenerator):\r\n \"\"\"\r\n This generates a ZU-23 insurgent flak artillery group\r\n \"\"\"\r\n\r\n name = \"Zu-23 Site\"\r\n price = 56\r\n\r\n def generate(self):\r\n grid_x = random.randint(2, 3)\r\n grid_y = random.randint(2, 3)\r\n\r\n spacing = random.randint(10,40)\r\n\r\n index = 0\r\n for i in range(grid_x):\r\n for j in range(grid_y):\r\n index = index+1\r\n self.add_unit(AirDefence.AAA_ZU_23_Insurgent_Closed, \"AAA#\" + str(index),\r\n self.position.x + spacing*i,\r\n self.position.y + spacing*j, self.heading)\r\n\r\n @classmethod\r\n def range(cls) -> AirDefenseRange:\r\n return 
AirDefenseRange.Short\n","sub_path":"gen/sam/aaa_zu23_insurgent.py","file_name":"aaa_zu23_insurgent.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"435838373","text":"from sklearn.neural_network import MLPClassifier\nfrom sklearn.metrics import accuracy_score\nfrom time import time\nfrom sklearn.preprocessing import StandardScaler\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\n\n\ndef run_decomp_nn(k, estimator, f, x_train, x_test, y_train, y_test):\n ica_x_train = estimator.fit_transform(x_train)\n ica_x_test = estimator.transform(x_test)\n scaler = StandardScaler()\n scaler.fit(ica_x_train)\n ica_x_train = scaler.transform(ica_x_train)\n ica_x_test = scaler.transform(ica_x_test)\n start = time()\n model_1 = MLPClassifier(solver='sgd', learning_rate_init=0.001, validation_fraction=0.1, alpha=1e-6, hidden_layer_sizes=(5, 5), max_iter=5000, random_state=1)\n model_1.fit(ica_x_train, y_train)\n end = time() - start\n results = model_1.predict(ica_x_test)\n acc = accuracy_score(y_test, results)\n f.write('%3f\\t%.4f\\t%.3f\\t%.3f\\n' % (k,end, acc, 0.0))\n\n\ndef run_cluster_nn(k, estimator, f, x_train, x_test, y_train, y_test):\n print('running...')\n estimator.fit(x_train)\n predictions = estimator.predict(x_train)\n predictions = np.reshape(predictions, (-1, 1))\n test_predictions = estimator.predict(x_test)\n test_predictions = np.reshape(test_predictions, (-1, 1))\n\n enc = OneHotEncoder()\n enc.fit(predictions)\n train = enc.transform(predictions).toarray()\n test = enc.transform(test_predictions).toarray()\n\n # scaler = StandardScaler()\n # scaler.fit(predictions)\n # ica_x_train = scaler.transform(predictions)\n # ica_x_test = scaler.transform(test_predictions)\n start = time()\n model_1 = MLPClassifier(solver='sgd', learning_rate_init=0.001, validation_fraction=0.1, alpha=1e-6, hidden_layer_sizes=(5,5), max_iter=5000, random_state=1)\n model_1.fit(train, y_train)\n end = time() - start\n results = model_1.predict(test)\n acc = accuracy_score(y_test, results)\n f.write('%3f\\t%.4f\\t%.3f\\t%.3f\\n' % (k,end, acc, 0.0))\n","sub_path":"supervised/nn_util.py","file_name":"nn_util.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"339025582","text":"from AST.sentence import InsertAll, Insert\nfrom storageManager.TypeChecker_Manager import *\nfrom storageManager.jsonMode import *\nfrom .executeExpression import executeExpression\nfrom .AST.error import *\n\nimport sys\nsys.path.append(\"../\")\nfrom console import print_error, print_success, print_warning\n\n\n#def insert(database: str, table: str, register: list) -> int:\n#0 -> Successful operation\n#1 -> Operation error \n#2 -> Database does not exist\n#3 -> Table does not exist\n#4 -> Duplicate primary key\n#5 -> Columns out of bounds\n#anything else -> Operation error\n\ndef executeInsertAll(self, InsertAll_):\n\n # InsertAll : {\n # table: \"table_name\",\n # values: [ { type: ('Entero' | 'Decimal' | 'Cadena' | 'Variable' | 'Regex' | 'All'), value: \"\" } ]\n # #values: [ { type: (1 | 2 | 3 | 4 | 5 | 6 ), value: \"\" } ]\n # }\n \n insertAll: InsertAll = InsertAll_\n table_name = insertAll.table\n values = insertAll.values\n \n check_and_solve_values_ = check_and_solve_values(self, values)\n if check_and_solve_values_ == None:\n \n TypeChecker_Manager_ = get_TypeChecker_Manager()\n if TypeChecker_Manager_ != 
None:\n \n use_: str = get_use(TypeChecker_Manager_)\n if use_ != None:\n \n database_ = get_database(use_, TypeChecker_Manager_)\n if database_ != None:\n \n table_ = get_table(table_name, database_)\n if table_ != None:\n \n if len(table_.columns) == len(values):\n \n check_type_ = check_type(table_.columns, values)\n if check_type_ == None:\n \n check_null_ = check_null(table_.columns, values)\n if check_null_ == None:\n\n check_maxlength_ = check_maxlength(table_.columns, values)\n if check_maxlength_ == None:\n\n check_checks_ = check_checks(table_.columns, values)\n if check_checks_ == None:\n \n try:\n #success\n values_list = []\n i = 0\n while i < len(values):\n if(values[i] == None):\n values_list.append(None)\n else:\n values_list.append(values[i].value)\n i += 1\n replace_default(values_list, table_.columns)\n result_insert = insert(database_.name, table_.name, values_list)\n if result_insert == 0:\n print_success(\"QUERY\", \"Insert in \" + str(table_.name) + \" table, done successfully\",2)\n elif result_insert == 1:\n print_error(\"UNKNOWN ERROR\", \"Operation error\",2)\n elif result_insert == 2:\n print_error(\"SEMANTIC ERROR\", \"Database does not exist\",2)\n elif result_insert == 3:\n print_error(\"SEMANTIC ERROR\", \"Table does not exist\",2)\n elif result_insert == 4:\n print_error(\"SEMANTIC ERROR\", \"Duplicate primary key\",2)\n elif result_insert == 5:\n print_error(\"SEMANTIC ERROR\", \"Columns out of bounds\",2)\n else:\n print_error(\"UNKNOWN ERROR\", \"Operation error\",2)\n except Exception as e:\n print_error(\"UNKNOWN ERROR\", \"instruction not executed\",2)\n #print(e)\n\n else:\n print_error(\"SEMANTIC ERROR\", check_checks_,2)\n\n else:\n print_error(\"SEMANTIC ERROR\", check_maxlength_,2)\n\n else:\n print_error(\"SEMANTIC ERROR\", check_null_,2)\n\n else:\n print_error(\"SEMANTIC ERROR\", check_type_,2)\n \n else:\n print_error(\"SEMANTIC ERROR\", \"Wrong arguments submitted for table. 
\" + str(len(table_.columns)) + \" required and \" + str(len(values)) + \" received\",2)\n\n else:\n print_error(\"SEMANTIC ERROR\", \"Table does not exist\",2)\n\n else:\n print_error(\"SEMANTIC ERROR\", \"Database to use does not exist\",2)\n\n else:\n print_warning(\"RUNTIME ERROR\", \"Undefined database to use\",2)\n \n else:\n print_error(\"UNKNOWN ERROR\", \"instruction not executed\",2)\n\n else:\n print_error(\"SEMANTIC ERROR\", check_and_solve_values_,2)\n\n\ndef executeInsert(self, Insert_):\n\n # Insert : {\n # table: \"table_name\",\n # columns: [ \"column_name\", \"column_name\" ],\n # values: [ { type: ('Entero' | 'Decimal' | 'Cadena' | 'Variable' | 'Regex' | 'All'), value: \"\" } ]\n # #values: [ { type: (1 | 2 | 3 | 4 | 5 | 6 ), value: \"\" } ]\n # }\n \n insert: Insert = Insert_\n table_name = insert.table\n columns = insert.columns\n values = insert.values\n\n if len(columns) == len(values):\n\n TypeChecker_Manager_ = get_TypeChecker_Manager()\n if TypeChecker_Manager_ != None:\n \n use_: str = get_use(TypeChecker_Manager_)\n if use_ != None:\n \n database_ = get_database(use_, TypeChecker_Manager_)\n if database_ != None:\n \n table_ = get_table(table_name, database_)\n if table_ != None:\n \n if len(table_.columns) >= len(values):\n \n table_columns_names = []\n i = 0\n while i < len(table_.columns):\n table_columns_names.append(table_.columns[i].name)\n i += 1\n\n i = 0\n columns_exist = True\n columns_exist_error = 0\n while i < len(columns) and columns_exist == True:\n if not(columns[i] in table_columns_names) == True:\n columns_exist = False\n columns_exist_error = i\n i += 1\n\n if columns_exist == True: \n new_list_of_values = []\n i = 0\n while i < len(table_columns_names):\n if (table_columns_names[i] in columns) == True:\n j = 0\n while j < len(columns):\n if table_columns_names[i] == columns[j]:\n new_list_of_values.append(values[j])\n j = len(columns) \n j += 1\n else:\n new_list_of_values.append(None) \n i += 1\n new_InsertAll = InsertAll(table_name, new_list_of_values)\n executeInsertAll(self, new_InsertAll)\n\n else:\n print_error(\"SEMANTIC ERROR\", str(columns[columns_exist_error]) + \" column in which you want to insert does not exist\",2) \n \n else:\n print_error(\"SEMANTIC ERROR\", \"Number of arguments sent is greater than the number of columns in the table\",2)\n\n else:\n print_error(\"SEMANTIC ERROR\", \"Table does not exist\",2)\n\n else:\n print_error(\"SEMANTIC ERROR\", \"Database to use does not exist\",2)\n\n else:\n print_warning(\"RUNTIME ERROR\", \"Undefined database to use\",2)\n \n else:\n print_error(\"UNKNOWN ERROR\", \"instruction not executed\",2)\n \n else:\n print_error(\"SEMANTIC ERROR\", \"number of columns and values ​​are not the same size\",2)\n\n\ndef check_and_solve_values(self, values_):\n\n return_ = None\n\n i = 0\n while i < len(values_):\n \n if values_[i] != None:\n \n result_executeExpression = executeExpression(self, values_[i])\n\n if( isinstance(result_executeExpression, Error) ):\n return_ = result_executeExpression.detail\n \n else:\n values_[i].type = result_executeExpression.type\n values_[i].value = result_executeExpression.value\n \n i += 1\n\n return return_\n\n\ntype_int = [\"SMALLINT\", \"INTEGER\", \"BIGINT\", \"REAL\"]\ntype_float = [\"DECIMAL\", \"NUMERIC\", \"DOUBLE PRECISION\", \"PRECISION\", \"MONEY\"]\ntype_char = [\"CHARACTER\", \"CHAR\", \"TEXT\"]\ntype_string = [\"TEXT\"]\ntype_bool = [\"BOOLEAN\"]\n# | TIMESTAMP\n# | DATE\n# | TIME \n# | INTERVAL\n# | TIME WITHOUT TIME ZONE\n# | TIME WITH TIME 
ZONE\n# | INTERVAL INT\n# | TIMESTAMP WITH TIME ZONE\n# | ID\n\n\ndef check_type(columns_, values_) -> str:\n\n return_ = None\n\n i = 0\n while i < len(columns_):\n \n if values_[i] != None:\n\n column_type = ((str(columns_[i].type_)).upper()) \n if ( column_type in type_int ) == True:\n if values_[i].type != 1 and values_[i].type!=4:\n return_ = \"Argument \" + str((i+1)) + \" of wrong type. It should be a \" + str(column_type) + \" type.\"\n i = len(columns_)\n \n elif ( column_type in type_float ) == True:\n if values_[i].type != 2 and values_[i].type!=4:\n return_ = \"Argument \" + str((i+1)) + \" of wrong type. It should be a \" + str(column_type) + \" type.\"\n i = len(columns_)\n \n elif ( column_type in type_char ) == True:\n if values_[i].type != 3 and values_[i].type!=4:\n return_ = \"Argument \" + str((i+1)) + \" of wrong type. It should be a \" + str(column_type) + \" type.\"\n i = len(columns_)\n \n elif ( column_type in type_string ) == True:\n if values_[i].type != 3 and values_[i].type!=4:\n return_ = \"Argument \" + str((i+1)) + \" of wrong type. It should be a \" + str(column_type) + \" type.\"\n i = len(columns_)\n\n elif ( column_type in type_bool ) == True:\n if values_[i].type != 1 and values_[i].type!=4:\n return_ = \"Argument \" + str((i+1)) + \" of wrong type. It should be a \" + str(column_type) + \" type.\"\n i = len(columns_)\n else:\n if (not(str(values_[i].value)==\"0\" or str(values_[i].value==\"1\"))) == True:\n return_ = \"Argument \" + str((i+1)) + \" of wrong type. It should be a \" + str(column_type) + \" type.\"\n i = len(columns_)\n \n i += 1\n\n return return_\n\n\ndef check_null(columns_, values_) -> str:\n\n return_ = None\n\n i = 0\n while i < len(columns_):\n\n if columns_[i].null_ != None:\n if columns_[i].null_ == False:\n if values_[i] == None:\n return_ = \"Argument \" + str((i+1)) + \" is null and the column does not allow null values.\"\n i = len(columns_)\n\n i += 1\n\n return return_\n\n\ndef check_maxlength(columns_, values_) -> str:\n\n return_ = None\n\n i = 0\n while i < len(columns_):\n\n if values_[i] != None:\n\n if columns_[i].maxlength_ != None:\n if ( columns_[i].maxlength_ < len(str(values_[i].value)) ) == True:\n return_ = \"Argument \" + str((i+1)) + \" exceeds the maximum length allowed by the column.\"\n i = len(columns_)\n\n i += 1\n\n return return_\n\n\ndef check_checks(columns_, values_) -> str:\n\n return_ = None\n error_encontrado = False\n\n i = 0\n while i < len(columns_) and error_encontrado == False:\n\n if values_[i] != None:\n\n value = str(values_[i].value)\n j = 0\n while j < len(columns_[i].checks) and error_encontrado == False:\n\n check_operation = columns_[i].checks[j].operation\n check_value = columns_[i].checks[j].value\n\n if str(check_operation) == \"<\":\n if not(str(value) < str(check_value)):\n return_ = \"Argument \" + str((i+1)) + \" must be \" + str(check_operation) + \" to \"\n is_int_or_float_ = is_int_or_float(check_value)\n if is_int_or_float_== True:\n return_ += str(check_value) + \".\"\n else:\n return_ += \"\\\"\" + str(check_value) + \"\\\".\"\n error_encontrado = True\n elif str(check_operation) == \">\":\n if not(str(value) > str(check_value)):\n return_ = \"Argument \" + str((i+1)) + \" must be \" + str(check_operation) + \" to \"\n is_int_or_float_ = is_int_or_float(check_value)\n if is_int_or_float_== True:\n return_ += str(check_value) + \".\"\n else:\n return_ += \"\\\"\" + str(check_value) + \"\\\".\"\n error_encontrado = True\n elif str(check_operation) == \"<=\":\n if 
not(str(value) <= str(check_value)):\n return_ = \"Argument \" + str((i+1)) + \" must be \" + str(check_operation) + \" to \"\n is_int_or_float_ = is_int_or_float(check_value)\n if is_int_or_float_== True:\n return_ += str(check_value) + \".\"\n else:\n return_ += \"\\\"\" + str(check_value) + \"\\\".\"\n error_encontrado = True\n elif str(check_operation) == \">=\":\n if not(str(value) >= str(check_value)):\n return_ = \"Argument \" + str((i+1)) + \" must be \" + str(check_operation) + \" to \"\n is_int_or_float_ = is_int_or_float(check_value)\n if is_int_or_float_== True:\n return_ += str(check_value) + \".\"\n else:\n return_ += \"\\\"\" + str(check_value) + \"\\\".\"\n error_encontrado = True\n elif str(check_operation) == \"==\":\n if not(str(value) == str(check_value)):\n return_ = \"Argument \" + str((i+1)) + \" must be \" + str(check_operation) + \" to \"\n is_int_or_float_ = is_int_or_float(check_value)\n if is_int_or_float_== True:\n return_ += str(check_value) + \".\"\n else:\n return_ += \"\\\"\" + str(check_value) + \"\\\".\"\n error_encontrado = True\n elif str(check_operation) == \"!=\":\n if not(str(value) != str(check_value)):\n return_ = \"Argument \" + str((i+1)) + \" must be \" + str(check_operation) + \" to \"\n is_int_or_float_ = is_int_or_float(check_value)\n if is_int_or_float_== True:\n return_ += str(check_value) + \".\"\n else:\n return_ += \"\\\"\" + str(check_value) + \"\\\".\"\n error_encontrado = True\n \n j += 1\n\n i += 1\n\n return return_\n\n\ndef replace_default(values_, columns_):\n\n return_ = None\n\n if len(values_) == len(columns_):\n i = 0\n while i < len(values_):\n\n if values_[i] == None:\n values_[i] = columns_[i].default_\n\n i += 1","sub_path":"parser/fase2/team20/execution/executeInsert.py","file_name":"executeInsert.py","file_ext":"py","file_size_in_byte":17608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"633152236","text":"#Import the time library to control the speed of the algorithm's execution\nimport time\n\n\ndef selectSort(array, demoArray, sleeptime):\n \"\"\"Selection sort function\"\"\"\n for i in range(len(array)):\n demoArray(array, painting(len(array), i, 0))\n time.sleep(sleeptime)\n min_i = i\n for j in range(i + 1, len(array)):\n demoArray(array, painting(len(array), i, j, min_i))\n time.sleep(sleeptime)\n if array[min_i] > array[j]:\n min_i = j\n demoArray(array, painting(len(array), i, j, min_i))\n time.sleep(sleeptime)\n array[i], array[min_i] = array[min_i], array[i]\n demoArray(array, colorswhileswapping(len(array), min_i, i))\n time.sleep(sleeptime)\n\n\ndef painting(length, curr, j, min_i=0):\n \"\"\"Colour-marks the array elements on the canvas\"\"\"\n colors = [\"#fdb827\" for i in range(length)]\n colors[curr] = \"#FB5560\"\n if(j != 0):\n colors[j] = \"#009fdc\"\n if curr != min_i and min_i != 0:\n colors[min_i] = \"#77E596\"\n\n return colors\n\n\ndef colorswhileswapping(length, a, b):\n \"\"\"Changes the colours of the array elements being swapped\"\"\"\n colors = [\"#fdb827\" for i in range(length)]\n colors[a] = \"#FB5560\"\n colors[b] = \"#77E596\"\n\n return colors","sub_path":"selectSort.py","file_name":"selectSort.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"62268920","text":"\n\n#class header\nclass _MUTATION():\n\tdef __init__(self,): \n\t\tself.name = \"MUTATION\"\n\t\tself.definitions = [u'the way in which genes change and produce permanent differences: ', u'a 
permanent change in an organism, or the changed organism itself: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_mutation.py","file_name":"_mutation.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"474474715","text":"import numpy as np\r\n\r\n# from algorithms import *\r\nfrom algorithms.StupidGeneticAlgorithm.StupidGeneticAlgorithmAgent import *\r\n\r\nclass StupidGeneticAlgorithmPlayer:\r\n\t# args:\r\n\t# mean_reward\r\n\t# races_taken\r\n\tdef __init__(self, sess, state_size, action_size):\r\n\t\tself.sess = sess\r\n\t\tself.STATE_SIZE = state_size\r\n\t\tself.ACTION_SIZE = action_size\r\n\t\tself.HIDDEN_SIZE = 10\r\n\r\n\t\tself.mean_reward = 0\r\n\t\tself.races_taken = 0\r\n\t\tself.running_reward = 0\r\n\r\n\t\tself.vars = []\r\n\t\tself.agent = StupidGeneticAlgorithmAgent(self.HIDDEN_SIZE, self.STATE_SIZE, self.ACTION_SIZE)\r\n\r\n\tdef reset(self, state, done):\r\n\t\treturn self.step(state, 0, done)\r\n\r\n\tdef step(self, state, reward, done):\r\n\t\tself.running_reward += reward\r\n\t\tif done:\r\n\t\t\tself.mean_reward = (self.mean_reward * self.races_taken + self.running_reward) / (self.races_taken + 1)\r\n\t\t\tself.races_taken += 1\r\n\t\t\tself.running_reward = 0\r\n\t\treturn self.sess.run(self.agent.predict, feed_dict = {self.agent.input: [state]})\r\n\r\n\tdef append_var(self, var):\r\n\t\tself.vars += [var]\r\n\r\n\tdef set_vars(self, vars):\r\n\t\t# Whenever we set vars, mean_reward = 0 & races_taken = 0 & running_reward = 0, because that's a new agent\r\n\t\tself.mean_reward = 0\r\n\t\tself.races_taken = 0\r\n\t\tself.running_reward = 0\r\n\t\tself.agent.update(self.sess, vars)\r\n\r\n\tdef get_vars(self):\r\n\t\tresult = []\r\n\t\tfor var in self.vars:\r\n\t\t\tresult += [self.sess.run(var)]\r\n\t\treturn result\r\n\r\n\tdef new_var(self, arr0, arr1, mutation_rate):\r\n\t\tresult = []\r\n\r\n\t\tshape = arr0.shape\r\n\t\tarr0 = np.reshape(arr0, [-1])\r\n\t\tarr1 = np.reshape(arr1, [-1])\r\n\t\tfor a0, a1 in zip(arr0, arr1):\r\n\t\t\tif np.random.random() < mutation_rate:\r\n\t\t\t\tresult += [np.random.random()]\r\n\t\t\telse:\r\n\t\t\t\tbeta = (1 if np.random.random() < 0.5 else -1) * np.random.random() * mutation_rate * (a1 - a0)\r\n\t\t\t\tresult += [a0 + beta]\r\n\r\n\t\treturn np.reshape(np.array(result), shape)\r\n\r\n\tdef cross_vars(self, vars, mutation_rate):\r\n\t\tmy_vars = self.get_vars()\r\n\t\tresult = []\r\n\t\tfor i in range(len(my_vars)):\r\n\t\t\tfirst, second = my_vars[i], vars[i]\r\n\t\t\t# if np.random.random() < mutation_rate:\r\n\t\t\t# \tfirst, second = second, first\r\n\t\t\t# beta = (1 if np.random.random() < 0.5 else -1) * (second - first) * np.random.random() * mutation_rate\r\n\t\t\t# if np.random.random() < mutation_rate:\r\n\t\t\t# \tresult += [np.random.random(self.sess.run(tf.shape(vars[i])))]\r\n\t\t\t# else:\r\n\t\t\t# \tresult += [first + beta]\r\n\t\t\tresult += [self.new_var(first, second, mutation_rate)]\r\n\t\treturn result\r\n","sub_path":"Reinforcement Learning on Unreal Engine/algorithms/StupidGeneticAlgorithm/StupidGeneticAlgorithmPlayer.py","file_name":"StupidGeneticAlgorithmPlayer.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"9733356","text":"import six.moves.builtins as builtins\nimport six.moves.tkinter_messagebox as messagebox\nfrom six.moves.tkinter_tkfiledialog import askdirectory\nfrom six.moves.tkinter import *\nimport os\nfrom constant import Constant\nfrom ui import DetailWindow\nfrom util import XMLUtil\nimport shutil\n\nimport platform\nclass TestResult(Toplevel):\n def __init__(self,parent):\n Toplevel.__init__(self)\n self.initialise()\n\n def initialise(self):\n self.result_path=builtins.global_response_files_path\n self.choose_result_folder_lbl=Label(self,anchor=\"w\",fg=\"white\",bg=\"blue\",text=\"Choose Test Response Folder default \"+builtins.global_response_files_path)\n self.choose_result_folder_lbl.grid(column=0,row=1,sticky=\"EW\")\n\n\n\n self.choose_result_folder_btn=Button(self,text=\"Choose\",command=self.ask_result_directory)\n self.choose_result_folder_btn.grid(column=1,row=1,columnspan=2)\n\n self.listbox=Listbox(self,width=\"100\")\n self.listbox.grid(column=0,row=2,columnspan=2,sticky=\"EW\")\n\n\n #create a frame to host three button then put the frame in one cell. it wont look so ugly in the window\n\n btn_frme=Frame(self)\n #view invalid response button\n view=Button(btn_frme,text=\"View File\",command=self.btn_view_response_xml)\n view.pack(side=\"left\", fill=None, expand=False)\n\n\n\n #Failed Response\n fail_resp=Button(btn_frme,text=\"Failed Response Only\",command=self.display_failed_response)\n fail_resp.pack(side=\"left\", fill=None, expand=False)\n\n\n #detail button open another window\n detail=Button(btn_frme,text=\"Detail\",command=self.create_detail_window)\n detail.pack(side=\"left\", fill=None, expand=False)\n\n btn_frme.grid(column=3,row=2,sticky=\"w\")\n\n self.expect_result_path=builtins.global_expected_result_files_path\n\n #expect result folder lable\n self.choose_expect_result_folder_lbl=Label(self,anchor=\"w\",fg=\"white\",bg=\"blue\",text=\"Choose Expected Test Result Folder default \"+Constant.FOLDER_PATH.EXPECTED_RESULT_FOLDER_PATH)\n self.choose_expect_result_folder_lbl.grid(column=0,row=3,sticky=\"EW\")\n\n self.choose_expect_result_folder_btn=Button(self,text=\"Choose\",command=self.ask_expect_result_directory)\n self.choose_expect_result_folder_btn.grid(column=1,row=3,columnspan=2)\n\n #save diff label\n\n self.diff_folder=builtins.global_diff_folder_path\n self.choose_diff_folder_lbl=Label(self,anchor=\"w\",fg=\"white\",bg=\"blue\",text=\"Choose where you want to save the diff default \"+Constant.FOLDER_PATH.DIFF_FOLDER_PATH)\n self.choose_diff_folder_lbl.grid(column=0,row=4,sticky=\"EW\")\n\n self.choose_diff_folder_btn=Button(self,text=\"Choose\",command=self.ask_diff_directory)\n self.choose_diff_folder_btn.grid(column=1,row=4,columnspan=2)\n\n\n #view diff button\n # self.view_diff_btn=Button(self,text=\"View Diff\",command=self.view_diff)\n # self.view_diff_btn.grid(column=2,row=4)\n\n #Compare Failed Response with Expected response\n compare_btn=Button(self,text=\"Compare Response With Expected and Save\",command=self.compare_response_with_ex)\n compare_btn.grid(column=3,row=4)\n\n #number of response\n self.number_lable=Label(self)\n self.number_lable.grid(column=0,row=5)\n number_of_response=self.get_file_and_populate_list(self.result_path)\n self.number_lable['text']=\"Total Number of Response is \"+str(number_of_response)\n self.grid_columnconfigure(0,weight=1)\n\n def display_failed_response(self):\n invalid_responses=self.get_invalid_response_files(self.result_path)\n 
number_of_failed_response=self.clean_listbox_reload(invalid_responses)\n self.number_lable['text']=\"Total Number of Failed Response is \"+str(number_of_failed_response)\n\n def get_invalid_response_files(self,folder_path):\n invalid_reps=[]\n files=self.get_files_fullpath_under_a_folder(folder_path)\n for file in files:\n if not self.check_response(file):\n invalid_reps.append(file)\n return invalid_reps\n\n def check_response(self,file_path):\n with open(file_path,'r')as f:\n data=f.read().replace('\\n','')\n data=''.join(data.split())\n try:\n start=data.index('')\n end=data.index('')\n status=data[start:end][23:]\n if(status!='1'):\n print(' Resp doesnt have a return code'+file_path)\n return False\n else:\n print(' Resp have a return code'+file_path)\n return True\n except:\n print(' Resp doesnt have a return code'+file_path)\n return False\n\n #accept a list of full path and populate into list\n def clean_listbox_reload(self,full_path_list):\n self.listbox.delete(0,END)\n number_of_failed_response=0\n for f in full_path_list:\n number_of_failed_response=number_of_failed_response+1\n self.listbox.insert(END,f)\n return number_of_failed_response\n\n\n def btn_view_response_xml(self):\n if(len(self.listbox.curselection())==0):\n messagebox.showerror(\"Error\",\"Select at least one item\")\n return\n index=self.listbox.curselection()[0]\n selected_text=self.listbox.get(index)\n os_platform=platform.system()\n if(os_platform==\"Windows\"):\n os.startfile(selected_text)\n elif(os_platform==\"Darwin\"):#mac\n os.system('open %s' % selected_text)\n else:\n pass\n\n\n\n def create_detail_window(self):\n if(len(self.listbox.curselection())==0):\n messagebox.showerror(\"Error\",\"Select at least one item\")\n return\n index=self.listbox.curselection()[0]\n selected_text=self.listbox.get(index)\n # self.new_window=Toplevel(self.parent)\n self.app= DetailWindow.DetailWindow(self, selected_text)\n\n def ask_result_directory(self):\n self.dir_result_opt = options = {}\n options['initialdir'] = Constant.FOLDER_PATH.INIT_ASK_RESPONSE_FOLDER\n options['mustexist'] = False\n options['parent'] = self\n options['title'] = 'Test Result'\n self.result_path=askdirectory(**self.dir_result_opt)\n self.choose_result_folder_lbl['text']=self.result_path\n number_of_response=self.get_file_and_populate_list(self.result_path)\n self.number_lable['text']=\"Total Number of Response is \"+str(number_of_response)\n\n def ask_expect_result_directory(self):\n self.dir_expect_result_opt = options = {}\n options['initialdir'] = Constant.FOLDER_PATH.INIT_ASK_EXPECT_RESPONSE\n options['mustexist'] = False\n options['parent'] = self\n options['title'] = 'Test Result'\n self.expect_result_path=askdirectory(**self.dir_expect_result_opt)\n self.choose_expect_result_folder_lbl['text']=self.expect_result_path\n builtins.global_expected_result_files_path=self.expect_result_path\n\n def ask_diff_directory(self):\n self.dir_diff_opt = options = {}\n options['initialdir'] = Constant.FOLDER_PATH.INIT_ASK_EXPECT_RESPONSE\n options['mustexist'] = False\n options['parent'] = self\n options['title'] = 'Test Result'\n self.diff_folder=askdirectory(**self.dir_diff_opt)\n self.choose_diff_folder_lbl['text']=self.diff_folder\n builtins.global_diff_folder_path=self.diff_folder\n\n\n\n #accept a folder path and get all files under that path and then populate list\n def get_file_and_populate_list(self,path):\n number_of_response=0\n self.listbox.delete(0,END)\n for root, subdirs, files in os.walk(path):\n for name in files:\n 
full_path=os.path.realpath(os.path.join(root,name))\n if full_path.find('.xml')!=-1:\n self.listbox.insert(END,full_path)\n number_of_response=number_of_response+1\n return number_of_response\n\n def get_files_fullpath_under_a_folder(self,folder_path):\n file=[]\n for root, subdirs, files in os.walk(folder_path):\n for name in files:\n full_path=os.path.realpath(os.path.join(root,name))\n file.append(full_path)\n return file\n\n def compare_response_with_ex(self):\n\n if(len(self.listbox.curselection())==0):\n messagebox.showerror(\"Error\",\"Select at least one item\")\n return\n index=self.listbox.curselection()[0]\n selected_text=self.listbox.get(index)#this is full path. need to get the file name only\n # #mac\n # filename=selected_text.split('/')[-1]\n\n #windows\n filename=selected_text.split('\\\\')[-1]\n # print(filename)\n expected_result=self.get_expected_by_name(selected_text)\n\n #check for None before splitting the path, otherwise the split below raises an AttributeError\n if expected_result==None:\n messagebox.showerror(\"Error\",\"Expected response template not found\")\n return\n\n # #macos\n # expected_result_filename=expected_result.split('/')[-1]\n\n #windows\n expected_result_filename=expected_result.split('\\\\')[-1]\n\n difflist=XMLUtil.compare_two_xml(expected_result, selected_text)\n diff_filepath=os.path.join(self.diff_folder,\"expected \"+expected_result_filename+\" vs actual \"+filename+\".diff\")\n\n #write diff\n file=open(diff_filepath,'w')\n for diff in difflist:\n file.write(diff+\"\\n\")\n file.close()\n expected_result_fullpath_in_diff=os.path.join(self.diff_folder,expected_result_filename+\"expected.xml\")\n actual_response_fullpath_in_diff=os.path.join(self.diff_folder,expected_result_filename+\"actualresp.xml\")\n shutil.copyfile(expected_result,expected_result_fullpath_in_diff)\n shutil.copyfile(selected_text,actual_response_fullpath_in_diff)\n #windows\n # os.startfile(selected_text)\n #mac\n\n # os.system('open %s' % expected_result_fullpath_in_diff)\n # os.system('open %s' % actual_response_fullpath_in_diff)\n # os.system('open %s' % diff_filepath)\n #pop up a window asking if want to view diff in the app\n result=messagebox.askquestion(\"Message\",\"Do you want to view diff in the app\")\n if result=='yes':\n self.app= DetailWindow.ViewDiff(self, diff_filepath)\n else:\n return\n\n\n def get_expected_by_name(self,filepath):\n\n #r\"C:\\Users\\fengmingy\\Desktop\\MyTest\\Test Result\\DnB\\Consumer\\1.xml\" where response is stored\n #r'C:\\Users\\fengmingy\\Desktop\\MyTest\\Expected_Result\\DnB\\Consumer\\1.xml'\n\n\n # print(filepath.split('\\\\')[-1].split('_')[0]) DCom-01.01.01\n expected_response_filename=filepath.split('\\\\')[-1].split('_')[0]+\"_exp.xml\"\n # DCom-01.01.01_exp.xml\n\n response_file_name=filepath.split('\\\\')[-1]\n\n temp_path=filepath.replace(response_file_name,expected_response_filename)#replace filename\n expected_response=temp_path.replace(self.result_path,self.expect_result_path)#replace base_path\n if os.path.isfile(expected_response):\n return expected_response\n else:\n return None\n","sub_path":"test_automation_webservices/ui/TestResult.py","file_name":"TestResult.py","file_ext":"py","file_size_in_byte":11271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"64843783","text":"#!/usr/bin/env python\n\n\nimport rospy\nfrom ian_bot.msg import PWM\nfrom ian_bot.srv import PWMgetResponse, PWMget, PWMfreq, PWMfreqResponse\nimport pigpio\n\nclass RPiPWM():\n\n\tdef __init__(self):\n\t\tself.sub = rospy.Subscriber(\"/pi/servo\", PWM, self.set_PWM_callback)\n\t\tself.pulseSrv = 
rospy.Service('/pi/servo_pos', PWMget, self.handle_get_PWM)\n\t\tself.freqSrv = rospy.Service('/pi/pwm_freq', PWMfreq, self.handle_freq_change)\n\t\tself.gpio = pigpio.pi()\n\t\tif not self.gpio.connected:\n\t\t\trospy.logerr(\"pigpio not connected\") #the pigpiod daemon must be started first\n\t\t\texit()\n\t\trospy.loginfo(\"initialized\")\n\t\t\n\tdef handle_get_PWM(self, req):\n\t\treturn PWMgetResponse(self.gpio.get_servo_pulsewidth(req.pin))\n\t\n\tdef handle_freq_change(self, req):\n\t\tself.gpio.set_PWM_frequency(req.pin, req.freq)\n\t\treturn PWMfreqResponse(self.gpio.get_PWM_frequency(req.pin))\n\t\n\tdef set_PWM_callback(self, msg):\n\t\t#data.pin, data.width\n\t\tif not 0<=msg.pin<=31 or not (msg.width == 0 or 500<=msg.width<=2500):\n\t\t\trospy.logerr_throttle(1, \"Malformed PWM message: %d | %d\" % (msg.pin, msg.width))\n\t\t\treturn\n\t\trospy.loginfo(\"Pin: %d, Timing: %d\", msg.pin, msg.width)\n\t\tself.gpio.set_servo_pulsewidth(msg.pin, msg.width)\n\nif __name__ == \"__main__\":\n\ttry:\n\t\trospy.init_node('rpi_servo', anonymous=True)\n\t\tRPiPWM()\n\t\trospy.spin()\n\texcept rospy.ROSInterruptException:\n\t\tpass\n","sub_path":"src/ian_bot/src/ian_bot/rpi_servo2.py","file_name":"rpi_servo2.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"259375644","text":"#-*- coding: UTF-8 -*- \n\nimport urllib\nimport json\n\nclass music:\n def __init__(self):\n self.title = \"\"\n self.url = \"\"\n self.description = \"\"\n \n \nclass music_api:\n def getsong(self,songName):\n result = music()\n \n songUrl = 'http://nnlife.duapp.com/xiami.php?key=%s'\\\n % (urllib.quote(songName.encode('utf-8')))#URL-encode the string\n \n f = urllib.urlopen(songUrl)\n c = f.read()\n \n data = json.loads(c);\n try:\n if data['status'] == 'ok':\n result.title = data['song']['song_name']\n result.url = data['song']['song_location']\n result.description = data['song']['artist_name']\n return result\n else:\n return None\n except Exception:\n return None\n","sub_path":"handlers/music/xmmusic.py","file_name":"xmmusic.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"191074770","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 22 18:25:35 2018\n\n@author: slytherin\n\"\"\"\nimport nltk #needed for pos_tag, FreqDist and NaiveBayesClassifier below\nfrom nltk.tokenize import word_tokenize\nimport random\nfrom nltk.classify.scikitlearn import SklearnClassifier\nimport pickle\n\nfrom sklearn.naive_bayes import MultinomialNB, BernoulliNB\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.svm import SVC, LinearSVC, NuSVC\n\nfrom nltk.classify import ClassifierI\nfrom statistics import mode\n\nclass VoteClassifier(ClassifierI):\n def __init__(self, *classifiers):\n self._classifiers = classifiers\n\n def classify(self, features):\n votes = []\n for c in self._classifiers:\n v = c.classify(features)\n votes.append(v)\n return mode(votes)\n\n def confidence(self, features):\n votes = []\n for c in self._classifiers:\n v = c.classify(features)\n votes.append(v)\n\n choice_votes = votes.count(mode(votes))\n conf = choice_votes / len(votes)\n return conf\n\n\nshort_pos=open(\"positive.txt\",encoding = \"ISO-8859-1\").read()\nshort_neg=open(\"negative.txt\",encoding = \"ISO-8859-1\").read()\n\ndocuments=[]\nall_words=[]\n\nallowed_word_types=[\"J\",\"R\",\"V\"]\n\nfor r in short_pos.split('\\n'):\n documents.append((r,\"pos\"))\n words=word_tokenize(r)\n 
pos=nltk.pos_tag(words)\n    for w in pos:\n        if w[1][0] in allowed_word_types:\n            all_words.append(w[0].lower())\n\nfor r in short_neg.split('\\n'):\n    documents.append((r,\"neg\"))\n    words=word_tokenize(r)\n    pos=nltk.pos_tag(words)\n    for w in pos:\n        if w[1][0] in allowed_word_types:\n            all_words.append(w[0].lower())\n\nsave_documents = open(\"pickled_algos/documents.pickle\",\"wb\")\npickle.dump(documents, save_documents)\nsave_documents.close()\n\n\nall_words=nltk.FreqDist(all_words)\nword_features=list(all_words.keys())[:5000]\n\nsave_word_features = open(\"pickled_algos/word_features5k.pickle\",\"wb\")\npickle.dump(word_features, save_word_features)\nsave_word_features.close()\n\n\ndef find_features(document):\n    words=word_tokenize(document)\n    features={}\n    for w in word_features:\n        features[w]=(w in words)\n    return features\n\nfeaturesets=[(find_features(rev),category) for(rev,category) in documents]\nrandom.shuffle(featuresets)\ntraining_set = featuresets[:10000]\ntesting_set = featuresets[10000:]\n\nclassifier = nltk.NaiveBayesClassifier.train(training_set)\nprint(\"Original Naive Bayes Algo accuracy percent:\", (nltk.classify.accuracy(classifier, testing_set))*100)\nclassifier.show_most_informative_features(15)\n\nsave_classifier = open(\"pickled_algos/originalnaivebayes5k.pickle\",\"wb\")\npickle.dump(classifier, save_classifier)\nsave_classifier.close()\n\n\nMNB_classifier = SklearnClassifier(MultinomialNB())\nMNB_classifier.train(training_set)\nprint(\"MNB_classifier accuracy percent:\", (nltk.classify.accuracy(MNB_classifier, testing_set))*100)\n\nsave_classifier = open(\"pickled_algos/MNB_classifier5k.pickle\",\"wb\")\npickle.dump(MNB_classifier, save_classifier)\nsave_classifier.close()\n\n\nBernoulliNB_classifier = SklearnClassifier(BernoulliNB())\nBernoulliNB_classifier.train(training_set)\nprint(\"BernoulliNB_classifier accuracy percent:\", (nltk.classify.accuracy(BernoulliNB_classifier, testing_set))*100)\n\nsave_classifier = open(\"pickled_algos/BernoulliNB_classifier5k.pickle\",\"wb\")\npickle.dump(BernoulliNB_classifier, save_classifier)\nsave_classifier.close()\n\n\nLogisticRegression_classifier = SklearnClassifier(LogisticRegression())\nLogisticRegression_classifier.train(training_set)\nprint(\"LogisticRegression_classifier accuracy percent:\", (nltk.classify.accuracy(LogisticRegression_classifier, testing_set))*100)\n\nsave_classifier = open(\"pickled_algos/LogisticRegression_classifier5k.pickle\",\"wb\")\npickle.dump(LogisticRegression_classifier, save_classifier)\nsave_classifier.close()\n\n\nSGDClassifier_classifier = SklearnClassifier(SGDClassifier())\nSGDClassifier_classifier.train(training_set)\nprint(\"SGDClassifier_classifier accuracy percent:\", (nltk.classify.accuracy(SGDClassifier_classifier, testing_set))*100)\n\nsave_classifier = open(\"pickled_algos/SGDC_classifier5k.pickle\",\"wb\")\npickle.dump(SGDClassifier_classifier, save_classifier)\nsave_classifier.close()\n\nLinearSVC_classifier = SklearnClassifier(LinearSVC())\nLinearSVC_classifier.train(training_set)\nprint(\"LinearSVC_classifier accuracy percent:\", (nltk.classify.accuracy(LinearSVC_classifier, testing_set))*100)\n\nsave_classifier = open(\"pickled_algos/LinearSVC_classifier5k.pickle\",\"wb\")\npickle.dump(LinearSVC_classifier, save_classifier)\nsave_classifier.close()\n\n#voted_classifier = VoteClassifier(\n#                                  NuSVC_classifier,\n#                                  LinearSVC_classifier,\n#                                  MNB_classifier,\n#                                  BernoulliNB_classifier,\n#                                  LogisticRegression_classifier)\n#\n#print(\"voted_classifier accuracy percent:\", 
(nltk.classify.accuracy(voted_classifier, testing_set))*100)\n\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":5008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"227093982","text":"\"\"\" This is the main module \"\"\"\nimport warnings\n\nimport pandas as pd\nfrom analysis.feature_importance.feature_importance import FeatureImportance\nfrom classifiers.classifier_executor import ClassifierExecutor\nfrom classifiers.input_data import InputData\nfrom feature_extraction.feature_extractor import FeatureExtractor\nfrom path_helper import get_project_root\nfrom preprocessing.corpus import build_corpus\n\n# Configs\nfrom preprocessing.data_preparation import prepare_and_merge_datasets\n\npd.options.mode.chained_assignment = None\nwarnings.simplefilter(action=\"ignore\", category=FutureWarning)\n\n\ndef run_preprocessing(run_from_scratch):\n \"\"\" Run data preprocessing if run_from_scratch=True \"\"\"\n if run_from_scratch:\n # prepare corpus\n print(\"\\nPreparing data ...\")\n prepare_and_merge_datasets()\n df_preprocessed = pd.read_csv(\n str(get_project_root()) + \"/data/preprocessed/dataset.csv\", index_col=0\n )\n return df_preprocessed\n else:\n df_preprocessed = pd.read_csv(\n str(get_project_root()) + \"/data/preprocessed/dataset.csv\", index_col=0\n )\n return df_preprocessed\n\n\ndef run_feature_extraction_create_corpus(run_from_scratch, df_preprocessed):\n \"\"\" Run corpus building if run_from_scratch=True \"\"\"\n if run_from_scratch:\n df_corpus = build_corpus(df_preprocessed)\n df_corpus.to_csv(\n str(get_project_root()) + \"/data/extracted_features/corpus.csv\"\n )\n return df_corpus\n else:\n df_corpus = pd.read_csv(\n str(get_project_root()) + \"/data/extracted_features/corpus.csv\"\n )\n return df_corpus\n\n\ndef run_feature_extraction(run_from_scratch, df_corpus):\n \"\"\" Run feature extraction if run_from_scratch=True \"\"\"\n if run_from_scratch:\n print(\"\\nExtracting features ...\")\n df_extracted_features = FeatureExtractor(df_corpus).get_df_with_all_features()\n df_extracted_features = df_extracted_features.drop(\n [\"original_content\", \"content\", \"tokens\", \"pos\", \"stems\"], axis=1\n )\n df_extracted_features.to_csv(\n str(get_project_root()) + \"/data/extracted_features/extracted_features.csv\"\n )\n return df_extracted_features\n else:\n df_extracted_features = pd.read_csv(\n str(get_project_root()) + \"/data/extracted_features/extracted_features.csv\"\n )\n return df_extracted_features\n\n\nif __name__ == \"__main__\":\n preprocessing = True\n corpus = True\n feature_extraction = True\n\n df_preprocessed_data = run_preprocessing(preprocessing)\n df_data_corpus = run_feature_extraction_create_corpus(corpus, df_preprocessed_data)\n df_data_extracted_features = run_feature_extraction(\n feature_extraction, df_data_corpus\n )\n\n # unchanged dataset\n raw_text_features = df_preprocessed_data[\"content\"]\n raw_text_labels = df_preprocessed_data[\"class\"]\n extracted_features = df_data_extracted_features.loc[\n :, df_data_extracted_features.columns != \"class\"\n ]\n labels = df_data_extracted_features[\"class\"]\n\n # do balancing, i.e. 
over- and undersampling\n input_data = InputData(\n raw_text_features, raw_text_labels, extracted_features, labels\n )\n\n # feature importances\n print(\"\\nFeature importances ...\")\n feature_importance = FeatureImportance(\n extracted_features, labels, extracted_features.columns.values\n )\n feature_importance.get_importance_scores()\n\n # run classifiers\n print(\"\\nRunning classifiers ...\")\n classifier_executor = ClassifierExecutor(input_data.get_datasets())\n df_results = classifier_executor.get_results()\n df_results.to_csv(str(get_project_root()) + \"/results/results.csv\")\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"422534922","text":"from . import BaseParser\n\n# VimeoUser get vimeo user and\n# parser it to make local user object\nclass VimeoUser(BaseParser):\n\n def __init__(self, id):\n if not isinstance(id, int):\n raise TypeError(\"`id` must be an integer.\")\n params = dict(user_id=id)\n self.get(params)\n self.data = self.raw_data.get('person', {})\n if self.invalid_user(id): self.data = {}\n\n @property\n def id(self):\n id = self.data.get(\"id\", None)\n return int(id) if id else None\n\n @property\n def username(self):\n return self.data.get(\"username\", \"\")\n\n @property\n def display_name(self):\n return self.data.get(\"display_name\", \"\")\n\n @property\n def is_staff(self):\n result = self.data.get(\"is_staff\", 0)\n result = int(result)\n return True if result == 1 else False\n\n @property\n def has_uploaded(self):\n result = self.data.get(\"number_of_uploads\", 0)\n result = int(result)\n return True if result > 0 else False\n\n @property\n def is_paying(self):\n is_plus = self.data.get(\"is_plus\", 0)\n is_plus = int(is_plus)\n is_plus = True if is_plus == 1 else False\n is_pro = self.data.get(\"is_pro\", 0)\n is_pro = int(is_pro)\n is_pro = True if is_pro == 1 else False\n return is_plus or is_pro\n\n @property\n def vimeo_url(self):\n return self.data.get(\"profileurl\", \"\")\n\n\n @property\n def blank_user(self):\n return len(self.data.keys()) == 0\n\n # Get vimeo users with given params\n def get(self, params):\n if not isinstance(params, dict) and \"user_id\" not in params.keys():\n raise TypeError(\"Params not given in specified format or type.\")\n params.update(method=u'vimeo.people.getInfo')\n super(VimeoUser, self).get(params)\n\n\n\n def invalid_user(self, id):\n return self.blank_user or self.id != id\n\n# VimeoUsers do operation for collection of Vimeo users\nclass VimeoUsers(BaseParser):\n\n def __init__(self, users):\n if not isinstance(users, list):\n raise TypeError(\"`users` list must be an list object.\")\n for user in users:\n if not isinstance(user, VimeoUser):\n raise TypeError(\"`user` in list must be a VimeoUser object.\")\n self.collection = users\n\n def __repr__(self):\n values = [ repr(user) for user in self.collection ]\n return 'VimeoUsers' + str(values)\n\n def __getitem__(self, index):\n return self.collection[index]\n\n def append(self, user):\n if not isinstance(user, VimeoUser):\n raise TypeError(\"`user` in list must be a VimeoUser object.\")\n self.collection.append(user)\n\n # Get required user ids specified in limit\n def get_required_user_ids(self, limit):\n self.user_ids = []\n count = 0\n params = dict(method = u\"vimeo.categories.getAll\", page=1,per_page=50)\n self.get(params)\n categories= self.raw_data['categories']\n if categories['total'] > 0:\n categories = 
categories['category']\n for category in categories:\n name = category.get('word')\n pages = 20\n for page in range(1,pages+1):\n params = dict(method = u\"vimeo.categories.getRelatedPeople\", category=name, page=page,per_page=50)\n self.get(params)\n content = self.raw_data\n pages = int(content['users']['total'])/50\n if page > pages: break\n users = content['users']['user']\n for user in users:\n if count >= limit: return None\n user_id = int(user['id'])\n if user_id not in self.user_ids:\n self.user_ids.append(user_id)\n count = count + 1\n\n @property\n def users(self):\n return self.collection\n\n @property\n def count(self):\n return len(self.users)","sub_path":"libs/parser/vimeo_user.py","file_name":"vimeo_user.py","file_ext":"py","file_size_in_byte":4039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"203099517","text":"'''\n2020.02.13.\n2020.02.15. - rpi: Server /laptop(yolo): Client\n'''\n# coding=\nimport cv2\nimport numpy as np\nimport threading\nimport time\nimport socket\nimport sys\nimport os\nimport detector\n# import multiprocessing\n\n\ndef recvall(sock, count):\n buf = b''\n while count:\n newbuf = sock.recv(count)\n if not newbuf: return None\n buf += newbuf\n count -= len(newbuf)\n return buf\n\n\nclass DetectFrame(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n # Input server IP\n self.host = \"192.168.0.122\"\n self.port = 4000\n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_socket.connect((self.host, self.port))\n print(\"connected\")\n\n def run(self):\n while True:\n try:\n with open('./camData/image.jpg', 'wb') as my_file:\n length = recvall(self.server_socket, 16)\n frame_data = recvall(self.server_socket, int(length))\n print(\"receiving frame...\")\n my_file.write(frame_data)\n print(\"Now frame Updated!\")\n my_file.close()\n\n # command: run yolo\n myRunYolo = detector.RunYolo()\n myRunYolo.start()\n if not myRunYolo.is_alive():\n print(\"[Thread]: Run Yolo\")\n myRunYolo.start()\n del myRunYolo\n else:\n del myRunYolo\n time.sleep(1)\n\n # read yolo_mark bounding box\n with open(\"/home/heejunghong/BlackfencerWeb/index.html\", 'w+t') as my_file_2:\n data = my_file_2.read()\n print(\"Read the bounding box's coordinate\")\n conn.send(data)\n print(\"Send bounding box's coordinate successfully!\")\n time.sleep(2)\n my_file_2.write('0')\n print(\"Initialize the bounding box's coordinate to 0\")\n my_file_2.close()\n\n except Exception as ex:\n print('main.py ERROR', ex)\n break\n\n # conn.close()\n # self.server_socket.close()\n # print(\"++++++++++++++++++++++++++++ DISCONNECTED +++++++++++++++++++++++\")\n\n def shutdown(self):\n print(\"Shutdown initiated\")\n self.exit.set()\n\n\nif __name__ == '__main__':\n myDetectFrame = DetectFrame()\n myDetectFrame.start()\n print(\"processing start 1\")\n myDetectFrame.shutdown()\n time.sleep(1)\n while True:\n if not myDetectFrame.is_alive():\n myDetectFrame.start()\n myDetectFrame.shutdown()\n else:\n break","sub_path":"main server/server_rpi client_yolo/main_cl.py","file_name":"main_cl.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"527179313","text":"\"\"\"\nURLs | Cannlytics API\nCreated: 4/21/2021\nUpdated: 4/25/2021\nDescription: API URLs to interface with cannabis analytics.\n\"\"\"\n\n# External imports\nfrom django.urls import include, path\nfrom rest_framework import 
urlpatterns\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\n# Internal imports\nfrom api import views\nfrom api.auth import auth\nfrom api.areas import areas\nfrom api.inventory import inventory\nfrom api.organizations import organizations\nfrom api.users import users\n\napp_name = 'api' # pylint: disable=invalid-name\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('auth', include([\n        path('/authenticate', auth.authenticate),\n        path('/sign-out', auth.logout),\n    ])),\n    # Allow for labs to choose to make their analyses public,\n    # so that producers can search for analyses.\n    path('analyses', include([\n        path('', views.index),\n        path('/', views.index),\n    ])),\n    path('analytes', include([\n        path('', views.index),\n        path('/', views.index),\n    ])),\n    # path('areas', inventory.areas),\n    path('areas', include([\n        path('', areas.areas),\n        path('/', areas.areas),\n    ])),\n    path('clients', include([\n        path('', views.index),\n        path('/', views.index),\n        path('//contacts', views.index),\n    ])),\n    path('inventory', include([\n        path('', views.index),\n        path('/', views.index),\n    ])),\n    path('instruments', include([\n        path('', views.index),\n        path('/', views.index),\n    ])),\n    path('invoices', include([\n        path('', views.index),\n        path('/', views.index),\n    ])),\n    path('users', include([\n        path('', users.users),\n        path('/', users.users),\n        path('//settings', users.users),\n    ])),\n    path('organizations', include([\n        path('', organizations.organizations),\n        path('/', organizations.organizations),\n        path('//settings', organizations.organizations),\n        # path('join/', organizations.join_organization),\n    ])),\n    path('samples', include([\n        path('', views.index),\n        path('/', views.index),\n    ])),\n    path('results', include([\n        path('', views.index),\n        path('/', views.index),\n    ])),\n    path('transfers', include([\n        path('', views.index),\n        path('/', views.index),\n    ])),\n    path('regulations', views.regulations),\n    path('create-key', auth.create_api_key),\n    path('delete-key', auth.delete_api_key),\n    path('get-keys', auth.get_api_key_hmacs),\n]\n\n# Add optional format suffixes to the URLs,\n# so users can explicitly specify a format, e.g. 
.json.\n# https://www.django-rest-framework.org/tutorial/2-requests-and-responses/\n# urlpatterns = format_suffix_patterns(urlpatterns)\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"100403446","text":"\"\"\"\nThis module offers miscellanious networking functions.\n\"\"\"\n\nimport requests\n\n\ndef download_file(url, file_path):\n '''\n Generic function to stream-download a large file.\n Args:\n - url (str): url to download from.\n - file_path (str): path to save file to.\n Returns:\n - file_path (str): original file_path where saved is.\n '''\n r = requests.get(url, stream=True)\n with open(file_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n return file_path\n","sub_path":"Alpaca/datasets/utils/network_utils.py","file_name":"network_utils.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"191447973","text":"import services.controlers.itemClassificationsControler\nimport services.controlers.loggControler\nimport json\nfrom json import JSONEncoder\nfrom json import JSONDecoder\nfrom services.exceptions import *\n\nclass GetClassLeavesView:\n\tdef get (self):\n\t\tresponse_data = {}\n\t\ttry:\n\t\t\titemClassificationsControler =services.controlers.itemClassificationsControler.ItemClassificationsControler()\n\t\t\tresult = itemClassificationsControler.getClassLeaves()\n\t\t\tresponse_data['status']=OK\n\t\t\tresponse_data['message']='CLASSLEAVES_ENCONTRADOS'\n\t\t\tresponse_data['data'] = result\n\t\texcept Exception as e:\n\t\t\tresponse_data['status']=ERROR_NO_DEFINIDO\n\t\t\tresponse_data['message']=e.message\n\t\t\tresponse_data['data']=''\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Critical', ERROR_NO_DEFINIDO, e.message)\n\t\tjsonStringResponse = JSONEncoder().encode(response_data)\n\t\treturn jsonStringResponse","sub_path":"services/views/getClassLeavesView.py","file_name":"getClassLeavesView.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"262557941","text":"# coding=utf-8\n\n# This file contains the SerienRecoder About Screen\nfrom Screens.Screen import Screen\nfrom Components.ConfigList import ConfigListScreen\nfrom Components.ActionMap import ActionMap\nfrom Components.Label import Label\nfrom Components.config import config\n\nfrom enigma import getDesktop\n\nclass serienRecAboutScreen(Screen, ConfigListScreen):\n\tDESKTOP_WIDTH = getDesktop(0).size().width()\n\tDESKTOP_HEIGHT = getDesktop(0).size().height()\n\n\tskin = \"\"\"\n\t\t\n\t\t\t\n\t\t\"\"\" % ((DESKTOP_WIDTH - 650) / 2, (DESKTOP_HEIGHT - 400) / 2, (\"Über SerienRecorder\"))\n\n\tdef __init__(self,session):\n\t\tself.session = session\n\t\tScreen.__init__(self, session)\n\n\t\tself[\"actions\"] = ActionMap([\"SerienRecorderActions\"], dict(cancel=self.exit, ok=self.exit), -1)\n\n\t\tself.info =(\"SerienRecorder for Enigma2 (Version %s)\\n\"\n\t\t \"(c) 2014-2020 by einfall, w22754, MacDisein and egn\\n\"\n\t\t\t\t\t\"\\n\"\n\t\t\t\t\t\"For more info:\\n\"\n\t\t\t\t\t\"http://tinyurl.com/puafaaz\\n\"\n\t\t\t\t\t\"\\n\"\n\t\t\t\t\t\"If you like this plugin and want to support us, please donate to:\\n\"\n\t\t\t\t\t\"@einfall: send PN for Amazon-Wishlist,\\n\"\n\t\t 
\"@MacDisein: PayPal to macdisein@gmx.de\\n\\n\"\n\t\t \"Mit Unterstützung und Genehmigung zur Verwendung der Daten von\\n\"\n\t\t \"Wunschliste.de - http://www.wunschliste.de\") % config.plugins.serienRec.showversion.value\n\n\t\tself[\"pluginInfo\"] = Label(self.info)\n\n\tdef exit(self):\n\t\tself.close()\n","sub_path":"src/SerienRecorderAboutScreen.py","file_name":"SerienRecorderAboutScreen.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"609861008","text":"import numpy as np\nimport matplotlib as mpl\nmpl.use('tkagg') \nimport matplotlib.pyplot as plt\nfrom func import *\n\nn=np.array([1,1])\n\nA1=np.array([2,0])\nA2=np.array([0,2])\n\nomat=np.array([[0,1],[-1,0]])\n\nm=np.matmul(omat,n)\n\nD=np.matmul(np.linalg.inv(np.array([[m[0],m[1]],[n[0],n[1]]])),np.array([0,2]))\n\n\nA=-2*D\n\nh_alt=np.linalg.norm(A-D)\nside=(2*h_alt)/np.sqrt(3)\n\nB=D+(side/2)*(m/np.linalg.norm(m))\nC=D-(side/2)*(m/np.linalg.norm(m))\n\nl_AB=line(A,B)\nl_BC=line(B,C)\nl_CA=line(C,A)\n\n\nplt.plot(l_AB[0,:],l_AB[1,:])\nplt.plot(l_BC[0,:],l_BC[1,:])\nplt.plot(l_CA[0,:],l_CA[1,:])\nplt.plot(0,0,'o')\nplt.grid()\nplt.axis('equal')\n\n\n\nprint(\"Vertex A is :\")\nprint(A)\nprint(B)\nprint(C)\n\nprint(\"Area of Triangle is:\")\nprint((h_alt**2)/np.sqrt(3))\n\n\n#Verificatiom\n\nmed1=line(A,D)\nmed2=line(B,(A+C)/2)\n\nplt.plot(med1[0,:],med1[1,:])\nplt.plot(med2[0,:],med2[1,:])\n\nplt.show()\n","sub_path":"Computing/StraightLines/Ex6.py","file_name":"Ex6.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"597740897","text":"\"\"\"\nLandon Buell\nNeural-Network-Projects-with-Python\nChapter 02 - Exploratory Data Analysis\n7 June 2021\n\"\"\"\n\n #### IMPORTS ####\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\n ### MAIN EXECUTABLE ####\n\nif __name__ == \"__main__\":\n\n # Load in the Data\n df = pd.read_csv(\"diabetes.csv\")\n print(df.head(10))\n print(\"\")\n\n # Visualize w/ Histogram\n df.hist()\n plt.show()\n\n # Create a 3x3 subplot\n plt.subplots(nrows=3,ncols=3,figsize=(16,12))\n \n # Plot a density plot for each variable\n for (idx,col) in enumerate(df.columns):\n ax = plt.subplot(3,3,idx+1)\n ax.yaxis.set_ticklabels([])\n sns.distplot(df.loc[df.Outcome == 0][col], hist=False, axlabel=False,\n kde_kws={'linestyle':'-',\n 'color':'black',\n 'label':'No Diabetes'\n })\n sns.distplot(df.loc[df.Outcome == 1][col], hist=False, axlabel=False,\n kde_kws={'linestyle':'--',\n 'color':'black',\n 'label':'Diabetes'\n })\n ax.set_title(col)\n\n # Hide the 9th subplot (bottom right) \n plt.subplot(3,3,9).set_visible(False)\n plt.show()\n\n # Handling Missing Values\n print(df.isnull().any())\n print(\"\")\n \n print(df.describe())\n print(\"\")\n\n # Find number of zero values in dataset\n print(\"Num of rows with - values for each variable\")\n for col in df.columns:\n missingRows = df.loc[df[col] == 0].shape[0]\n print(col + \": \" + str(missingRows))\n print(\"\")\n\n # Replace the 'zero' values with NaN\n df['Glucose'] = df['Glucose'].replace(0,np.nan)\n df['BloodPressure'] = df['BloodPressure'].replace(0,np.nan)\n df['SkinThickness'] = df['SkinThickness'].replace(0,np.nan)\n df['Insulin'] = df['Insulin'].replace(0,np.nan)\n df['BMI'] = df['BMI'].replace(0,np.nan)\n\n # Find number of zero values in dataset\n print(\"Num of rows with - values for each variable\")\n for 
col in df.columns:\n missingRows = df.loc[df[col] == 0].shape[0]\n print(col + \": \" + str(missingRows))\n print(\"\")\n","sub_path":"Chapter02/ExploratoryDataAnalysis.py","file_name":"ExploratoryDataAnalysis.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"389464803","text":"from sane_doc_reports.domain.Element import Element\nfrom sane_doc_reports.conf import DEBUG\nfrom sane_doc_reports.elements import error, markdown\nfrom sane_doc_reports.transform.markdown.md_helpers import \\\n markdown_to_section_list\n\n\nclass PlaceHolderElement(Element):\n \"\"\" Mainly used to fix the old json's header element \"\"\"\n\n def insert(self):\n if DEBUG:\n print('Adding placeholder...')\n\n self.section.type = 'markdown'\n if isinstance(self.section.contents, str):\n self.section.contents = markdown_to_section_list(\n self.section.contents)\n else:\n self.section.contents = markdown_to_section_list(\n self.section.contents['text'])\n markdown.invoke(self.cell_object, self.section)\n\n\ndef invoke(cell_object, section) -> None:\n if section.type != 'placeholder':\n section.contents = f'Called placeholder but not placeholder - [{section}]'\n return error.invoke(cell_object, section)\n\n PlaceHolderElement(cell_object, section).insert()\n","sub_path":"docker/sane-doc-reports/src/sane_doc_reports/elements/placeholder.py","file_name":"placeholder.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"380258119","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport re\nimport subprocess\n#from AppKit import *\n\n#keyFile = NSPasteboard.generalPasteboard()\n#keyString = keyFile.stringForType_(NSStringPboardType)\n\nkeyString = subprocess.check_output('pbpaste', env={'LANG': 'en_US.UTF-8'}).decode('utf-8')\n\nkeyList = re.findall(r'[A-Z0-9]{5}-[A-Z0-9]{5}-[A-Z0-9]{5}', keyString)\nkeys = ','.join(keyList)\n\nprocess = subprocess.Popen('pbcopy', env={'LANG': 'en_US.UTF-8'}, stdin=subprocess.PIPE)\nprocess.communicate(keys.encode('utf-8'))\n","sub_path":"KeyExtractTool/KeyExtractTool_macos.py","file_name":"KeyExtractTool_macos.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"577100257","text":"import logging\nimport os\nimport sys\n\nimport yaml\n\nimport simtools.configuration.commandline_parser as argparser\nfrom simtools import io_handler\n\n__all__ = [\n \"Configurator\",\n \"InvalidConfigurationParameter\",\n]\n\n\nclass InvalidConfigurationParameter(Exception):\n \"\"\"Exception for Invalid configuration parameter.\"\"\"\n\n\nclass Configurator:\n \"\"\"\n Configuration handling application configuration.\n\n Allow to set configuration parameters by\n\n - command line arguments\n - configuration file (yml file)\n - configuration dict when calling the class\n - environmental variables\n\n Configuration parameter names are converted always to lower case.\n\n Parameters\n ----------\n config: dict\n Configuration parameters as dict.\n label: str\n Class label.\n usage: str\n Application usage description.\n description: str\n Text displayed as description.\n epilog: str\n Text display after all arguments.\n \"\"\"\n\n def __init__(self, config=None, label=None, usage=None, description=None, epilog=None):\n \"\"\"\n Initialize Configurator.\n \"\"\"\n\n self._logger = logging.getLogger(__name__)\n 
self._logger.debug(\"Init Configuration\")\n\n self.config_class_init = config\n self.label = label\n self.config = {}\n self.parser = argparser.CommandLineParser(\n prog=self.label, usage=usage, description=description, epilog=epilog\n )\n\n def default_config(self, arg_list=None, add_db_config=False):\n \"\"\"\n Returns dictionary of default configuration\n\n Parameters\n ----------\n arg_list: list\n List of arguments.\n add_db_config: bool\n Add DB configuration file.\n\n Returns\n -------\n dict\n Configuration parameters as dict.\n \"\"\"\n\n self.parser.initialize_default_arguments()\n if arg_list and \"--site\" in arg_list:\n self.parser.initialize_telescope_model_arguments(True, \"--telescope\" in arg_list)\n if add_db_config:\n self.parser.initialize_db_config_arguments()\n\n self._fill_config(arg_list)\n return self.config\n\n def initialize(\n self,\n paths=True,\n telescope_model=False,\n workflow_config=False,\n db_config=False,\n job_submission=False,\n ):\n \"\"\"\n Initialize configuration from command line, configuration file, class config, or \\\n environmental variable.\n\n Priorities in parameter settings.\n 1. command line; 2. yaml file; 3. class init; 4. env variables.\n\n Conflicting configuration settings raise an Exception, with the exception of settings \\\n from environmental variables, which are only done when the configuration parameter \\\n is None.\n\n Parameters\n ----------\n paths: bool\n Add path configuration to list of args.\n telescope_model: bool\n Add telescope model configuration to list of args.\n workflow_config: bool\n Add workflow configuration to list of args.\n db_config: bool\n Add database configuration parameters to list of args.\n job_submission: bool\n Add job submission configuration to list of args.\n\n Returns\n -------\n dict\n Configuration parameters as dict.\n dict\n Dictionary with DB parameters\n\n Raises\n ------\n InvalidConfigurationParameter\n if parameter has already been defined with a different value.\n\n \"\"\"\n\n self.parser.initialize_default_arguments(\n paths=paths,\n telescope_model=telescope_model,\n workflow_config=workflow_config,\n db_config=db_config,\n job_submission=job_submission,\n )\n\n self._fill_from_command_line()\n try:\n self._fill_from_config_file(self.config[\"workflow_config\"])\n except KeyError:\n pass\n try:\n self._fill_from_config_file(self.config[\"config\"])\n except KeyError:\n pass\n self._fill_from_config_dict(self.config_class_init)\n self._fill_from_environmental_variables()\n self._initialize_io_handler()\n _db_dict = self._get_db_parameters()\n\n if self.config[\"label\"] is None:\n self.config[\"label\"] = self.label\n\n return self.config, _db_dict\n\n def _fill_from_command_line(self, arg_list=None):\n \"\"\"\n Fill configuration parameters from command line arguments.\n\n \"\"\"\n\n if arg_list is None:\n arg_list = sys.argv[1:]\n\n self._fill_config(arg_list)\n\n def _fill_from_config_dict(self, _input_dict):\n \"\"\"\n Fill configuration parameters from dictionary. 
Enforce that configuration parameter names\\\n are lower case.\n\n Parameters\n ----------\n _input_dict: dict\n dictionary with configuration parameters.\n\n \"\"\"\n _tmp_config = {}\n try:\n for key, value in _input_dict.items():\n self._check_parameter_configuration_status(key, value)\n _tmp_config[key.lower()] = value\n except AttributeError:\n pass\n\n self._fill_config(_tmp_config)\n\n def _check_parameter_configuration_status(self, key, value):\n \"\"\"\n Check if a parameter is already configured and not still set to the default value. Allow \\\n configuration with None values.\n\n Parameters\n ----------\n key, value\n parameter key, value to be checked\n\n\n Raises\n ------\n InvalidConfigurationParameter\n if parameter has already been defined with a different value.\n\n\n \"\"\"\n # parameter not changed or None\n if self.parser.get_default(key) == self.config[key] or self.config[key] is None:\n return\n\n # parameter already set\n if key in self.config and self.config[key] != value:\n self._logger.error(\n f\"Inconsistent configuration parameter ({key}) definition \"\n f\"({self.config[key]} vs {value})\"\n )\n raise InvalidConfigurationParameter\n\n def _fill_from_config_file(self, config_file):\n \"\"\"\n Read and fill configuration parameters from yaml file. Take into account that this could be\\\n a CTASIMPIPE workflow configuration file. (CTASIMPIPE:CONFIGURATION is optional, therefore,\\\n no error is raised when this key is not found)\n\n Parameters\n ----------\n config file: str\n Name of configuration file name\n\n\n Raises\n ------\n FileNotFoundError\n if configuration file has not been found.\n\n \"\"\"\n\n try:\n self._logger.debug(f\"Reading configuration from {config_file}\")\n with open(config_file, \"r\") as stream:\n _config_dict = yaml.safe_load(stream)\n if \"CTASIMPIPE\" in _config_dict:\n try:\n self._fill_from_config_dict(_config_dict[\"CTASIMPIPE\"][\"CONFIGURATION\"])\n except KeyError:\n self._logger.info(f\"No CTASIMPIPE:CONFIGURATION dict found in {config_file}.\")\n else:\n self._fill_from_config_dict(_config_dict)\n # TypeError is raised for config_file=None\n except TypeError:\n pass\n except FileNotFoundError:\n self._logger.error(f\"Configuration file not found: {config_file}\")\n raise\n\n def _fill_from_environmental_variables(self):\n \"\"\"\n Fill any unconfigured configuration parameters (i.e., parameter is None) \\\n from environmental variables.\n\n \"\"\"\n\n _env_dict = {}\n try:\n for key, value in self.config.items():\n if value is None:\n _env_dict[key] = os.environ.get(key.upper())\n except AttributeError:\n pass\n\n self._fill_from_config_dict(_env_dict)\n\n def _initialize_io_handler(self):\n \"\"\"\n Initialize IOHandler with input and output paths.\n\n \"\"\"\n _io_handler = io_handler.IOHandler()\n _io_handler.set_paths(\n output_path=self.config.get(\"output_path\", None),\n data_path=self.config.get(\"data_path\", None),\n model_path=self.config.get(\"model_path\", None),\n )\n\n @staticmethod\n def _arglist_from_config(input_var):\n \"\"\"\n Convert input list of strings as needed by argparse.\n\n Special cases:\n - lists as arguments (using e.g., nargs=\"+\") are expanded\n - boolean are expected to be handled as action=\"store_true\" or \"store_false\"\n - None values or zero length values are ignored (this means setting a parameter \\\n to none or \"\" is not allowed.\n\n\n Ignore values which are None or of zero length.\n\n Parameters\n ----------\n input_var: dict, list, None\n Dictionary/list of commands to convert 
to list.\n\n Returns\n -------\n list\n Dict keys and values as dict.\n\n \"\"\"\n\n if isinstance(input_var, dict):\n _list_args = []\n for key, value in input_var.items():\n if isinstance(value, list):\n _list_args.append(\"--\" + key)\n _list_args += value\n elif not isinstance(value, bool) and value is not None and len(str(value)) > 0:\n _list_args.append(\"--\" + key)\n _list_args.append(str(value))\n elif value:\n _list_args.append(\"--\" + key)\n return _list_args\n\n try:\n return [str(value) for value in list(input_var) if value != \"None\"]\n except TypeError:\n return []\n\n @staticmethod\n def _convert_stringnone_to_none(input_dict):\n \"\"\"\n Convert string type 'None' to type None (argparse returns None as str).\n\n Parameters\n ----------\n input_dict\n Dictionary with values to be converted.\n\n \"\"\"\n\n for key, value in input_dict.items():\n input_dict[key] = None if value == \"None\" else value\n\n return input_dict\n\n def _fill_config(self, input_container):\n \"\"\"\n Fill configuration dictionary.\n\n Parameters\n ----------\n input_container\n List or dictionary with configuration updates.\n\n \"\"\"\n\n self.config = self._convert_stringnone_to_none(\n vars(\n self.parser.parse_args(\n self._arglist_from_config(self.config)\n + self._arglist_from_config(input_container)\n )\n )\n )\n\n def _get_db_parameters(self):\n \"\"\"\n Return parameters for DB configuration\n\n Parameters\n ----------\n dict\n Dictionary with DB parameters\n\n\n \"\"\"\n\n _db_dict = {}\n _db_para = (\"db_api_user\", \"db_api_pw\", \"db_api_port\", \"db_server\")\n try:\n for _para in _db_para:\n _db_dict[_para] = self.config[_para]\n except KeyError:\n pass\n\n return _db_dict\n","sub_path":"simtools/configuration/configurator.py","file_name":"configurator.py","file_ext":"py","file_size_in_byte":11312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"379597400","text":"import math\n\nprint(\"Enter the price of the commodity bought from the store\")\n\nx = float(input())\n\nprint(\"Enter the price of the commodity bought from online\")\n\ny = float(input())\n\ndiff = float(y - x)\n\nif diff > 0:\n print(\"You got a profit of {}\".format(math.fabs(diff)))\nelse:\n print(\"You got a loss of {}\".format(math.fabs(diff)))\n","sub_path":"prelim/profit and loss.py","file_name":"profit and loss.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"9355523","text":"import sys\r\nimport os.path\r\n\r\ndef ip_file_valid():\r\n\r\n while True:\r\n ip_file = input(\"\\nEnter full path and name of IP file: \")\r\n if os.path.isfile(ip_file) == True:\r\n print('\\nFile {} exists\\n'.format(ip_file))\r\n break\r\n else:\r\n print('Invalid path or filename!\\nPlease try again')\r\n continue\r\n\r\n ip = open(ip_file, 'r')\r\n ip.seek(0)\r\n ip_list = ip.readlines()\r\n ip.close()\r\n return ip_list\r\n","sub_path":"ip_file_valid.py","file_name":"ip_file_valid.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"282322284","text":"import os\nimport numpy as np\nimport cv2\n# from PIL import Image\n# import matplotlib.pyplot as plt\n#\nvideo_dirs_path = \"\"\narr_names = [f for f in os.listdir('./lmk_res') if f.endswith('.npy')]\n# print(len(arr_names))\nvideo_npy = np.zeros((1,68,2))\nfor i in range(1,len(arr_names)+1):\n arr_name = 
os.path.join('./lmk_res',str(i)+'.npy')\n    arr_number = np.load(arr_name)\n    center = arr_number[30]\n    after_decenter =np.zeros((1,2))\n    for j in arr_number:\n        # print((np.expand_dims(j-center,0)))\n        after_decenter=np.concatenate((after_decenter,(np.expand_dims(j-center,0))))\n    after_decenter = after_decenter[1:]\n    video_npy = np.concatenate((video_npy,np.expand_dims(after_decenter,0)))\nvideo_npy = video_npy[1:]\n\nvideo_npy_name = \"./lmk_res/video.npy\"\n# np.save(video_npy_name,video_npy)\n\n\n\n    # print(printarr_number[30])\n    # exit()\n    # print(arr_name)\n# for i in img:\n#     x_list = []\n#     y_list = []\n#     npy = i.replace('jpg', 'npy')\n    # img = Image.open(os.path.join('./lmk_res', i))\n    # arr = np.load(os.path.join('./lmk_res', npy))\n    # print(arr.shape)\n    # for j in arr:\n    #     x_list.append(j[0])\n    #     y_list.append(j[1])\n    # plt.figure(\"Image\") # image window title\n    # ax = plt.gca()\n    # plt.imshow(img)\n    # plt.axis('off') # turn the axes off\n    # plt.title('image') # image title\n    # ax.scatter(x_list, y_list, c='r', s=20, alpha=0.5)\n    # plt.show()\n    # plt.close()\n    # print(arr)\n\n\n# img_name = \"/home/jiamengzhao/data_root/lmk_res/1.jpg\"\n# npy = img_name.replace('.jpg','.npy')\n#\n# arr = np.load(npy)\n# img = cv2.imread(img_name)\n#\n# # for i in arr:\n# #\n# #     img = cv2.circle(img,(int(i[0]),int(i[1])),5,(0,0,255))\n# img = cv2.circle(img,(int(arr[30][0]),int(arr[30][1])),5,(0,0,255))\n# cv2.imwrite(img_name.replace('.jpg','_draw.jpg'),img)\n#\n# print(arr[30])","sub_path":"tools/view_lmks.py","file_name":"view_lmks.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"453356728","text":"class Solution:\n    def isPalindrome(self, s: str) -> bool:\n\n        l, r = 0, len(s) - 1\n        while l < r:\n            while l < r and self.helper(s[l]):\n                l += 1\n            l_char = s[l].lower()\n            while l < r and self.helper(s[r]):\n                r -= 1\n            r_char = s[r].lower()\n            if l_char != r_char:\n                return False\n            r -= 1\n            l += 1\n        return True\n\n    def helper(self, char):\n        return not ('a' <= char <= 'z' or 'A' <= char <= 'Z' or '0' <= char <= '9')","sub_path":"125_验证回文串.py","file_name":"125_验证回文串.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"33809463","text":"def toBinary(n):\n    r = []\n    while (n > 0):\n        r.append(n % 2)\n        n = n / 2\n    return r\n\n\ndef MillerRabin(n, a, s=50):\n    for j in range(1, s + 1):\n        b = toBinary(n - 1)\n        d = 1\n        for i in range(len(b) - 1, -1, -1):\n            x = d\n            d = (d * d) % n\n            if d == 1 and x != 1 and x != n - 1:\n                return True  # composite\n            if b[i] == 1:\n                d = (d * a) % n\n        if d != 1:\n            return True  # composite\n    return False  # prime\n\nif __name__ == \"__main__\":\n    print(MillerRabin(53, 7))","sub_path":"RSA/MillerRabin.py","file_name":"MillerRabin.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"62571736","text":"from sklearn.cluster import KMeans\nimport numpy as np\n\nfrom crankshaft.analysis_data_provider import AnalysisDataProvider\n\n\nclass Kmeans(object):\n    def __init__(self, data_provider=None):\n        if data_provider is None:\n            self.data_provider = AnalysisDataProvider()\n        else:\n            self.data_provider = data_provider\n\n    def spatial(self, query, no_clusters, no_init=20):\n        \"\"\"\n        find centers based on clusters of latitude/longitude pairs\n        query: SQL query that has a WGS84 geometry (the_geom)\n        \"\"\"\n        params = {\"subquery\": query,\n                  
\"geom_col\": \"the_geom\",\n \"id_col\": \"cartodb_id\"}\n\n result = self.data_provider.get_spatial_kmeans(params)\n\n # Unpack query response\n xs = result[0]['xs']\n ys = result[0]['ys']\n ids = result[0]['ids']\n\n km = KMeans(n_clusters=no_clusters, n_init=no_init)\n labels = km.fit_predict(zip(xs, ys))\n return zip(ids, labels)\n\n def nonspatial(self, subquery, colnames, no_clusters=5,\n standardize=True, id_col='cartodb_id'):\n \"\"\"\n Arguments:\n query (string): A SQL query to retrieve the data required to do the\n k-means clustering analysis, like so:\n SELECT * FROM iris_flower_data\n colnames (list): a list of the column names which contain the data\n of interest, like so: ['sepal_width',\n 'petal_width',\n 'sepal_length',\n 'petal_length']\n no_clusters (int): number of clusters (greater than zero)\n id_col (string): name of the input id_column\n\n Returns:\n A list of tuples with the following columns:\n cluster labels: a label for the cluster that the row belongs to\n centers: center of the cluster that this row belongs to\n silhouettes: silhouette measure for this value\n rowid: row that these values belong to (corresponds to the value in\n `id_col`)\n \"\"\"\n import json\n from sklearn import metrics\n\n params = {\n \"colnames\": colnames,\n \"subquery\": subquery,\n \"id_col\": id_col\n }\n\n data = self.data_provider.get_nonspatial_kmeans(params)\n\n # fill array with values for k-means clustering\n if standardize:\n cluster_columns = _scale_data(\n _extract_columns(data))\n else:\n cluster_columns = _extract_columns(data)\n\n kmeans = KMeans(n_clusters=no_clusters,\n random_state=0).fit(cluster_columns)\n\n centers = [json.dumps(dict(zip(colnames, c)))\n for c in kmeans.cluster_centers_[kmeans.labels_]]\n\n silhouettes = metrics.silhouette_samples(cluster_columns,\n kmeans.labels_,\n metric='sqeuclidean')\n\n return zip(kmeans.labels_,\n centers,\n silhouettes,\n [kmeans.inertia_] * kmeans.labels_.shape[0],\n data[0]['rowid'])\n\n\n# -- Preprocessing steps\n\ndef _extract_columns(data):\n \"\"\"\n Extract the features from the query and pack them into a NumPy array\n data (list of dicts): result of the kmeans request\n \"\"\"\n # number of columns minus rowid column\n n_cols = len(data[0]) - 1\n return np.array([data[0]['arr_col{0}'.format(i+1)]\n for i in xrange(n_cols)],\n dtype=float).T\n\n\ndef _scale_data(features):\n \"\"\"\n Scale all input columns to center on 0 with a standard devation of 1\n features (numpy matrix): features of dimension (n_features, n_samples)\n \"\"\"\n from sklearn.preprocessing import StandardScaler\n scaler = StandardScaler()\n return scaler.fit_transform(features)\n","sub_path":"release/python/0.8.2/crankshaft/crankshaft/clustering/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"501756736","text":"# vim: set et sw=4 ts=4 nu fdm=indent:\n# coding: utf8\n\nimport sys\nsys.path.insert(1, \"../src\")\n\n\nfrom system import System\nfrom island import Island\nfrom kerr import MOKE\n\nimport numpy as np\nimport random\n\n# we create the system\nsystem = System(\"3D\")\n\nR = 5\nN = 6\n\n\n# we create a nanowatch\nsystem.add_object(Island([0, 0, 0], [1, 0, 0], a=4.0, b=4.0/1.2, h=2.0, angle=0.0))\n\nfor i in range(N):\n theta = 2*np.pi/N * i\n \n x = R*np.cos(theta)\n y = R*np.sin(theta)\n \n ar = random.random()*0.5 + 1.0\n \n system.add_object(Island([x, y, 0], [1, 0, 0], a=4.0, b=4.0/ar, h=2.0, 
angle=0.0))\n\nsystem.randomize_magnetizations()\nsystem.randomize_angles()\n\n\n# setting the magnetoctistalline anisotropy\nsystem.K1 = 48e3\n\n# creating some exchange coupling\nsystem.couple(0, 1, 0.002) # ferro coupling\nsystem.couple(0, 3, -0.001) # antiferro coupling\n\n# we set a applied field in the 311 direction at -0.1 T\nsystem.Bu = [3, 1, 1]\nsystem.B = -0.1\n\n# we freeze the island number 1\nsystem.objects[1].frozen = True\n\n\n# we plot our initialized system with couples and annotations\nsystem.draw(name=\"complete3d_init\", size=15, couples=True, annoted=True)\n\n# relaxing it\nsystem.relax()\n\n# we plot the relaxed system\nsystem.draw(name=\"complete3d_relaxed\", size=15, couples=True)\n\n\n# center island caracteristics\nprint(system.objects[0].caracteristics)\n\n#system caracteristics\nprint(system.caracteristics)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"tutorials/complete3d.py","file_name":"complete3d.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"639732332","text":"__author__ = 'nightfade'\n\nimport asyncore\nimport sys\n\nfrom network.tcp_server import TCPServer, TCPConnectionHandlerBase\nfrom rpc.entity import RPCEntity\nfrom rpc.base import RPCServiceBase, RPCResponse\nfrom rpc.msgpack.rpc_codec import MPRPCCodec\nfrom utility import logger_manager\n\n\nclass EchoService(RPCServiceBase):\n\n def __init__(self):\n super(EchoService, self).__init__()\n self.entity = None\n self.logger = logger_manager.get_logger(self.__class__.__name__)\n\n def set_entity(self, entity):\n self.entity = entity\n\n def handleRequest(self, request):\n response = RPCResponse()\n response.callid = request.callid\n response.retvalue = {'status': 'ok'}\n\n def callback(retvalue):\n self.logger.info('return value: %s', str(retvalue))\n\n if self.entity:\n self.entity.call_method(request.method_name, request.params, callback)\n return response\n\n\nclass RPCManager(TCPConnectionHandlerBase):\n\n def __init__(self):\n super(RPCManager, self).__init__()\n self.entities = {}\n\n def handle_new_connection(self, conn):\n super(RPCManager, self).handle_new_connection(conn)\n entity = RPCEntity(MPRPCCodec())\n entity.set_connection(conn)\n entity.service = EchoService()\n entity.service.set_entity(entity)\n self.entities[conn.peername] = entity\n\n\nclass RPCServer(TCPServer):\n\n def __init__(self, ip, port):\n TCPServer.__init__(self, ip, port, RPCManager())\n\n\ndef main(ip, port):\n server = RPCServer(ip, port)\n while True:\n asyncore.loop()\n\n\nif __name__ == '__main__':\n assert len(sys.argv) >= 3\n listen_ip = sys.argv[1]\n listen_port = int(sys.argv[2])\n main(listen_ip, listen_port)\n","sub_path":"Server-Python/examples/MPRPCServer.py","file_name":"MPRPCServer.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"484665635","text":"from olympia.amo.tests import (\n APITestClient,\n TestCase,\n reverse_ns,\n user_factory,\n)\nfrom olympia.constants.scanners import YARA\nfrom olympia.scanners.models import ScannerResult\nfrom olympia.scanners.serializers import ScannerResultSerializer\n\n\nclass TestScannerResultViewSet(TestCase):\n client_class = APITestClient\n\n def setUp(self):\n super().setUp()\n\n self.user = user_factory()\n self.grant_permission(self.user, 'Admin:ScannersResultsView')\n self.client.login_api(self.user)\n self.url = 
reverse_ns('scanner-results', api_version='v5')\n\n def test_endpoint_requires_authentication(self):\n self.client.logout_api()\n response = self.client.get(self.url)\n assert response.status_code == 401\n\n def test_endpoint_requires_permissions(self):\n self.user = user_factory()\n self.client.login_api(self.user)\n response = self.client.get(self.url)\n assert response.status_code == 403\n\n def test_endpoint_can_be_disabled(self):\n self.create_switch('enable-scanner-results-api', active=False)\n response = self.client.get(self.url)\n assert response.status_code == 404\n\n def test_get(self):\n yara_result = ScannerResult.objects.create(scanner=YARA)\n self.create_switch('enable-scanner-results-api', active=True)\n\n response = self.client.get(self.url)\n\n assert response.status_code == 200\n json = response.json()\n assert 'results' in json\n results = json['results']\n assert len(results) == 1\n assert (results[0] ==\n ScannerResultSerializer(instance=yara_result).data)\n","sub_path":"src/olympia/scanners/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"174636103","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nItem content view\n\"\"\"\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtWebKit import *\n\nfrom utils.qwebviewselectionsuppressor import QWebViewSelectionSuppressor\nfrom utils.toolbar import ToolbarManager\nfrom ..basic.itemview import ItemViewView as BasicItemViewView, \\\n ItemViewEventFilter as BasicItemViewEventFilter\n\nclass ItemViewEventFilter(BasicItemViewEventFilter):\n def preEventFilter(self, obj, event):\n if super(ItemViewEventFilter, self).preEventFilter(obj, event):\n return True\n if event.type() == QEvent.KeyPress:\n key = event.key()\n if key in (Qt.Key_Down, Qt.Key_Up, Qt.Key_Left, Qt.Key_Right, Qt.Key_Space):\n self.emit(SIGNAL(\"init_browser_scrollbars\"))\n return False\n return False\n\nclass ItemViewView(BasicItemViewView):\n \n def __init__(self, *args, **kwargs):\n super(ItemViewView, self).__init__(*args, **kwargs)\n\n # remove selection when clic/drag to enable finger scrolling\n self.suppressor = QWebViewSelectionSuppressor(self.ui.webView)\n self.suppressor.enable()\n try:\n scroller = self.ui.webView.property(\"kineticScroller\").toPyObject()\n if scroller:\n scroller.setEnabled(True)\n except:\n pass\n \n def get_event_filter_class(self):\n return ItemViewEventFilter\n\n def init_events(self):\n super(ItemViewView, self).init_events()\n QObject.connect(self.event_filter, SIGNAL(\"init_browser_scrollbars\"), self.init_browser_scrollbars)\n \n def get_toolbar_manager_class(self):\n return ToolbarManager\n\n def get_toolbars(self):\n toolbars = super(ItemViewView, self).get_toolbars()\n toolbar_class = self.get_toolbar_class()\n self.bottomToolbar = toolbar_class('+', 'Toolbar', self.bottom_toolbar_pressed, 0.5, 1, parent=self.win)\n self.bottomToolbar.enable()\n toolbars.append(self.bottomToolbar)\n return toolbars\n\n def bottom_toolbar_pressed(self):\n pos = self.bottomToolbar.toolbar.pos()\n self.request_context_menu(pos)\n\n def show_previous(self):\n self.toolbar_manager.move_cursor_away_of_toolbar()\n super(ItemViewView, self).show_previous()\n\n def show_next(self):\n self.toolbar_manager.move_cursor_away_of_toolbar()\n super(ItemViewView, self).show_next()\n \n \n def init_browser_scrollbars(self):\n \"\"\"\n We need scrollbars to navigate with keyboard\n \"\"\"\n frame = 
self.ui.webView.page().currentFrame()\n if frame.scrollBarPolicy(Qt.Vertical) != Qt.ScrollBarAsNeeded:\n frame.setScrollBarPolicy(Qt.Vertical, Qt.ScrollBarAsNeeded)\n\n def help_keys(self):\n help = super(ItemViewView, self).help_keys()\n new_help = { 'title': help['title'], 'keys': []}\n for key in help['keys']:\n if key[0].startswith('F7'):\n new_help['keys'].append(('Vol. keys', key[1]))\n else:\n new_help['keys'].append(key)\n return new_help\n","sub_path":"src/views/mobile/itemview.py","file_name":"itemview.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"224280371","text":"#!/usr/bin/env python3\n\"\"\"\nThis file is part of pynadc\n\nhttps://github.com/rmvanhees/pynadc\n\nRead Sciamachy level 0 products in ENVISAT format\n\nCopyright (c) 2016-2021 SRON - Netherlands Institute for Space Research\n All Rights Reserved\n\nLicense: BSD-3-Clause\n\"\"\"\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\nfrom pathlib import Path\n\nimport numpy as np\n\nfrom pynadc.scia import db, lv0\n\n\n# - global parameters ------------------------------\n\n\n# - local functions --------------------------------\ndef check_dsr_in_states(isp_in, verbose=False):\n \"\"\"\n This module combines L0 ISP per state ID based on parameter icu_time.\n \"\"\"\n # selects complete ISPs\n isp = isp_in[isp_in['fep_hdr']['_quality'] == 0]\n\n # combine L0 ISP on parameter icu_time\n # alternatively one could use parameter state_id\n _arr = isp['data_hdr']['icu_time']\n _arr = np.concatenate(([-1], _arr, [-1]))\n indx = np.where(np.diff(_arr) != 0)[0]\n num_dsr = np.diff(indx)\n icu_time = isp['data_hdr']['icu_time'][indx[:-1]]\n state_id = isp['data_hdr']['state_id'][indx[:-1]]\n if 'pmtc_frame' in isp.dtype.names:\n bcps = isp['pmtc_frame']['bcp']['bcps'][:, 0, 0].astype(int)\n elif 'pmd_data' in isp.dtype.names:\n bcps = isp['pmd_data']['bcps'][:, 0].astype(int)\n else:\n bcps = isp['pmtc_hdr']['bcps'].astype(int)\n\n if not verbose:\n return isp\n\n diff_bcps = None\n for ni in range(num_dsr.size):\n if ni+1 == num_dsr.size:\n continue\n\n diff_bcps = np.diff(bcps[indx[ni]:indx[ni+1]])\n\n if len(diff_bcps) == 1:\n print(\"# {:3d} state_{:02d} {:5d} {:4d}\".format(\n ni, state_id[ni], indx[ni], num_dsr[ni]),\n icu_time[ni])\n else:\n print(\"# {:3d} state_{:02d} {:5d} {:4d}\".format(\n ni, state_id[ni], indx[ni], num_dsr[ni]),\n icu_time[ni],\n np.all(diff_bcps > 0),\n np.unique(diff_bcps))\n\n return isp\n\n\n# - main code --------------------------------------\ndef main():\n \"\"\"\n main function of module 'scia_lv0'\n \"\"\"\n parser = ArgumentParser(\n formatter_class=RawDescriptionHelpFormatter,\n description='read Sciamachy level 0 product'\n )\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('--orbit', nargs=1, type=int,\n help='select data from given orbit')\n group.add_argument('file', nargs='?', type=str,\n help='read data from given file')\n parser.add_argument('--only_headers', action='store_true',\n help='read only the product headers')\n parser.add_argument('--state', nargs='+', type=int,\n help='must be the last argument on the command-line')\n args = parser.parse_args()\n\n scia_fl = \"\"\n if args.orbit is not None:\n file_list = db.get_product_by_type(prod_type='0',\n proc_best=True,\n orbits=args.orbit)\n if file_list and Path(file_list[0]).is_file():\n scia_fl = file_list[0]\n elif args.file is not None:\n if Path(args.file).is_file():\n scia_fl 
= args.file\n else:\n file_list = db.get_product_by_name(product=args.file)\n if file_list and Path(file_list[0]).is_file():\n scia_fl = file_list[0]\n\n if not scia_fl:\n print('Failed: file not found on your system')\n return\n\n print(scia_fl)\n # create object and open Sciamachy level 0 product\n try:\n fid = lv0.File(scia_fl, only_headers=args.only_headers)\n except:\n print('exception occurred in module pynadc.scia.lv0')\n raise\n\n # show the ASCII headers of the level 0 product and exit\n if args.only_headers:\n for key, value in fid.mph.items():\n print('MPH: ', key, value)\n for key, value in fid.sph.items():\n print('SPH: ', key, value)\n for ni, dsd_rec in enumerate(fid.dsd):\n for key in dsd_rec:\n print('DSD[{:02d}]: '.format(ni), key, dsd_rec[key])\n return\n\n # repair the info-records\n fid.repair_info()\n\n # read level 0 ISP\n (det_isp, aux_isp, pmd_isp) = fid.get_isp(state_id=args.state)\n check_dsr_in_states(det_isp, verbose=True)\n check_dsr_in_states(aux_isp, verbose=False)\n check_dsr_in_states(pmd_isp, verbose=False)\n\n\n# --------------------------------------------------\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/scia_lv0.py","file_name":"scia_lv0.py","file_ext":"py","file_size_in_byte":4578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"51759242","text":"import unittest\nimport zserio\n\nfrom testutils import getZserioApi\n\nclass OptionalMemberAlignmentTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.api = getZserioApi(__file__, \"alignment.zs\").optional_member_alignment\n\n def testBitSizeOfWithOptional(self):\n optionalMemberAlignment = self.api.OptionalMemberAlignment.fromFields(True, 0x4433, 0x1122)\n self.assertEqual(self.WITH_OPTIONAL_MEMBER_ALIGNMENT_BIT_SIZE, optionalMemberAlignment.bitSizeOf())\n\n def testBitSizeOfWithoutOptional(self):\n optionalMemberAlignment = self.api.OptionalMemberAlignment.fromFields(False, None, 0x7624)\n self.assertEqual(self.WITHOUT_OPTIONAL_MEMBER_ALIGNMENT_BIT_SIZE, optionalMemberAlignment.bitSizeOf())\n\n def testInitializeOffsetsWithOptional(self):\n optionalMemberAlignment = self.api.OptionalMemberAlignment.fromFields(True, 0x1111, 0x3333)\n for bitPosition in range(32):\n self.assertEqual(self.WITH_OPTIONAL_MEMBER_ALIGNMENT_BIT_SIZE,\n optionalMemberAlignment.initializeOffsets(bitPosition))\n\n bitPosition = 32\n self.assertEqual(self.WITH_OPTIONAL_MEMBER_ALIGNMENT_BIT_SIZE + bitPosition,\n optionalMemberAlignment.initializeOffsets(bitPosition))\n\n def testInitializeOffsetsWithoutOptional(self):\n optionalMemberAlignment = self.api.OptionalMemberAlignment.fromFields(False, None, 0x3334)\n bitPosition = 1\n self.assertEqual(self.WITHOUT_OPTIONAL_MEMBER_ALIGNMENT_BIT_SIZE + bitPosition,\n optionalMemberAlignment.initializeOffsets(bitPosition))\n\n def testReadWithOptional(self):\n hasOptional = True\n optionalField = 0x1234\n field = 0x7654\n writer = zserio.BitStreamWriter()\n OptionalMemberAlignmentTest._writeOptionalMemberAlignmentToStream(writer, hasOptional, optionalField,\n field)\n reader = zserio.BitStreamReader(writer.getByteArray())\n optionalMemberAlignment = self.api.OptionalMemberAlignment.fromReader(reader)\n self._checkOptionalMemberAlignment(optionalMemberAlignment, hasOptional, optionalField, field)\n\n def testReadWithoutOptional(self):\n hasOptional = False\n field = 0x2222\n writer = zserio.BitStreamWriter()\n OptionalMemberAlignmentTest._writeOptionalMemberAlignmentToStream(writer, hasOptional, None, 
field)\n reader = zserio.BitStreamReader(writer.getByteArray())\n optionalMemberAlignment = self.api.OptionalMemberAlignment.fromReader(reader)\n self._checkOptionalMemberAlignment(optionalMemberAlignment, hasOptional, None, field)\n\n def testWriteWithOptional(self):\n hasOptional = True\n optionalField = 0x9ADB\n field = 0x8ACD\n optionalMemberAlignment = self.api.OptionalMemberAlignment.fromFields(hasOptional, optionalField, field)\n writer = zserio.BitStreamWriter()\n optionalMemberAlignment.write(writer)\n reader = zserio.BitStreamReader(writer.getByteArray())\n readOptionalMemberAlignment = self.api.OptionalMemberAlignment.fromReader(reader)\n self._checkOptionalMemberAlignment(readOptionalMemberAlignment, hasOptional, optionalField, field)\n self.assertTrue(optionalMemberAlignment == readOptionalMemberAlignment)\n\n def testWriteWithoutOptional(self):\n hasOptional = False\n field = 0x7ACF\n optionalMemberAlignment = self.api.OptionalMemberAlignment.fromFields(hasOptional, None, field)\n writer = zserio.BitStreamWriter()\n optionalMemberAlignment.write(writer)\n reader = zserio.BitStreamReader(writer.getByteArray())\n readOptionalMemberAlignment = self.api.OptionalMemberAlignment.fromReader(reader)\n self._checkOptionalMemberAlignment(readOptionalMemberAlignment, hasOptional, None, field)\n self.assertTrue(optionalMemberAlignment == readOptionalMemberAlignment)\n\n @staticmethod\n def _writeOptionalMemberAlignmentToStream(writer, hasOptional, optionalField, field):\n writer.writeBool(hasOptional)\n if hasOptional:\n writer.writeBits(0, 31)\n writer.writeBits(optionalField, 32)\n writer.writeBits(field, 32)\n\n def _checkOptionalMemberAlignment(self, optionalMemberAlignment, hasOptional, optionalField, field):\n self.assertEqual(hasOptional, optionalMemberAlignment.getHasOptional())\n if hasOptional:\n self.assertTrue(optionalMemberAlignment.hasOptionalField())\n self.assertEqual(optionalField, optionalMemberAlignment.getOptionalField())\n else:\n self.assertFalse(optionalMemberAlignment.hasOptionalField())\n self.assertEqual(field, optionalMemberAlignment.getField())\n\n WITH_OPTIONAL_MEMBER_ALIGNMENT_BIT_SIZE = 96\n WITHOUT_OPTIONAL_MEMBER_ALIGNMENT_BIT_SIZE = 33\n","sub_path":"test/language/alignment/python/OptionalMemberAlignmentTest.py","file_name":"OptionalMemberAlignmentTest.py","file_ext":"py","file_size_in_byte":4916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"270780439","text":"from typing import Dict, Any, List\n\nfrom omics_dashboard_client.record.record import Record\n\n\nclass UserGroup(Record):\n \"\"\"\n A user group on the Omics Dashboard service.\n \"\"\"\n url_suffix = 'user_groups'\n\n def __init__(self,\n res_data,\n base_url,\n session_user_is_admin=False):\n # type: (Dict[str, Any], str, bool) -> None\n \"\"\"\n :param res_data: The dictionary received as JSON from the server.\n :param base_url: The url of service\n :param session_user_is_admin:\n \"\"\"\n super(UserGroup, self).__init__(res_data, '{}/{}'.format(base_url, UserGroup.url_suffix))\n self._creator_id = res_data['creator_id']\n self._name = res_data['name']\n self._description = res_data['description']\n self._member_ids = [member['id'] for member in res_data['members']]\n self._admin_ids = [admin['id'] for admin in res_data['admins']]\n self.__is_write_permitted = True if self.id is None or session_user_is_admin else res_data['is_write_permitted'] if 'is_write_permitted' in res_data else False\n self.__session_user_is_admin = 
session_user_is_admin\n\n @property\n def name(self):\n # type: () -> str\n \"\"\"\n The name field of this record.\n :return:\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, value):\n # type: (str) -> None\n if self.__is_write_permitted:\n self._name = value\n else:\n raise RuntimeError('Current user cannot edit this field.')\n\n @name.deleter\n def name(self):\n raise RuntimeError('Fields cannot be deleted.')\n\n @property\n def description(self):\n # type: () -> str\n \"\"\"\n The description field of this record.\n :return:\n \"\"\"\n return self._description\n\n @description.setter\n def description(self, value):\n # type: (str) -> None\n if self.__is_write_permitted:\n self._description = value\n else:\n raise RuntimeError('Current user cannot edit this field.')\n\n @description.deleter\n def description(self):\n raise RuntimeError('Fields cannot be deleted.')\n\n @property\n def member_ids(self):\n # type: () -> List[int]\n \"\"\"\n The user ids of the members of this group.\n :return:\n \"\"\"\n return self._member_ids\n\n @member_ids.setter\n def member_ids(self, value):\n # type: (List[int]) -> None\n if self.__is_write_permitted:\n self._member_ids = value\n else:\n raise RuntimeError('Current user cannot edit this field.')\n\n @member_ids.deleter\n def member_ids(self):\n raise RuntimeError('Fields cannot be deleted.')\n\n @property\n def admin_ids(self):\n # type: () -> List[int]\n \"\"\"\n The user ids of the admins of this group.\n :return:\n \"\"\"\n return self._admin_ids\n\n @admin_ids.setter\n def admin_ids(self, value):\n if self.__is_write_permitted:\n self._admin_ids = value\n else:\n raise RuntimeError('Current user cannot edit this field.')\n\n @admin_ids.deleter\n def admin_ids(self):\n raise RuntimeError('Fields cannot be deleted.')\n\n @property\n def creator_id(self):\n # type: () -> int\n \"\"\"\n The user id of the creator of this record.\n :return:\n \"\"\"\n return self._creator_id\n\n @creator_id.setter\n def creator_id(self, value):\n # type: (int) -> None\n if self.__session_user_is_admin:\n self._creator_id = value\n else:\n raise RuntimeError('Only admins can modify creator_id or owner_id.')\n\n @creator_id.deleter\n def creator_id(self):\n raise RuntimeError('Fields cannot be deleted.')\n\n def serialize(self):\n # type: () -> Dict[str, Any]\n \"\"\"\n Get a dictionary representation of this record's fields.\n :return:\n \"\"\"\n return {\n 'id': self.id,\n 'creator_id': self._creator_id,\n 'name': self._name,\n 'description': self._description,\n 'member_ids': self._member_ids,\n 'admin_ids': self._admin_ids\n }\n\n def update(self, new_data, base_url):\n super(UserGroup, self).update(new_data, '{}/{}'.format(base_url, UserGroup.url_suffix))\n self._creator_id = new_data['creator_id']\n self._name = new_data['name']\n self._description = new_data['description']\n self._member_ids = [member['id'] for member in new_data['members']]\n self._admin_ids = [admin['id'] for admin in new_data['admins']]\n","sub_path":"omics_dashboard_client/record/user_group.py","file_name":"user_group.py","file_ext":"py","file_size_in_byte":4710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"491649508","text":"import numpy as np\nimport cvxpy as cp\nimport torch\nimport time\nimport matplotlib.pyplot as plt\n\nc=3*10**8\ndt=10**(-7)\nTs=1.6000e-06\nL=int(Ts/dt)\nT=400\nSNR_dB=15\nDB=10.**(0.1*SNR_dB)\n\nN=8 #the number of receivers\nM=3 #the number of transmitters\n\nK=1 #the number of targets\n# 
np.random.seed(15)\n#Position of receivers\nx_r=np.array([1000,2000,2500,2500,2000,1000,500,500])#+500*(np.random.rand(N)-0.5))#\\\n    # 1500,3000,500,2500,1000,1500,500,3000,\\\n    # 2500,3500,1000,3500,2000,4000,3000,3000]+500*(np.random.rand(N)-0.5))\ny_r=np.array([500,500,1000,2000,2500,2500,2000,1500])#+500*(np.random.rand(N)-0.5))#\\\n    # 3500,3500,500,4000,4000,2500,3000,500,\\\n    # 3500,3000,2000,1000,2000,500,4000,1500]+500*(np.random.rand(N)-0.5))\n\n#Position of transmitters\nx_t=np.array([0,4000,4000,0,1500,0,4000,2000])\ny_t=np.array([0,0,4000,4000,4000,1500,1500,0])\n\nNOISE = 1 #on/off noise\nH = 1 #on/off reflection coefficients\nrk = np.zeros([K,M,N]);\ntk = np.zeros([K,M,N]);\ntau = np.zeros([K,M,N]);\nif H == 0:\n    h=np.ones([K,M,N])\nelse:\n    h=(np.random.randn(K,M,N)+1j*np.random.randn(K,M,N))/np.sqrt(2)\n\ns=np.zeros([M,L])+1j*np.zeros([M,L])\nfor m in range(M):\n    s[m]=np.exp(1j*2*np.pi*(m)*np.arange(L)/M)/np.sqrt(L);#sqrt(0.5)*(randn(1,L)+1i*randn(1,L))/sqrt(L);\nLs = 875\nLe = Ls+125*6\ndx = 125\ndy = dx\nx_grid = np.arange(Ls,Le,dx)\ny_grid = np.arange(Ls,Le,dy)\nsize_grid_x = len(x_grid)\nsize_grid_y = len(y_grid)\ngrid_all_points = [[i, j] for i in x_grid for j in y_grid]\nr=np.zeros(size_grid_x*size_grid_y)\nk_random_grid_points = np.random.permutation(size_grid_x*size_grid_y)[range(K)]\n#Position of targets\nx_k=np.zeros([K])\ny_k=np.zeros([K])\nfor kk in range(K):\n    x_k[kk]=grid_all_points[k_random_grid_points.item(kk)][0]\n    y_k[kk]=grid_all_points[k_random_grid_points.item(kk)][1]\nr[k_random_grid_points] = 1\n\n#Time delays\nfor k in range(K):\n    for m in range(M):\n        for n in range(N):\n            tk[k,m,n]=np.sqrt((x_k[k]-x_t[m])**2+(y_k[k]-y_t[m])**2)\n            rk[k,m,n]=np.sqrt((x_k[k]-x_r[n])**2+(y_k[k]-y_r[n])**2)\n            tau[k,m,n]=(tk[k,m,n]+rk[k,m,n])/c\n\nr_glob = np.zeros([size_grid_x*size_grid_y*M*N]) + 1j*np.zeros([size_grid_x*size_grid_y*M*N])\nfor m in range(M):\n    for n in range(N):\n        for k in range(K):\n            r_glob[k_random_grid_points[k]] = DB*h[k,m,n]*\\\n                            np.sqrt(200000000000)*(1/tk[k,m,n])*(1/rk[k,m,n])\n        k_random_grid_points = k_random_grid_points + size_grid_x * size_grid_y\n\ntau_grid_t = np.zeros([size_grid_x,size_grid_y,M])\nfor m in np.arange(M):\n    for xx in np.arange(size_grid_x):\n        for yy in np.arange(size_grid_y):\n            tau_grid_t[xx,yy,m] = np.sqrt((x_grid[xx]-x_t[m])**2+(y_grid[yy]-y_t[m])**2)\n\ntau_grid_r = np.zeros([size_grid_x,size_grid_y,N])\nfor n in np.arange(N):\n    for xx in np.arange(size_grid_x):\n        for yy in np.arange(size_grid_y):\n            tau_grid_r[xx,yy,n] = np.sqrt((x_grid[xx]-x_r[n])**2+(y_grid[yy]-y_r[n])**2)\n\ntau_grid_c = np.zeros([size_grid_x,size_grid_y,N,M])\nfor n in np.arange(N):\n    for m in np.arange(M):\n        tau_grid_c[:,:,n,m] = (tau_grid_r[:,:,n]+tau_grid_t[:,:,m])/c\n\nif NOISE == 0:\n    x=np.zeros([N,T]) + 1j*np.zeros([N,T])\nelse:\n    x=np.random.randn(N,T)+1j*np.random.randn(N,T)/np.sqrt(2)\n\nfor k in range(K):\n    for m in range(M):\n        for n in range(N):\n            l=np.floor(tau[k,m,n]/dt)\n            l=l.astype(int)\n            x[n,range(l,l+L)]= x[n,range(l,l+L)] + DB*s[m,:]*h[k,m,n]*\\\n                            np.sqrt(200000000000)*(1/tk[k,m,n])*(1/rk[k,m,n])\n\nx_flat = x[0,:].transpose();\nfor n in range(1,N):\n    x_flat = np.concatenate([x_flat,x[n,:].transpose()],axis=0)\n\n\ndictionary=(np.zeros([M,N,T,size_grid_x,size_grid_y])+ \\\n            1j*np.zeros([M,N,T,size_grid_x,size_grid_y]))/np.sqrt(2);\n\nll = [];\n\nfor xx in np.arange(size_grid_x):\n    for yy in np.arange(size_grid_y):\n        for m in np.arange(M):\n            for n in np.arange(N):\n                l=np.floor(tau_grid_c[xx,yy,n,m]/dt)\n                
dictionary[m,n,np.arange(l,l+L,dtype = np.integer),xx,yy] = s[m,:].transpose()*\\\n np.sqrt(200000000000)*\\\n (1/tk[k,m,n])*(1/rk[k,m,n])\n\nD_flat = np.zeros([N*T,N*size_grid_x*size_grid_y*M]) + 1j*np.zeros([N*T,N*size_grid_x*size_grid_y*M])\ni=0\nfor m in range(M):\n for n in range(N):\n for xx in range(size_grid_x):\n for yy in range(size_grid_y):\n D_flat[range(n*T,(n+1)*T),i]= np.squeeze(dictionary[m,n,range(T),xx,yy])\n i += 1\n# from gen_mimo_samples import gen_mimo_samples\n# y, rr, rr_glob, label = gen_mimo_samples(SNR_dB, M, N, K, NOISE, H)\n# print(label)\n\n#group lasso\nlambdas = cp.Parameter(nonneg=True)\nlambdas.value = 1\n# Define problem\nx = cp.Variable(size_grid_x*size_grid_y*M*N,complex=True)\np = cp.Variable(1)\nq = cp.Variable(1)\nobjective = 0.5*p**2+lambdas*q\n\na = []\nfor ii in range(size_grid_x*size_grid_y):\n a.append(cp.norm(x[range(ii,size_grid_x*size_grid_y*M*N,size_grid_x*size_grid_y)],2))\n\nconstr = [cp.norm(x_flat-D_flat@x,2) <= p, sum(a) <= q]\nprob = cp.Problem(cp.Minimize(objective), constr)\nprob.solve()\n\nplt.figure(2)\nplt.subplot(211)\nplt.plot(np.abs(r_glob), lw=2)\nplt.grid(True)\nplt.subplot(212)\nplt.plot(np.abs(x.value), lw=2)\nplt.grid(True)\nplt.show()\n","sub_path":"mimo_radar_0.py","file_name":"mimo_radar_0.py","file_ext":"py","file_size_in_byte":5454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"584169533","text":"from __future__ import absolute_import, unicode_literals\n\nimport ast\nimport os\nimport io\nfrom setuptools import setup\n\n\nclass VersionFinder(ast.NodeVisitor):\n def __init__(self):\n self.version = None\n\n def visit_Assign(self, node): # noqa\n if node.targets[0].id == '__version__':\n self.version = node.value.s\n\n\ndef read(*parts):\n filename = os.path.join(os.path.dirname(__file__), *parts)\n with io.open(filename, encoding='utf-8', mode='rt') as fp:\n return fp.read()\n\n\ndef find_version(*parts):\n finder = VersionFinder()\n finder.visit(ast.parse(read(*parts)))\n return finder.version\n\n\nclassifiers = '''\\\nEnvironment :: Web Environment\nIntended Audience :: Developers\nTopic :: Internet :: WWW/HTTP\nTopic :: Software Development :: Libraries\nLicense :: OSI Approved :: Apache Software License\nDevelopment Status :: 5 - Production/Stable\nNatural Language :: English\nProgramming Language :: Python\nProgramming Language :: Python :: 2\nProgramming Language :: Python :: 3\nOperating System :: OS Independent\n'''\n\ndescription = 'Up to date simple useragent faker with real world database'\n\n\nsetup(\n name='fake-useragent',\n version=find_version('fake_useragent', 'settings.py'),\n packages=[str('fake_useragent')],\n description=description,\n long_description=read('README.rst'),\n install_requires=[],\n author='hellysmile',\n author_email='hellysmile@gmail.com',\n url='https://github.com/hellysmile/fake-useragent',\n zip_safe=False,\n license='http://www.apache.org/licenses/LICENSE-2.0',\n classifiers=filter(None, classifiers.split('\\n')),\n keywords=[\n 'user', 'agent', 'user agent', 'useragent',\n 'fake', 'fake useragent', 'fake user agent',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"612579619","text":"import numpy as np\nfrom scipy import integrate\nimport matplotlib.pyplot as plt\nfrom UTILS.Calculus import Calculus\nfrom UTILS.SetAxisLimit import SetAxisLimit\nfrom UTILS.Tools import 
Tools\nfrom UTILS.Errors import Errors\nimport sys\n\n\n# Theoretical background https://arxiv.org/abs/1401.5176\n\n# Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field #\n# Equations in Spherical Geometry and their Application to Turbulent Stellar #\n# Convection Data #\n\nclass EntropyVarianceResolutionStudy(Calculus, SetAxisLimit, Tools, Errors, object):\n\n    def __init__(self, filename, ig, intc, data_prefix):\n        super(EntropyVarianceResolutionStudy, self).__init__(ig)\n\n        # load data to list of structured arrays\n        eht = []\n        for ffile in filename:\n            eht.append(self.customLoad(ffile))\n\n        # declare data lists\t\t\n        xzn0, nx, ny, nz = [], [], [], []\n\n        ssvar = []\n\n        for i in range(len(filename)):\n            # load grid\n            xzn0.append(np.asarray(eht[i].item().get('xzn0')))\n\n            nx.append(np.asarray(eht[i].item().get('nx')))\n            ny.append(np.asarray(eht[i].item().get('ny')))\n            nz.append(np.asarray(eht[i].item().get('nz')))\n\n            # pick specific Reynolds-averaged mean fields according to:\n            # https://github.com/mmicromegas/ransX/blob/master/DOCS/ransXimplementationGuide.pdf \t\t\n\n            dd = np.asarray(eht[i].item().get('dd')[intc])\n            ddss = np.asarray(eht[i].item().get('ddss')[intc])\n            ddsssq = np.asarray(eht[i].item().get('ddsssq')[intc])\n            sigma_ss = (ddsssq / dd) - (ddss * ddss) / (dd * dd)\n\n            ssvar.append(sigma_ss)\n\n        # share data globally\n        self.data_prefix = data_prefix\n        self.xzn0 = xzn0\n        self.nx = nx\n        self.ny = ny\n        self.nz = nz\n        self.ssvar = ssvar\n        self.ig = ig\n\n    def plot_ssvar(self, LAXIS, xbl, xbr, ybu, ybd, ilg):\n        \"\"\"Plot entropy variance in the model\"\"\"\n\n        if (LAXIS != 2):\n            print(\"ERROR(EntropyVarianceResolutionStudy.py): Only LAXIS=2 is supported.\")\n            sys.exit()\n\n        # load x GRID\n        grd = self.xzn0\n\n        # load DATA to plot\t\t\n        plt1 = self.ssvar\n        nx = self.nx\n        ny = self.ny\n        nz = self.nz\n\n        # find maximum resolution data\n        grd_maxres = self.maxresdata(grd)\n        plt1_maxres = self.maxresdata(plt1)\n\n        plt_interp = []\n        for i in range(len(grd)):\n            plt_interp.append(np.interp(grd_maxres, grd[i], plt1[i]))\n\n        # create FIGURE\n        plt.figure(figsize=(7, 6))\n\n        # format AXIS, make sure it is exponential\n        plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))\n\n        plt10_tmp = plt1[0]\n        plt11_tmp = plt1[0]\n\n        # track the curve with the largest maximum for the axis limits\n        plt1_foraxislimit = plt1[0]\n        plt1max = np.max(plt1[0])\n        for plt1i in plt1:\n            if np.max(plt1i) > plt1max:\n                plt1max = np.max(plt1i)\n                plt1_foraxislimit = plt1i\n\n        # set plot boundaries\n        to_plot = [plt1_foraxislimit]\n        self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)\n\n        # plot DATA \n        plt.title('Entropy Variance')\n\n        for i in range(len(grd)):\n            plt.plot(grd[i], plt1[i], label=str(self.nx[i]) + ' x ' + str(self.ny[i]) + ' x ' + str(self.nz[i]))\n\n        # define and show x/y LABELS\n        if self.ig == 1:\n            setxlabel = r\"x (cm)\"\n            setylabel = r\"$\\sigma_s$ (erg$^2$ g$^{-2}$ K$^{-2})$\"\n            plt.xlabel(setxlabel)\n            plt.ylabel(setylabel)\n        elif self.ig == 2:\n            setxlabel = r\"r (cm)\"\n            setylabel = r\"$\\sigma_s$ (erg$^2$ g$^{-2}$ K$^{-2})$\"\n            plt.xlabel(setxlabel)\n            plt.ylabel(setylabel)\n\n        # show LEGEND\n        plt.legend(loc=ilg, prop={'size': 18})\n\n        # display PLOT\n        plt.show(block=False)\n\n        # save PLOT\n        plt.savefig('RESULTS/' + self.data_prefix + 'mean_ssvar.png')\n\n    # find data with maximum resolution\t\n    def maxresdata(self, data):\n        tmp = 0\n        for idata in data:\n            if idata.shape[0] > tmp:\n                data_maxres = idata\n                tmp = idata.shape[0]\n\n        return 
data_maxres\n","sub_path":"EQUATIONS/FOR_RESOLUTION_STUDY/EntropyVarianceResolutionStudy.py","file_name":"EntropyVarianceResolutionStudy.py","file_ext":"py","file_size_in_byte":4231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"275938756","text":"import requests\n\nfrom bs4 import BeautifulSoup\n\n\ndef search(keyword=\"\",category=\"apps\",format=\"dict\",proxies=None):\n requests.packages.urllib3.disable_warnings()\n response = {\"status\":\"OK\",\"error\":None,\"results\":[]}\n payload = {'q': keyword, 'c': category}\n \n url = \"https://play.google.com/store/search?\"\n try:\n if proxies:\n htmlresponse = requests.get(url,params = payload,proxies=proxies)\n else:\n htmlresponse = requests.get(url,params = payload)\n \n htmlresponse = BeautifulSoup(htmlresponse.text, 'html.parser')\n\n contents = htmlresponse.find(\"div\", { \"class\" : \"card-list two-cards\" }).findAll(\"div\", { \"class\" : \"card-content id-track-click id-track-impression\" })\n \n for content in contents:\n result = {}\n result[\"name\"] = content.find(\"a\", {\"class\":\"title\"})[\"title\"]\n result[\"id\"] = content.find(\"span\", {\"class\":\"preview-overlay-container\"})[\"data-docid\"]\n result[\"developer\"] = content.find(\"a\", {\"class\":\"subtitle\"})[\"title\"]\n result[\"description\"] = content.find(\"div\", {\"class\":\"description\"}).getText()\n try:\n result[\"rating\"] = content.find(\"div\", {\"class\":\"tiny-star star-rating-non-editable-container\"})[\"aria-label\"]\n except TypeError:\n result[\"rating\"] = \"Not rated\"\n result[\"price\"] = content.findAll(\"span\", {\"class\":\"display-price\"})[0].getText()\n result[\"largeImageUrl\"] = content.find(\"img\", {\"class\":\"cover-image\"})[\"data-cover-large\"]\n result[\"smallImageUrl\"] = content.find(\"img\", {\"class\":\"cover-image\"})[\"data-cover-small\"]\n response[\"results\"].append(result)\n return response\n except Exception as e:\n response[\"error\"] = e\n response[\"status\"] = \"Failed\"\n return response\n\n","sub_path":"playkit/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"56560784","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\nwebapp.py\n\nCreated by Pradeep Gowda on 2008-04-23.\nCopyright (c) 2008 Yashotech. 
All rights reserved.\nModified by Izaac Zavaleta, Pagination, memcache and other features\n\"\"\"\nimport wsgiref.handlers\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext import db\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.api import users\nfrom google.appengine.api import memcache\nfrom django.core.paginator import ObjectPaginator\nimport sys\nsys.path.insert(0, 'lib.zip')\nfrom lib.utils import Config\n\nimport os\nimport blog\nimport admin\n \ndef main():\n\n application = webapp.WSGIApplication([\n (\"/*$\", blog.HomePageHandler),\n (r\"/page(\\d+)/*$\", blog.HomePageHandler),\n (\"/login/*$\", blog.LoginHandler),\n (\"/logout/*$\", blog.LogoutHandler),\n (\"/entries/*$\", blog.EntryIndexHandler),\n (r\"/entries(\\d+)/*$\", blog.EntryIndexHandler),\n (\"/feed/*$\", blog.FeedHandler),\n (\"/rss/*$\", blog.RSSHandler),\n (\"/entry/([^/]+)/*$\", blog.EntryHandler),\n (\"/entry/([^/]+)/edit/*$\", blog.NewEntryHandler),\n (\"/entry/([^/]+)/del/*$\", blog.EntryDeleteHandler),\n (r'/date/(\\d\\d\\d\\d)/(\\d\\d)/*$', blog.EntriesForMonthHandler),\n (\"/([^/]+)/edit/*$\", blog.NewEntryHandler),\n (\"/([^/]+)/del/*$\", blog.EntryDeleteHandler),\n (\"/topic/([^/]+)/*$\", blog.TagHandler),\n (\"/flush/*$\", blog.FlushHandler),\n (\"/admin/*$\", admin.AdminHandler),\n (\"/admin/new/*$\", blog.NewEntryHandler),\n (\"/admin/config/*$\", admin.ConfigHandler),\n (\"/admin/entrylist/*$\", admin.EntryListHandler),\n\n # (r\"/shooin/([^/]+)\", shooin.ShooinHandler),\n (r\"/robots.txt\", blog.RobotHandler),\n (r\"/sitemap.xml\", blog.SitemapHandler),\n (\"/([^/]+)/*$\", blog.PageHandler),\n ], debug=True)\n \n config = Config.all()\n if config.count() > 0:\n config = config.fetch(1)[0]\n else: \n config1 = Config(title=\"izaac log\")\n config1.put()\n \n wsgiref.handlers.CGIHandler().run(application)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"webapp.py","file_name":"webapp.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"610225070","text":"# Input: [[1,3],[2,6],[8,10],[15,18]]\n# Output: [[1,6],[8,10],[15,18]]\n# Explanation: Since intervals [1,3] and [2,6] overlaps, merge them into [1,6].\n\n# O(N log N)\ndef merge_intervals(arr):\n\tarr.sort(key = lambda x: x[0])\n\n\tresult = []\n\tresult.append(arr[0])\n\n\tfor i in range(1, len(arr)):\n\t\tif arr[i][0] > result[-1][1]:\n\t\t\tresult.append(arr[i])\n\t\telse:\n\t\t\tif arr[i][1] > result[-1][1]:\n\t\t\t\tresult[-1][1] = arr[i][1]\n\t\t\telse:\n\t\t\t\tcontinue\n\n\treturn result\n\nprint(merge_intervals([[1,3],[2,6],[8,10],[15,18]]))\n\ndef merge_intervals2(arr):\n\tarr.sort(key = lambda x: x[0])\n\tresult = []\n\n\tfor interval in arr:\n\t\tif result == [] or result[-1][1] < interval[0]:\n\t\t\tresult.append(interval)\n\n\t\telse:\n\t\t\tresult[-1][1] = max(result[-1][1], interval[1])\n\n\treturn result\n\nprint(merge_intervals2([[1,3],[2,6],[8,10],[15,18]]))\n\t","sub_path":"IK/AdHoc/MergeOverlappingIntervals.py","file_name":"MergeOverlappingIntervals.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"290406294","text":"\"\"\"\nComplete the solution so that the function\nwill break up camel casing, using a space between words.\nExample\nsolution(\"camelCasing\") == \"camel Casing\"\n\"\"\"\n\n\ndef break_camel_case(string):\n result_string = []\n for sym in string:\n if sym.isupper():\n 
result_string.append(f' {sym}')\n else:\n result_string.append(sym)\n\n return \"\".join(result_string)\n","sub_path":"6_kyu/break_camel_case.py","file_name":"break_camel_case.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"152337466","text":"import urllib.request\nfrom bs4 import BeautifulSoup\nimport csv\nfrom datetime import datetime\n\nincluded_titles = [\n 'request'\n]\n\ndef clean_entry(entry):\n title, description, start_date, end_date = entry\n if start_date != end_date:\n # is a range\n if any([True if 'request' in title.lower() else False for included_title in included_titles]):\n # Collapse events\n if title[0:14] == 'Request period':\n title = 'Deadline ' + title[15:]\n elif title[0:7] == 'Request':\n title = 'Deadline to r' + title[1:]\n elif 'Program selection' in title:\n title = 'Deadline for ' + title[0:27]\n elif 'November Convocation' in title:\n title = 'Last day to apply for November Convocation'\n description = title\n start_date = end_date\n if 'Accept an invitation' in title:\n title = 'Deadline to a' + title[1:]\n description = title\n start_date = end_date\n return tuple([title, description, start_date, end_date])\n\ndef parsedate(date):\n dates = date.split('-');\n start = datetime.strptime(dates[0].strip(), '%B %d, %Y')\n end = start\n if len(dates) > 1:\n end = datetime.strptime(dates[1].strip(), '%B %d, %Y')\n start = start.strftime('%Y-%m-%d')\n end = end.strftime('%Y-%m-%d')\n return [start, end]\n\n\ndef clean_tags(tags1, tags2):\n lst1 = [x.strip() for x in tags1.split(',')]\n lst2 = [x.strip() for x in tags2.split(',')]\n lst = list(set(lst1 + lst2))\n return ','.join(lst)\n\n\ndef relevant_tags(info):\n ans = \"\"\n dct = {\"drop\": \"Drop Deadlines\", \"Credit/No Credit\": \"Cr/NCr\", \"break\": \"Breaks\", \"Break\": \"Breaks\", \"LWD\": \"LWD\", \"enrolment\": \"Enrolment\", \"Form\": \"Forms and Petitions\", \"form\": \"Forms and Petitions\", \"Petition\": \"Forms and Petitions\", \"petition\": \"Forms and Petitions\"}\n for x in dct:\n if x in info:\n ans += \",\" + dct[x]\n return ans\n\n\nwrite_to_csv = {}\n\n\ndef parse_html_to_csv(url, tags):\n fp = urllib.request.urlopen(url)\n mybytes = fp.read()\n summer_academic_html = mybytes.decode(\"utf8\")\n fp.close()\n soup = BeautifulSoup(summer_academic_html, 'html.parser')\n\n for dateinfo in soup.find_all(class_='date'):\n info = dateinfo.find(class_='info').text\n date = dateinfo.find(class_='title').text\n dict_key = clean_entry([info, info] + parsedate(date))\n tags2 = tags + relevant_tags(info)\n old_tags = tags2\n if dict_key in write_to_csv:\n old_tags = write_to_csv[dict_key]\n write_to_csv[dict_key] = clean_tags(old_tags, tags)\n\n\nurls = [\"https://student.utm.utoronto.ca/importantDates/importantDates.php?sub_type=Academic&sub_session=Summer&sub_search_term=\",\n \"https://student.utm.utoronto.ca/importantDates/importantDates.php?sub_type=Financial&sub_session=Summer&sub_search_term=\",\n \"https://student.utm.utoronto.ca/importantDates/importantDates.php?sub_type=Academic&sub_session=Fall&sub_search_term=\",\n \"https://student.utm.utoronto.ca/importantDates/importantDates.php?sub_type=Financial&sub_session=Fall&sub_search_term=\"]\ntags = ['Summer,Academic', 'Summer,Financial', 'Fall/Winter,Academic', 'Fall/Winter,Financial']\n\nfor i in range(4):\n parse_html_to_csv(urls[i], tags[i])\n\n\nwith open('important_dates.csv', 'w', newline='') as csvfile:\n\n spamwriter = csv.writer(csvfile, 
delimiter='|')\n    # spamwriter.writerow(['Title', 'Description', 'Start Date', 'End Date', 'Tags'])\n    spamwriter.writerow(['Title', 'Description','Start Date', 'End Date', 'Tags'])\n\n    for item in write_to_csv:\n        spamwriter.writerow(list(item) + [write_to_csv[item]])\n","sub_path":"scripts/important-dates-scraper.py","file_name":"important-dates-scraper.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"612721568","text":"# python3\r\nfrom collections import deque\r\n\r\nclass Request:\r\n    def __init__(self, arrival_time, process_time):\r\n        self.arrival_time = arrival_time\r\n        self.process_time = process_time\r\n        self.finish_time = None\r\n\r\nclass Response:\r\n    def __init__(self, dropped, start_time):\r\n        self.dropped = dropped\r\n        self.start_time = start_time\r\n\r\nclass Buffer:\r\n    def __init__(self, size):\r\n        self.size = size\r\n        self.queue = deque()\r\n        self.finish_time = 0\r\n\r\n    def Process(self, req):\r\n        # evict every packet that has finished by the time this one arrives\r\n        while self.queue and self.queue[0].finish_time <= req.arrival_time:\r\n            self.queue.popleft()\r\n\r\n        # buffer is full, so the packet is dropped\r\n        if len(self.queue) >= self.size:\r\n            return Response(True, -1)\r\n\r\n        if self.finish_time >= req.arrival_time:\r\n            res = Response(False, self.finish_time)\r\n            self.finish_time += req.process_time\r\n        else:\r\n            res = Response(False, req.arrival_time)\r\n            self.finish_time = req.arrival_time + req.process_time\r\n        req.finish_time = self.finish_time\r\n        self.queue.append(req)\r\n        return res\r\n\r\ndef ReadRequests(count):\r\n    requests = []\r\n    for i in range(count):\r\n        arrival_time, process_time = map(int, input().strip().split())\r\n        requests.append(Request(arrival_time, process_time))\r\n    return requests\r\n\r\ndef ProcessRequests(requests, buffer):\r\n    responses = []\r\n    for request in requests:\r\n        responses.append(buffer.Process(request))\r\n    return responses\r\n\r\ndef PrintResponses(responses):\r\n    for response in responses:\r\n        print(response.start_time if not response.dropped else -1)\r\n\r\nif __name__ == \"__main__\":\r\n    size, count = map(int, input().strip().split())\r\n    requests = ReadRequests(count)\r\n\r\n    buffer = Buffer(size)\r\n    responses = ProcessRequests(requests, buffer)\r\n\r\n    PrintResponses(responses)\r\n","sub_path":"datastructures/Week1/process_packages.py","file_name":"process_packages.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"466988866","text":"#!python\n# -*- coding: utf-8 -*-\n\"\"\"\n**Author:** Bhishan Poudel; Physics PhD Student, Ohio University\n\n**Date:** Oct 05, 2016\n\n**Last update:** Jul 14, 2017 Fri\n\n**Usage:**::\n\n    python square_choice1.py 12\n\n\"\"\"\n# Imports\nimport argparse\n\ndef square_choice1():\n    \"\"\"Calculate the square.\n    \n    Usage:\n      python square_choice1.py 12\n    \n    \"\"\" \n\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"square\", type=int,\n                        help=\"display a square of a given number\")\n    parser.add_argument(\"-v\", \"--verbosity\", type=int, choices=[0, 1, 2],\n                        help=\"increase output verbosity\")\n    args = parser.parse_args()\n    answer = args.square**2\n    
if args.verbosity == 2:\n        print(\"the square of {} equals {}\".format(args.square, answer))\n    elif args.verbosity == 1:\n        print(\"{}^2 == {}\".format(args.square, answer))\n    else:\n        print(answer)\n\nif __name__ == '__main__':\n    square_choice1()\n","sub_path":"Doxygen/argsparse/square_choice1.py","file_name":"square_choice1.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"628568996","text":"import math\r\n\r\ndef bruteForce(numPeople, x, y):\r\n    m = max(x)\r\n    n = max(y)\r\n    matrix =[[0]*n for i in range(m)]\r\n    cood = [0,0]\r\n\r\n    for i in range(len(numPeople)):\r\n        xi = x[i]-1\r\n        yi = y[i]-1\r\n        zi = numPeople[i]\r\n        for j in range(m):\r\n            for k in range(n):\r\n                matrix[j][k] += zi*(abs(xi-j)+abs(yi-k))\r\n\r\n    # print(matrix)\r\n    minv = float('inf')\r\n    for i in range(m):\r\n        for j in range(n):\r\n            if matrix[i][j] < minv:\r\n                minv = matrix[i][j]\r\n                cood = [i+1,j+1]\r\n    print(cood)\r\n    return minv\r\n    \r\n\r\ndef optimal(numPeople, x, y):\r\n    xans = min(x)\r\n    yans = min(y)\r\n    res = 0\r\n    print(xans, yans)\r\n    for i in range(len(numPeople)):\r\n        zi = numPeople[i]\r\n        res += zi*(abs(xans-x[i]-1) + abs(yans-y[i]-1))\r\n    return res\r\n\r\n\r\n'''\r\n1 1 3 1\r\n   3 1\r\n\r\n2\r\n\r\n11 33\r\n  22\r\n1+1+1+1\r\n2+2+2+2\r\n3\r\n\r\n'''\r\n\r\n\r\nif __name__ == '__main__':\r\n    numPeople = [100,1]\r\n    x = [1, 2]\r\n    y = [1, 2]\r\n    print(bruteForce(numPeople, x, y))\r\n    print(optimal(numPeople, x, y))\r\n\r\n","sub_path":"python/gfg/manhat.py","file_name":"manhat.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"31202196","text":"from __future__ import print_function\nfrom stylelens_product.products import Products\nfrom pprint import pprint\n# create an instance of the API class\napi_instance = Products()\n\nproduct = {}\nproduct['host_code'] = 'HC1'\nproduct['product_no'] = 'sss2'\nproduct['version_id'] = '1'\n\ntry:\n    api_response = api_instance.add_product(product)\n    pprint(api_response)\nexcept Exception as e:\n    print(\"Exception when calling add_product: %s\\n\" % e)\n","sub_path":"sample/add_product.py","file_name":"add_product.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"222103822","text":"# -*- coding: utf-8 -*-#\n\n# -------------------------------------------------------------------------------\n# Name:         conv_train\n# Description:\n# Author:       梁超\n# Date:         2021/10/23\n# -------------------------------------------------------------------------------\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\nfrom LCNet_model import *\nfrom colorama import Fore\nfrom metric import *\nimport pandas as pd\nimport os\nimport argparse\n\n\nrate = \"0.5\"  # default is a 6:4 positive/negative sample ratio; set rate=“0.5” for a 1:1 ratio\n\n\nclass SeedDataset(Dataset):\n\n    def __init__(self, annotations_file):\n        super().__init__()\n        self.data: pd.DataFrame = pd.read_csv(annotations_file)\n        self.data: pd.DataFrame = self.data[self.data['label'].notna()]\n        self.Y = self.data['label']\n        self.X = self.data.drop(columns=['id', 'label']).fillna(value=-1)\n\n    def __len__(self):\n        return len(self.data)\n\n    def __getitem__(self, idx):\n        return torch.as_tensor(self.X.iloc[idx].values).type(torch.FloatTensor), torch.as_tensor(self.Y.iloc[idx]).type(\n            torch.LongTensor)\n\n\ndef train(dataloader, model, loss_fn, 
optimizer, device, positive_weight):\n    model.train()\n\n    Y = []\n    for batch, (X, y) in enumerate(dataloader):\n        X, y = X.to(device), y.to(device)\n        logit = model(X)\n        positive_index = y == 1\n\n        loss = (positive_weight * loss_fn(logit[positive_index], y[positive_index]) + loss_fn(logit[~positive_index], y[\n            ~positive_index])) / len(X)\n\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n\n        # if batch % 100 == 0:\n        #     loss = loss.item()\n        #     print(f\"{Fore.GREEN + '[train]===>'} loss: {loss} {'' + Fore.RESET}\")\n\n\ndef valid(dataloader, model, loss_fn, device):\n    model.eval()\n    model = model.to(device)\n    num_dataset = len(dataloader.dataset)\n    loss = 0\n\n    with torch.no_grad():\n        pred, Y = [], []\n        for batch, (X, y) in enumerate(dataloader):\n            X, y = X.to(device), y.to(device)\n\n            logit = model(X)\n            loss += loss_fn(logit, y).item()\n\n            pred.append(logit.argmax(1))\n            Y.append(y)\n\n        loss /= num_dataset\n\n        pred = torch.cat(pred)\n        Y = torch.cat(Y)\n        print(f\"{Fore.CYAN + '[valid]===>'} \"\n              f\"loss: {loss} acc: {100 * Accuracy(pred, Y)}% precision: {Precision(pred, Y)} recall: {Recall(pred, Y)} fscore: {Fscore(pred, Y)}\"\n              f\"{'' + Fore.RESET}\")\n    return loss\n\n# For updating learning rate\ndef update_lr(optimizer, lr):\n    for param_group in optimizer.param_groups:\n        param_group['lr'] = lr\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--train', type=str,\n                        default=\"../../data/33_dimension/train.csv\")\n    parser.add_argument('--valid', type=str,\n                        default=f\"../../data/33_dimension/{rate}valid_banlanced.csv\")\n    parser.add_argument('--in_feature', type=int,\n                        default=33)\n    parser.add_argument('--device', type=str,\n                        default='cpu')\n\n    return parser.parse_args()\n\n\nif __name__ == '__main__':\n\n    args = parse_args()\n\n    torch.manual_seed(777)\n    device = torch.device(args.device)\n\n    batch_size, in_features, out_features = 24, args.in_feature, 2\n    # original values: 1e-3 2.33\n    lr, positive_weight = 1e-4, 1.5\n    epochs = 150\n\n    model = CTNet(batch_size, in_features, out_features)\n    model = model.to(device)\n    loss_fn = nn.CrossEntropyLoss()\n    loss_fn = loss_fn.to(device)\n\n    optimizer = optim.Adam(model.parameters(), lr=lr)\n\n    train_dataset = SeedDataset(args.train)\n    train_dataloader = DataLoader(\n        train_dataset, batch_size=batch_size, shuffle=True)\n\n    valid_dataset = SeedDataset(args.valid)\n    valid_dataloader = DataLoader(valid_dataset, batch_size=1, shuffle=False)\n\n    if(os.path.isdir(\"../../checkpoints\") == 0):\n        os.mkdir(\"../../checkpoints\")\n    if(os.path.isdir(\"../../checkpoints/LCNet\") == 0):\n        os.mkdir(\"../../checkpoints/LCNet\")\n\n    loss = 10000\n    for t in range(epochs):\n        print(f\"{Fore.GREEN + '===>'} Epoch {t + 1} {'' + Fore.RESET}\\n\"\n              \"---------------------------------------\")\n        train(train_dataloader, model, loss_fn,\n              optimizer, device, positive_weight)\n        new_loss = valid(valid_dataloader, model, loss_fn, device)\n        if new_loss < loss:\n            loss = new_loss\n            torch.save(model.state_dict(), f\"../../checkpoints/LCNet/{t}_epoc_loss_{loss}.pt\")\n","sub_path":"models/LCNet/LCNet_train.py","file_name":"LCNet_train.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"213128927","text":"import csv\nimport tensorflow as tf\nimport random\nfrom PIL import Image\n\n\n# read the CSV file\ncsv_reader = csv.reader(open('./data/captcha/labels/labels.csv', encoding='utf-8'))\ndata = []  # define a data list\ni = 0\nj = 0\nm = 0\n# store the image names and labels in the list\nfor row in csv_reader:\n    data.append(row)\n\n# generate the training-set tfrecords files\ntrain_num = random.sample(data, int(0.8*len(data)))\nwhile len(train_num) >= 4000:\n    writer = tf.python_io.TFRecordWriter(\"./train\" + str(i) + \".tfrecords\")  # create a train.tfrecords file\n    train_data = train_num[:4000]\n    for name, label in train_data:\n        img = Image.open(name)  # open the image\n        img = img.resize((56, 40))  # resize the image to 56*40\n        img_raw = img.tobytes()  # convert the image to raw bytes\n        example = tf.train.Example(features=tf.train.Features(feature={\n            \"label\": tf.train.Feature(int64_list=tf.train.Int64List(value=[int(label)])),\n            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))\n        }))\n        writer.write(example.SerializeToString())  # serialize to a string\n    writer.close()\n    del(train_num[:4000])\n    i += 1\n# # ----------------------------------------------------------------------------------------------\n# generate the test-set tfrecord files\ntest_num = random.sample(data, int(0.1*len(data)))\nwhile len(test_num) >= 4000:\n    writer = tf.python_io.TFRecordWriter(\"./test\" + str(j) + \".tfrecords\")  # create a test.tfrecords file\n    test_data = test_num[:4000]\n    for name, label in test_data:\n        img = Image.open(name)  # open the image\n        img = img.resize((56, 40))  # resize the image to 56*40\n        img_raw = img.tobytes()  # convert the image to raw bytes\n        example = tf.train.Example(features=tf.train.Features(feature={\n            \"label\": tf.train.Feature(int64_list=tf.train.Int64List(value=[int(label)])),\n            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))\n        }))\n        writer.write(example.SerializeToString())  # serialize to a string\n    writer.close()\n    del (test_num[:4000])\n    j += 1\n# # ------------------------------------------------------------------------------------------------\n# # generate the validation-set tfrecord file\nvalidation_num = random.sample(data, int(0.1*len(data)))\nwhile len(validation_num) >= 4000:\n    writer = tf.python_io.TFRecordWriter(\"./validation\" + str(m) + \".tfrecords\")  # create a validation.tfrecords file\n    validation_data = validation_num[:4000]\n    for name, label in validation_data:\n        img = Image.open(name)  # open the image\n        img = img.resize((56, 40))  # resize the image to 56*40\n        img_raw = img.tobytes()  # convert the image to raw bytes\n        example = tf.train.Example(features=tf.train.Features(feature={\n            \"label\": tf.train.Feature(int64_list=tf.train.Int64List(value=[int(label)])),\n            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))\n        }))\n        writer.write(example.SerializeToString())  # serialize to a string\n    writer.close()\n    del (validation_num[:4000])\n    m += 1\n","sub_path":"Captcha/tfrecord.py","file_name":"tfrecord.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"448969344","text":"\n\n#class header\nclass _CUR():\n\tdef __init__(self,): \n\t\tself.name = \"CUR\"\n\t\tself.definitions = [u'a mongrel (= dog of mixed type), especially one that is frightening or fierce', u'a person who is thought to be worth nothing or cowardly (= not brave)']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_cur.py","file_name":"_cur.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"610770126","text":"import ogr\nimport os\nimport rtree\nimport time\n\nfrom bison.common import (BISON_DELIMITER, ENCODING)\nfrom bison.common import BisonFiller\nfrom bison.common 
import (get_csv_dict_reader, get_csv_dict_writer)\n\ngbif_interp_file='/tank/data/bison/2019/AS/occurrence.txt'\ngbif_interp_file='/tank/data/bison/2019/Terr/occurrence_lines_1-10000001.txt'\n\ntmpdir = 'tmp'\noutdir = 'out'\ninfname = '/tank/data/bison/2019/AS/tmp/step3_itis_geo_estmeans_occurrence.csv'\ninpath, gbif_fname = os.path.split(gbif_interp_file)\n# one level up\n\ndatapth, _ = os.path.split(inpath)\nancillary_path = os.path.join(datapth, 'ancillary')\ngbif_basefname, ext = os.path.splitext(gbif_fname)\n\n# ancillary data for record update\nterrestrial_shpname = os.path.join(ancillary_path, 'US_CA_Counties_Centroids.shp')\nestmeans_fname = os.path.join(ancillary_path, 'NonNativesIndex20190912.txt')\nmarine_shpname = os.path.join(ancillary_path, 'World_EEZ_v8_20140228_splitpolygons/World_EEZ_v8_2014_HR.shp')\nitis2_lut_fname = os.path.join(ancillary_path, 'itis_lookup.csv')\n\n# reference files for lookups\ntmppath = os.path.join(inpath, 'tmp')\ndataset_lut_fname = os.path.join(tmppath, 'dataset_lut.csv')\norg_lut_fname = os.path.join(tmppath, 'organization_lut.csv')\nitis1_lut_fname = os.path.join(tmppath, 'step3_itis_lut.txt')\n\ndriver = ogr.GetDriverByName(\"ESRI Shapefile\")\nterr_data_src = driver.Open(terrestrial_shpname, 0)\nterrlyr = terr_data_src.GetLayer()\nterr_def = terrlyr.GetLayerDefn()\nidx_fips = terr_def.GetFieldIndex('FIPS')\nidx_cnty = terr_def.GetFieldIndex('COUNTY_NAM')\nidx_st = terr_def.GetFieldIndex('STATE_NAME')\n\neez_data_src = driver.Open(marine_shpname, 0)\neezlyr = eez_data_src.GetLayer()\neez_def = eezlyr.GetLayerDefn()\nidx_eez = eez_def.GetFieldIndex('EEZ')\nidx_mg = eez_def.GetFieldIndex('MRGID')\n\n# ...............................................\ndef rewrite_records(infname, outfname):\n self = BisonFiller(infname)\n\n drdr, inf = get_csv_dict_reader(infname, BISON_DELIMITER, ENCODING)\n self._files.append(inf)\n\n deleteme = []\n for fld in self._bison_ordered_flds:\n if fld not in drdr.fieldnames:\n deleteme.append(fld)\n\n for fld in deleteme:\n self._bison_ordered_flds.remove(fld)\n\n dwtr, outf = get_csv_dict_writer(outfname, BISON_DELIMITER, ENCODING,\n self._bison_ordered_flds)\n\n dwtr.writeheader()\n self._files.append(outf)\n\n recno = 0\n for rec in drdr:\n rec.pop('taxonKey')\n dwtr.writerow(rec)\n recno += 1\n print(recno)\n\n# ...............................................\ndef get_coords(rec):\n slon = rec['longitude']\n slat = rec['latitude']\n try:\n lon = float(slon)\n lat = float(slat)\n except:\n lon = lat = None\n return lon, lat\n\n\n# ...............................................\ndef read_some_points(infname, count):\n recno = 0\n points = []\n try:\n drdr, inf = get_csv_dict_reader(infname, BISON_DELIMITER, ENCODING)\n for rec in drdr:\n if recno > count:\n break\n recno += 1\n lon, lat = get_coords(rec)\n if lon is not None:\n points.append((lon, lat))\n except Exception as e:\n print('Failed reading data from record {}: {}'.format(recno, e))\n finally:\n inf.close()\n return points\n\n# ...............................................\ndef get_geofields_wo_rtree(lon, lat, terrlyr, idx_fips, idx_cnty, idx_st):\n fips = county = state = None\n terr_count = 0\n pt = ogr.Geometry(ogr.wkbPoint)\n pt.AddPoint(lon, lat)\n terrlyr.SetSpatialFilter(pt)\n eezlyr.SetSpatialFilter(pt)\n for poly in terrlyr:\n if terr_count == 0:\n terr_count += 1\n fips = poly.GetFieldAsString(idx_fips)\n county = poly.GetFieldAsString(idx_cnty)\n state = poly.GetFieldAsString(idx_st)\n else:\n terr_count += 1\n fips = county = state = None\n 
break\n terrlyr.ResetReading()\n return (fips, county, state)\n\n# ...............................................\ndef get_geofields_with_rtree(lon, lat, terrindex, terrfeats):\n fips = county = state = None\n terr_count = 0\n pt = ogr.Geometry(ogr.wkbPoint)\n pt.AddPoint(lon, lat)\n\n for tfid in list(terrindex.intersection((lon, lat))):\n fips = county = state = eez = mrgid = None\n# feat = terrfeats[tfid]['feature']\n# geom = feat.GetGeometryRef()\n geom = terrfeats[tfid]['geom']\n if pt.Within(geom):\n if terr_count == 0:\n terr_count += 1\n fips = terrfeats[tfid]['fips']\n county = terrfeats[tfid]['county']\n state = terrfeats[tfid]['state']\n else:\n terr_count += 1\n fips = county = state = None\n break\n return (fips, county, state)\n# ...............................................\n#\n#\n# ...............................................\ndef create_marine_index(eezlyr, idx_eez, idx_mg):\n marindex = rtree.index.Index(interleaved=False)\n marfeats = {}\n for fid in range(0, eezlyr.GetFeatureCount()):\n feat = eezlyr.GetFeature(fid)\n geom = feat.GetGeometryRef()\n xmin, xmax, ymin, ymax = geom.GetEnvelope()\n marindex.insert(fid, (xmin, xmax, ymin, ymax))\n marfeats[fid] = {'feature': feat,\n 'geom': geom,\n 'eez': feat.GetFieldAsString(idx_eez),\n 'mrgid': feat.GetFieldAsString(idx_mg)}\n return marindex, marfeats\n\n# ...............................................\ndef create_terr_index(terrlyr, idx_fips, idx_cnty, idx_st):\n terrindex = rtree.index.Index(interleaved=False)\n terrfeats = {}\n for fid in range(0, terrlyr.GetFeatureCount()):\n feat = terrlyr.GetFeature(fid)\n geom = feat.GetGeometryRef()\n xmin, xmax, ymin, ymax = geom.GetEnvelope()\n terrindex.insert(fid, (xmin, xmax, ymin, ymax))\n terrfeats[fid] = {'feature': feat,\n 'geom': geom,\n 'fips': feat.GetFieldAsString(idx_fips),\n 'county': feat.GetFieldAsString(idx_cnty),\n 'state': feat.GetFieldAsString(idx_st)}\n return terrindex, terrfeats\n\n# ...............................................\n# Main...........................................\n# ...............................................\n\n# infname = '/tank/data/bison/2019/Terr/tmp/step4_occurrence_lines_1-10000001.csv'\n# outfname = '/tank/data/bison/2019/Terr/tmp/smoketest_1-2638050.csv'\n# rewrite_records(infname, outfname)\n\n# ......................\noutfname = 'smoketest_1-2637603.csv'\noutfname = '/tank/data/bison/2019/Terr/tmp/smoketest_1-2637603.csv'\ntestsize = 10000\n\n\n# ......................\npoints = read_some_points(outfname, testsize)\nterrindex, terrfeats = create_terr_index(terrlyr, idx_fips, idx_cnty, idx_st)\n# ......................\n\n\n# ......................\nrtree_start = time.time()\nfor i in range(len(points)):\n lon, lat = points[i]\n try:\n (fips, county, state) = get_geofields_with_rtree(lon, lat,\n terrindex, terrfeats)\n except Exception as e:\n print ('Failed on record {} with {}'.format(i, e))\n\nrtree_stop = time.time()\nrtree_elapsed = rtree_stop - rtree_start\nprint(rtree_elapsed)\n\n\n\n# ......................\nogr_start = time.time()\nfor i in range(len(points)):\n lon, lat = points[i]\n try:\n (fips, county, state) = get_geofields_wo_rtree(lon, lat, terrlyr,\n idx_fips, idx_cnty, idx_st)\n except Exception as e:\n print ('Failed on record {} with {}'.format(i, e))\n\nogr_stop = time.time()\nogr_elapsed = ogr_stop - ogr_start\nprint(ogr_elapsed)\n# 
......................\n","sub_path":"obsolete/src/common/compare_speed.py","file_name":"compare_speed.py","file_ext":"py","file_size_in_byte":7888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"454371134","text":"#!/usr/bin/python3\n\ndef main():\n FileRead()\n DemarcationLine()\n LineStrip()\n DemarcationLine()\n CheckFileExtension()\n\ndef ReadFile(filename):\n files = open(filename)\n lines = files.readlines()\n for index, line in enumerate(lines):\n print(index, \"=\", line)\n\ndef StripFile(filename):\n files = open(filename)\n for lines in files:print(lines.strip())\n\n\ndef RaisingError(filename):\n if filename.endswith(\".txt\"):\n lines = open(filename)\n for line in lines:print(line.strip())\n else:\n raise ValueError(\"File must end with .txt\")\n\ndef FileRead():\n try:\n ReadFile(\"../primary/files.txt\") # path is okay, it reads file\n except IOError as e:\n print(\"Could not open file:\", e)\n\ndef LineStrip():\n try:\n StripFile(\"primary/files.txt\")\n except IOError as e:\n print(\"Could not open file:\", e) # it will give error\n\n\ndef CheckFileExtension():\n try:\n RaisingError(\"../primary/file.rtf\")\n except IOError as e:\n print(\"Could not open file:\", e)\n except ValueError as e:\n print(\"Bad Filename:\", e)\n\n\ndef DemarcationLine():\n print(\"******************\")\n\nif __name__ == \"__main__\":\n main()","sub_path":"exceptions/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"85291811","text":"from PyQt5 import QtCore\nfrom wikimusic import network, view\n\n\nclass CollectorThread(QtCore.QThread):\n # region Constants\n MAX = 10\n # endregion\n\n # region Signals\n collected = QtCore.pyqtSignal(view.MetaMusicListItem, bool)\n status_update = QtCore.pyqtSignal(view.MetaMusicListItem, str)\n global_progress_update = QtCore.pyqtSignal(int)\n # endregion\n\n def __init__(self):\n super().__init__()\n self.q = None\n self.__i = 0\n\n def run(self):\n if self.q:\n while True:\n item = self.q.get()\n self.process(item)\n self.q.task_done()\n\n # region Main Execution\n def process(self, item):\n self.send(item, 'Page Request')\n complete = self.process_request(item)\n self.i = self.MAX/2\n if complete:\n self.i = self.MAX\n self.send(item, \"Success\")\n self.collected.emit(item, True)\n return\n\n self.send(item, 'Page Request')\n complete = self.process_request(item, True)\n self.i = self.MAX\n self.send(item, \"Success\" if complete else \"Failure\")\n self.collected.emit(item, complete)\n\n def process_request(self, item, fallback=False):\n song = item.model\n title = '{} ({})'.format(song.title, song.main_artist) if fallback else song.title\n pages = network.request_wiki_page(title)\n self.i += 1\n if pages:\n if len(pages) == 1:\n self.send(item, 'Scraping')\n scrape = network.scrape_metadata(song, pages[0])\n self.i += 1\n return scrape\n else:\n self.send(item, 'Filter (artist)')\n page = network.similarity_threshold_filter(pages, song.main_artist)\n self.i += 1\n if page:\n self.send(item, 'Scraping')\n scrape = network.scrape_metadata(song, page)\n self.i += 1\n if not scrape:\n self.send(item, 'Filter (song)')\n page = network.perfect_match_filter(pages, '(song)')\n self.i += 1\n if page:\n self.send(item, 'Scraping')\n scrape = network.scrape_metadata(song, page)\n self.i += 1\n return scrape\n return scrape\n else:\n self.send(item, 'Filter (song)')\n page 
= network.perfect_match_filter(pages, '(song)')\n                self.i += 1\n                if page:\n                    self.send(item, 'Scraping')\n                    scrape = network.scrape_metadata(song, page)\n                    self.i += 1\n                    return scrape\n        return False\n    # endregion\n\n    # region Properties\n    @property\n    def i(self):\n        return self.__i\n\n    @i.setter\n    def i(self, value):\n        self.global_progress_update.emit(value - self.__i)\n        self.__i = value % self.MAX\n    # endregion\n\n    # region Helpers\n    def send(self, item, status):\n        self.status_update.emit(item, status)\n    # endregion\n\n    pass\n","sub_path":"build/lib/wikimusic/thread.py","file_name":"thread.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"50883446","text":"\nimport re\n\nfrom pymongo import MongoClient\n\nmyclient = MongoClient(\"mongodb+srv://TwitterDB:twitterextraction@twittercluster.udioj.mongodb.net/test\")\ndb = myclient[\"ProcessedDB\"]\nCollection = db[\"ProcessedTweet\"]\nrawDb = myclient[\"RawDB\"]\nTestCollection = rawDb[\"TestTweet\"]\nx = TestCollection.find()\ntweetList = []\n\nfor i in x:\n\n    newRecord = {}\n    if 'text' in i:\n\n        textField = i['text']\n        # do operations\n        textField = re.sub(r'\\s+', ' ', textField)\n        textField = re.sub(r\"http\\S+\", '', textField)\n        textField = re.sub(r\"[^a-zA-Z0-9!@',.\\$& ]\", '', textField)\n        textField = re.sub(r'\\\\u[A-Za-z0-9]{4}', '', textField)\n        textField = re.sub(r'&amp;', '&', textField)\n        textField = re.sub(r'\\\\n', ' ', textField)\n\n        newRecord['text'] = textField\n    if 'id' in i:\n        newRecord['id'] = i['id']\n    if 'source' in i:\n        newRecord['source'] = i['source']\n    if 'quote_count' in i:\n        newRecord['quote_count'] = i['quote_count']\n    if 'in_reply_to_screen_name' in i:\n        newRecord['in_reply_to_screen_name'] = i['in_reply_to_screen_name']\n    if 'in_reply_to_user_id_str' in i:\n        newRecord['in_reply_to_user_id_str'] = i['in_reply_to_user_id_str']\n    if 'created_at' in i:\n        newRecord['created_at'] = i['created_at']\n\n    tweetList.append(newRecord)\nCollection.insert_many(tweetList)\n\nprint(\"ProcessedDB contains cleaned tweets where special characters, URLs, emoticons, ampersand, etc are removed\")\n\n","sub_path":"Python Files/TweetClean.py","file_name":"TweetClean.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"468079432","text":"import os\nimport fnmatch\nimport functools\nimport mimetypes\n\nfrom . 
import utils\nfrom .exceptions import InvalidArguments, \\\n FileCommandException\n\n\n\nclass Command(object):\n \n def __init__(self, path, **defaults):\n self.path = path\n self.defaults = defaults\n\n def request(self, client, **kwargs):\n return client.request(self.path, **kwargs)\n\n def prepare(self, client, **kwargs):\n kwargs.update(self.defaults)\n return functools.partial(self.request, client, **kwargs)\n\n\nclass ArgCommand(Command):\n \n def __init__(self, path, argc=None, **defaults):\n Command.__init__(self, path, **defaults)\n self.argc = argc\n\n def request(self, client, *args, **kwargs):\n if self.argc and len(args) != self.argc:\n raise InvalidArguments(\"[%s] command requires %d arguments.\" % (self.path, self.argc))\n return client.request(self.path, args=args, **kwargs)\n\n\nclass FileCommand(Command):\n\n def __init__(self, path, accept_multiple=True, **defaults):\n Command.__init__(self, path, **defaults)\n self.accept_multiple = accept_multiple\n \n def request(self, client, f, **kwargs):\n if kwargs.pop('recursive', False):\n return self.recursive(client, f, **kwargs)\n if isinstance(f, (list, tuple)):\n return self.multiple(client, f, **kwargs)\n if isinstance(f, basestring) and os.path.isdir(f):\n ls = [os.path.join(f,p) for p in os.listdir(f)]\n fs = filter(os.path.isfile, ls)\n return self.multiple(client, fs, **kwargs)\n else:\n return self.single(client, f, **kwargs)\n \n\n @staticmethod\n def _multipart_field(_file):\n try:\n content = _file.read()\n try:\n fn = _file.name\n except AttributeError:\n fn = ''\n except AttributeError:\n fn = _file\n if os.path.isdir(fn):\n raise FileCommandException(\"Use keyword argument [recursive=True] in order to add multiple directories.\")\n with open(_file, 'rb') as fp:\n content = fp.read()\n ft = mimetypes.guess_type(fn)[0] or 'application/octet-stream'\n \n return ('file', (os.path.basename(fn), content, ft))\n\n \n def single(self, client, _file, **kwargs):\n \"\"\"Adds a single file-like object to IPFS.\"\"\"\n files = [self._multipart_field(_file)]\n return client.request(self.path, files=files, **kwargs)\n \n\n def multiple(self, client, _files, **kwargs):\n \"\"\"Adds multiple file-like objects as a multipart request to IPFS.\"\"\"\n if not self.accept_multiple:\n raise FileCommandException(\"[%s] does not accept multiple files.\" % self.path)\n \n fnpattern = kwargs.pop('match', '*')\n files = []\n for fn in _files:\n if not fnmatch.fnmatch(fn, fnpattern):\n continue\n files.append(self._multipart_field(fn))\n if not files:\n raise FileCommandException(\"No files matching pattern: {}\".format(fnpattern))\n return client.request(self.path, files=files, **kwargs)\n\n\n def recursive(self, client, dirname, **kwargs):\n \"\"\"Loads a directory recursively into IPFS, files are matched against\n the given pattern.\n \n ***NOTE: This is a ghetto temp solution until streaming multipart files\n can be figured out.\n \"\"\"\n if not self.accept_multiple:\n raise FileCommandException(\"[%s] does not accept multiple files.\" % self.path)\n \n kwargs.update({'decoder': 'json'})\n fnpattern = kwargs.pop('match', '*')\n results = []\n\n def fsize(fullpath):\n \"\"\"This value is fudged to match the discrepancy between however\n the IPFS Api calculates file sizes and the value given by Python\"\"\"\n return os.path.getsize(fullpath) + 8\n \n def walk(dirname):\n ls = os.listdir(dirname)\n files = filter(lambda p: os.path.isfile(os.path.join(dirname, p)), ls)\n dirs = filter(lambda p: os.path.isdir(os.path.join(dirname, p)), 
ls)\n \n dir_json = { u\"Data\": u'\\x08\\x01',\n u\"Links\": []}\n\n for fn in files:\n if not fnmatch.fnmatch(fn, fnpattern):\n continue\n fullpath = os.path.join(dirname, fn)\n res = client.request('/add',\n files=[self._multipart_field(fullpath)],\n **kwargs)\n \n res[u\"Size\"] = fsize(fullpath)\n dir_json[u\"Links\"].append(res)\n results.append({\"Name\": fullpath, \"Hash\": res[u\"Hash\"]})\n \n for subdir in dirs:\n fullpath = os.path.join(dirname, subdir)\n res = walk(fullpath)\n \n dir_json[u\"Links\"].append({u\"Name\": unicode(subdir, 'utf-8'), u\"Hash\": res[u\"Hash\"]})\n results.append({\"Name\": fullpath, \"Hash\": res[u\"Hash\"]})\n \n buf = utils.make_json_buffer(dir_json)\n return client.request('/object/put',\n files=[self._multipart_field(buf)],\n **kwargs)\n \n # walk directory and then add final hash root to results\n res = walk(dirname)\n results.append({\"Name\": dirname, \"Hash\": res[u\"Hash\"]})\n return results\n","sub_path":"ipfsApi/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"413875299","text":"import unittest\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nfrom surgery import surgery\n\n\nglobal surgery_on\nsurgery_on = True\n\n\n@surgery(surgery_on)\ndef test1(x, y, z):\n def test2(a):\n return 10 + a + x\n\n def test3():\n return 100\n\n def test4():\n a = 1000\n return a + z\n\n def test5(a, b, c, d):\n return a + b + c + d\n\n return x + y + z\n\n\nclass Test(unittest.TestCase):\n def test_return(self):\n if surgery_on:\n print(\"surgery is active\")\n inner_f = test1(50, 100, 200)\n self.assertEqual(inner_f['test2'](100), 160)\n self.assertEqual(inner_f['test3'](), 100)\n self.assertEqual(inner_f['test4'](), 1200)\n self.assertEqual(inner_f['test5'](1, 2, 3, 4), 10)\n else:\n print(\"surgery is inactive\")\n self.assertEqual(test1(50, 100, 200), 350)\n \nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"525167313","text":"# -*-coding:utf-8 -*\r\nfrom tkinter import *\r\ndef pointeur(event):\r\n\tchaine.configure(text = \"X =\" + str(event.x) +\\\r\n\t\", Y =\" + str(event.y))\r\n\r\nfen = Tk()\r\ncadre = Frame(fen, width =200, height =150, bg=\"red\")\r\n\r\ncadre.bind(\"\", pointeur)\r\n\r\ncadre.pack()\r\nchaine = Label(fen)\r\nchaine.pack()\r\nfen.mainloop()","sub_path":"POO/click.py","file_name":"click.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"490737686","text":"#### Weighted Means ####\n\n## store input data\n# number of values, converted to integer\nn = int(input()) \n# numbers to calculated weighted mean of, converted to list of float\ndata = list(map(int, input().split()))\n# weights, converted to list of ints\nweights = list(map(int, input().split()))\n\n## calculate the weighted mean\n# initialize a new list for the sumproduct of the numbers and weights\nweighted_data = []\nfor i in range(len(data)):\n weighted_data.append(data[i] * weights[i])\n# sum weighted_data list and divide by the sum of the weights\nprint(round(sum(weighted_data)/sum(weights), 
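# Editorial note on the ipfsApi commands.py record above: it is Python 2 code
# (`basestring`, `unicode(subdir, 'utf-8')`). Minimal Python 3 equivalents for
# the two idioms it relies on (a sketch, not from any released ipfsApi):
import os

def is_path_like(f):
    # Py3 stand-in for `isinstance(f, basestring)`
    return isinstance(f, (str, bytes, os.PathLike))

def to_text(s, encoding="utf-8"):
    # Py3 stand-in for `unicode(s, encoding)`
    return s.decode(encoding) if isinstance(s, bytes) else s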
1))\n","sub_path":"python/10-days-of-statistics/day-0/weighted-mean.py","file_name":"weighted-mean.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"487132881","text":"import os\nfrom conans import ConanFile, tools, CMake\nfrom conans.errors import ConanException\n\n\nclass ZlibConan(ConanFile):\n name = \"zlib\"\n version = \"1.2.11\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://zlib.net\"\n license = \"Zlib\"\n description = (\"A Massively Spiffy Yet Delicately Unobtrusive Compression Library \"\n \"(Also Free, Not to Mention Unencumbered by Patents)\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False], \"minizip\": [True, False, \"deprecated\"]}\n default_options = {\"shared\": False, \"fPIC\": True, \"minizip\": \"deprecated\"}\n exports_sources = [\"CMakeLists.txt\", \"CMakeLists_minizip.txt\", \"patches/**\"]\n generators = \"cmake\"\n topics = (\"conan\", \"zlib\", \"compression\")\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n if self.options.shared:\n del self.options.fPIC\n\n if self.options.minizip != \"deprecated\":\n self.output.warn(\"minizip option is deprecated. Please use the new minizip/1.2.11 package\")\n\n def package_id(self):\n del self.info.options.minizip\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder, strip_root=True)\n\n def _patch_sources(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n\n with tools.chdir(self._source_subfolder):\n # https://github.com/madler/zlib/issues/268\n tools.replace_in_file('gzguts.h',\n '#if defined(_WIN32) || defined(__CYGWIN__)',\n '#if defined(_WIN32) || defined(__MINGW32__)')\n\n is_apple_clang12 = self.settings.compiler == \"apple-clang\" and tools.Version(self.settings.compiler.version) >= \"12.0\"\n if not is_apple_clang12:\n for filename in ['zconf.h', 'zconf.h.cmakein', 'zconf.h.in']:\n tools.replace_in_file(filename,\n '#ifdef HAVE_UNISTD_H '\n '/* may be set to #if 1 by ./configure */',\n '#if defined(HAVE_UNISTD_H) && (1-HAVE_UNISTD_H-1 != 0)')\n tools.replace_in_file(filename,\n '#ifdef HAVE_STDARG_H '\n '/* may be set to #if 1 by ./configure */',\n '#if defined(HAVE_STDARG_H) && (1-HAVE_STDARG_H-1 != 0)')\n\n def build(self):\n self._patch_sources()\n make_target = \"zlib\" if self.options.shared else \"zlibstatic\"\n cmake = CMake(self)\n cmake.configure(build_folder=self._build_subfolder)\n cmake.build(target=make_target)\n\n def _rename_libraries(self):\n if self.settings.os == \"Windows\":\n lib_path = os.path.join(self.package_folder, \"lib\")\n suffix = \"d\" if self.settings.build_type == \"Debug\" else \"\"\n\n if self.options.shared:\n if self.settings.compiler == \"Visual Studio\" and suffix:\n current_lib = os.path.join(lib_path, \"zlib%s.lib\" % suffix)\n tools.rename(current_lib, os.path.join(lib_path, \"zlib.lib\"))\n else:\n if self.settings.compiler == \"Visual Studio\":\n current_lib = os.path.join(lib_path, \"zlibstatic%s.lib\" % suffix)\n tools.rename(current_lib, os.path.join(lib_path, \"zlib.lib\"))\n 
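# Editorial note on weighted-mean.py above: its comment says the data are
# "converted to list of float" but the code maps int; the result is the same
# for integer input. The whole computation also fits one zip (illustrative
# values):
data, weights = [1, 2, 3], [10, 20, 30]
print(round(sum(d * w for d, w in zip(data, weights)) / sum(weights), 1))  # 2.3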
elif self.settings.compiler in (\"clang\", \"gcc\", ):\n if not self.settings.os.subsystem:\n current_lib = os.path.join(lib_path, \"libzlibstatic.a\")\n tools.rename(current_lib, os.path.join(lib_path, \"libzlib.a\"))\n\n def _extract_license(self):\n with tools.chdir(os.path.join(self.source_folder, self._source_subfolder)):\n tmp = tools.load(\"zlib.h\")\n license_contents = tmp[2:tmp.find(\"*/\", 1)]\n tools.save(\"LICENSE\", license_contents)\n\n def package(self):\n self._extract_license()\n self.copy(\"LICENSE\", src=self._source_subfolder, dst=\"licenses\")\n\n # Copy headers\n for header in [\"*zlib.h\", \"*zconf.h\"]:\n self.copy(pattern=header, dst=\"include\", src=self._source_subfolder, keep_path=False)\n self.copy(pattern=header, dst=\"include\", src=self._build_subfolder, keep_path=False)\n\n # Copying static and dynamic libs\n if self.options.shared:\n self.copy(pattern=\"*.dylib*\", dst=\"lib\", src=self._build_subfolder, keep_path=False, symlinks=True)\n self.copy(pattern=\"*.so*\", dst=\"lib\", src=self._build_subfolder, keep_path=False, symlinks=True)\n self.copy(pattern=\"*.dll\", dst=\"bin\", src=self._build_subfolder, keep_path=False)\n self.copy(pattern=\"*.dll.a\", dst=\"lib\", src=self._build_subfolder, keep_path=False)\n else:\n self.copy(pattern=\"*.a\", dst=\"lib\", src=self._build_subfolder, keep_path=False)\n self.copy(pattern=\"*.lib\", dst=\"lib\", src=self._build_subfolder, keep_path=False)\n\n self._rename_libraries()\n\n def package_info(self):\n self.cpp_info.libs.append(\"zlib\" if self.settings.os == \"Windows\" and not self.settings.os.subsystem else \"z\")\n self.cpp_info.names[\"cmake_find_package\"] = \"ZLIB\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"ZLIB\"\n","sub_path":"recipes/zlib/1.2.11/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":5852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"429665914","text":"# -*- coding: UTF-8 -*-\n\nimport re\nimport json\nimport requests\nimport time\n# 在根目录下执行以下指令:\n# python3 ./tools/extractStages.py\n\n# https://github.com/Kengxxiao/ArknightsGameData.git\nbase = \"https://raw.githubusercontent.com/Kengxxiao/ArknightsGameData/master/%s/gamedata\"\nservers=[\"en_US\",\"ja_JP\",\"ko_KR\",\"zh_CN\"] #\"en_US\",\"ja_JP\",\"ko_KR\",\"zh_CN\"\noutputDict={\"allStage\":[],\"preset\":{}}\n# 某次更新后上面所用的数据出现了乱码问题,可以clone到本地后手动修复,此时需要用以下路径\n# base = r\"C:\\Users\\user\\ArknightsGameData\\zh_CN\\gamedata\"\n\ndef readJson(path , server = \"zh_CN\"):\n if base.startswith(\"http\"):\n r = requests.get(base % server + path\n # 如果需要可以取消注释以使用代理,请注意socks5代理需要 pip3 install -U requests[socks]\n , proxies = { 'http': 'socks5://127.0.0.1:1080', 'https': 'socks5://127.0.0.1:1080'}\n )\n r.encoding = \"utf-8\"\n return r.json()\n else:\n with open(base + path, encoding='utf-8') as f:\n return json.load(f, encoding='utf-8')\ndef addId(stage):\n output={\"stageId\":stage[0],\"code\":stage[1][\"code\"]}\n return output\ndef removeTR(stage):\n #去除剿灭作战和教程关/突袭模式/超难关\n if(re.search(\"(TR-\\d+|H\\d+-\\d+|PR-[A-Z]-\\d+)\",stage[\"code\"]) or not re.search(\"-\",stage[\"code\"]) or re.search(\"#f#\",stage[\"stageId\"])):\n return False\n return True\ndef checkActivityOpen(stage):\n if(StageRawData[\"stageValidInfo\"].__contains__(stage[\"stageId\"])):\n StageTime = StageRawData[\"stageValidInfo\"][stage[\"stageId\"]]\n if(time.time()>StageTime[\"startTs\"] and (StageTime[\"endTs\"] == -1 or time.time()\" %self.name\n\n\nclass File(db.Model):\n# 
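# Editorial note on the zlib conanfile above: it is a Conan v1-style recipe
# (tools.get/tools.patch, cmake generator). Since name and version are set on
# the class, a local package build should reduce to (assumption: a recent
# Conan 1.x client):
#   conan create .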
__tablename__ = \"file\"\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(80))\n created_time = db.Column(db.DateTime)\n category_id = db.Column(db.Integer, db.ForeignKey('category.id'))\n category = db.relationship('Category', backref='files')\n content = db.Column(db.Text)\n\n\n def __init__(self, title, time, category, content):\n self.title = title\n self.created_time = time\n self.category = category\n self.content = content\n\n\n def __repr__(self):\n return '' %self.title\n\n\n\n\n def add_tag(self, tag_name):\n file_tags = mongodb.tag.find_one({'file_id': self.id})\n if file_tags == None:\n mongodb.tag.insert_one({'file_id': self.id, 'title':self.title, 'tags':[tag_name]})\n else:\n tags= file_tags['tags']\n tags.append(tag_name)\n tags = set(tags)\n mongodb.tag.update_one({'file_id': self.id},{'$set':{'tags': list(tags)}})\n\n\n\n\n def remove_tag(self, tag_name):\n file_tags = mongodb.tag.find_one({'file_id': self.id})\n tags= file_tags['tags']\n try:\n tags.remove(tag_name)\n except ValueError:\n return \"tag not exist\"\n\n mongodb.tag.update_one({'file_id': self.id},{'$set':{'tags': tags}})\n \n\n\n\n @property\n def tags(self):\n file_tags = mongodb.tag.find_one({'file_id': self.id})\n return file_tags['tags']\n\n\n\n\n\nclass dbinit(object):\n\n def create(self):\n db.create_all()\n\n def insertdata(self):\n java = Category('Java')\n python = Category('Python')\n\n file1 = File('Hello Java', datetime.utcnow(), java, 'File Content - Java is cool!')\n file2 = File('Hello Python', datetime.utcnow(), python, 'File Content - Python is cool!')\n\n db.session.add(java)\n db.session.add(python)\n db.session.add(file1)\n db.session.add(file2)\n\n db.session.commit()\n\n\n file1.add_tag('tech')\n file1.add_tag('Java')\n file1.add_tag('linux')\n file2.add_tag('tech')\n file2.add_tag('Python')\n\n\n def drop(self):\n db.drop_all()\n\n\n\ndatabase = dbinit()\n\n\n\n@app.route('/')\ndef index():\n article_list = db.session.query(File.id, File.title).all()\n al =[]\n for article in article_list:\n article = list(article)\n file = File.query.get(article[0])\n article.append(file.tags)\n al.append(article)\n\n article_list = al\n\n\n return render_template(\"index.html\", article_list = article_list)\n\n\n\n\n@app.route('/files/')\n\n\ndef file(fileid):\n file = File.query.get_or_404(fileid)\n\n return render_template('file.html', file = file)\n\n\n\n@app.errorhandler(404)\ndef not_found(error):\n return render_template('404.html'), 404\n\n\n\n\nif __name__ == '__main__':\n database.drop()\n database.create()\n database.insertdata()\n app.run()\n\n\n","sub_path":"challenge08-nosql/news/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"296944164","text":"'''\nyou are given a string to find the greatest conssuctive string in it that \n'''\n\nt='massachusetts'\na=t[0]\nlist1=[]\nlist2=[]\nfor i in range(len(t)-1):\n if t[i]>t[i+1]:\n list1.append(a)\n a=''\n else:\n a+=t[i+1]\n\nfor i in range(len(list1)):\n list2.append(len(list1[i]))\nm=max(list2)\np=list2.index(m)\nprint(list1[p])\n","sub_path":"omarp.py","file_name":"omarp.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"290334874","text":"\n#This script runs the benchmark over different increment values and saves the graph as pdf\n\nimport os\nimport commands\nimport subprocess\nimport numpy 
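# Editorial note on the Flask/MongoDB app above: add_tag() de-duplicates by
# round-tripping the tag list through a set, which also randomizes tag order.
# MongoDB's $addToSet does the append-if-absent in one atomic update (sketch;
# the 'news' db name and local connection are assumptions, the collection
# layout mirrors the record):
from pymongo import MongoClient
mongodb = MongoClient().news
mongodb.tag.update_one({'file_id': 1},
                       {'$addToSet': {'tags': 'tech'}},
                       upsert=True)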
as np\nimport csv\n\ndef run(st,inc):\n start = st *1024 *1024\n increment = inc *1024 \n values = [x for x in range(start,(start + (increment * 70)),increment)]\n res = []\n for i in values:\n com = \"taskset 0x1 scala -J-Xms\"+str(i)+\" -J-Xmx\"+str(i)+\" -J-XX:+UseSerialGC fragger2.scala\"\n #com = \"scalac fragger.scala && taskset 0x1 java -cp .:/usr/share/java/scala-library.jar -Xms\"+str(i)+\" -Xmx\"+str(i)+\" -XX:-UseGCOverheadLimit fragger\"\n result =subprocess.check_output(com, shell=True)\n print(result)\n res.append(result)\n print(\"Plotting graph\\n\")\n print(res)\n a = [x.split(',') for x in res]\n res = [int(x)/1000000 for x,y in a]\n free = [y for x,y in a]\n values = [x for x in values]\n d = zip(values,res,free)\n name = \"scala_\"+str(inc)+\"k.txt\"\n with open(name,'wb') as myFile:\n wr = csv.writer(myFile, delimiter=',')\n for a,b,c in d:\n e =[a,b ,c]\n wr.writerow(e)\n\n \n\ndef main():\n for i in range(100,1000,50):\n run(29,i)\n\n\n\nmain()\n\n","sub_path":"Fragmentation_tolerance/scala_small_heap/runtest.py","file_name":"runtest.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"277487988","text":"from cs50 import get_string\n\n\ndef main():\n # Get text\n text = get_string(\"Text: \")\n determine_grade(text)\n\n\ndef determine_grade(text):\n letters = 0\n sentences = 0\n # iterating over the text and determining\n # number of letters and sentences\n for char in text:\n if char.isalpha():\n letters += 1\n elif char in {'.', '!', '?'}:\n sentences += 1\n\n # determining number of words by split() function\n words = len(text.split())\n\n # Calculate and print Coleman–Liau index\n Grade = calculate_index(letters, words, sentences)\n print(Grade)\n\n\ndef calculate_index(letters, words, sentences):\n # Calculation\n L = (letters / words) * 100\n S = (sentences / words) * 100\n index = round(0.0588 * L - 0.296 * S - 15.8)\n\n # Determine grade\n if index > 16:\n return 'Grade 16+'\n elif index < 1:\n return 'Before Grade 1'\n else:\n return f\"Grade {index}\"\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"Problem Sets/pset6/readability/readability.py","file_name":"readability.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"618163037","text":"\n# coding=utf-8\n\n__author__ = 'Kevin_Liao'\n__all__ = ['I2CPacket']\n\nfrom driver.crtpstack import CRTPStruct\nfrom driver.logger import Logger\nfrom lib import Library\n\nlogger = Logger(__name__)\nlib = Library()\n\nclass I2CHeader:\n Start = 0x00\n Stop = 0x01\n End = 0x02\n Nack = 0x03\n I2CM1 = 0x04\n I2CM2 = 0x05\n I2CS1 = 0x06\n I2CMT1 = 0x07\n MASK = 0x0F\n \nclass I2CPacket(object):\n def __init__(self, *args, **kargs):\n self.data = []\n self.buffer = []\n self.maxheader = 4\n self.packetsize = CRTPStruct.MaxDataSize\n self.clearI2C()\n\n def setAddr(self, data):\n self.addr8 = data\n if (self.addr8 & 0x01):\n self.rw = \"R\"\n else:\n self.rw = \"W\"\n self.addr7 = (self.addr8 >> 1)\n\n def clearI2C(self):\n self.i2cQueue = {}\n self.i2cQueue[\"start\"] = \"\"\n self.i2cQueue[\"stop\"] = \"\"\n self.i2cQueue[\"addr\"] = 0x00\n self.i2cQueue[\"rw\"] = \"\"\n self.i2cQueue[\"source\"] = \"\"\n self.i2cQueue[\"nack\"] = \"\"\n self.i2cQueue[\"error\"] = \"\"\n self.i2cQueue[\"length\"] = 0\n self.i2cQueue[\"data\"] = []\n self.i2cQueue[\"end\"] = False\n\n def pushI2C(self, data):\n if (len(data) < 
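# Editorial note: a quick worked check of the Coleman-Liau formula used in
# readability.py above (counts are illustrative):
letters, words, sentences = 65, 14, 4
L = letters / words * 100            # ~464.3 letters per 100 words
S = sentences / words * 100          # ~28.6 sentences per 100 words
print(round(0.0588 * L - 0.296 * S - 15.8))  # -> 3, i.e. "Grade 3"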
self.maxheader): return {}\n serial = data[0]\n header = data[1]\n addr8 = data[2]\n size = data[3]\n if len(data) >= self.maxheader:\n self.buffer = data[self.maxheader: self.maxheader + size]\n else:\n self.buffer = []\n \n if size != len(self.buffer): size = len(self.buffer) # 修正實際長度\n \n if lib.format.getBit(header, I2CHeader.Start):\n self.clearI2C()\n self.i2cQueue[\"start\"] = \"S\"\n \n if (addr8 & 0xFE) > 0: # valid i2c addr\n self.i2cQueue[\"addr\"] = (addr8 >> 1)\n if (addr8 & 0x01): self.i2cQueue[\"rw\"] = \"R\"\n else: self.i2cQueue[\"rw\"] = \"W\"\n\n if lib.format.getBit(header, I2CHeader.I2CM1):\n self.i2cQueue[\"source\"] = \"M1\"\n elif lib.format.getBit(header, I2CHeader.I2CM2):\n self.i2cQueue[\"source\"] = \"M2\"\n elif lib.format.getBit(header, I2CHeader.I2CS1):\n self.i2cQueue[\"source\"] = \"S1\"\n elif lib.format.getBit(header, I2CHeader.I2CMT1):\n self.i2cQueue[\"source\"] = \"MT\"\n \n if lib.format.getBit(header, I2CHeader.Nack):\n self.i2cQueue[\"nack\"] = \"*\"\n self.i2cQueue[\"error\"] = \"No response\"\n\n if size > 0:\n self.i2cQueue[\"length\"] = self.i2cQueue[\"length\"] + size\n self.i2cQueue[\"data\"] = self.i2cQueue[\"data\"] + self.buffer # 串聯所有資料\n\n if lib.format.getBit(header, I2CHeader.Stop):\n self.i2cQueue[\"stop\"] = \"P\"\n \n if lib.format.getBit(header, I2CHeader.End):\n self.i2cQueue[\"end\"] = True\n return self.i2cQueue\n else:\n return {}\n\n def createReadI2C(self, mode, addr7, cmd, size):\n self.package = {}\n # create write addr and cmd\n self.serial = 1\n self.header = lib.format.setBit(I2CHeader.Start) + lib.format.setBit(I2CHeader.End)\n if mode == I2CHeader.I2CM1: self.header = self.header + lib.format.setBit(I2CHeader.I2CM1)\n else: self.header = self.header + lib.format.setBit(I2CHeader.I2CM2)\n self.addr8 = (addr7 << 1) % 0xFE\n self.length = 1\n self.package[0] = [self.serial, self.header, self.addr8, self.length, cmd]\n # create read length\n self.serial = 1\n self.header = lib.format.setBit(I2CHeader.Start) + lib.format.setBit(I2CHeader.Stop) + lib.format.setBit(I2CHeader.End)\n if mode == I2CHeader.I2CM1: self.header = self.header + lib.format.setBit(I2CHeader.I2CM1)\n else: self.header = self.header + lib.format.setBit(I2CHeader.I2CM2)\n self.addr8 = (addr7 << 1) | 0x01\n self.length = 2\n sizelow = size & 0x00FF\n sizehigh = (size & 0xFF00) >> 8\n self.package[1] = [self.serial, self.header, self.addr8, self.length, sizelow, sizehigh]\n return self.package\n\n def createWriteI2C(self, mode, addr7, cmd, data):\n self.package = {}\n self.databuff = []\n self.databuff.append(int(cmd))\n try:\n self.databuff = self.databuff + data\n except:\n logger.warning(\"data must be list\")\n\n self.data = []\n datasize = self.packetsize - self.maxheader\n loop = len(self.databuff) / datasize\n if (len(self.databuff) % datasize != 0): loop = loop + 1\n for index in range(loop):\n self.serial = index + 1\n if index == 0:\n self.header = lib.format.setBit(I2CHeader.Start)\n if mode == I2CHeader.I2CM1: self.header = self.header + lib.format.setBit(I2CHeader.I2CM1)\n else: self.header = self.header + lib.format.setBit(I2CHeader.I2CM2)\n self.addr8 = (addr7 << 1) % 0xFE\n else:\n self.header = 0x00\n self.addr8 = 0x00\n \n if index == loop - 1:\n self.header = self.header + lib.format.setBit(I2CHeader.Stop) + lib.format.setBit(I2CHeader.End)\n\n local = index * datasize\n self.data = self.databuff[local: local + datasize]\n self.length = len(self.data)\n self.package[index] = [self.serial, self.header, self.addr8, self.length]\n 
self.package[index] = self.package[index] + self.data\n return self.package","sub_path":"source/driver/i2cstack.py","file_name":"i2cstack.py","file_ext":"py","file_size_in_byte":5705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"172078347","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/pyrpl/hardware_modules/pid.py\n# Compiled at: 2017-08-29 09:44:06\nimport numpy as np\nfrom qtpy import QtCore\nfrom ..attributes import FloatProperty, BoolRegister, FloatRegister, GainRegister\nfrom ..modules import SignalLauncher\nfrom . import FilterModule\nfrom ..widgets.module_widgets import PidWidget\n\nclass IValAttribute(FloatProperty):\n \"\"\"\n Attribute for integrator value\n \"\"\"\n\n def get_value(self, obj):\n return float(obj._to_pyint(obj._read(256), bitlength=16)) / 8192\n\n def set_value(self, obj, value):\n \"\"\"set the value of the register holding the integrator's sum [volts]\"\"\"\n return obj._write(256, obj._from_pyint(int(round(value * 8192)), bitlength=16))\n\n\nclass SignalLauncherPid(SignalLauncher):\n update_ival = QtCore.Signal()\n\n def __init__(self, module):\n super(SignalLauncherPid, self).__init__(module)\n self.timer_ival = QtCore.QTimer()\n self.timer_ival.setInterval(1000)\n self.timer_ival.timeout.connect(self.update_ival)\n self.timer_ival.setSingleShot(False)\n self.timer_ival.start()\n\n def _clear(self):\n \"\"\"\n kill all timers\n \"\"\"\n self.timer_ival.stop()\n super(SignalLauncherPid, self)._clear()\n\n\nclass Pid(FilterModule):\n _widget_class = PidWidget\n _signal_launcher = SignalLauncherPid\n _setup_attributes = ['input',\n 'output_direct',\n 'setpoint',\n 'p',\n 'i',\n 'inputfilter',\n 'max_voltage',\n 'min_voltage']\n _gui_attributes = _setup_attributes + ['ival']\n\n def _setup(self):\n \"\"\"\n sets up the pid (just setting the attributes is OK).\n \"\"\"\n pass\n\n _delay = 3\n _PSR = 12\n _ISR = 32\n _DSR = 10\n _GAINBITS = 24\n ival = IValAttribute(min=-4, max=4, increment=8.0 / 65536, doc='Current value of the integrator memory (i.e. pid output voltage offset)')\n setpoint = FloatRegister(260, bits=14, norm=8192, doc='pid setpoint [volts]')\n min_voltage = FloatRegister(292, bits=14, norm=8192, doc='minimum output signal [volts]')\n max_voltage = FloatRegister(296, bits=14, norm=8192, doc='maximum output signal [volts]')\n p = GainRegister(264, bits=_GAINBITS, norm=2 ** _PSR, doc='pid proportional gain [1]')\n i = GainRegister(268, bits=_GAINBITS, norm=2 ** _ISR * 2.0 * np.pi * 8e-09, doc='pid integral unity-gain frequency [Hz]')\n\n @property\n def proportional(self):\n return self.p\n\n @property\n def integral(self):\n return self.i\n\n @property\n def derivative(self):\n return self.d\n\n @property\n def reg_integral(self):\n return self.ival\n\n @proportional.setter\n def proportional(self, v):\n self.p = v\n\n @integral.setter\n def integral(self, v):\n self.i = v\n\n @derivative.setter\n def derivative(self, v):\n self.d = v\n\n @reg_integral.setter\n def reg_integral(self, v):\n self.ival = v\n\n def transfer_function(self, frequencies, extradelay=0):\n \"\"\"\n Returns a complex np.array containing the transfer function of the\n current PID module setting for the given frequency array. The\n settings for p, i, d and inputfilter, as well as delay are aken into\n account for the modelisation. 
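# Editorial note on i2cstack.py above: `loop = len(self.databuff) / datasize`
# relies on Python 2 integer division. A version-safe chunk count is a single
# ceiling division, equivalent to the record's loop-plus-remainder adjustment:
def chunk_count(total_len, chunk_size):
    return (total_len + chunk_size - 1) // chunk_size

assert chunk_count(10, 4) == 3 and chunk_count(8, 4) == 2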
There is a slight dependency of delay\n on the setting of inputfilter, i.e. about 2 extracycles per filter\n that is not set to 0, which is however taken into account.\n\n Parameters\n ----------\n frequencies: np.array or float\n Frequencies to compute the transfer function for\n extradelay: float\n External delay to add to the transfer function (in s). If zero,\n only the delay for internal propagation from input to\n output_signal is used. If the module is fed to analog inputs and\n outputs, an extra delay of the order of 200 ns must be passed as\n an argument for the correct delay modelisation.\n\n Returns\n -------\n tf: np.array(..., dtype=np.complex)\n The complex open loop transfer function of the module.\n \"\"\"\n return Pid._transfer_function(frequencies, p=self.p, i=self.i, d=0, filter_values=self.inputfilter, extradelay_s=extradelay, module_delay_cycle=self._delay, frequency_correction=self._frequency_correction)\n\n @classmethod\n def _transfer_function(cls, frequencies, p, i, filter_values=list(), d=0, module_delay_cycle=_delay, extradelay_s=0.0, frequency_correction=1.0):\n return Pid._pid_transfer_function(frequencies, p=p, i=i, d=d, frequency_correction=frequency_correction) * Pid._filter_transfer_function(frequencies, filter_values=filter_values, frequency_correction=frequency_correction) * Pid._delay_transfer_function(frequencies, module_delay_cycle=module_delay_cycle, extradelay_s=extradelay_s, frequency_correction=frequency_correction)\n\n @classmethod\n def _pid_transfer_function(cls, frequencies, p, i, d=0, frequency_correction=1.0):\n \"\"\"\n returns the transfer function of a generic pid module\n delay is the module delay as found in pid._delay, p, i and d are the\n proportional, integral, and differential gains\n frequency_correction is the module frequency_corection as\n found in pid._frequency_corection\n \"\"\"\n frequencies = np.array(frequencies, dtype=np.complex)\n tf = i / (frequencies * complex(0.0, 1.0)) * np.exp(complex(0.0, -8e-09) * frequency_correction * frequencies * 2 * np.pi)\n tf += p\n delay = 0\n tf *= np.exp(complex(0.0, -1.0) * delay * frequencies * 2 * np.pi)\n return tf\n\n @classmethod\n def _delay_transfer_function(cls, frequencies, module_delay_cycle=_delay, extradelay_s=0, frequency_correction=1.0):\n \"\"\"\n Transfer function of the eventual extradelay of a pid module\n \"\"\"\n delay = module_delay_cycle * 8e-09 / frequency_correction + extradelay_s\n frequencies = np.array(frequencies, dtype=np.complex)\n tf = np.ones(len(frequencies), dtype=np.complex)\n tf *= np.exp(complex(0.0, -1.0) * delay * frequencies * 2 * np.pi)\n return tf\n\n @classmethod\n def _filter_transfer_function(cls, frequencies, filter_values, frequency_correction=1.0):\n \"\"\"\n Transfer function of the inputfilter part of a pid module\n \"\"\"\n frequencies = np.array(frequencies, dtype=np.complex)\n module_delay = 0\n tf = np.ones(len(frequencies), dtype=complex)\n if not isinstance(filter_values, list):\n filter_values = list([filter_values])\n for f in filter_values:\n if f == 0:\n continue\n elif f > 0:\n tf /= 1.0 + complex(0.0, 1.0) * frequencies / f\n module_delay += 2\n elif f < 0:\n tf /= 1.0 + complex(0.0, 1.0) * f / frequencies\n module_delay += 1\n\n delay = module_delay * 8e-09 / frequency_correction\n tf *= np.exp(complex(0.0, -1.0) * delay * frequencies * 2 * np.pi)\n return 
tf","sub_path":"pycfiles/pyrpl-0.9.3.6-py2.7/pid.py","file_name":"pid.py","file_ext":"py","file_size_in_byte":7253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"69207028","text":"import socket\nimport threading\nimport struct\n\nhost = '127.0.0.1'\nport = 55343\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.bind((host, port))\nprint(\"Server Started.\")\nipList = []\ndataList = []\naddrList = []\n\n \ndef send(addr):\n print('loop send')\n global ipList\n global dataList\n global addrList\n global s\n dataBufferList = dataList[:]\n del dataBufferList[addrList.index(addr)]\n print(addr)\n s.sendto(bytes(len(dataBufferList)), addr)\n for data in dataBufferList:\n s.sendto(data, addr)\n print(dataList)\ndef main():\n global ipList\n global dataList\n global addrList\n print(\"waiting\")\n while True:\n print('recieving')\n data, addr = s.recvfrom(1024)\n ip, port = addr\n if ip in ipList:\n \n ipIndex = ipList.index(ip)\n dataList[ipIndex] = data\n addrList[ipIndex] = data\n else:\n ipList.append(ip)\n addrList.append(addr)\n dataList.append(data)\n for addr in addrList:\n TS = threading.Thread(target=send, args=(addr,))\n #TR.start() \n TS.start()\n #TR = threading.Thread(target=recieve)\n\n if data == b'disconnect':\n print('disconnected')\n \n del dataList[ipList.index(ip)]\n ipList.remove[ipList.index(ip)]\n addrList.remove(addr)\n s.close\nmain()\n","sub_path":"Server2.py","file_name":"Server2.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"8577501","text":"#Link to problem:\n#https://www.urionlinejudge.com.br/judge/pt/problems/view/1089\n\nclass oscii:\n def __init__(self,init):\n self.cont = 0\n if init:\n self.subindo = True\n self.descendo = False\n else:\n self.subindo = False\n self.descendo = True\n def mudou(self):\n if self.subindo:\n self.subindo = False\n self.descendo = True\n else:\n self.subindo = True\n self.descendo = False\n self.cont = self.cont + 1\n\n def atual(self,n1,n2):\n if self.subindo:\n if int(n1) > int(n2):\n self.mudou()\n else:\n if int(n1) < int(n2):\n self.mudou()\n\n def getCont(self):\n return self.cont\n\ndef setList(lista):\n newlist = []\n for i in lista:\n newlist.append(int(i))\n return newlist\n\nwhile True:\n n = int(input())\n if n == 0:\n exit(0)\n lista = input().split(\" \")\n lista.append(lista[0])\n lista = setList(lista)\n obj = oscii(lista[0] < lista[1])\n\n for i in range(len(lista)):\n if i < len(lista) - 1:\n obj.atual(lista[i],lista[i + 1])\n i = 0\n if (lista[0] > lista[1]) & (lista[n - 1] > lista[n]):\n i = i - 1\n if (lista[0] < lista[1]) & (lista[n - 1] < lista[n]):\n i = i - 1\n\n print( obj.getCont() + 1 + i)\n","sub_path":"python/1089.py","file_name":"1089.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"511989135","text":"from torch.autograd import Variable\nfrom bi_lstm import BiLstm\nfrom abstract_blse import Abstract_Blse\n\n\nclass RNN_BLSE(Abstract_Blse):\n def __init__(self, src_vecs, trg_vecs, pdataset,\n cdataset, trg_dataset,\n projection_loss='mse',\n output_dim=4,\n hidden_size=800,\n n_layers=2,\n batch_size=16,\n to_cuda=True,\n src_syn1=None, src_syn2=None, src_neg=None,\n trg_syn1=None, trg_syn2=None, trg_neg=None,\n ):\n super(RNN_BLSE, self).__init__(src_vecs, trg_vecs, pdataset,\n cdataset, trg_dataset,\n projection_loss,\n output_dim,\n 
hidden_size,\n n_layers,\n batch_size,\n to_cuda,\n src_syn1, src_syn2, src_neg,\n trg_syn1, trg_syn2, trg_neg,\n )\n self.padding_idx = 0\n self.lstm = BiLstm(hidden_size, src_vecs.vector_size, output_dim, n_layers,\n self.batch_size, self.padding_idx, to_cuda)\n\n def forward(self, input_text, text_lens, proj_X, proj_Y):\n embedded_words = self.semb(Variable(input_text))\n projected_embedded_words = self.m(embedded_words)\n\n lstm_preds = self.lstm(projected_embedded_words, text_lens)\n\n x_proj, y_proj = self.project(proj_X, proj_Y)\n\n return lstm_preds, x_proj, y_proj\n\n def forward_without_proj(self, input_text, text_lens, src_lang):\n if src_lang:\n embedded_words = self.semb(Variable(input_text))\n projected_embedded_words = self.m(embedded_words)\n else:\n\n embedded_words = self.temb(Variable(input_text))\n projected_embedded_words = self.mp(embedded_words)\n\n lstm_preds = self.lstm(projected_embedded_words, text_lens)\n\n return lstm_preds\n","sub_path":"rnn_blse.py","file_name":"rnn_blse.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"466260183","text":"from scipy import sparse\nimport pickle\nimport spacy\nfrom os import listdir\nimport numpy as np\nimport pandas as pd\nfrom elasticsearch import Elasticsearch\n\nes = Elasticsearch([{'host': \"localhost\", 'port': \"9200\"}])\n\nwith open('relevances_dict_tokens.pkl', 'rb') as handle:\n lexicon = pickle.load(handle)\n handle.close()\nprint(len(lexicon))\n\n\nrelevances_list = []\nreputations_list = []\npapers_list = []\n\ndef get_document_relevance(dir_path):\n\n # Load English model\n # install if needed: python -m spacy download en\n nlp = spacy.load('en')\n # nlp = spacy.load('en', disable=['parser','textcat','ner'])\n\n # here we have a list with the documents and their terms with the frequencies as dictionaries\n txd_data=[]\n terms_list=[]\n documents_list = []\n documents_reputations = []\n valid_pos = [\"NOUN\"]\n\n i=0\n print(dir_path)\n for file in listdir(dir_path):\n valid_tokens = 0\n # print(file,i)\n doc_score = 0\n i+=1\n if i == 20000:\n break\n try:\n fr = open(dir_path+\"/\" + file)\n text = fr.read()\n fr.close()\n doc = nlp(text)\n # TOKENS ########################################################\n for token in doc:\n if token.pos_ in valid_pos and token.lemma_ in lexicon:\n doc_score += lexicon[token.lemma_]\n valid_tokens += 1\n except:\n pass\n print(\"score:\", doc_score)\n if valid_tokens != 0:\n doc_score = doc_score / valid_tokens\n print(\"ponderated score: \",doc_score)\n relevances_list.append(doc_score)\n reputation = float(file.split(\"_reputation_\")[1])\n reputations_list.append(reputation)\n print(reputation)\n papers_list.append(file)\n es.index(index=\"scores_vs_reputations_ponderated\", doc_type='score', body={\"doc\":file,\"lex_score\":doc_score,\"reputation\":reputation})\n\nget_document_relevance(\"/media/adrian/Data/TFM/summaries_papers_medlab_2015-2017\")\n\nd = {'relevance': relevances_list, 'reputation': reputations_list}\ndf = pd.DataFrame(data=d)\n\ndf.to_csv(\"relevances.csv\", sep='\\t', encoding='utf-8')","sub_path":"lexicon_validation.py","file_name":"lexicon_validation.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"177201171","text":"import random\n \ndef start_game():\n dashes = \"-\" * 36\n print(\"\\n{}\\nWelcome to the Number Guessing Game!\\n{}\\n\".format(dashes, 
dashes))\n play_again = \"y\"\n high_score = 10\n while play_again.lower() == \"y\":\n number = random.randint(1,10)\n guess_count = 1\n guess = None\n while guess != number:\n guess = input(\"Guess a number between 1-10. \")\n try: \n guess = int(guess)\n except ValueError:\n print(\"Invalid entry. Please enter a numeral value between 1-10.\")\n else:\n if not (1 <= guess <= 10):\n print(\"Your guess must be between 1-10. Please try again. \")\n elif guess == number:\n print(\"You got it! It took you {} guesses. Thanks for playing!!\".format(guess_count))\n else:\n word = 'lower' if guess > number else 'higher'\n print(\"It's {}!! Guess again: \".format(word))\n guess_count += 1\n if guess_count < high_score:\n high_score = guess_count\n play_again = input(\"\\nWould you like to play again? Y/N \").lower()\n if play_again == \"y\":\n print(\"\\nThe high score so far is {}. Can you beat it?\\n\".format(high_score))\n else:\n dashes2 = (\"-\" * 47)\n print(\"\\n{}\\nThank you for playing! The game is now closing.\\n{}\\n\".format(dashes2,dashes2))\n\nstart_game()","sub_path":"guessing_game.py","file_name":"guessing_game.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"563346060","text":"from Cryptodome.Hash import SHA256\nimport random\n\nk = \"evilgenius\"\nm = \"Your friend Mallory changed her phone number to 02%d\" % random.randint(1000,10000)\nm_dash = \"Alice sent a message to your wall -- 'I hate you!' (post id: %d)\"\nm = m + \"%d\"\nprint(\"Original Message\")\nprint(m)\nM1, M2 = {}, {}\n\nwhile True:\n m1 = m % random.randint(1,1000);\n m2 = m_dash % random.randint(1,1000);\n h1 = SHA256.new(m1.encode(\"ascii\")).hexdigest()\n h2 = SHA256.new(m1.encode(\"ascii\")).hexdigest()\n if (h1 in M2) or (h2 in M1):\n print(\"Message1\")\n print(h2,M1[h2])\n print(\"Message2\")\n print(h1,M2[h2])\n break\n else:\n M1[h1] = m1\n M2[h2] = m2\n","sub_path":"Labs/Lab1/birthday_attack.py","file_name":"birthday_attack.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"637956983","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 1 13:59:01 2018\n\n@author: Pascal Meers\n\"\"\"\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Churn_Modelling.csv')\nX = dataset.iloc[:, 3:13].values\ny = dataset.iloc[:, 13].values\n\n# Encoding categorical data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X_1 = LabelEncoder()\nX[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])\nlabelencoder_X_2 = LabelEncoder()\nX[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])\nonehotencoder = OneHotEncoder(categorical_features = [1])\nX = onehotencoder.fit_transform(X).toarray()\nX = X[:, 1:]\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n# Importing the Keras libraries and packages\nimport keras\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom keras.models import Sequential\nfrom 
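# Editorial note on birthday_attack.py above: h2 is computed from m1, so both
# digests are always equal and the loop "collides" on the first pass for the
# wrong reason; it should hash m2. Even fixed, a full SHA-256 birthday
# collision is astronomically unlikely, so demos truncate the digest (sketch):
from Cryptodome.Hash import SHA256

def short_digest(msg, hexchars=8):
    # 8 hex chars = 32 bits, so ~2**16 trials expected before a collision
    return SHA256.new(msg.encode("ascii")).hexdigest()[:hexchars]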
keras.layers import Dense\nfrom keras.layers import Dropout\n\ndef build_classifier(optimizer, rate):\n # Initialising the ANN\n classifier = Sequential()\n # Adding the input layer and the first hidden layer\n classifier.add(Dense(units = 6, kernel_initializer = 'glorot_uniform', activation = 'relu', input_dim = 11))\n # Adding dropout to disable a certain percentage of the neurons\n classifier.add(Dropout(rate = rate))\n # Adding the second hidden layer\n classifier.add(Dense(units = 6, kernel_initializer = 'glorot_uniform', activation = 'relu'))\n # Adding the third hidden layer\n classifier.add(Dense(units = 6, kernel_initializer = 'glorot_uniform', activation = 'relu'))\n # Adding the output layer\n classifier.add(Dense(units = 1, kernel_initializer = 'glorot_uniform', activation = 'sigmoid'))\n # Compiling the ANN\n classifier.compile(optimizer = optimizer, loss = 'binary_crossentropy', metrics = ['accuracy'])\n return classifier\n\nclassifier = KerasClassifier(build_fn = build_classifier)\n\n# parameters to train on fine tune these params to your needs. Training will take several hours depending on the amount of params.\nparameters = {'batch_size': [25, 32],\n 'epochs': [100, 500],\n 'optimizer': ['adam', 'rmsprop'],\n 'rate': [0.1, 0.2, 0.3]}\n\ngrid_search = GridSearchCV(estimator = classifier, param_grid = parameters, scoring = 'accuracy', cv = 10)\n\ngrid_search = grid_search.fit(X_train, y_train)\n\n# Gets the best parameters used in the training model based on the highest accuracy returned\nbest_parameters = grid_search.best_params_\n\n# Gets the best accuracy \nbest_accuracy = grid_search.best_score_\n\n# Predicting a single new observation\n\"\"\"Predict if the customer with the following informations will leave the bank:\nGeography: France\nCredit Score: 600\nGender: Male\nAge: 40\nTenure: 3\nBalance: 60000\nNumber of Products: 2\nHas Credit Card: Yes\nIs Active Member: Yes\nEstimated Salary: 50000\"\"\"\nnew_prediction = classifier.predict(sc.transform(np.array([[0.0, 0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]])))\nnew_prediction = (new_prediction > 0.5)\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, new_prediction)\n\n\n\n\n","sub_path":"ann.py","file_name":"ann.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"451077026","text":"enter = input(\"Enter a list of numbers seperated by spaces: \");\r\nfive = list(map(int,enter.split(' ')));\r\n\r\nmax = five[0];\r\n\r\nfor x in five:\r\n if x > max:\r\n max = x;\r\n\r\nprint (\"The largest number you inputed is \"+str(max));\r\n","sub_path":"findmax.py","file_name":"findmax.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"601998795","text":"\"\"\"Test error handling.\n\nIncludes tests for :py:class:`windspharm.standard.VectorWind`,\n:py:class:`windspharm.iris.VectorWind` and\n:py:class:`windspharm.cdms.VectorWind`.\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom nose import SkipTest\nfrom nose.tools import raises\nfrom unittest import skipIf\nimport numpy as np\nimport numpy.ma as ma\ntry:\n import cdms2\nexcept ImportError:\n pass\ntry:\n import iris\nexcept ImportError:\n pass\n\nimport windspharm\n\nfrom .reference import reference_solutions\n\n\nclass TestErrorsStandard(object):\n \"\"\"Tests for error handling in the :py:mod:`numpy` interface.\"\"\"\n\n 
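# Editorial note on ann.py above: the final confusion_matrix compares the full
# y_test against new_prediction, a single-row prediction, which raises a
# length-mismatch error. The usual pattern evaluates the test split (sketch,
# reusing the script's fitted grid_search and scaled X_test):
#   y_pred = grid_search.predict(X_test) > 0.5
#   cm = confusion_matrix(y_test, y_pred)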
@raises(ValueError)\n def test_missing_values(self):\n \"\"\"missing values raise an error?\"\"\"\n ref = reference_solutions('standard')\n u = ref['uwnd']\n v = ref['vwnd']\n mask = np.empty(u.shape, dtype=np.bool)\n mask[:] = False\n mask[1, 1] = True\n u = ma.array(u, mask=mask, fill_value=1.e20)\n v = ma.array(v, mask=mask, fill_value=1.e20)\n vw = windspharm.standard.VectorWind(u, v)\n\n @raises(ValueError)\n def test_not_a_number(self):\n \"\"\"not-a-number raises an error?\"\"\"\n ref = reference_solutions('standard')\n u = ref['uwnd']\n v = ref['vwnd']\n u[1, 1] = np.nan\n vw = windspharm.standard.VectorWind(u, v)\n\n @raises(ValueError)\n def test_shape_mismatch(self):\n \"\"\"different shape u and v raises an error?\"\"\"\n ref = reference_solutions('standard')\n u = ref['uwnd'][:-1]\n v = ref['vwnd']\n vw = windspharm.standard.VectorWind(u, v)\n\n @raises(ValueError)\n def test_rank(self):\n \"\"\"incorrect rank raises an error?\"\"\"\n ref = reference_solutions('standard')\n u = ref['uwnd'][..., np.newaxis, np.newaxis]\n v = ref['vwnd'][..., np.newaxis, np.newaxis]\n vw = windspharm.standard.VectorWind(u, v)\n\n @raises(ValueError)\n def test_gridtype(self):\n \"\"\"invalid grid type raises and error?\"\"\"\n ref = reference_solutions('standard')\n vw = windspharm.standard.VectorWind(\n ref['uwnd'], ref['vwnd'], gridtype='curvilinear')\n\n @raises(ValueError)\n def test_shape_invalid(self):\n \"\"\"invalid shape raises an error?\"\"\"\n ref = reference_solutions('standard')\n u = ref['uwnd'][np.newaxis].repeat(2, axis=0)\n v = ref['vwnd'][np.newaxis].repeat(2, axis=0)\n vw = windspharm.standard.VectorWind(u, v)\n\n\n@skipIf('cdms' not in dir(windspharm) or 'cdms2' not in dir(),\n 'library component (cdms2) not available')\nclass TestErrorsCDMS(object):\n\n @raises(TypeError)\n def test_non_cdms_variables(self):\n \"\"\"inputs not cdms variables raises an error?\"\"\"\n ref = reference_solutions('standard')\n vw = windspharm.cdms.VectorWind(ref['uwnd'], ref['vwnd'])\n\n @raises(ValueError)\n def test_dimension_order(self):\n ref = reference_solutions('cdms')\n u = ref['uwnd'].reorder('yx')\n v = ref['vwnd'].reorder('xy')\n vw = windspharm.cdms.VectorWind(u, v)\n\n @raises(ValueError)\n def test_lat_lon_grid(self):\n \"\"\"unable to find lat/lon grid raises an error?\"\"\"\n ref = reference_solutions('cdms')\n u = ref['uwnd']\n v = ref['vwnd']\n axes = u.getAxisList()\n unknown = cdms2.createAxis(axes[0][:], id='unknown')\n axes[0] = unknown\n u.setAxisList(axes)\n vw = windspharm.cdms.VectorWind(u, v)\n\n\n@skipIf('iris' not in dir(windspharm) or 'iris' not in dir(),\n 'library component (iris) not available')\nclass TestErrorsIris(object):\n\n @raises(TypeError)\n def test_non_iris_cubes(self):\n ref = reference_solutions('standard')\n vw = windspharm.iris.VectorWind(ref['uwnd'], ref['vwnd'])\n\n @raises(ValueError)\n def test_dimension_order(self):\n ref = reference_solutions('iris')\n u = ref['uwnd']\n v = ref['vwnd']\n v.transpose([1, 0])\n vw = windspharm.iris.VectorWind(u, v)\n\n @raises(ValueError)\n def test_lat_lon_grid(self):\n ref = reference_solutions('iris')\n u = ref['uwnd']\n v = ref['vwnd']\n unknown = u.coord('latitude').rename('unknown')\n vw = windspharm.iris.VectorWind(u, v)\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"lib/windspharm/tests/test_error_handling.py","file_name":"test_error_handling.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
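# Editorial note on the windspharm tests above: np.bool was deprecated in
# NumPy 1.20 and removed in 1.24; the builtin bool dtype is the drop-in
# replacement for the mask construction:
import numpy as np
mask = np.empty((4, 5), dtype=bool)   # was: dtype=np.bool
mask[:] = False
mask[1, 1] = True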
+{"seq_id":"154169480","text":"#!/usr/bin/python\nfrom optparse import OptionParser\n\nimport chiron\n\ndef init_match_engine():\n match_engine = chiron.MatchEngine()\n add_default_fetchers(match_engine)\n add_default_matchers(match_engine)\n return match_engine\n\ndef add_default_classes(match_engine):\n match_engine.add_classes([\n 'broder-test', 'geofft-test', 'adehnert-test',\n 'linerva', 'debathena', 'undebathena', 'consult',\n 'sipb', 'sipb-auto', 'scripts', 'barnowl', 'zephyr-dev', 'xvm', 'chiron', 'mirrors',\n 'geofft', 'lizdenys', 'jdreed', 'axs', 'adehnert', 'achernya', 'leee', 'kcr', 'jesus', 'nelhage', 'csvoss', 'shulinye',\n 'assassin',\n 'shank',\n 'remit', 'asa', 'esp',\n ])\n\ndef add_default_fetchers(match_engine):\n match_engine.add_fetchers({\n 'RFC': chiron.fetch_rfc,\n 'CVE': chiron.fetch_cve,\n 'Launchpad': chiron.fetch_launchpad,\n 'Debian': chiron.fetch_debbugs('http://bugs.debian.org'),\n 'DSA': chiron.fetch_dsa,\n 'Chiron': chiron.fetch_github('sipb', 'chiron'),\n 'zcommit': chiron.fetch_github('sipb', 'zcommit'),\n 'Zulip': chiron.fetch_github('zulip', 'zulip'),\n 'RHBZ': chiron.fetch_bugzilla('https://bugzilla.redhat.com'),\n 'pag-screen': chiron.fetch_github('sipb', 'pag-screen'),\n 'Mosh': chiron.fetch_github('keithw', 'mosh'),\n 'Scripts FAQ': chiron.fetch_scripts_faq,\n 'ESP': chiron.fetch_github('learning-unlimited', 'ESP-Website'),\n 'Pokedex': chiron.fetch_pokemon,\n 'MIT Class': chiron.fetch_mit_class,\n 'whats': chiron.fetch_whats,\n 'Bible': chiron.fetch_bible,\n 'XKCD': chiron.fetch_xkcd,\n 'Unicode': chiron.fetch_unicode,\n 'Unicode Character': chiron.fetch_unicode_char,\n 'Airport': chiron.fetch_airport,\n 'Assassin': chiron.deal_with_assassin,\n 'SCIENCE': chiron.invoke_science,\n 'Debothena Test': chiron.invoke_debothena,\n 'Puzzle Editing': chiron.fetch_github('mysteryhunt', 'puzzle-editing'),\n })\n\ndef add_default_matchers(match_engine):\n match_engine.add_matcher('RFC', r'\\bRFC[-\\s:]*#?([0-9]{2,5})\\b')\n match_engine.add_matcher('CVE', r'\\b(CVE-[0-9]{4}-[0-9]{4,7})\\b')\n match_engine.add_matcher('Launchpad', r'\\blp[-\\s:]*#([0-9]{4,8})\\b')\n match_engine.add_matcher('Debian', r'\\bdebian[-\\s:]#([0-9]{4,6})\\b')\n match_engine.add_matcher('DSA', r'\\b(DSA-[0-9-]{4,10})\\b')\n match_engine.add_matcher('Chiron', r'\\bchiron[-\\s:]*#([0-9]{1,5})\\b')\n match_engine.add_matcher('zcommit', r'\\bzcommit[-\\s:]*#([0-9]{1,5})\\b')\n match_engine.add_matcher('Zulip', r'\\bZulip[-\\s:]*#([0-9]{1,6})\\b')\n match_engine.add_matcher('RHBZ', r'\\bRHBZ[-\\s:]#([0-9]{4,7})\\b')\n match_engine.add_matcher('pag-screen', r'\\bpag-screen[-\\s:]*#([0-9]{1,5})\\b')\n match_engine.add_matcher('Mosh', r'\\bmosh[-\\s:]*#([0-9]{1,5})\\b')\n match_engine.add_matcher('Scripts FAQ', r'\\bscripts\\sfaq[-\\s:]*#([0-9]{1,5})\\b')\n match_engine.add_matcher('Scripts FAQ', r'\\bfaq[-\\s:]*#([0-9]{1,5})\\b', classes=['scripts'])\n match_engine.add_matcher('ESP', r'#([0-9]{2,5})\\b(?!-Ubuntu)', classes=['esp'])\n match_engine.add_matcher('ESP', r'\\besp[-\\s:]*#([0-9]{1,5})\\b')\n match_engine.add_matcher('Pokedex', r'\\bpokemon[-\\s:]*#([0-9]{1,3})\\b')\n match_engine.add_matcher('Pokedex', r'#([0-9]{1,3})\\b', classes=['lizdenys'])\n match_engine.add_matcher('MIT Class', r'class\\s([0-9a-z]{1,3}[.][0-9a-z]{1,4})\\b')\n match_engine.add_matcher('MIT Class', r\"what's\\s([0-9a-z]{1,3}[.][0-9a-z]{1,4})\\?\\b\")\n match_engine.add_matcher('MIT Class', r'([0-9a-z]{1,3}[.][0-9]{1,4})\\b', cond=lambda m: m.is_personal())\n match_engine.add_matcher('whats', r'whats 
([0-9a-z,:;-]{2,10})\\b')\n match_engine.add_matcher('Bible', r'Bible\\(([\\w :-]+)\\)')\n match_engine.add_matcher('XKCD', r'\\bxkcd[-\\s:]#([0-9]{1,5})\\b')\n match_engine.add_matcher('Unicode', r'\\bu\\+([0-9a-fA-F]{2,6})\\b')\n match_engine.add_matcher('Unicode Character', r'\\bunicode\\((.)\\)')\n match_engine.add_matcher('Airport', r'\\b([0-9A-Z]{3,4}(?:[.](?:IATA|FAA))?)\\s[Aa]irport\\b', flags=0)\n match_engine.add_matcher('Assassin', r'\\b(combo)\\b', classes=['assassin'])\n match_engine.add_matcher('Assassin', r'\\b(combination)\\b', classes=['assassin'])\n match_engine.add_matcher('SCIENCE', r'^(science)$', classes=['axs'])\n match_engine.add_matcher('Debothena Test', r'\\bdebothena test[-\\s:]*#([0-9]{1,5})\\b')\n match_engine.add_matcher('Puzzle Editing', r'\\bpuzzle[ -]editing[-\\s:]*#([0-9]{1,5})\\b')\n\n match_engine.add_trac('Django', 'https://code.djangoproject.com', classes=[])\n match_engine.add_trac('Debathena', 'http://debathena.mit.edu/trac', classes=['debathena', 'jdreed', ])\n match_engine.add_trac('Linerva', 'http://debathena.mit.edu/trac', classes=['linerva', ])\n match_engine.add_trac('Scripts', 'http://scripts.mit.edu/trac', )\n match_engine.add_trac('XVM', 'http://xvm.scripts.mit.edu/trac', )\n match_engine.add_trac('Barnowl', 'http://barnowl.mit.edu', )\n match_engine.add_trac('Zephyr', 'http://zephyr.1ts.org', classes=['zephyr-dev'])\n match_engine.add_trac('SIPB', 'http://sipb.mit.edu/trac', )\n match_engine.add_trac('Remit', 'http://remit.scripts.mit.edu/trac', )\n match_engine.add_trac('etherpad.mit.edu', 'http://etherpad.scripts.mit.edu/trac', )\n match_engine.add_trac('ASA', 'http://asa.mit.edu/trac', )\n\ndef parse_args():\n usage = ('usage: %prog'\n + ' [--no-personals]'\n + ' [--protocol=zephyr|zulip]'\n + ' [--zulip-rc]'\n + ' [--default-classes]'\n + ' [--class=class ...]'\n )\n parser = OptionParser(usage=usage)\n parser.add_option('--no-personals', dest='no_personals',\n default=False, action='store_true',\n help='Disable replying to personals',\n )\n parser.add_option('-p', '--protocol', dest='protocol', default='zephyr', )\n parser.add_option('--zulip-rc', dest='zuliprc', default=None)\n parser.add_option('--default-classes', dest='default_classes',\n default=False, action='store_true',\n help='Sub to a default set of classes',\n )\n parser.add_option('-c', '--class', dest='classes',\n default=[], action='append',\n help='Sub to additional classes',\n )\n (options, args) = parser.parse_args()\n if len(args) != 0:\n parser.error(\"got %d arguments; expected none\" % (len(args), ))\n if options.protocol not in ('zephyr', 'zulip'):\n parser.error(\"the only supported protocols are zephyr and zulip; you requested %s\" % (options.protocol, ))\n if options.zuliprc and options.protocol != 'zulip':\n parser.error('Protocol must be \"zulip\" if --zulip-rc is provided.')\n if options.protocol != 'zephyr':\n if options.default_classes or options.classes:\n parser.error('Protocol must be \"zephyr\" if --default-classes or --class is provided.')\n return options, args\n\ndef run_with_args(match_engine):\n options, args = parse_args()\n\n match_engine.ignore_personals = options.no_personals\n if options.default_classes:\n add_default_classes(match_engine)\n if options.classes:\n match_engine.add_classes(options.classes)\n\n if options.protocol == 'zephyr':\n import chiron_zephyr as chiron_protocol\n elif options.protocol == 'zulip':\n import chiron_zulip as chiron_protocol\n else:\n raise ValueError\n chiron_protocol.main(match_engine, options)\n\nif 
__name__ == '__main__':\n match_engine = init_match_engine()\n run_with_args(match_engine)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"540576953","text":"# Filename : 1001S02E04_control_flow.py\n# author by : kidswonder\n\n#九九乘数表\nfor i in range(1,10):\n for j in range(1,i+1):\n print('{}*{}={}\\t'. format(j, i, i*j), end='')\n print()\n\n#九九乘数表去除偶数行\ni = 0\nwhile i < 9:\n j = 1\n i += 1\n while j <= i:\n if i % 2 != 0:\n if j < i:\n print(j, '*', i,'=',i*j, end='\\t')\n j += 1\n else:\n print(j, '*', i,'=',i*j, end='\\n')\n j += 1\n else: \n j += 1","sub_path":"exercises/1901050022/1001S02E04_control_flow.py","file_name":"1001S02E04_control_flow.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"121895073","text":"import pathlib\nimport torch.nn.functional as F\nimport numpy as np\nimport re\nimport math\nimport copy\nfrom torchtext import data\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch\nfrom load_trg import get_trg\npathlib.PosixPath = pathlib.WindowsPath\n\n\nclass Embedder(nn.Module):\n \"\"\"\n tầng embedder dùng để vector hoá các câu đầu vào input của tầng decoder\n \"\"\"\n\n def __init__(self, vocab_size, d_model):\n super().__init__()\n self.vocab_size = vocab_size\n self.d_model = d_model\n\n self.embed = nn.Embedding(vocab_size, d_model)\n\n def forward(self, x):\n return self.embed(x)\n\n\nclass PositionalEncoder(nn.Module):\n \"\"\"\n Lớp positional dùng để thêm vị trí của các từ trong câu.\n Nằm sau lớp emdeder\n \"\"\"\n\n def __init__(self, d_model, max_seq_length=200, dropout=0.1):\n super().__init__()\n\n self.d_model = d_model\n self.dropout = nn.Dropout(dropout)\n\n pe = torch.zeros(max_seq_length, d_model)\n\n for pos in range(max_seq_length):\n for i in range(0, d_model, 2):\n pe[pos, i] = math.sin(pos/(10000**(2*i/d_model)))\n pe[pos, i+1] = math.cos(pos/(10000**((2*i+1)/d_model)))\n pe = pe.unsqueeze(0)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n\n x = x*math.sqrt(self.d_model)\n seq_length = x.size(1)\n\n pe = Variable(self.pe[:, :seq_length], requires_grad=False)\n\n if x.is_cuda:\n pe.cuda()\n # cộng embedding vector với pe\n x = x + pe\n x = self.dropout(x)\n\n return x\n# Cơ chế attention trong model\n\n\ndef attention(q, k, v, mask=None, dropout=None):\n \"\"\"\n 1 lớp attention dùng để học các đặc tính có liên quan với nhau nằm ở sau\n lớp positional\n q: batch_size x head x seq_length x d_model\n k: batch_size x head x seq_length x d_model\n v: batch_size x head x seq_length x d_model\n mask: batch_size x 1 x 1 x seq_length\n output: batch_size x head x seq_length x d_model\n \"\"\"\n\n # attention score được tính bằng cách nhân q với k\n d_k = q.size(-1)\n scores = torch.matmul(q, k.transpose(-2, -1))/math.sqrt(d_k)\n\n if mask is not None:\n mask = mask.unsqueeze(1)\n scores = scores.masked_fill(mask == 0, -1e9)\n # chuẩn hóa bằng softmax\n scores = F.softmax(scores, dim=-1)\n\n if dropout is not None:\n scores = dropout(scores)\n\n output = torch.matmul(scores, v)\n return output, scores\n\n# attention(torch.rand(32, 8, 30, 512), torch.rand(32, 8, 30, 512), torch.rand(32, 8, 30, 512)).shape\n# PositionalEncoder(512)(torch.rand(5, 30, 512)).shape\n# tinh multiHeadAttention\n\n\nclass MultiHeadAttention(nn.Module):\n \"\"\"\n Mô hình có nhiều lớp ettention\n \"\"\"\n\n 
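# Editorial note: a minimal shape check for the scaled dot-product attention()
# helper defined in the transformer record above (PyTorch; shapes follow its
# docstring, batch x heads x seq x d_k):
import torch
q = k = v = torch.rand(2, 8, 30, 64)
out, scores = attention(q, k, v)      # attention() as defined in the record
assert out.shape == (2, 8, 30, 64) and scores.shape == (2, 8, 30, 30)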
def __init__(self, heads, d_model, dropout=0.1):\n super().__init__()\n assert d_model % heads == 0\n\n self.d_model = d_model\n self.d_k = d_model//heads\n self.h = heads\n self.attn = None\n\n # tạo ra 3 ma trận trọng số là q_linear, k_linear, v_linear\n self.q_linear = nn.Linear(d_model, d_model)\n self.k_linear = nn.Linear(d_model, d_model)\n self.v_linear = nn.Linear(d_model, d_model)\n\n self.dropout = nn.Dropout(dropout)\n self.out = nn.Linear(d_model, d_model)\n\n def forward(self, q, k, v, mask=None):\n \"\"\"\n q: batch_size x seq_length x d_model\n k: batch_size x seq_length x d_model\n v: batch_size x seq_length x d_model\n mask: batch_size x 1 x seq_length\n output: batch_size x seq_length x d_model\n \"\"\"\n bs = q.size(0)\n # nhân ma trận trọng số q_linear, k_linear, v_linear với dữ liệu đầu vào q, k, v\n # ở bước encode các bạn lưu ý rằng q, k, v chỉ là một\n q = self.q_linear(q).view(bs, -1, self.h, self.d_k)\n k = self.k_linear(k).view(bs, -1, self.h, self.d_k)\n v = self.v_linear(v).view(bs, -1, self.h, self.d_k)\n\n q = q.transpose(1, 2)\n k = k.transpose(1, 2)\n v = v.transpose(1, 2)\n\n # tính attention score\n scores, self.attn = attention(q, k, v, mask, self.dropout)\n\n concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model)\n\n output = self.out(concat)\n return output\n\n# MultiHeadAttention(8, 512)(torch.rand(32, 30, 512), torch.rand(32, 30, 512), torch.rand(32, 30, 512)).shape\n\n\nclass Norm(nn.Module):\n \"\"\"\n lớp norm lize chuẩn hoá dữ liệu \n \"\"\"\n\n def __init__(self, d_model, eps=1e-6):\n super().__init__()\n\n self.size = d_model\n\n # create two learnable parameters to calibrate normalisation\n self.alpha = nn.Parameter(torch.ones(self.size))\n self.bias = nn.Parameter(torch.zeros(self.size))\n\n self.eps = eps\n\n def forward(self, x):\n norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) \\\n / (x.std(dim=-1, keepdim=True) + self.eps) + self.bias\n return norm\n\n\nclass FeedForward(nn.Module):\n \"\"\" Trong kiến trúc của chúng ta có tầng linear \n \"\"\"\n\n def __init__(self, d_model, d_ff=2048, dropout=0.1):\n super().__init__()\n\n # We set d_ff as a default to 2048\n self.linear_1 = nn.Linear(d_model, d_ff)\n self.dropout = nn.Dropout(dropout)\n self.linear_2 = nn.Linear(d_ff, d_model)\n\n def forward(self, x):\n x = self.dropout(F.relu(self.linear_1(x)))\n x = self.linear_2(x)\n return x\n\n\nclass EncoderLayer(nn.Module):\n \"\"\"\n 1 khối encoder gồm input và nhiều lớp attention giúp học được các đặc tính\n \"\"\"\n\n def __init__(self, d_model, heads, dropout=0.1):\n super().__init__()\n self.norm_1 = Norm(d_model)\n self.norm_2 = Norm(d_model)\n self.attn = MultiHeadAttention(heads, d_model, dropout=dropout)\n self.ff = FeedForward(d_model, dropout=dropout)\n self.dropout_1 = nn.Dropout(dropout)\n self.dropout_2 = nn.Dropout(dropout)\n\n def forward(self, x, mask):\n \"\"\"\n x: batch_size x seq_length x d_model\n mask: batch_size x 1 x seq_length\n output: batch_size x seq_length x d_model\n \"\"\"\n\n x2 = self.norm_1(x)\n # tính attention value, các bạn để ý q, k, v là giống nhau\n x = x + self.dropout_1(self.attn(x2, x2, x2, mask))\n x2 = self.norm_2(x)\n x = x + self.dropout_2(self.ff(x2))\n return x\n\n# EncoderLayer(512, 8)(torch.rand(32, 30, 512), torch.rand(32 , 1, 30)).shape\n# 1 khoi decoder\n\n\nclass DecoderLayer(nn.Module):\n \"\"\"\n 1 khối decoder gồm output và các mutiattention giúp học các đặc tính của output\n \"\"\"\n\n def __init__(self, d_model, heads, dropout=0.1):\n super().__init__()\n 
self.norm_1 = Norm(d_model)\n self.norm_2 = Norm(d_model)\n self.norm_3 = Norm(d_model)\n\n self.dropout_1 = nn.Dropout(dropout)\n self.dropout_2 = nn.Dropout(dropout)\n self.dropout_3 = nn.Dropout(dropout)\n\n self.attn_1 = MultiHeadAttention(heads, d_model, dropout=dropout)\n self.attn_2 = MultiHeadAttention(heads, d_model, dropout=dropout)\n self.ff = FeedForward(d_model, dropout=dropout)\n\n def forward(self, x, e_outputs, src_mask, trg_mask):\n \"\"\"\n x: batch_size x seq_length x d_model\n e_outputs: batch_size x seq_length x d_model\n src_mask: batch_size x 1 x seq_length\n trg_mask: batch_size x 1 x seq_length\n \"\"\"\n x2 = self.norm_1(x)\n # multihead attention thứ nhất, chú ý các từ ở target\n x = x + self.dropout_1(self.attn_1(x2, x2, x2, trg_mask))\n x2 = self.norm_2(x)\n\n # masked mulithead attention thứ 2. k, v là giá trị output của mô hình encoder\n x = x + self.dropout_2(self.attn_2(x2, e_outputs, e_outputs, src_mask))\n x2 = self.norm_3(x)\n x = x + self.dropout_3(self.ff(x2))\n return x\n\n# DecoderLayer(512, 8)(torch.rand(32, 30, 512), torch.rand(32, 30, 512), torch.rand(32, 1, 30), torch.rand(32, 1, 30)).shape\n\n\ndef get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n# encoder mo hinh transformer\n\n\nclass Encoder(nn.Module):\n \"\"\"Một encoder có nhiều encoder layer với input là các feature của video\n \"\"\"\n\n def __init__(self, d_model, N, heads, dropout):\n super().__init__()\n self.N = N\n self.pe = PositionalEncoder(d_model, dropout=dropout)\n self.layers = get_clones(EncoderLayer(d_model, heads, dropout), N)\n self.norm = Norm(d_model)\n\n def forward(self, src, mask):\n \"\"\"\n src: batch_size x seq_length\n mask: batch_size x 1 x seq_length\n output: batch_size x seq_length x d_model\n \"\"\"\n # x = self.pe(src)\n x = src\n for i in range(self.N):\n x = self.layers[i](x, mask)\n return self.norm(x)\n# decoder mo hinhh transformer\n\n\nclass Decoder(nn.Module):\n \"\"\"Một decoder có nhiều decoder layer với input là các câu caption qua 1 tầng decoder\n rồi concat với output của encoder làm input của tấng decoder tiếp theo\n \"\"\"\n\n def __init__(self, vocab_size, d_model, N, heads, dropout):\n super().__init__()\n self.N = N\n self.embed = Embedder(vocab_size, d_model)\n self.pe = PositionalEncoder(d_model, dropout=dropout)\n self.layers = get_clones(DecoderLayer(d_model, heads, dropout), N)\n self.norm = Norm(d_model)\n\n def forward(self, trg, e_outputs, src_mask, trg_mask):\n \"\"\"\n trg: batch_size x seq_length\n e_outputs: batch_size x seq_length x d_model\n src_mask: batch_size x 1 x seq_length\n trg_mask: batch_size x 1 x seq_length\n output: batch_size x seq_length x d_model\n \"\"\"\n x = self.embed(trg)\n x = self.pe(x)\n for i in range(self.N):\n x = self.layers[i](x, e_outputs, src_mask, trg_mask)\n return self.norm(x)\n\n# Decoder(232, 512, 6, 8, 0.1)(torch.LongTensor(32, 30).random_(0, 10), torch.rand(32, 30, 512), torch.rand(32, 1, 30), torch.rand(32, 1, 30)).shape\n# mô hình transformer\n\n\nclass Transformer(nn.Module):\n \"\"\" Cuối cùng ghép chúng lại với nhau để được mô hình transformer hoàn chỉnh\n gồm 2 lớp encoder và decoder\n \"\"\"\n\n def __init__(self, trg_vocab, d_model, N, heads, dropout):\n super().__init__()\n self.linear = nn.Linear(2048, d_model)\n self.encoder = Encoder(d_model, N, heads, dropout)\n self.decoder = Decoder(trg_vocab, d_model, N, heads, dropout)\n self.out = nn.Linear(d_model, trg_vocab)\n\n def forward(self, src, trg, src_mask, trg_mask):\n \"\"\"\n src: 
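# Hedged sketch of the get_clones() pattern above: deep-copying a template layer N
# times gives each stacked layer its own independent parameters inside a ModuleList.
import copy
import torch.nn as nn

template = nn.Linear(8, 8)
clones = nn.ModuleList([copy.deepcopy(template) for _ in range(3)])
# Separate storage per clone: training one layer does not move the others.
assert clones[0].weight.data_ptr() != clones[1].weight.data_ptr()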
batch_size x seq_length\n trg: batch_size x seq_length\n src_mask: batch_size x 1 x seq_length\n trg_mask batch_size x 1 x seq_length\n output: batch_size x seq_length x vocab_size\n \"\"\"\n\n ln = self.linear(src)\n e_outputs = self.encoder(ln, src_mask)\n\n d_output = self.decoder(trg, e_outputs, src_mask, trg_mask)\n output = self.out(d_output)\n return output\n\n\ndef nopeak_mask(size, device):\n \"\"\"Tạo mask được sử dụng trong decoder để lúc dự đoán trong quá trình huấn luyện\n mô hình không nhìn thấy được các từ ở tương lai\n \"\"\"\n np_mask = np.triu(np.ones((1, size, size)),\n k=1).astype('uint8')\n np_mask = Variable(torch.from_numpy(np_mask) == 0)\n np_mask = np_mask.to(device)\n\n return np_mask\n\n\ndef create_masks(src, trg, trg_pad, device):\n \"\"\" Tạo mask cho encoder, \n để mô hình không bỏ qua thông tin của các kí tự PAD do chúng ta thêm vào \n \"\"\"\n src_mask = (torch.ones(src.shape[0], 1, src.shape[1]) == 1).cuda()\n if trg is not None:\n trg_mask = (trg != trg_pad).unsqueeze(-2)\n size = trg.size(1) # get seq_len for matrix\n np_mask = nopeak_mask(size, device)\n if trg.is_cuda:\n np_mask.cuda()\n trg_mask = trg_mask & np_mask\n\n else:\n trg_mask = None\n return src_mask, trg_mask\n# Transformer(232, 232, 512, 6, 8, 0.1)(torch.LongTensor(32, 30).random_(0, 10), torch.LongTensor(32, 30).random_(0, 10),torch.rand(32, 1, 30),torch.rand(32, 1, 30)).shape\n\n\ndef init_vars(src, model, TRG, device, k, max_len):\n \"\"\" Tính toán các ma trận cần thiết trong quá trình translation sau khi mô hình học xong\n \"\"\"\n init_tok = TRG.vocab.stoi['']\n\n # tính sẵn output của encoder\n src = src.unsqueeze(0)\n ln = model.linear(src)\n\n src_mask = (torch.ones(ln.shape[0], 1, ln.shape[1]) == 1).cuda()\n\n e_output = model.encoder(ln, src_mask)\n\n outputs = torch.LongTensor([[init_tok]])\n\n outputs = outputs.to(device)\n\n trg_mask = nopeak_mask(1, device)\n # dự đoán kí tự đầu tiên\n out = model.out(model.decoder(outputs,\n e_output, src_mask, trg_mask))\n out = F.softmax(out, dim=-1)\n\n probs, ix = out[:, -1].data.topk(k)\n log_scores = torch.Tensor([math.log(prob)\n for prob in probs.data[0]]).unsqueeze(0)\n\n outputs = torch.zeros(k, max_len).long()\n outputs = outputs.to(device)\n outputs[:, 0] = init_tok\n outputs[:, 1] = ix[0]\n\n e_outputs = torch.zeros(k, e_output.size(-2), e_output.size(-1))\n\n e_outputs = e_outputs.to(device)\n e_outputs[:, :] = e_output[0]\n\n return outputs, e_outputs, log_scores\n\n\ndef k_best_outputs(outputs, out, log_scores, i, k):\n\n probs, ix = out[:, -1].data.topk(k)\n log_probs = torch.Tensor(\n [math.log(p) for p in probs.data.view(-1)]).view(k, -1) + log_scores.transpose(0, 1)\n k_probs, k_ix = log_probs.view(-1).topk(k)\n\n row = k_ix // k\n col = k_ix % k\n\n outputs[:, :i] = outputs[row, :i]\n outputs[:, i] = ix[row, col]\n\n log_scores = k_probs.unsqueeze(0)\n\n return outputs, log_scores\n\n\ndef beam_search(src, model, TRG, device, k, max_len):\n\n outputs, e_outputs, log_scores = init_vars(\n src, model, TRG, device, k, max_len)\n eos_tok = TRG.vocab.stoi['']\n\n src_mask = (torch.ones(1, 1, src.shape[0]) == 1).cuda()\n\n ind = None\n for i in range(2, max_len):\n\n trg_mask = nopeak_mask(i, device)\n\n out = model.out(model.decoder(outputs[:, :i],\n e_outputs, src_mask, trg_mask))\n\n out = F.softmax(out, dim=-1)\n\n outputs, log_scores = k_best_outputs(outputs, out, log_scores, i, k)\n\n # Occurrences of end symbols for all input sentences.\n ones = (outputs == eos_tok).nonzero()\n sentence_lengths = 
torch.zeros(len(outputs), dtype=torch.long).cuda()\n for vec in ones:\n i = vec[0]\n if sentence_lengths[i] == 0: # First end symbol has not been found yet\n sentence_lengths[i] = vec[1] # Position of first end symbol\n\n num_finished_sentences = len([s for s in sentence_lengths if s > 0])\n\n if num_finished_sentences == k:\n alpha = 0.7\n div = 1/(sentence_lengths.type_as(log_scores)**alpha)\n _, ind = torch.max(log_scores * div, 1)\n ind = ind.data[0]\n break\n\n if ind is None:\n\n length = (outputs[0] == eos_tok).nonzero()[0] if len(\n (outputs[0] == eos_tok).nonzero()) > 0 else -1\n return ' '.join([TRG.vocab.itos[tok] for tok in outputs[0][1:length]])\n\n else:\n length = (outputs[ind] == eos_tok).nonzero()[0]\n return ' '.join([TRG.vocab.itos[tok] for tok in outputs[ind][1:length]])\n\n\ndef multiple_replace(dict, text):\n # Create a regular expression from the dictionary keys\n regex = re.compile(\"(%s)\" % \"|\".join(map(re.escape, dict.keys())))\n\n # For each match, look-up corresponding value in dictionary\n return regex.sub(lambda mo: dict[mo.string[mo.start():mo.end()]], text)\n\n\ndef translate_sentence(fea, model, TRG, device, k, max_len):\n \"\"\"Dịch một câu sử dụng beamsearch\n \"\"\"\n model.eval()\n\n sentence = torch.tensor(fea).to(device)\n\n sentence = beam_search(sentence, model, TRG, device, k, max_len)\n\n return multiple_replace({' ?': '?', ' !': '!', ' .': '.', '\\' ': '\\'', ' ,': ','}, sentence)\n\n\nclass tokenize(object):\n\n def __init__(self, lang):\n self.nlp = spacy.load(lang)\n\n def tokenizer(self, sentence):\n sentence = re.sub(\n r\"[\\*\\\"“”\\n\\\\…\\+\\-\\/\\=\\(\\)‘•:\\[\\]\\|’\\!;]\", \" \", str(sentence))\n sentence = re.sub(r\"[ ]+\", \" \", sentence)\n sentence = re.sub(r\"\\!+\", \"!\", sentence)\n sentence = re.sub(r\"\\,+\", \",\", sentence)\n sentence = re.sub(r\"\\?+\", \"?\", sentence)\n sentence = sentence.lower()\n return [tok.text for tok in self.nlp.tokenizer(sentence) if tok.text != \" \"]\n\n\ndef predict_cap(fear):\n opt = {\n 'trg_lang': 'en',\n 'max_strlen': 160,\n 'batchsize': 1500,\n 'device': 'cuda',\n 'd_model': 512,\n 'n_layers': 6,\n 'heads': 8,\n 'dropout': 0.1,\n 'lr': 0.0001,\n 'epochs': 2,\n 'printevery': 100,\n 'k': 5,\n }\n TRG = get_trg()\n device = torch.device(\"cuda\")\n model_tran = Transformer(\n len(TRG.vocab), opt['d_model'], opt['n_layers'], opt['heads'], opt['dropout'])\n state = torch.load(\n 'model_test.pth')\n model_tran.load_state_dict(state['state_dict'])\n model_tran.to(device)\n trans_sent = translate_sentence(\n fear, model_tran, TRG, device, opt['k'], opt['max_strlen'])\n return trans_sent\n\n\n# if __name__ == '__main__':\n# fear = np.load('testv1RN40.npy')\n# cap = predict_cap(fear)\n# print(cap)\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":17868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"532515092","text":"from .myqt import QT\nimport pyqtgraph as pg\nfrom .base import WidgetBase\nimport numpy as np\n\nfrom example.cbnu.utils import get_trigger_times, get_spiketrains, get_interval\n\n\nclass MyViewBox(pg.ViewBox):\n doubleclicked = QT.pyqtSignal()\n\n # noinspection PyPep8Naming\n def mouseDoubleClickEvent(self, ev):\n self.doubleclicked.emit()\n ev.accept()\n\n\nclass PSTH(WidgetBase):\n\n _params = [\n {'name': 'bin_method', 'type': 'list', 'value': 'auto', 'values':\n ['manual', 'auto', 'fd', 'doane', 'scott', 'stone',\n 'rice', 'sturges', 'sqrt']},\n {'name': 'num_bins', 'type': 'int', 
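# Minimal numpy sketch of the nopeak_mask() logic defined above: np.triu with k=1
# marks strictly-future positions, and comparing against 0 turns that into the
# lower-triangular allow-mask the decoder needs during training.
import numpy as np

size = 4
future = np.triu(np.ones((1, size, size)), k=1).astype('uint8')
allow = (future == 0)
print(allow[0].astype(int))  # row i has ones only at columns 0..i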
'value': 100, 'step': 10},\n {'name': 'pre', 'type': 'float', 'value': 1, 'step': 0.5,\n 'suffix': 's', 'siPrefix': True},\n {'name': 'post', 'type': 'float', 'value': 1, 'step': 0.5,\n 'suffix': 's', 'siPrefix': True}]\n\n def __init__(self, controller=None, parent=None):\n WidgetBase.__init__(self, parent, controller)\n\n self.catalogueconstructor = controller.cc\n self.canvas = pg.GraphicsLayoutWidget()\n self.layout = QT.QVBoxLayout()\n self.setLayout(self.layout)\n self.layout.addWidget(self.canvas)\n\n self.trigger_times = get_trigger_times(\n self.catalogueconstructor.cbnu.filepath,\n self.catalogueconstructor.cbnu.trigger_filename)\n self.initialize_plot()\n\n self.tree_params.setWindowTitle(\"PSTH settings\")\n self.params.param('num_bins').setLimits((1, 1e6))\n self.params.param('pre').setLimits((0, 1e3))\n self.params.param('post').setLimits((0, 1e3))\n self.params.param('bin_method').sigTreeStateChanged.connect(\n self.on_method_change)\n self.on_method_change()\n\n def initialize_plot(self):\n\n # Return if there are no triggers.\n if len(self.trigger_times) == 0:\n return\n\n us_per_tick = int(1e6 / self.catalogueconstructor.dataio.sample_rate)\n\n start = int(self.catalogueconstructor.cbnu.config['start_time'] * 1e6)\n spiketrains = get_spiketrains(self.catalogueconstructor, us_per_tick,\n start)\n\n pre = int(self.params['pre'] * 1e6)\n post = int(self.params['post'] * 1e6)\n bin_method = self.params['bin_method']\n num_bins = self.params['num_bins'] if bin_method == 'manual' \\\n else bin_method\n\n histograms = {}\n ylim = 0\n for cluster_label, cluster_trains in spiketrains.items():\n spike_times_section = get_interval(cluster_trains,\n self.trigger_times[0] - pre,\n self.trigger_times[-1] + post)\n\n spike_times_zerocentered = []\n\n for trigger_time in self.trigger_times:\n t_pre = trigger_time - pre\n t_post = trigger_time + post\n\n x = get_interval(spike_times_section, t_pre, t_post)\n if len(x):\n x -= trigger_time\n spike_times_zerocentered += list(x)\n\n cluster_counts, bin_edges = np.histogram(spike_times_zerocentered,\n num_bins)\n histograms[cluster_label] = (bin_edges / 1e6, cluster_counts)\n # Update common plot range for y axis.\n max_count = np.max(cluster_counts)\n if max_count > ylim:\n ylim = max_count\n\n n = 2\n if len(histograms) > n * n:\n print(\"WARNING: Only {} out of {} available PSTH plots can be \"\n \"shown.\".format(n * n, len(histograms)))\n\n viewboxes = []\n for i in range(n):\n for j in range(n):\n if len(histograms) == 0:\n return\n viewboxes.append(MyViewBox())\n plt = self.canvas.addPlot(row=i, col=j, viewBox=viewboxes[-1])\n cluster_label, (x, y) = histograms.popitem()\n color = self.controller.qcolors.get(cluster_label,\n QT.QColor('white'))\n plt.plot(x, y, stepMode=True, fillLevel=0, brush=color)\n txt = pg.TextItem(str(cluster_label), color)\n txt.setPos(0, ylim)\n plt.addItem(txt)\n plt.setYRange(0, ylim)\n plt.setXRange(-pre / 1e6, post / 1e6)\n\n viewboxes[-1].doubleclicked.connect(self.open_settings)\n\n def on_method_change(self):\n if self.params['bin_method'] == 'manual':\n self.params.param('num_bins').show()\n else:\n self.params.param('num_bins').hide()\n self.refresh()\n\n def refresh(self):\n self.canvas.clear()\n self.initialize_plot()\n","sub_path":"tridesclous/gui/psth.py","file_name":"psth.py","file_ext":"py","file_size_in_byte":4822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"333763048","text":"import FWCore.ParameterSet.Config as cms\n\nfrom 
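# Small sketch of the PSTH binning used above: np.histogram with an integer bin
# count returns per-bin counts plus one more edge than bins, which is why the plot
# call above can pass bin_edges directly with stepMode=True. Spike times here are
# invented.
import numpy as np

spikes = np.array([-0.5, -0.1, 0.0, 0.2, 0.2, 0.9])
counts, edges = np.histogram(spikes, 4)
assert len(edges) == len(counts) + 1
print(counts, edges)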
RecoEcal.EgammaClusterProducers.ecalRecHitFlags_cfi import *\nfrom RecoEcal.EgammaClusterProducers.ecalSeverityLevelAlgos_cfi import *\nfrom RecoEcal.EgammaClusterProducers.ecalSeverityLevelFlags_cfi import *\n\ninterestingEleIsoDetId = cms.EDProducer(\"EleIsoDetIdCollectionProducer\",\n recHitsLabel = cms.InputTag(\"ecalRecHit\",\"EcalRecHitsEB\"),\n emObjectLabel = cms.InputTag(\"gsfElectrons\"),\n etCandCut = cms.double(0.0),\n energyCut = cms.double(0.040),\n etCut = cms.double(0),\n outerRadius = cms.double(0.6),\n innerRadius = cms.double(0.0),\n interestingDetIdCollection = cms.string(''),\n\n severityLevelCut = cms.int32(4),\n severityRecHitThreshold = cms.double(5.0),\n spikeIdString = cms.string('kSwissCrossBordersIncluded'),\n spikeIdThreshold = cms.double(0.95),\n\n recHitFlagsToBeExcluded = cms.vint32(\n ecalRecHitFlag_kFaultyHardware,\n ecalRecHitFlag_kPoorCalib,\n# ecalRecHitFlag_kSaturated,\n# ecalRecHitFlag_kLeadingEdgeRecovered,\n# ecalRecHitFlag_kNeighboursRecovered,\n ecalRecHitFlag_kTowerRecovered,\n ecalRecHitFlag_kDead\n ),\n)\n","sub_path":"RecoEgamma/EgammaIsolationAlgos/python/interestingEleIsoDetIdModule_cff.py","file_name":"interestingEleIsoDetIdModule_cff.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"314431552","text":"import re\n\nQList = [\"谁\", \"何\", \"怎\", \"哪\", \"咋\", \"啥\", \"?\", \"?\",\n \"什么\", \"几个\", \"几只\", \"几条\", \"多少\",\n \"请问\", \"有没有\", \"是不是\", \"会不会\", \"请教\"]\n\n\ndef rule_base(seq):\n \"\"\"\n 基于规则的问题检测方法\n :param seq: 待检测的语句\n :return: 表征是否为问题的boolean值\n \"\"\"\n for line in seq.split(\"\\n\"):\n if re.match(r'.*(呢|吗|?|\\?)$', line):\n return True\n for item in QList:\n if line.find(item) != -1:\n return True\n return False\n\n\nif __name__=='__main__':\n print(\"Rule Base Module\")","sub_path":"detectqa/detect_q.py","file_name":"detect_q.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"615931954","text":"from aktos_dcs import *\n\ngpio_list = [i for i in range(2, 27)]\n\ninput_pins = {}\nfor i in gpio_list:\n input_pins['gpio.%d' % i] = i\n\nfor k, v in input_pins.items():\n GPIOInputActor(pin_name=k, pin_number=v, invert=True)\n\nwait_all()\n\n","sub_path":"examples/rpi-test-input.py","file_name":"rpi-test-input.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"562874138","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 20 21:15:04 2020\r\n\r\n@author: SOPHIE\r\n\"\"\"\r\n#导入数据\r\nimport pandas as pd\r\ntrain = pd.read_csv('train.csv')\r\n#print (train.head())\r\n\r\n#转换时间格式\r\ntrain['Datetime'] = pd.to_datetime(train.Datetime, format='%d-%m-%Y %H:%M')\r\ntrain.index = train.Datetime\r\ntrain.drop(['ID','Datetime'],axis=1,inplace=True)\r\n#print(train.head())\r\n\r\n#按照天求采样和\r\ndaily_train = train.resample('D').sum()\r\ndaily_train['ds'] = daily_train.index\r\ndaily_train['y'] = daily_train.Count\r\ndaily_train.drop(['Count'],axis=1,inplace=True)\r\nprint(daily_train.head())\r\n\r\n#预测半年数据\r\nfrom fbprophet import Prophet\r\nm = Prophet(yearly_seasonality=True, seasonality_prior_scale=0.1)\r\nm.fit(daily_train)\r\nfurture = m.make_future_dataframe(periods=213)\r\nforecast = 
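# Quick sketch of the sentence-final question test in rule_base() above: re.match
# anchors at the start of the string, so the leading '.*' is what lets the marker
# alternation match at the end. Example sentences are invented.
import re

pattern = r'.*(呢|吗|?|\?)$'
print(bool(re.match(pattern, "你吃饭了吗")))  # True: ends with a question particle
print(bool(re.match(pattern, "我吃过了")))    # False: no question marker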
m.predict(furture)\r\n#print(forecast)\r\nm.plot(forecast)\r\nm.plot_components(forecast)","sub_path":"Week6.py","file_name":"Week6.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"5455048","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 4 14:45:48 2017\n@author: mz\nto-do:\n \n staircase implementation\n\"\"\"\n\n#imports\nfrom psychopy import visual,core,gui,event\nfrom datetime import datetime\nimport itertools, csv, traceback\nfrom random import shuffle\nimport numpy as np\nfrom copy import copy\nfrom math import sqrt\n\n\nclass Stimuli:\n\n def __init__(self, win, timing):\n self.win = win\n self.timing = timing\n\n self.ready = visual.TextStim(win,'Ready?', color=(1.0,1.0,1.0),units='norm', height=0.08, pos=(0,0),wrapWidth=1)\n self.sure = visual.TextStim(win,'Are you sure? Press Escape to exit, press Enter to resume experiment.',\n color=(1.0,1.0,1.0),units='norm', height=0.07, pos=(0,0),wrapWidth=2)\n \n self.fixation = visual.TextStim(self.win, text='+',\n alignHoriz='center',\n alignVert='center', units='norm',\n pos=(0, 0), height=0.1,\n color=[255, 255, 255], colorSpace='rgb255',\n wrapWidth=3)\n self.probe = visual.TextStim(self.win,text='z: anticlockwise m: clockwise',\n font='Helvetica', alignHoriz='center', alignVert='center',\n units='norm',pos=(0, 0.8), height=0.08,\n color=[255, 255, 255], colorSpace='rgb255',wrapWidth=4)\n self.probe_vs = visual.TextStim(self.win,text='z: target present m: target not present',\n font='Helvetica', alignHoriz='center', alignVert='center',\n units='norm',pos=(0, 0.8), height=0.08,\n color=[255, 255, 255], colorSpace='rgb255',wrapWidth=4)\n self.recall_keymap = {'z': 'anticlockwise', 'm': 'clockwise'}\n self.vs_keymap = {'z': 'yes', 'm': 'no'}\n \n def set_ori(self,obj, angle):#depricated\n an = angle*np.pi/180\n vertice = obj.vertices\n x = vertice[1][0]\n beta = an-np.arctan((.2-x)/.4)\n vertice = [(.2-.2*np.cos(an), .2*np.sin(an)), (.2-np.cos(an)*(.2-x), np.sin(an)*(.2-x)), (.2+sqrt(.4**2+(.2-x)**2)*np.sin(beta), sqrt(.4**2+(.2-x)**2)*np.cos(beta)), (.2-np.cos(an)*(.2-x), np.sin(an)*(.2-x)),(.2+.2*np.cos(an),-.2*np.sin(an))]\n obj.setVertices(vertice)\n \n \n def get_input(self, max_wait=3.0, keylist=None):\n\n key = event.waitKeys(maxWait=max_wait, keyList=keylist,timeStamped=True)\n if key is not None:\n key = key[0][0]\n time = core.getTime()\n\n return (key, time)\n\n def draw_fixation(self):\n \n self.fixation.draw()\n self.win.flip()\n core.wait(self.timing['fixation'])\n self.win.flip()\n\n# c mean N/T similarity 0< c <0.2\n# d1# D1/T similarity 0< d1 =0:\n answer = 'clockwise'\n else:\n answer = 'anticlockwise'\n target_probe.draw()\n self.probe.draw()\n start_time = self.win.flip()\n key, resp_time = self.get_input(max_wait=self.timing['recall'],\n keylist=self.recall_keymap.keys() + ['escape'])\n self.win.flip()\n if key is None:\n return ('timeout', answer, resp_time-start_time)\n elif key == 'escape':\n self.sure.draw()\n self.win.flip()\n k, r =self.get_input(max_wait=float('inf'),keylist=['escape','return'])\n if k=='escape':\n print('quiting experiment')\n raise Exception('quiting')\n else:\n pass\n else:\n return (self.recall_keymap[key], answer, resp_time-start_time)#return response, correct answer &RT\n\n def text_and_stim_keypress(self, text, stim=None,pos=(0,-0.8), max_wait=float('inf')):\n if stim is not None:\n if type(stim) == list:\n map(lambda x: x.draw(), stim)\n else:\n 
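# Hedged sketch of the Week6.py resampling step above: hourly counts are summed to
# daily totals and renamed to Prophet's expected ds/y columns. The index and data
# here are synthetic.
import pandas as pd

idx = pd.date_range("2014-01-01", periods=48, freq="H")
hourly = pd.DataFrame({"Count": 1}, index=idx)
daily = hourly.resample("D").sum()
daily["ds"] = daily.index
daily["y"] = daily["Count"]
print(daily[["ds", "y"]])  # two rows of 24 counts each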
stim.draw()\n display_text = visual.TextStim(self.win, text=text,\n font='Helvetica', alignHoriz='center',\n alignVert='center', units='norm',\n pos=pos, height=0.08,\n color=[255, 255, 255], colorSpace='rgb255',\n wrapWidth=3)\n display_text.draw()\n self.win.flip()\n key = event.waitKeys(maxWait=max_wait)\n if key is not None:\n if key[0] == 'escape':\n print('quiting experiment')\n raise Exception('quiting')\n self.win.flip()\n\n def text(self,text,image=None, max_wait=3.0):\n display_text = visual.TextStim(self.win, text=text,\n font='Helvetica', alignHoriz='center',\n alignVert='center', units='norm',\n pos=(0,0), height=0.1,\n color=[255, 255, 255], colorSpace='rgb255',\n wrapWidth=3)\n \n if image is not None:\n display_image = visual.ImageStim(win,image='cryingface.jpg',pos=(0.22,0), size=0.08,units='norm')\n display_image.draw()\n display_text.draw()\n self.win.flip()\n key = event.waitKeys(maxWait=max_wait)\n if key is not None:\n if key[0] == 'escape':\n print('quiting experiment')\n raise Exception('quiting')\n self.win.flip()\n\n\ndef blockbreak(win, num, total):#create a break in between trials and present progress message\n msg1 = visual.TextStim(win,'Well done!',color=(1.0,1.0,1.0),units='height', height=0.07, pos=(0,0.1),wrapWidth=1)\n msg2 = visual.TextStim(win,str(num)+'/%d block completed'%(total),color=(1.0,1.0,1.0),units='norm', height=0.07, pos=(0,0),wrapWidth=2)\n msg3 = visual.TextStim(win,'Press Return to continue',color=(1.0,1.0,1.0),units='norm', height=0.07, pos=(0,-0.1),wrapWidth=2)\n msg1.draw()\n msg2.draw()\n msg3.draw()\n win.flip()\n event.waitKeys(maxWait=float('inf'), keyList=['return'],timeStamped=False)\n win.flip()\n\ndef get_window(width):\n win = visual.Window([width,width],\n winType='pyglet', monitor=\"testMonitor\",fullscr=True, colorSpace='rgb',color=(0,0,0),units='height')\n event.Mouse(visible=False)\n return win\n\ndef autoDraw_on(stim):\n stim.autoDraw = True\n return stim\n\ndef autoDraw_off(stim):\n stim.autoDraw = False\n return stim\n\n#to be used after trialGen, not use it after trialGen_ori\ndef stimulirule(parameter, yrule):#parameters format: x1 y1 x2 y2, yrule to control orientation to be <90 degree\n triallist=[]; P = []\n for i in range(parameter.shape[0]):\n if abs(parameter[i,1]) <= yrule and abs(parameter[i,3])<=yrule and parameter[i,0]!=parameter[i,2]:\n triallist.append(parameter[i,:])\n P.extend([abs(parameter[i,0]-parameter[i,2])+abs(parameter[i,1]-parameter[i,3])])\n P = [round(x,2) for x in P]\n P,indice = np.unique(P,return_index=True)\n triallist = np.array(triallist)[indice]\n return P, np.array(triallist)\n\n#ideally this function takes 2 similarity index directly \n#and generate eligible trials.\n# add rules:\n #x1 x2 should not be same\n #y1 y2 in range (0, 0.5)\n\"\"\"\ndepricated in this version of script, using trialGen_ori instead\n\"\"\"\ndef trialGen(c,p): #c is NT disimilarity, p is NN disimilarity #02*c or p>(4-2*c) or p>2*c-1 or c>2:\n warnings.warn('c or p is not in acceptable range')\n return [0]\n else:\n d1= c-p/2; d2 = c+p/2\n #d1= c+p/2; d2 = c-p/2\n if d1<=1: \n x1 = np.arange(0.5,d1,0.05)#make sure x1(bar position) is far enough from target\n \n else: \n x1 = np.arange(0.5,1,0.05)\n y1 = d1-x1\n \n if d2<=1: \n x2_1 = np.arange(0.5,d2,0.05)\n x2_2 = -1 * x2_1\n else: \n x2_1 = np.arange(0.5,1,0.05) #round(random.uniform(0.5,1),2) * 1\n x2_2 = -1 * x2_1\n y2_1 = d2 - (x2_1)\n y2_2 = -1 * y2_1\n for x,y in zip(x1,y1):\n for i in range(len(x2_1)):\n temp = 
[abs(x-x2_1[i])+abs(y-y2_1[i]),abs(x-x2_1[i])+abs(y-y2_2[i]),abs(x-x2_2[i])+abs(y-y2_1[i]),abs(x-x2_2[i])+abs(y-y2_2[i])]\n P.extend(temp)\n triallist.append([x,y,x2_1[i],y2_1[i]])\n triallist.append([x,y,x2_1[i],y2_2[i]])\n triallist.append([x,y,x2_2[i],y2_1[i]])\n triallist.append([x,y,x2_2[i],y2_2[i]])\n P = [round(x,2) for x in P]\n P,indice = np.unique(P,return_index=True)\n #P.sort()\n triallist = np.array(triallist)\n # triallist = triallist[indice]\n return P, indice, triallist\n \n\n#c is constant, c=|y1|+|y2|\n#randomize bar position, one orientation difference value can have multiple composition\ndef trialGen_ori(c,limit, step): #randomize bar position #0 max(d2)\n for y in d2:\n y1 = np.array([(-1*c-y)/2,(c-y)/2, round(np.random.uniform(-0.4,0.4),2),round(np.random.uniform(-0.4,0.4),2)]) #first 2 elements allows the same c, 3rd one doesn't\n y2 = y1+y\n x1 = np.round(np.random.uniform(0.5,0.7,4),1) #bar position is jittered but always >0.5\n x2 = np.round(np.random.uniform(0.5,0.7,4),1) * np.array([-1,-1,-1,-1])\n for each in zip(x1,y1,x2,y2):\n triallist.append(each)\n p.append(abs(each[1]-each[3]))\n C.append(abs(each[1])+abs(each[3]))\n triallist = np.array(triallist)\n return np.round(C,1), np.round(p,2), triallist\n\n\ndef run_vs(win, fi=None,setSize=3):\n# (expname, sid, numblocks, speed, mark_mode, input_mode) = get_settings()\n win.flip()\n timing = {'fixation': 0.8, #set timing\n 'search': float('inf'),\n 'blank': 2,\n 'recall': 4 ,\n 'intertrial': 1.0}\n\n stim = Stimuli(win, timing)\n\n stim.text_and_stim_keypress('Welcome to the attention and working memory study',pos=(0,0.7),\n stim=stim.ready)\n stim.text_and_stim_keypress('This is a visual search task',pos=(0,0.7),\n stim=stim.fixation)\n\n# a,b,c = trialGen(1,0.1)\n# p, parameters = stimulirule(c,0.45) #0.5 = +/- 90 degree\n c, p, parameters = trialGen_ori(0.5, 0.45, 0.05) #c is mean NT disimilarity(only y), p is NN(only y), parameters has x1,y1,x2,y2\n triallist=[]\n for i in range(len(parameters)):\n trial = {}\n trial['x1'] =parameters[i][0]\n trial['y1'] = parameters[i][1]\n trial['x2'] = parameters[i][2]\n trial['y2'] = parameters[i][3]\n trial['p'] = p[i]\n trial['c'] = c[i]\n triallist.append(trial)\n trial_list1 = []; trial_list2=[]\n for i,trial in enumerate(triallist): #half trials have target, half do not\n trial['target']=1\n trial_list1.append(copy(trial))\n for i,trial in enumerate(triallist): #half trials have target, half do not\n trial['target']=0\n trial_list2.append(copy(trial))\n \n trial_list=trial_list1*6+ trial_list2*3 #inclement trial numbers\n shuffle(trial_list)\n print (len(trial_list))\n # run trials\n for i, trial in enumerate(trial_list):\n try:\n resp, answer, rt = stim.search_array(trial,condition='vs',target= trial['target'],setSize=setSize)\n corr = (resp == answer)\n if not corr:\n if resp == 'timeout':\n stim.text('Timeout',max_wait=0.6)\n else:\n stim.text('Incorrect',image=1, max_wait=0.6)\n # condition', 'answer', 'response', 'RT', 'N/T similarity','N/N similarity','orientation'\n \n if fi is not None:\n fi.writerow(['%s, %s, %s, %d, %.3f, %.2f, %.2f, %.2f, %.2f, %.2f, %.2f, %d'%('vs', answer, resp, int(corr), rt*1000, \n trial['c'], trial['p'], trial['x1'],trial['y1'],trial['x2'],trial['y2'],0)])\n if i!=0 and i%(int(len(trial_list)/4))==0:\n blockbreak(win, i/int((len(trial_list)/4)), 4)\n core.wait(timing['intertrial'])\n except Exception as err:\n if err =='quiting':\n raise Exception('quiting')\n else:\n traceback.print_exc()\n raise Exception(err)\n \n \n 
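# Small sketch of the np.unique(..., return_index=True) pattern used by
# stimulirule()/trialGen() above to deduplicate similarity values while keeping
# the first matching parameter row for each value.
import numpy as np

p = np.array([0.3, 0.1, 0.3, 0.2, 0.1])
vals, idx = np.unique(p, return_index=True)
print(vals)  # sorted unique values: [0.1 0.2 0.3]
print(idx)   # index of each value's first occurrence: [1 3 0]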
stim.text_and_stim_keypress('Congratulations! You have finished.',\n max_wait=2.0)\n\n\ndef run_memory(win,fi, setSize=3):\n \n win.flip()\n timing = {'fixation': 0.8,\n 'search': 6, #float('inf'),\n 'blank': 2,\n 'recall': 6 ,\n 'intertrial': 1.0}\n##\n \n\n stim = Stimuli(win, timing)\n\n stim.text_and_stim_keypress('Welcome to the attention and working memory study',pos=(0,0.7),\n stim=stim.ready)\n stim.text_and_stim_keypress('This is a memory task',pos=(0,0.7),\n stim=stim.fixation)\n\n # construct trials\n orientation = [-50,-25,-5,5,25,50] #staircase #as used in Bayes(2008)\n triallist=[]\n for i in range(len(orientation)):\n trial = {}\n trial['x1'] =0.5\n trial['y1'] = 0\n trial['x2'] = -0.5\n trial['y2'] = 0\n trial['p'] = 1\n trial['c'] = 1\n trial['ori'] = orientation[i]\n triallist.append(trial)\n\n triallist=triallist*8 #inclement trial numbers\n\n shuffle(triallist)\n print (len(triallist))\n # run trials\n for i, trial in enumerate(triallist):\n try:\n target = stim.search_array(trial,condition='memory',setSize=setSize)\n resp, answer, rt = stim.recall(target=target, orientation=trial['ori'])\n corr = (resp == answer)\n if not corr:\n if resp == 'timeout':\n stim.text('Timeout',max_wait=0.6)\n else:\n stim.text('Incorrect',max_wait=0.6) \n if fi is not None:\n fi.writerow(['%s, %s, %s, %d,%.3f, %.2f, %.2f, %.2f, %.2f, %.2f, %.2f, %d'%('vs', answer, resp, int(corr), rt*1000, \n trial['c'], trial['p'], trial['x1'],trial['y1'],trial['x2'],trial['y2'],trial['ori'])])\n if i!=0 and i%(int(len(triallist)/2))==0:\n blockbreak(win, i/int((len(triallist)/2)), 2)\n core.wait(timing['intertrial'])\n except Exception as err:\n if err =='quiting':\n raise Exception('quiting')\n else:\n traceback.print_exc()\n raise Exception(err)\n \n \n stim.text_and_stim_keypress('Congratulations! You have finished.',\n max_wait=2.0)\n\n\ndef get_settings():\n data={}\n data['expname']='Attention_WM'\n data['expdate']=datetime.now().strftime('%Y%m%d_%H%M')\n data['PID']=''\n data['condition']=['vs','memory']\n data['create file'] = False\n dlg=gui.DlgFromDict(data,title='Exp Info',fixed=['expname','expdate'],order=['expname','expdate','PID','condition','create file'])\n\n if not dlg.OK:\n core.quit()\n if data['create file']==True:\n outName='P%s_%s_%s.csv'%(data['PID'],data['condition'],data['expdate'])\n outFile = open(outName, 'wb')\n outWr = csv.writer(outFile, delimiter=';', lineterminator='\\n', quotechar=' ', quoting=csv.QUOTE_MINIMAL) # a .csv file with that name. 
Could be improved, but gives us some control\n outWr.writerow(['%s, %s, %s, %s, %s, %s,%s, %s, %s, %s,%s, %s'%(\n 'condition', 'answer', 'response', 'correct', 'RT', \n 'N/T disimilarity','N/N disimilarity','x1','y1','x2','y2','orientation')]) # write out header\n return outWr, outFile, data['condition']\n else: \n return None,None,data['condition']\n\n #cleanup/file closing/participant thank you messag\ndef close(win, fname=None):\n if fname is not None:\n fname.close() #close the output file\n thanks = visual.TextStim(win,'Thank you for your participation',font='Helvetica', alignHoriz='center',\n alignVert='center', units='norm', height=0.1,color=(1.0,1.0,1.0),wrapWidth=3) \n thanks.draw()\n win.flip()\n event.waitKeys(keyList=['return']) \n win.close() #close the psychopy windo\n core.quit()\n print (\"all tests concluded\") \n\n\n\nif __name__ == '__main__':\n filewriter, fname , condition = get_settings()\n win = get_window(1200)\n if condition=='memory':\n try:\n run_memory(win,filewriter,setSize=3)\n except Exception: \n close(win,fname=fname)\n \n else:\n try:\n run_vs(win,filewriter)\n except Exception: \n close(win,fname=fname)\n","sub_path":"task functions_orientationonly.py","file_name":"task functions_orientationonly.py","file_ext":"py","file_size_in_byte":22749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"583215289","text":"from matplotlib import pyplot as plt\nimport pickle\nimport numpy as np\nfrom Classify.DataSet import HogDataSet\n\ntrain_dataset = HogDataSet(\"../../DataSet/TrainData\")\ntest_dataset = HogDataSet(\"../../DataSet/TestData\")\n_, y_train = zip(*train_dataset)\n_, y_test = zip(*test_dataset)\ny_train = np.array(y_train).ravel()\ny_test = np.array(y_test).ravel()\ny_train = y_train % 10\ny_test = y_test % 10\n\ntrain_sta = np.bincount(y_train)\ntest_sta = np.bincount(y_test)\nx = np.array(list(range(0, 10)))\n\nfig = plt.figure(figsize=(8, 6))\nplt.bar(x - 0.2, train_sta / y_train.shape[0] * 100, width=0.4, label=\"Train\", alpha=0.3)\nplt.bar(x + 0.2, test_sta / y_test.shape[0] * 100, width=0.4, label=\"Test\", alpha=0.3)\nplt.legend(fontsize=15, loc=\"upper right\")\nplt.xticks(list(range(10)), list(range(10)), fontsize=12)\nplt.yticks(fontsize=12)\nplt.ylabel('Ratio(%)', fontsize=15)\nplt.xlabel('Class', fontsize=15)\nplt.tight_layout()\nplt.savefig(\"Class_Distribute.png\")\nplt.show()\n\n","sub_path":"Classify/ResultData/PlotOverfitting.py","file_name":"PlotOverfitting.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"434769323","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 10 11:39:17 2017\n\n@author: tecclaire\n\"\"\"\nimport csv\n\nemails = ['bad','dog','cat']\n\ndef write_to_csv(list_of_emails):\n with open ('emails.csv','w',newline='') as f:\n f_csv = csv.writer(f)\n f_csv.writerow(['Emails'])\n f_csv.writerows([x.split() for x in list_of_emails])\n return f_csv\n\nprint (write_to_csv(emails))\n\nfor item in emails:\n print (item)","sub_path":"python/faculty exercise4.py","file_name":"faculty exercise4.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"288789410","text":"# # English - UoG\n# ident_str = 'Identifier'\n# fullname_str = 'Full name'\n# email_str = 'Email address'\n# status_str = 'Status'\n# grade_str = 'Grade'\n# max_grade_str = 'Maximum 
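# Minimal sketch of the class-distribution computation in PlotOverfitting.py above:
# np.bincount tallies integer labels, and dividing by the sample count yields the
# per-class percentages that get plotted. Labels here are invented.
import numpy as np

y = np.array([0, 1, 1, 2, 2, 2])
ratios = np.bincount(y) / y.shape[0] * 100
print(ratios)  # [16.67 33.33 50.  ] percent for classes 0, 1, 2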
Grade'\n# submitted_str = 'Submitted'\n\n# Catalan - UAB\nident_str = 'Identificador'\nfullname_str = 'Nom complet'\nemail_str = 'Número ID'\nstatus_str = 'Estat'\ngrade_str = 'Qualificació'\nmax_grade_str = 'Qualificació màxima'\nsubmitted_str = \"S'ha tramès\"\n","sub_path":"i18n.py","file_name":"i18n.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"106197757","text":"import re\nfrom difflib import SequenceMatcher\n\nimport nltk\n\nimport lyrics\n\ndef get_as_string(file):\n with open(file, 'r') as myfile:\n return myfile.read().replace('\\n', '')\n\n\ndef similar(a, b):\n return SequenceMatcher(None, a, b).ratio()\n\n\ndef tokenize(line):\n line.strip()\n tokens = nltk.word_tokenize(line)\n tagged = nltk.pos_tag(tokens)\n line_tags = []\n for t in tagged:\n line_tags.append(t)\n return line_tags\n\n\ndef do_comparison(artist, song):\n track_id = lyrics.get_track_id_for_keywords_and_artists(song, artist)\n lyr = lyrics.get_lyrics_for_track_id(track_id)\n if lyr is None:\n return \"\"\n song = re.sub(\"\\(.*\\)\", \"\", lyr)\n lines = str.split(song, \"\\n\")\n words = \"\"\n tags = \"\"\n for line in lines:\n for t in tokenize(line):\n words += t[0] + \" \"\n tags += t[1] + \" \"\n\n words.replace(\"... ******* This Lyrics is NOT for Commercial use *******\", \"\").strip()\n tags.replace(\"... ******* This Lyrics is NOT for Commercial use *******\", \"\").strip()\n\n b_score_l = similar(get_as_string(\"../belligerent/all_belligerent_l.txt\"), words)\n c_score_l = similar(get_as_string(\"../conscious/all_conscious_l.txt\"), words)\n\n b_score_t = similar(get_as_string(\"../belligerent/all_belligerent_t.txt\"), tags)\n c_score_t = similar(get_as_string(\"../conscious/all_conscious_t.txt\"), tags)\n\n b_score = abs(b_score_l - b_score_t)\n c_score = abs(c_score_l - c_score_t)\n\n return (\"conscious\" if c_score < b_score else \"belligerent\") + \",\" + str(abs(c_score - b_score))\n\n\ndef similar_artist(type):\n pass\n\n\ndef save_data(lyrics):\n pass\n\n\ndef create_result(artist, song):\n comp = do_comparison(artist, song).split(\",\")\n # track_id = lyrics.get_track_id_for_keywords_and_artists(keywords=song, artist=artist)\n # print(track_id)\n # album = lyrics.get_album_info(track_id)\n fout = open(\"../result.html\", 'w')\n fout.write(\"
Results for \" + song + \" by \" + artist +\n \"\" +\n comp[0] + \"With a difference of : \" + str(float(comp[1]) * 100) +\n \"%
\")\n fout.close()\n\n\ncreate_result(\"Kendrick Lamar\", \"Alright\")\n","sub_path":"static/scripts/do_analysis.py","file_name":"do_analysis.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"542655206","text":"import random\nfrom PIL import Image, ImageDraw\nimport colorsys\nimport numpy as np\nimport pickle\nimport colorgram\n\n\nwith open('extract_colors.pkl', 'rb') as f:\n image_colors_list = pickle.load(f)\n\n\nprint(image_colors_list)\npixels = [\n image_colors_list,\n]\n\n# Convert the pixels into an array using numpy\narray = np.array(pixels, dtype=np.uint8)\n\n# Use PIL to create an image from the new array of pixels\nnew_image = Image.fromarray(array)\nnew_image.save('test.png')\n\ncolors = colorgram.extract('test.png', 8)\nresult = map(lambda x: (x.rgb.r, x.rgb.g, x.rgb.b), colors)\nfinal_palette = list(result)\n\nprint(final_palette)\n\npixels = [\n final_palette,\n]\n\n# Convert the pixels into an array using numpy\narray = np.array(pixels, dtype=np.uint8)\n\n# Use PIL to create an image from the new array of pixels\nnew_image = Image.fromarray(array)\nnew_image.show()\nnew_image.save('battlestations.png')\n","sub_path":"color_sort_test.py","file_name":"color_sort_test.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"151047891","text":"def simpleSymbols(string):\r\n\r\n\tflag = True\r\n\tspecial = \"=+-\"\r\n\r\n\tif string[0] in special and string[-1] in special:\r\n\t\tfor i in range(1, len(string) - 1):\r\n\t\t\tif string[i].isalpha() and string[i+1] not in special:\r\n\t\t\t\treturn False\r\n\telse:\r\n\t\treturn False\r\n\r\n\treturn flag\r\n\r\nprint(simpleSymbols(\"+f+a+d+\"))\r\n\r\n","sub_path":"Coderbyte_simpleSymbols.py","file_name":"Coderbyte_simpleSymbols.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"192668183","text":"import re,zipfile\n\nfindnothing = re.compile(r'Next nothing is (\\d+)').match\nseed = '90052'\ncomment=[]\nz=zipfile.ZipFile('channel.zip','r')\n\nwhile True:\n fname = seed+'.txt'\n comment.append(z.getinfo(fname).comment)\n guts = z.read(fname)\n m = findnothing(guts)\n if m:\n seed = m.group(1)\n else:\n break\nprint(\"\".join(comment))","sub_path":"test4_quiz/quiz_7.py","file_name":"quiz_7.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"82775144","text":"def quick_sort(list, start, end):\n low = start\n high = end\n if start >= end:\n return\n pivot = list[low]\n # mid =(high - low)/2\n while low < high:\n # 小的往前挪\n while pivot <= list[high] and low < high:\n # list[pivot] = list[high]\n high -= 1\n list[low] = list[high]\n # 大的往后挪\n while pivot > list[low] and low < high:\n low += 1\n list[high] = list[low]\n\n list[low] = pivot\n # 对基准元素左边的子序列进行快速排序\n quick_sort(list, start, low - 1)\n # 对基准元素右边的子序列进行快速排序\n quick_sort(list, low + 1, end)\n\n\n\nlist = [1,5,4,2,6,3]\nquick_sort(list,0,len(list)-1)\nprint(list)","sub_path":"DataStructDemo-Python/Search/Sort/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"195728405","text":"#!/usr/bin/home/yossi/python\n# Necessary imports:\nimport rospy\nfrom std_msgs.msg 
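# English-commented equivalent sketch of the in-place quicksort above (its original
# comments are Chinese: move smaller items forward, larger items back, then recurse
# on each side of the pivot).
def quick_sort_sketch(lst, start, end):
    if start >= end:
        return
    low, high, pivot = start, end, lst[start]
    while low < high:
        while pivot <= lst[high] and low < high:  # shrink from the right
            high -= 1
        lst[low] = lst[high]                      # smaller value moves forward
        while pivot > lst[low] and low < high:    # grow from the left
            low += 1
        lst[high] = lst[low]                      # larger value moves back
    lst[low] = pivot                              # pivot lands in its final slot
    quick_sort_sketch(lst, start, low - 1)
    quick_sort_sketch(lst, low + 1, end)

data = [1, 5, 4, 2, 6, 3]
quick_sort_sketch(data, 0, len(data) - 1)
assert data == sorted(data)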
import String\n\n# Parameters\nnode_name = \"nodeB\"\nlisten_topic = \"ping\"\npublish_topic = \"pong\"\ndelay = 1\n\n\nclass DummyNode:\n \"\"\"\n A node which relays messages from [listen_topic] to [] after [delay] seconds\n \"\"\"\n def __init__(self):\n # Initializing node\n rospy.init_node(name=node_name)\n # Creating publisher\n self.p = rospy.Publisher(name=publish_topic, data_class=String, queue_size=10)\n # Creating subscriber\n rospy.Subscriber(name=listen_topic, data_class=String, callback=self.woof)\n\n def woof(self, data):\n \"\"\"\n This is a callback function.\n 1. Get the message\n 2. Print the message\n 3. Wait [delay]\n 4. Publish the message\n\n :paramdata:\n :type data: String\n :return:\n :rtype:\n \"\"\"\n print(data.data)\n rospy.sleep(delay)\n self.p.publish(data)\n\n\ndef main():\n # Create tho node\n n = DummyNode()\n\n # Wait a second and send a message\n rospy.sleep(1)\n n.p.publish(String(data=\"--pong--\"))\n\n # Ah, ha, ha, ha, stayin' alive\n rospy.spin()\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except Exception as e:\n print(e)\n","sub_path":"scripts/nodeB.py","file_name":"nodeB.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"74417735","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass QNetwork(nn.Module):\n def __init__(self, state_size, action_size, seed, fc1_units=16, fc2_units=16):\n \"\"\"\n Initialize parameters and build simple model\n \"\"\"\n super(QNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n self.fc1 = nn.Linear(state_size, fc1_units)\n self.fc2 = nn.Linear(fc1_units, fc2_units)\n self.fc3 = nn.Linear(fc2_units, action_size)\n\n def forward(self, state):\n \"\"\"\n Build a network that maps state to action values\n \"\"\"\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n return self.fc3(x)\n\n\nclass DuelingQNetwork(nn.Module):\n def __init__(self, state_size, action_size, seed, fc1_units=16, fc2_units=16, fc3_units=8):\n \"\"\"\n Initialize parameters and build simple model\n \"\"\"\n super(DuelingQNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n \n # initial part of model is shared by both the advantage estimator and value estimator\n self.initial_forward = nn.Sequential(\n nn.Linear(state_size, fc1_units),\n nn.ReLU(),\n nn.Linear(fc1_units, fc2_units)\n )\n\n # estimates the value of a particular state\n self.fc_val = nn.Sequential(\n nn.Linear(fc2_units, fc3_units),\n nn.ReLU(),\n nn.Linear(fc3_units, 1)\n )\n\n # estimates the advantage of a particular action in a given state\n self.fc_adv = nn.Sequential(\n nn.Linear(fc2_units, fc3_units),\n nn.ReLU(),\n nn.Linear(fc3_units, action_size)\n )\n\n def forward(self, state):\n \"\"\"\n Build a network that maps state to action values\n \"\"\"\n x = self.initial_forward(state)\n val = self.fc_val(x)\n adv = self.fc_adv(x)\n x = val + adv - adv.mean()\n return x\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"509937374","text":"OPERATION_NAMES = (\"conjunction\", \"disjunction\", \"implication\", \"exclusive\", \"equivalence\")\n\n# x | y | x∧y | x∨y | x→y | x⊕y | x≡y |\n# --------------------------------------\n# 0 | 0 | 0 | 0 | 1 | 0 | 1 |\n# 1 | 0 | 0 | 1 | 0 | 1 | 0 |\n# 0 | 1 | 0 | 1 | 1 | 1 | 0 |\n# 1 | 1 | 1 | 1 | 1 | 0 | 1 |\n# 
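# Hedged sketch of the dueling aggregation in DuelingQNetwork.forward() above:
# a scalar state value plus mean-centred action advantages keeps the Q-values
# identifiable. Numbers here are illustrative.
import torch

val = torch.tensor([[1.0]])              # V(s), shape (batch, 1)
adv = torch.tensor([[0.5, -0.5, 0.0]])   # A(s, a), shape (batch, n_actions)
q = val + adv - adv.mean()
print(q)  # tensor([[1.5000, 0.5000, 1.0000]])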
--------------------------------------\nconjunction = {(0,0):0, (1,0):0, (0,1):0, (1,1):1}\ndisjunction = {(0,0):0, (1,0):1, (0,1):1, (1,1):1}\nimplication = {(0,0):1, (1,0):0, (0,1):1, (1,1):1}\nexclusive = {(0,0):0, (1,0):1, (0,1):1, (1,1):0}\nequivalence = {(0,0):1, (1,0):0, (0,1):0, (1,1):1}\n\ndef boolean(x, y, operation):\n if operation == 'conjunction':\n return conjunction[(x, y)]\n elif operation == 'disjunction':\n return disjunction[(x, y)]\n elif operation == 'implication':\n return implication[(x, y)]\n elif operation == 'exclusive':\n return exclusive[(x, y)]\n elif operation == 'equivalence':\n return equivalence[(x, y)]\n else:\n return 0 # Error situation\n\nif __name__ == '__main__':\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert boolean(1, 0, \"conjunction\") == 0, \"and\"\n assert boolean(1, 0, \"disjunction\") == 1, \"or\"\n assert boolean(1, 1, \"implication\") == 1, \"material\"\n assert boolean(0, 1, \"exclusive\") == 1, \"xor\"\n assert boolean(0, 1, \"equivalence\") == 0, \"same?\"\n","sub_path":"CheckiO/unknown/boolean-algebra.py","file_name":"boolean-algebra.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"61312090","text":"# -*- coding: utf-8 -*-\n\n'''\n\nCreated on 2016-12-29\n\n@author: ranyixu\n@Description:\n\n'''\nimport json\n\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom django_extensions.extension_responses.restful_responses import RestfulResponse405, \\\n RestfulResponse, RestfulResponse404, RestfulResponse500, ParamError,\\\n ModelResponse\nfrom django_extensions.extension_view.dispatch_operation_view import DispatchOperationView\n\n\nclass RestfulView(DispatchOperationView):\n '''\n \n @Description: this class sub from DispatchOperationView, \n replace return HttpResponse with RestfulResponse\n '''\n \n def dispatch(self, request, operation=None, *args, **kwargs):\n #merge param\n self._merge_param(request)\n return super(RestfulView, self).dispatch(request, operation=operation, *args, **kwargs)\n \n def operation_undefined(self, request, *args, **kwargs):\n '''\n \n @Description: override this, return restful response instead\n '''\n return RestfulResponse405(\"undefined operation\")\n \n def operation_not_allowed(self, request, *args, **kwargs):\n '''\n \n @Description: override this, return restful response instead\n '''\n return RestfulResponse405(\"operation not allowed\")\n \n def on_exception(self, e):\n '''\n \n @Description: override this, deal some common exception\n \n '''\n try:\n raise e\n except ObjectDoesNotExist:\n return RestfulResponse404\n except KeyError:\n return ParamError(e)\n except:\n raise e\n return RestfulResponse500\n \n def _merge_param(self, request):\n '''\n @Description: sometimes params not only in request.POST dict, but also in body as a string, \n this function will merge the params in the body to request.POST\n '''\n try:\n if request.body is None or request.body.strip() == \"\":\n return\n if (not 'application/json' in request.META['CONTENT_TYPE'].lower()):\n return\n except:\n return\n try:\n mutable = request.POST._mutable\n if not request.POST._mutable:\n request.POST._mutable = True\n try:\n charset = None\n if getattr(request, \"encoding\", None) is None:\n charset = 'utf-8'\n else:\n charset = request.encoding\n request.POST.update(json.loads(request.body.decode(encoding = charset)))\n except:\n pass\n request.POST._mutable = mutable\n 
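# Compact alternative sketch to the if/elif chain in boolean() above: since every
# branch is just a table lookup, one dict keyed by operation name does the same job.
TABLES = {
    "conjunction": {(0, 0): 0, (1, 0): 0, (0, 1): 0, (1, 1): 1},
    "disjunction": {(0, 0): 0, (1, 0): 1, (0, 1): 1, (1, 1): 1},
    "implication": {(0, 0): 1, (1, 0): 0, (0, 1): 1, (1, 1): 1},
    "exclusive":   {(0, 0): 0, (1, 0): 1, (0, 1): 1, (1, 1): 0},
    "equivalence": {(0, 0): 1, (1, 0): 0, (0, 1): 0, (1, 1): 1},
}

def boolean_sketch(x, y, operation):
    return TABLES.get(operation, {}).get((x, y), 0)  # 0 mirrors the error branch

assert boolean_sketch(1, 0, "disjunction") == 1
assert boolean_sketch(0, 1, "equivalence") == 0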
except:\n return\n \ndef delegate_required(func):\n def _fun(obj, *args, **kwargs):\n if obj.delegate is None:\n return RestfulResponse405('operation is undefined')\n return func(obj, *args, **kwargs)\n return _fun\n\nclass RestfulModelView(RestfulView):\n '''\n \n @Description: this class sub from RestfulView, add some default operation for model,\n if want to use this, should sub this first, add assigned a Delegate instance to var delegate \n '''\n allowed_get_operations = [\"get\"]\n allowed_post_operations = [\"add\", \"delete\", \"modify\"]\n \n delegate = None\n '''\n \n model's default get,add, delete, modify operation will use this, this should be a\n instance of django_extensions.delegate.Delegate\n '''\n \n @delegate_required\n def get(self, request, *args, **kwargs):\n objs = self.delegate.get(**(request.GET.dict()))\n return ModelResponse(model_objs = objs)\n \n @delegate_required \n def add(self, request, *args, **kwargs):\n obj = self.delegate.add(**(request.POST.dict()))\n return ModelResponse(model_objs = obj)\n \n @delegate_required\n def delete(self, request, *args, **kwargs):\n self.delegate.delete(**(request.POST.dict()))\n return RestfulResponse()\n \n @delegate_required\n def modify(self, request, *args, **kwargs):\n try:\n param_dict = request.POST.dict()\n obj = self.delegate.modify(self.delegate.get_one(id = param_dict.pop(\"pk\")), **(param_dict))\n except KeyError as e:\n return ParamError(e)\n return ModelResponse(obj)\n","sub_path":"django_extensions/extension_view/restful_support.py","file_name":"restful_support.py","file_ext":"py","file_size_in_byte":4355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"389116314","text":"import numpy as np\nimport os\nimport pdb\nimport matplotlib.pyplot as plt\n\n#datasets_dir = '/Users/cubic/hemanth/S2018/cse591/miniProjects/data/'\ndatasets_dir = './data/'\n\ndef one_hot(x, n):\n if type(x) == list:\n x = np.array(x)\n x = x.flatten()\n o_h = np.zeros((len(x), n))\n o_h[np.arange(len(x)), x] = 1\n return o_h\n\n\ndef mnist(noTrSamples=1000, noValSamples = 400,noTsSamples=100, \\\n digit_range=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], \\\n noTrPerClass=100, noValPerClass=10 ,noTsPerClass=10):\n assert noTrSamples==noTrPerClass*len(digit_range), 'noTrSamples and noTrPerClass mismatch'\n assert noTsSamples==noTsPerClass*len(digit_range), 'noTrSamples and noTrPerClass mismatch'\n data_dir = os.path.join(datasets_dir, 'mnist/')\n fd = open(os.path.join(data_dir, 'train-images-idx3-ubyte'))\n loaded = np.fromfile(file=fd, dtype=np.uint8)\n trData = loaded[16:].reshape((60000, 28*28)).astype(float)\n\n fd = open(os.path.join(data_dir, 'train-labels-idx1-ubyte'))\n loaded = np.fromfile(file=fd, dtype=np.uint8)\n trLabels = loaded[8:].reshape((60000)).astype(float)\n\n fd = open(os.path.join(data_dir, 't10k-images-idx3-ubyte'))\n loaded = np.fromfile(file=fd, dtype=np.uint8)\n tsData = loaded[16:].reshape((10000, 28*28)).astype(float)\n\n fd = open(os.path.join(data_dir, 't10k-labels-idx1-ubyte'))\n loaded = np.fromfile(file=fd, dtype=np.uint8)\n tsLabels = loaded[8:].reshape((10000)).astype(float)\n\n trData = trData/255.\n tsData = tsData/255.\n\n tsX = np.zeros((noTsSamples, 28*28))\n trX = np.zeros((noTrSamples-noValSamples, 28*28))\n valX= np.zeros((noValSamples, 28*28))\n tsY = np.zeros(noTsSamples)\n trY = np.zeros(noTrSamples-noValSamples)\n valY = np.zeros(noValSamples)\n\n count = 0\n for ll in digit_range:\n # Train data\n idl = np.where(trLabels == ll)\n #print(idl)\n idl1 
= idl[0][: (noTrPerClass-noValPerClass)]\n idl2 = idl[0][(noTrPerClass-noValPerClass):noTrPerClass]\n #print(idl1)\n #print(idl2)\n idx1 = list(range(count*(noTrPerClass-noValPerClass), (count+1)*(noTrPerClass-noValPerClass)))\n idx2 = list(range(count*(noValPerClass), (count+1)*noValPerClass))\n #print(idx1)\n #print(idx2)\n trX[idx1, :] = trData[idl1, :]\n trY[idx1] = trLabels[idl1]\n #print(trY)\n # Val data\n valX[idx2, :] = trData[idl2, :]\n valY[idx2] = trLabels[idl2]\n # Test data\n idl = np.where(tsLabels == ll)\n idl = idl[0][: noTsPerClass]\n idx = list(range(count*noTsPerClass, (count+1)*noTsPerClass))\n tsX[idx, :] = tsData[idl, :]\n tsY[idx] = tsLabels[idl]\n count += 1\n \n np.random.seed(1)\n test_idx = np.random.permutation(tsX.shape[0])\n tsX = tsX[test_idx,:]\n tsY = tsY[test_idx]\n\n trX = trX.T\n tsX = tsX.T\n valX = valX.T\n trY = trY.reshape(1, -1)\n valY = valY.reshape(1, -1)\n tsY = tsY.reshape(1, -1)\n return trX, trY, valX, valY, tsX, tsY\n\n\ndef main():\n trX, trY, valX, valY, tsX, tsY = mnist(noTrSamples=21,noValSamples=6,\n noTsSamples=9, digit_range=[0, 5, 8],\n noTrPerClass=7,noValPerClass=2, noTsPerClass=3)\n\n plt.imshow(trX[:,5].reshape(28, -1))\n trY[0,5]\n \nif __name__ == \"__main__\":\n main()\n","sub_path":"Stacked Autoencoder/svm_exp/load_mnist.py","file_name":"load_mnist.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"462258471","text":"\"\"\"\nDjango settings for KW project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nfrom collections import namedtuple\nfrom datetime import timedelta\nimport os\nfrom django.core.urlresolvers import reverse_lazy\n\ntry:\n import KW.secrets as secrets\nexcept ImportError:\n print(\"Couldn't find a secrets file. 
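# Minimal sketch of the one_hot() helper in load_mnist.py above: fancy indexing
# with np.arange sets exactly one entry per row of a zero matrix.
import numpy as np

x = np.array([0, 2, 1])
o_h = np.zeros((len(x), 3))
o_h[np.arange(len(x)), x] = 1
print(o_h)  # rows: [1,0,0], [0,0,1], [0,1,0]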
Defaulting\")\n secrets = namedtuple('secrets', ['DEPLOY', 'SECRET_KEY', 'DB_TYPE'])\n secrets.DB_TYPE = \"sqlite\"\n secrets.DEPLOY = False\n secrets.SECRET_KEY = \"samplekey\"\n secrets.EMAIL_HOST_PASSWORD = \"nope\"\n secrets.EMAIL_HOST_USER = \"dontmatter@whatever.com\"\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\nMY_TIME_ZONE = 'America/New_York'\n\nlogging_class = 'logging.StreamHandler'\nlogging_level = 'ERROR' if secrets.DEPLOY else 'DEBUG'\n\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s---%(asctime)s---%(module)s : %(message)s',\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n 'time_only': {\n 'format': '%(asctime)s---%(message)s'\n }\n },\n 'filters': {\n 'require_debug_true': {\n '()': 'django.utils.log.RequireDebugTrue',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'filters': ['require_debug_true'],\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n 'views': {\n 'level': 'DEBUG',\n 'class': 'logging.handlers.TimedRotatingFileHandler',\n 'when': 'midnight',\n 'formatter': 'verbose',\n 'filename': os.path.join(BASE_DIR, \"logs\", \"views.log\"),\n },\n 'models': {\n 'level': 'DEBUG',\n 'class': 'logging.handlers.TimedRotatingFileHandler',\n 'when': 'midnight',\n 'formatter': 'verbose',\n 'filename': os.path.join(BASE_DIR, \"logs\", \"models.log\"),\n },\n 'errors': {\n 'level': 'ERROR',\n 'class': 'logging.handlers.TimedRotatingFileHandler',\n 'when': 'midnight',\n 'formatter': 'verbose',\n 'filename': os.path.join(BASE_DIR, \"logs\", \"errors.log\"),\n },\n 'tasks': {\n 'level': 'INFO',\n 'class': 'logging.handlers.TimedRotatingFileHandler',\n 'when': 'midnight',\n 'formatter': 'verbose',\n 'filename': os.path.join(BASE_DIR, \"logs\", \"tasks.log\"),\n },\n 'sporadic_tasks': {\n 'level': 'INFO',\n 'class': 'logging.handlers.TimedRotatingFileHandler',\n 'when': 'midnight',\n 'formatter': 'verbose',\n 'filename': os.path.join(BASE_DIR, \"logs\", \"sporadic_tasks.log\"),\n },\n 'review_data': {\n 'level': 'INFO',\n 'class': 'logging.handlers.TimedRotatingFileHandler',\n 'when': 'midnight',\n 'formatter': 'time_only',\n 'filename': os.path.join(BASE_DIR, \"logs\", \"review_data.log\"),\n }\n },\n 'loggers': {\n 'kw.views': {\n 'handlers': ['views', 'errors', 'console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n 'kw.models': {\n 'handlers': ['models', 'errors', 'console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n 'kw.tasks': {\n 'handlers': ['tasks', 'errors', 'console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n 'kw.db_repopulator': {\n 'handlers': ['sporadic_tasks', 'errors', 'console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n 'kw.review_data': {\n 'handlers':['review_data', 'console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n },\n}\n\n\n#CELERY SETTINGS\n#CELERY_RESULT_BACKEND = 'amqp'\nCELERY_RESULTS_BACKEND = 'redis://localhost:6379/0'\nCELERY_RESULT_BACKEND = 'redis://localhost:6379/0'\n#CELERY_BROKER_URL = broker = 'amqp://guest@localhost//'\nCELERY_BROKER_URL = 'redis://localhost:6379/0'\nCELERY_ACCEPT_CONTENT = ['json']\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULTS_SERIALIZER = 'json'\nCELERY_TIMEZONE = MY_TIME_ZONE\nCELERY_BEAT_SCHEDULE = {\n 'all_user_srs_every_hour': {\n 'task': 'kw_webapp.tasks.all_srs',\n 'schedule': timedelta(minutes=15)\n },\n 'update_users_unlocked_vocab': {\n 'task': 'kw_webapp.tasks.sync_all_users_to_wk',\n 'schedule': timedelta(hours=12),\n },\n 'sync_vocab_db_with_wk': 
{\n 'task': 'kw_webapp.tasks.repopulate',\n 'schedule': timedelta(hours=3)\n }\n}\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = secrets.SECRET_KEY\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = ['127.0.0.1', 'localhost', 'www.kaniwani.com', '.kaniwani.com']\n\n# Application definition\n\n\nLOGIN_URL = reverse_lazy(\"login\")\nLOGIN_REDIRECT_URL = reverse_lazy(\"kw:home\")\n\n\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.humanize',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django_celery_beat',\n 'crispy_forms',\n 'rest_framework',\n 'lineage',\n 'kw_webapp.apps.KaniwaniConfig', #Make sure this is the top entry in order to correctly override template folders.\n 'debug_toolbar',\n 'rest_framework.authtoken'\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.gzip.GZipMiddleware',\n 'async_messages.middleware.AsyncMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n 'kw_webapp.middleware.SetLastVisitMiddleware'\n)\n\nif DEBUG:\n MIDDLEWARE_CLASSES += (\n 'KW.LoggingMiddleware.ExceptionLoggingMiddleware',\n )\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.IsAuthenticated'\n ],\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n 'rest_framework.authentication.SessionAuthentication'\n\n\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',\n 'PAGE_SIZE': 100,\n 'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',)\n}\n\nREST_FRAMEWORK_DOCS = {\n 'HIDE_DOCS': not DEBUG\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'redis_cache.RedisCache',\n 'LOCATION': 'localhost:6379'\n }\n}\n\nROOT_URLCONF = 'KW.urls'\n\nWSGI_APPLICATION = 'KW.wsgi.application'\n\n#EMAIL BACKEND SETTINGS\nMANAGERS = [(\"Gary\", \"tadgh@cs.toronto.edu\",), (\"Duncan\", \"duncan.bay@gmail.com\")]\nDEFAULT_FROM_EMAIL = \"gary@kaniwani.com\"\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_HOST_USER = secrets.EMAIL_HOST_USER\nEMAIL_HOST_PASSWORD = secrets.EMAIL_HOST_PASSWORD\nEMAIL_PORT = 587\nEMAIL_USE_TLS = True\n\n\nTIME_ZONE = MY_TIME_ZONE\nSITE_ID = 1\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nif secrets.DB_TYPE == \"postgres\":\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': secrets.DB_NAME,\n 'USER': secrets.DB_USER,\n 'PASSWORD': secrets.DB_PASSWORD,\n 'HOST': 'localhost',\n 'PORT': '',\n }\n }\nelif secrets.DB_TYPE == \"sqlite\":\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n }\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nLINEAGE_ANCESTOR_PHRASE = \"-active\"\n\n# 
Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = \"/var/www/kaniwani.com/static\"\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, \"_front-end/dist/assets\"),\n)\n\nINTERNAL_IPS = ('127.0.0.1',)\n#For cache-busting in production mode.\nif not DEBUG:\n STATICFILES_STORAGE = \"django.contrib.staticfiles.storage.ManifestStaticFilesStorage\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(BASE_DIR, 'templates'),\n os.path.join(BASE_DIR, 'kw_webapp/templates/kw_webapp')\n ],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n 'django.contrib.auth.context_processors.auth',\n \"KW.preprocessors.review_count_preprocessor\",\n \"KW.preprocessors.srs_level_count_preprocessor\",\n 'django.template.context_processors.request',\n 'django.contrib.messages.context_processors.messages',\n ],\n \"debug\": DEBUG\n }\n }\n]\n\nAUTHENTICATION_BACKENDS = [\n 'kw_webapp.backends.EmailOrUsernameAuthenticationBackend',\n 'django.contrib.auth.backends.ModelBackend'\n]\n\n","sub_path":"KW/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":9784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"269471819","text":"from keras.callbacks import EarlyStopping,ModelCheckpoint,ReduceLROnPlateau\nfrom keras.layers import GlobalAveragePooling2D,Dense,Dropout\nfrom keras.models import Model\nfrom keras.applications.resnet50 import ResNet50\nfrom keras import optimizers\nfrom generate_data import *\n\n\ntraining_set,valid_set,test_set = data_gen()\n\nbatch_size=8\ntraining_set.target_size,valid_set.target_size,test_set.target_size\ntrain_size = training_set.n\nvalid_size = valid_set.n\ntest_size =test_set.n\ntrain_size,valid_size,test_size\n\n\n\n\ndef train_model():\n # create the base pre-trained model\n base_model = ResNet50(weights='imagenet', include_top=False)\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n x = Dense(1024, activation='relu')(x)\n x = Dropout(0.5)(x)\n predictions = Dense(17, activation='softmax')(x)\n\n # this is the model we will train\n model = Model(inputs=base_model.input, outputs=predictions)\n # train the model on the new data for a few epochs\n\n for layer in model.layers[:25]:\n layer.trainable = False\n\n # Compile the model\n model.compile(optimizer=optimizers.SGD(lr=1e-4,momentum=0.99), loss='categorical_crossentropy', metrics=['accuracy'])\n\n checkpoint = ModelCheckpoint(\"./vgg16_model.h5\",\n monitor=\"val_loss\",\n mode=\"min\",\n save_best_only = True,\n verbose=1)\n earlystop = EarlyStopping(monitor = 'val_loss',\n mode=\"min\",\n min_delta = 0,\n patience = 5,\n verbose = 1,\n restore_best_weights = True)\n\n reduce_lr = ReduceLROnPlateau(monitor = 'val_loss', factor = 0.2, patience = 2,verbose = 1, min_delta = 0.0001)\n\n #combining in single so that we will pass callbacks in model.fit\n callbacks = [checkpoint,earlystop,reduce_lr]\n\n #train the model\n history = model.fit_generator(training_set,steps_per_epoch =4000,epochs = 12,validation_data = valid_set,validation_steps = 100,callbacks=callbacks)\n\n #evaluating the model\n eval = model.evaluate_generator(generator=valid_set,steps=valid_size/batch_size)\n print('accuracy=',eval[1])\n\nif __name__ == '__main__':\n 
train_model()\n","sub_path":"Resnet/train_resnet.py","file_name":"train_resnet.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"460518189","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 19 23:18:40 2019\n\n@author: phongnd205\n\"\"\"\n\nimport tensorflow\nfrom tensorflow import keras\nfrom keras.datasets import mnist\nfrom keras.utils import to_categorical\n\n\n# input data\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\n# preparing data\ntrain_images = train_images.reshape((60000, 28, 28, 1))\ntrain_images = train_images.astype('float32') / 255\n\ntest_images = test_images.reshape((10000, 28, 28, 1))\ntest_images = test_images.astype('float32') / 255\n\ntrain_labels = to_categorical(train_labels)\ntest_labels = to_categorical(test_labels)\n\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))\nmodel.add(keras.layers.MaxPooling2D((2, 2)))\nmodel.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))\nmodel.add(keras.layers.MaxPooling2D((2, 2)))\nmodel.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))\n\nmodel.add(keras.layers.Flatten())\nmodel.add(keras.layers.Dense(64, activation='relu'))\nmodel.add(keras.layers.Dense(10, activation='softmax'))\n\nmodel.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\nmodel.fit(train_images, train_labels, epochs=5, batch_size=64)\n#test_loss, test_acc = model.evaluate(test_images, test_labels)\n#test_acc\n\n","sub_path":"DLwP_5.1_smallConvnet.py","file_name":"DLwP_5.1_smallConvnet.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"462729945","text":"import ts3lib, ts3defines, datetime\nfrom ts3plugin import ts3plugin\nfrom pytsonui import setupUi\nfrom PythonQt.QtGui import *\nfrom configparser import ConfigParser\nfrom os import path\n\nclass NoX(ts3plugin):\n name = \"BanBypasser (NoX)\"\n apiVersion = 21\n requestAutoload = False\n version = \"1.0\"\n author = \"Bluscream\"\n description = \"Fights for you against admin abuse!\"\n offersConfigure = True\n commandKeyword = \"\"\n infoTitle = None\n iconPath = path.join(ts3lib.getPluginPath(), \"pyTSon\", \"scripts\", \"NoX\", \"icons\")\n menuItems = [(ts3defines.PluginMenuType.PLUGIN_MENU_TYPE_GLOBAL, 0, \"Change Identity\", \"\")]\n hotkeys = []\n debug = False\n ini = path.join(ts3lib.getPluginPath(), \"pyTSon\", \"scripts\", \"NoX\", \"settings.ini\")\n cfg = ConfigParser()\n dlg = None\n\n def __init__(self):\n if path.isfile(self.ini):\n self.cfg.read(self.ini)\n else:\n self.cfg['general'] = { \"cfgversion\": \"1\", \"debug\": \"False\", \"enabled\": \"True\", \"channelpw\": \"123\", \"serverpw\": \"123\", \"anticrash\": \"True\" }\n self.cfg['antimove'] = { \"enabled\": \"True\", \"delay\": \"0\"}\n self.cfg['antichannelkick'] = { \"enabled\": \"True\", \"delay\": \"0\"}\n self.cfg['antichannelban'] = { \"enabled\": \"True\", \"delay\": \"0\"}\n self.cfg['antiserverkick'] = { \"enabled\": \"True\", \"delay\": \"0\"}\n self.cfg['antiserverban'] = { \"enabled\": \"True\", \"delay\": \"0\"}\n self.cfg['antichanneldelete'] = { \"enabled\": \"True\", \"delay\": \"0\"}\n self.cfg['lastconnection'] = { \"ip\": \"127.0.0.1\", \"port\": \"9987\", \"channelid\": \"0\", \"channelname\": \"Default Channel\", \"nickname\": 
\"TeamspeakUser\", \"phoneticnick\": \"\", \"metaData\": \"\" }\n with open(self.ini, 'w') as configfile:\n self.cfg.write(configfile)\n ts3lib.logMessage(self.name+\" script for pyTSon by \"+self.author+\" loaded from \\\"\"+__file__+\"\\\".\", ts3defines.LogLevel.LogLevel_INFO, \"Python Script\", 0)\n if self.debug: ts3lib.printMessageToCurrentTab('[{:%Y-%m-%d %H:%M:%S}]'.format(datetime.now())+\" [color=orange]\"+self.name+\"[/color] Plugin for pyTSon by [url=https://github.com/\"+self.author+\"]\"+self.author+\"[/url] loaded.\")\n\n def configure(self, qParentWidget):\n try:\n if not self.dlg:\n self.dlg = SettingsDialog(self)\n self.dlg.show()\n self.dlg.raise_()\n self.dlg.activateWindow()\n except: from traceback import format_exc;ts3lib.logMessage(format_exc(), ts3defines.LogLevel.LogLevel_ERROR, \"PyTSon\", 0)\n\n def onMenuItemEvent(self, schid, atype, menuItemID, selectedItemID):\n if atype == ts3defines.PluginMenuType.PLUGIN_MENU_TYPE_GLOBAL:\n if menuItemID == 0: self.reconnect(schid)\n\n def onConnectStatusChangeEvent(self, schid, newStatus, errorNumber):\n if newStatus == ts3defines.ConnectStatus.STATUS_CONNECTION_ESTABLISHED:\n (error, ip) = ts3lib.getConnect(schid, channelID, ts3defines.ChannelProperties.CHANNEL_DESCRIPTION)\n\nclass SettingsDialog(QDialog):\n def __init__(self, this, parent=None):\n self.this = this\n super(QDialog, self).__init__(parent)\n setupUi(self, path.join(ts3lib.getPluginPath(), \"pyTSon\", \"scripts\", \"NoX\", \"settings.ui\"))\n self.setWindowTitle(\"%s Settings\" % this.name)\n self.chk_debug.setChecked(this.cfg.getboolean(\"general\", \"debug\"))","sub_path":"scripts/NoX/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"396104098","text":"import subprocess\nimport os\nimport sys\n\n#add debug and release builds\nAddOption('--d',action='store_true',help='build a debug build',default=False)\nAddOption('--r',action='store_true',help='build a release build',default=False)\nAddOption('--g',action='store_true',help='fetch newest from git and build that',default=False)\nAddOption('--ocv',action='store_true',help='build with opencv',default=False)\n\nif GetOption('g'):\n\tsubprocess.check_call(['git','fetch'])\n\tsubprocess.check_call(['git','submodule','foreach','git','fetch'])\n\n\n##SETTING UP COLORS\ncolors = {}\ncolors['cyan'] = '\\033[96m'\ncolors['purple'] = '\\033[95m'\ncolors['blue'] = '\\033[94m'\ncolors['green'] = '\\033[92m'\ncolors['yellow'] = '\\033[93m'\ncolors['red'] = '\\033[91m'\ncolors['end'] = '\\033[0m'\n\n#If the output is not a terminal, remove the colors\nif not sys.stdout.isatty():\n for key, value in colors.iteritems():\n colors[key] = ''\n\ncompile_source_message = '%sCompiling %s \\t\\t\\t ==> %s$SOURCE%s' % \\\n (colors['blue'], colors['purple'], colors['yellow'], colors['end'])\n\ncompile_shared_source_message = '%sCompiling shared %s \\t==> %s$SOURCE%s' % \\\n (colors['blue'], colors['purple'], colors['yellow'], colors['end'])\n\nlink_program_message = '%sLinking Program %s \\t\\t ==> %s$TARGET%s' % \\\n (colors['red'], colors['purple'], colors['yellow'], colors['end'])\n\nlink_library_message = '%sLinking Static Library %s \\t ==> %s$TARGET%s' % \\\n (colors['red'], colors['purple'], colors['yellow'], colors['end'])\n\nranlib_library_message = '%sRanlib Library %s \\t\\t ==> %s$TARGET%s' % \\\n (colors['red'], colors['purple'], colors['yellow'], 
colors['end'])\n\nlink_shared_library_message = '%sLinking Shared Library %s \\t ==> %s$TARGET%s' % \\\n (colors['red'], colors['purple'], colors['yellow'], colors['end'])\n\njava_library_message = '%sCreating Java Archive %s \\t ==> %s$TARGET%s' % \\\n (colors['red'], colors['purple'], colors['yellow'], colors['end'])\n\nenv = Environment(\n CXXCOMSTR = compile_source_message,\n CCCOMSTR = compile_source_message,\n SHCCCOMSTR = compile_shared_source_message,\n SHCXXCOMSTR = compile_shared_source_message,\n ARCOMSTR = link_library_message,\n RANLIBCOMSTR = ranlib_library_message,\n SHLINKCOMSTR = link_shared_library_message,\n LINKCOMSTR = link_program_message,\n JARCOMSTR = java_library_message,\n JAVACCOMSTR = compile_source_message\n)\n##END COLORIZER\nbins = \"bins/\"\n\nclass ProgramBuilder:\n\tdef __init__(self, name, src_dir,env):\n\t\tself.name = name\n\t\tself.src_dir = src_dir\n\t\tself.object_creator = ObjectCreator(src_dir)\n\t\tself.env = env\n\tdef build(self):\n\t\tself.build_link([])\n\tdef build_link(self,libs):\n\t\tprint(\" I am \" + self.name + \" \" + str(libs))\n\t\tself.env.Program(self.name,self.object_creator.get_objects(),LIBS=libs,LIBPATH=bins)\nclass LibraryBuilder:\n\tdef __init__(self,name,src_dir,env):\n\t\tself.name = name\n\t\tself.src_dir =src_dir\n\t\tself.object_creator = ObjectCreator(src_dir)\n\t\tself.env = env\n\tdef build(self):\n\t\tself.env.Append(CPPPATH = ['../'+self.src_dir])\n\t\tself.env.Library(self.name,self.object_creator.get_objects())\n\n\nclass ObjectCreator:\n\tbuild = \"build/\"\n\tdef __init__(self,src_dir):\n\t\tself.src_dir = src_dir\n\t\tself.build_dir = ObjectCreator.build + src_dir\n\t\n\tdef get_objects(self):\n\t\treturn self.get_objects_alt_scons(\"SConscript\")\n\n\tdef get_objects_alt_scons(self,scons_file):\n\t\tself._copy_to_build_dir()\n\t\treturn SConscript(self.build_dir+scons_file,exports='env')\n\n\tdef _copy_to_build_dir(self):\n\t\tdef make_build_dir(dir):\n\t\t\tsubprocess.check_call([\"mkdir\",\"--parents\",dir])\n\t\tdef copyanytree(src, dst):\n\t\t\tsubprocess.check_call(['rsync','-i','-r',src,dst])\n\t\tmake_build_dir(self.build_dir)\n\t\tcopyanytree(self.src_dir ,self.build_dir)\n\n\n\n#Sets up an environment object\n\nflags = \"-Wall -std=c++11\"\n#set up some differences between debug and release\nif GetOption('d'):\n\tflags += \"-g\"\nif GetOption('r'):\n\tflags += \"-O3\"\nenv.Append(CCFLAGS=flags)\n\n\n\n\nframework = LibraryBuilder(bins+'framework',\"lib/2014-2015-Framework/src/\",env)\nprogram = ProgramBuilder('robot_program','src/',env)\n\nOPENCV_FORMATTED_LIBS = []\nif GetOption('ocv'):\n\t#incoming hacky stuff to make openCV link\n\tOPENCV_FLAGS=subprocess.check_output(['pkg-config','--cflags','opencv'])\n\tOPENCV_LIBS=subprocess.check_output(['pkg-config','--libs-only-l','opencv'])\n\tOPENCV_LIBPATH=subprocess.check_output(['pkg-config','--libs-only-L','opencv'])\n\tenv.Append(CCFLAGS=OPENCV_FLAGS)\n\tenv.Append(LIBPATH=OPENCV_FLAGS)\n\n\t#this is a mess but it needs to be this way because i haev no better way for\n\t#string manipulation\n\tOPENCV_FORMATTED_LIBS = map(lambda a: a[2:],OPENCV_LIBS.split(\" \")[:-2]) + ['tesseract']\n\t#openCV hacky linking over\n\nif GetOption(\"clean\"):\n\tsubprocess.check_call(['rm','-rf',bins])\n\tsubprocess.check_call(['rm','-rf',\"build/\"])\n\nif not GetOption(\"clean\"):\n\tprint('Building 
PROGRAM...')\n\tprogram.build()\n\n","sub_path":"SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"184429859","text":"import requests\r\nimport json\r\nfrom datetime import datetime\r\n\r\nurl = \"https://openexchangerates.org/api/latest.json?app_id=58824ee47b2f4dcc8290c780d9ab9b2c\"\r\n\r\nresponse = requests.get(url)\r\n\r\ndata = response.text\r\n\r\nparsed = json.loads(data)\r\n\r\nprint(parsed)\r\n\r\nprint(json.dumps(parsed, indent = 3))\r\n\r\n# latest.json keys the per-currency rates under \"rates\"; the base currency is \"USD\"\r\ndate = datetime.utcfromtimestamp(parsed[\"timestamp\"]).strftime(\"%Y-%m-%d\")\r\neur_rate = parsed[\"rates\"][\"EUR\"]\r\ngbp_rate = parsed[\"rates\"][\"GBP\"]\r\n\r\nprint(\"On \" + date + \" 1 USD equals \" + str(gbp_rate) + \" GBP\")\r\nprint(\"On \" + date + \" 1 USD equals \" + str(eur_rate) + \" EUR\")","sub_path":"Failed/cc.py","file_name":"cc.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"456177009","text":"import media\nimport fresh_tomatoes\n#Create instances of class Movie here\n#ToyStory3 Instance\ntoy_story_3 = media.Movie(\"Toy Story 3\",\n \"Lee Unkrich\",\n \"https://www.youtube.com/watch?v=JcpWXaA2qeg\",\n \"https://upload.wikimedia.org/wikipedia/en/6/69/Toy_Story_3_poster.jpg\",\n \"17-year-old Andy is about to leave for \"\n \"college, and his toys have not been played \"\n \"with for years. He intends to take Woody with \"\n \"him, and puts Buzz Lightyear, Jessie and the \"\n \"other toys in a trash bag to be stored in the \"\n \"attic. Andy's mother mistakenly takes the bag \"\n \"to the curb for garbage pickup.\")\n#High School Musical Instance\nhigh_school_musical = media.Movie(\"High School Musical\",\n \"Kenny Ortega\",\n \"https://www.youtube.com/watch?v=ukDLkkvZYFk\",\n \"https://upload.wikimedia.org/wikipedia/en/a/a5/HSMposter.jpg\",\n \"On New Year's Eve in 2006, high school \"\n \"juniors Troy Bolton (Zac Efron) and \"\n \"Gabriella Montez (Vanessa Hudgens) \"\n \"meet at a party while both teens are \"\n \"at a ski lodge during winter break. \"\n \"At the party, the two are called upon \"\n \"to sing karaoke together. They find that \"\n \"they have a connection and decide to \"\n \"exchange numbers before going their \"\n \"separate ways.\")\n#Lion King Instance\nlion_king = media.Movie(\"Lion King\",\n \"Rob Minkoff & Roger Allers\",\n \"https://www.youtube.com/watch?v=4sj1MT05lAA\",\n \"https://upload.wikimedia.org/wikipedia/en/3/3d/The_Lion_King_poster.jpg\",\n \"In the Pride Lands of Africa, a lion rules over the \"\n \"animal kingdom from Pride Rock. King \"\n \"Mufasa's newborn son, Simba, is \"\n \"presented to the assembled animals by \"\n \"Rafiki, a mandrill who serves as \"\n \"shaman and advisor. \"\n \"Mufasa shows young Simba the Pride Lands and \"\n \"explains to him the responsibilities of kingship and \"\n \"the \\\"circle of life\\\", which connects all \"\n \"living things.\")\n#Disneys Hercules Instance\ndisney_hercules = media.Movie(\"Disneys Hercules\",\n \"Ron Clements & John Musker\",\n \"https://www.youtube.com/watch?v=yIAvF8hFEYM\",\n \"https://upload.wikimedia.org/wikipedia/en/6/65/Hercules_%281997_film%29_poster.jpg\",\n \"After imprisoning the Titans beneath the \"\n \"ocean, the Greek gods Zeus and his wife, \"\n \"Hera, have a son named \"\n \"Hercules. 
While the other gods are joyful, \"\n \"Zeus' jealous brother Hades plots to \"\n \"overthrow Zeus and rule Mount Olympus.\")\n#Aladdin Instance\naladdin = media.Movie(\"Aladdin\",\n \"Ron Clements & John Musker\",\n \"https://www.youtube.com/watch?v=QapaqcDucmg\",\n \"https://upload.wikimedia.org/wikipedia/en/5/58/Aladdinposter.jpg\",\n \"In the city of Agrabah, Jafar, the Grand \"\n \"vizier of the Sultan, and his parrot Iago, \"\n \"seek the lamp hidden within the Cave \"\n \"of Wonders, but are told that only \"\n \"a \\\"diamond in the rough\\\" \"\n \"may enter.\")\n#Prince of Persia Instance\nprince_of_persia = media.Movie(\"Prince of Persia\",\n \"Mike Newell\",\n \"https://www.youtube.com/watch?v=ZgEt-4L3fKQ\",\n \"https://upload.wikimedia.org/wikipedia/en/d/df/Prince_of_Persia_poster.jpg\",\n \"Dastan, a street urchin in Persia, \"\n \"is adopted by King Sharaman \"\n \"after showing courage in the \"\n \"marketplace.\")\n#Instantiate and store instances of Movie in array\nmovies = [\n toy_story_3,\n high_school_musical,\n lion_king,\n disney_hercules,\n aladdin,\n prince_of_persia\n ]\n#Opens webpage populated with Movie instances via the fresh_tomatoes py file\nfresh_tomatoes.open_movies_page(movies)\n","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":5051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"379498099","text":"# -*- coding: utf-8 -*-\nfrom flask import Blueprint, render_template, g, redirect, url_for, flash, jsonify, request, send_from_directory\nfrom dataviva.apps.general.views import get_locale\n\nfrom sqlalchemy import desc\nfrom models import Article, AuthorScholar, KeyWord\nfrom dataviva import db\nfrom forms import RegistrationForm\nfrom datetime import datetime\n\nimport os\nimport simplejson\nimport shutil\nfrom upload_file import UploadFile\nfrom werkzeug import secure_filename\nfrom dataviva import app\nfrom dataviva.utils import upload_helper\n\napp.config['UPLOAD_FOLDER'] = os.path.join(os.getcwd(), 'dataviva/static/data/scholar/')\napp.config['MAX_CONTENT_LENGTH'] = 50 * 1024 * 1024\n\nALLOWED_EXTENSIONS = set(['pdf', 'doc', 'docx', 'png', 'jpeg'])\nIGNORED_FILES = set(['.gitignore'])\n\n\nmod = Blueprint('scholar', __name__,\n template_folder='templates',\n url_prefix='//scholar')\n\n\n@mod.before_request\ndef before_request():\n g.page_type = mod.name\n\n\n@mod.url_value_preprocessor\ndef pull_lang_code(endpoint, values):\n g.locale = values.pop('lang_code')\n\n\n@mod.url_defaults\ndef add_language_code(endpoint, values):\n values.setdefault('lang_code', get_locale())\n\n\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef gen_file_name(filename):\n \"\"\"\n If file was exist already, rename it and return a new name\n \"\"\"\n\n i = 1\n while os.path.exists(os.path.join(app.config['UPLOAD_FOLDER'], filename)):\n name, extension = os.path.splitext(filename)\n filename = '%s_%s%s' % (name, str(i), extension)\n i = i + 1\n\n return filename\n\n\n@mod.route('/', methods=['GET'])\ndef index():\n articles = Article.query.filter_by(approval_status=True).order_by(desc(Article.postage_date)).all()\n return render_template('scholar/index.html', articles=articles)\n\n\n@mod.route('/article/', methods=['GET'])\ndef show(id):\n article = Article.query.filter_by(id=id).first_or_404()\n return render_template('scholar/show.html', article=article)\n\n\n@mod.route('/admin', methods=['GET'])\ndef admin():\n articles = Article.query.all()\n return render_template('scholar/admin.html', articles=articles)\n\n\n@mod.route('/admin', methods=['POST'])\ndef admin_update():\n for id, approval_status in request.form.iteritems():\n article = Article.query.filter_by(id=id).first_or_404()\n article.approval_status = approval_status == u'true'\n db.session.commit()\n message = u\"Estudo(s) atualizados com sucesso!\"\n return message\n\n\n@mod.route('/admin/article//', methods=['POST'])\ndef admin_activate(status, status_value):\n for id in request.form.getlist('ids[]'):\n article = Article.query.filter_by(id=id).first_or_404()\n setattr(article, status, status_value == u'true')\n db.session.commit()\n\n message = u\"Artigo(s) alterada(s) com sucesso!\"\n return message, 200\n\n\n@mod.route('/admin/article/new', methods=['GET'])\ndef new():\n form = RegistrationForm()\n return render_template('scholar/new.html', form=form, action=url_for('scholar.create'))\n\n\n@mod.route('/admin/article/new', methods=['POST'])\ndef create():\n form = RegistrationForm()\n if form.validate() is False:\n return render_template('scholar/new.html', form=form)\n else:\n article = Article()\n article.title = form.title.data\n article.theme = form.theme.data\n article.abstract = form.abstract.data\n article.postage_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n article.approval_status = 0\n\n author_input_list = form.authors.data.split(',')\n for author_input in author_input_list:\n article.authors.append(AuthorScholar(author_input))\n\n keyword_input_list = form.keywords.data.split(',')\n for keyword_input in keyword_input_list:\n keyword = KeyWord.query.filter_by(name=keyword_input).first()\n\n if not keyword:\n article.keywords.append(KeyWord(keyword_input))\n else:\n article.keywords.append(keyword)\n\n db.session.add(article)\n db.session.flush()\n\n file_path = app.config['UPLOAD_FOLDER'] + request.form.get('csrf_token')\n\n file_name = [file for file in os.listdir(file_path)][0]\n\n upload_helper.upload_s3_file(\n os.path.join(file_path, file_name),\n 'dataviva',\n os.path.join('scholar/article/', str(article.id)),\n {\n 'ContentType': \"application/pdf\",\n 'ContentDisposition': 'attachment; filename=dataviva-article-' + str(article.id) + '.pdf'\n }\n )\n\n shutil.rmtree(file_path)\n\n db.session.commit()\n\n message = u'Muito obrigado! Seu estudo foi submetido com sucesso e será analisado pela equipe do DataViva. 
\\\n Em até 15 dias você receberá um retorno sobre sua publicação no site!'\n flash(message, 'success')\n return redirect(url_for('scholar.index'))\n\n\n@mod.route('/admin/article//edit', methods=['GET'])\ndef edit(id):\n form = RegistrationForm()\n article = Article.query.filter_by(id=id).first_or_404()\n form.title.data = article.title\n form.theme.data = article.theme\n form.authors.data = article.authors_str()\n form.keywords.data = article.keywords_str()\n form.abstract.data = article.abstract\n\n return render_template('scholar/edit.html', form=form, action=url_for('scholar.update', id=id))\n\n\n@mod.route('/admin/article//edit', methods=['POST'])\ndef update(id):\n form = RegistrationForm()\n if form.validate() is False:\n return render_template('scholar/edit.html', form=form)\n else:\n article = Article.query.filter_by(id=id).first_or_404()\n article.title = form.title.data\n article.theme = form.theme.data\n article.abstract = form.abstract.data\n article.postage_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n article.authors = []\n article.keywords = []\n\n author_input_list = form.authors.data.split(',')\n for author_input in author_input_list:\n article.authors.append(AuthorScholar(author_input))\n\n keyword_input_list = form.keywords.data.split(',')\n for keyword_input in keyword_input_list:\n keyword = KeyWord.query.filter_by(name=keyword_input).first()\n\n if not keyword:\n article.keywords.append(KeyWord(keyword_input))\n else:\n article.keywords.append(keyword)\n\n db.session.commit()\n\n message = u'Estudo editado com sucesso!'\n flash(message, 'success')\n return redirect(url_for('scholar.index'))\n\n\n@mod.route('/admin/article/delete', methods=['POST'])\ndef admin_delete():\n ids = request.form.getlist('ids[]')\n if ids:\n articles = Article.query.filter(Article.id.in_(ids)).all()\n for article in articles:\n db.session.delete(article)\n\n db.session.commit()\n return u\"Artigo(s) excluído(s) com sucesso!\", 200\n else:\n return u'Selecione algum artigo para excluí-lo.', 205\n\n\n@mod.route('/articles/all', methods=['GET'])\ndef all():\n result = Article.query.all()\n articles = []\n for row in result:\n articles += [(row.id, row.title, row.authors_str(),\n row.postage_date.strftime('%d/%m/%Y'), row.approval_status)]\n return jsonify(articles=articles)\n\n\n@mod.route('/admin/article/upload', methods=['GET', 'POST'])\n@mod.route('/admin/article//upload', methods=['GET', 'POST'])\ndef upload(id=None):\n file_path = app.config['UPLOAD_FOLDER'] + request.form.get('csrf_token')\n\n if not os.path.exists(file_path):\n os.makedirs(file_path)\n\n if request.method == 'POST':\n file = request.files['file']\n\n if file:\n filename = secure_filename(file.filename)\n filename = gen_file_name(filename)\n mimetype = file.content_type\n\n if not allowed_file(file.filename):\n result = UploadFile(name=filename, type=mimetype, size=0, not_allowed_msg=\"Filetype not allowed\")\n\n else:\n # save file to disk\n uploaded_file_path = os.path.join(file_path, filename)\n file.save(uploaded_file_path)\n\n # get file size after saving\n size = os.path.getsize(uploaded_file_path)\n\n # return json for js call back\n result = UploadFile(name=filename, type=mimetype, size=size)\n\n return simplejson.dumps({\"files\": [result.get_file()]})\n\n if request.method == 'GET':\n # get all file in ./data directory\n files = [f for f in os.listdir(file_path) if os.path.isfile(\n os.path.join(file_path, f)) and f not in IGNORED_FILES]\n\n file_display = []\n\n for f in files:\n size = 
os.path.getsize(os.path.join(file_path, f))\n file_saved = UploadFile(name=f, size=size)\n file_display.append(file_saved.get_file())\n\n return simplejson.dumps({\"files\": file_display})\n\n return redirect(url_for('scholar.index'))\n\n\n@mod.route(\"/delete/\", methods=['DELETE'])\ndef delete(filename):\n file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n\n if os.path.exists(file_path):\n try:\n os.remove(file_path)\n return simplejson.dumps({filename: 'True'})\n except:\n return simplejson.dumps({filename: 'False'})\n\n\n# serve static files\n@mod.route(\"/data/\", methods=['GET'])\ndef get_file(filename):\n return send_from_directory(os.path.join(app.config['UPLOAD_FOLDER']), filename=filename)\n","sub_path":"dataviva/apps/scholar/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"387221467","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 7 10:09:46 2019\n\n@author: gnasses\n\n**Convert CSV list of devices to ansible hostname file**\n (Created to create ansible inventory files from a TVM Vulnerability group)\n 1. Open Vuln Group\n 2. Right click on Configuration item\n 3. Select Export, then Excel (large file will have to be emailed)\n 4. Open Excel file, filter out any servers or unwanted devices, delete all rows and columns except CI names\n 5. Save as CSV\n 6. Run script and enter source CSV filename and filename for Ansible inventory\n\"\"\"\nimport csv\nfrom netmiko import Netmiko\nimport util\n#util.py located on ns python github\n\n#prompt user for source and destination file\nsourcefile = input(\"Enter the Name of the CSV file to convert to Ansible Inventory: \")\ndstfile = input(\"Enter the Name of the destination Inventory: \")\nprint ()\nprint (\"-- Working on CSV File -- \")\n#open CSV file and generate list\nwith open(sourcefile, 'r') as f:\n reader = csv.reader(f)\n devlist = list(reader)\n print (devlist)\nprint (\"csv file processed\")\n#close out the file\nf.close() \n\nprint (\"-- Filtering Duplicates -- \")\n#filter out non-unique entries using a new list\ndevlist1 = []\nfor dev in devlist:\n dev = (str(dev)[2:-2])\n# print (dev)\n if dev not in devlist1:\n devlist1.append(dev) \ninv = []\nprint (\"duplicates filtered\")\n\"\"\"\nCode below use try/except/finally blocks to be sure to close router connections and revert lists\nand also continue the loops in case the script has a problem.\nLeverages the Netmiko function, and RO automation user in the utils.py, and the n9kswitch1 router to ping/resolve. \nPlease adapt to other routers or hosts as appropriate. 
\n\"\"\"\nprint(\"-- Collecting Device IP Addresses -- \") \n\ntry:\n n9kswitch1 = util.CiscoDeviceRO(host=\"cisctc01ipt01\")\n net_connect = Netmiko(**cisctc01ipt01.__dict__)\n\n for dev in devlist1:\n try:\n print (dev)\n ping1 = net_connect.send_command('ping ' + dev)\n if \"%\" not in ping1:\n ip = ping1.splitlines()[1].split()[6][:-1]\n else:\n ip = ()\n print (\"couldn't ping or couldn't resolve name!\")\n if ip:\n invline = (dev + \" ansible_host=\" + ip)\n inv.append(invline)\n print (\"ok\")\n net_connect.disconnect()\n except:\n print (\"unable to collect device IP\")\n net_connect.disconnect()\n print (\"inv generated\")\nexcept:\n print (\"it broke making inventory\")\nfinally:\n net_connect.disconnect()\n devlist = []\n devlist1 = []\nprint (\"addresses collected\")\nprint(\"-- Connecting to devices to sort by Network OS -- \") \n# sort this list into NXOS and non-ios devices\nnxos_inv = []\nios_inv = []\nunknown_inv = []\n\"\"\"\nNested Try/Except blocks are used here to keep the loops running if a host has a problem or timeout\nto print a messaage to the screen so you could investigate if you were watching as it ran. \n\"\"\"\ntry: \n for invline in inv:\n inv_host = invline.split()[0]\n print (inv_host)\n try:\n device = util.CiscoDeviceRO(host=inv_host)\n net_connect = Netmiko(**device.__dict__)\n ver = net_connect.send_command('show version')\n except:\n print (\"could not connect to device or run show version\")\n try: \n if \"NX-OS\" in ver:\n nxos_inv.append(invline)\n print (\"nxos device\")\n elif \"IOS\" in ver:\n ios_inv.append(invline)\n print (\"ios device\")\n elif \"ios\" in ver:\n ios_inv.append(invline)\n print (\"ios device\")\n else:\n# sw_ver = \"unknown\"\n unknown_inv.append(invline) \n print (\"neither nxos, nor ios\")\n except:\n print (\"could not sort, device not added to Inventory\")\n print (\"inv sorted by OS\")\nexcept:\n print (\"it broke checking Network OS\")\nfinally:\n net_connect.disconnect() \n \n# write line to output file and reset vars\noutF = open(dstfile, 'w') \noutF.write(\"[NXOS]\")\noutF.write(\"\\n\") \nfor item in nxos_inv:\n outF.write(item)\n outF.write(\"\\n\")\noutF.write(\"\\n\") \noutF.write(\"[IOS]\")\noutF.write(\"\\n\") \nfor item in ios_inv:\n outF.write(item)\n outF.write(\"\\n\")\noutF.write(\"\\n\") \noutF.write(\"[UNKNOWN]\")\noutF.write(\"\\n\") \nfor item in unknown_inv:\n outF.write(item)\n outF.write(\"\\n\")\noutF.write(\"\\n\") \noutF.close()\nprint (\" ** Inventory file created successfully ** \")\ninv = []\nnxos_inv = []\nios_inv = []\nunknown_inv = []\n","sub_path":"csv_to_ansible_inv.py","file_name":"csv_to_ansible_inv.py","file_ext":"py","file_size_in_byte":4630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"482195650","text":"\"\"\"\n思路:pow(x,n) = pow(x*x,n/2)\n\"\"\"\n\nclass Solution(object):\n def myPow(self, x, n):\n \"\"\"\n :type x: float\n :type n: int\n :rtype: float\n \"\"\"\n if n==0:\n return 1.0\n if n<0:\n n,x=-n,1.0/x\n if n%2:\n return x*self.myPow(x*x,n/2)\n else:\n return self.myPow(x*x,n/2)\n \n","sub_path":"medium/50.Pow.py","file_name":"50.Pow.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"225624352","text":"import tensorflow as tf\n\nif __name__ == \"__main__\":\n node_1 = tf.constant(3.0)\n node_2 = tf.constant(5.0)\n print(node_1,node_2)\n node_3 = tf.add(node_1,node_2)\n\n a = 
tf.placeholder(dtype=tf.float32,shape=[3,],name=\"a\")\n b = tf.placeholder(dtype=tf.float32,shape=[3,],name=\"b\")\n a_mul_b = tf.multiply(a,b)\n print(a)\n print(a_mul_b)\n W1 = tf.Variable(initial_value=[3.0],dtype=tf.float32)\n b1 = tf.Variable(initial_value=[-1],dtype=tf.float32)\n x = tf.placeholder(dtype=tf.float32)\n linear_model = W1 * x + b1\n input_dic = {a:[3,4,5],b:[5,4,3]}\n init = tf.global_variables_initializer()\n sess = tf.Session()\n sess.run(init)\n print(sess.run(node_3))\n print(sess.run(fetches=a_mul_b,feed_dict=input_dic))\n print(sess.run(fetches=linear_model,feed_dict={x:[5.0,4.0]}))","sub_path":"get_started.py","file_name":"get_started.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"228429166","text":"import json\nimport subprocess\nfrom typing import Optional, cast\n\nfrom packaging.version import Version\n\nfrom .compat import TypedDict, resource_path, shlex_join\nfrom .utils import log_error, log_warning\n\nEnvInfo = TypedDict(\n \"EnvInfo\",\n {\n \"in_virtualenv\": Optional[bool],\n \"include_system_site_packages\": Optional[bool],\n \"has_pkg_resources\": Optional[bool],\n \"has_importlib_metadata\": Optional[bool],\n \"pip_version\": Optional[str],\n \"setuptools_version\": Optional[str],\n \"wheel_version\": Optional[str],\n },\n total=False,\n)\n\n\ndef _get_env_info(python: str) -> EnvInfo:\n with resource_path(\"pip_deepfreeze\", \"env_info_json.py\") as env_info_json_script:\n try:\n env_info_json = subprocess.check_output(\n [python, str(env_info_json_script)], universal_newlines=True\n )\n except subprocess.CalledProcessError:\n return EnvInfo(in_virtualenv=False)\n else:\n return cast(EnvInfo, json.loads(env_info_json))\n\n\ndef check_env(python: str) -> bool:\n env_info = _get_env_info(python)\n if not env_info.get(\"in_virtualenv\"):\n log_error(\n f\"{python} is not in a virtualenv, refusing to start. \"\n f\"See https://github.com/sbidoul/pip-deepfreeze/issues/47 \"\n f\"for hints and discussion.\"\n )\n return False\n if env_info.get(\"include_system_site_packages\"):\n log_error(\n f\"{python} is in a virtualenv that includes system site packages, \"\n f\"refusing to start.\"\n )\n return False\n if not env_info.get(\"has_pkg_resources\"):\n setuptools_install_cmd = shlex_join(\n [python, \"-m\", \"pip\", \"install\", \"setuptools\"]\n )\n log_error(\n f\"pkg_resources is not available to {python}. It is currently \"\n f\"required by pip-deepfreeze. \"\n f\"You can install it with {setuptools_install_cmd}.\"\n )\n return False\n pip_version = env_info.get(\"pip_version\")\n if not pip_version:\n log_error(f\"pip is not available to {python}. Please install it.\")\n return False\n if Version(pip_version) < Version(\"20.1\"):\n pip_install_cmd = shlex_join([python, \"-m\", \"pip\", \"install\", \"pip>=20.1\"])\n log_warning(\n f\"pip-deepfreeze works best with pip>=20.1, \"\n f\"in particular if you use direct URL references. \"\n f\"You can upgrade pip with {pip_install_cmd}.\"\n )\n if not env_info.get(\"wheel_version\"):\n wheel_install_cmd = shlex_join([python, \"-m\", \"pip\", \"install\", \"wheel\"])\n log_warning(\n f\"wheel is not available to {python}. \"\n f\"pip currently works best when the wheel package is installed, \"\n f\"in particular if you use direct URL references. 
\"\n f\"You can install it with {wheel_install_cmd}.\"\n )\n return True\n","sub_path":"src/pip_deepfreeze/sanity.py","file_name":"sanity.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"419534661","text":"import numpy as np\nfrom numba import guvectorize\nimport matplotlib.pyplot as plt\n\n\n@guvectorize([\"void(float32[:], int32, int32, float32[:])\",\n \"void(float64[:], int32, int32, float64[:])\",\n \"void(int32[:], int32, int32, int32[:])\",\n \"void(int64[:], int32, int32, int64[:])\"],\n \"(n),(),()->(n)\", forceobj=True, cache=True)\ndef trap_filter(wf_in, rise, flat, wf_out):\n \"\"\"\n Symmetric trapezoidal filter\n \"\"\"\n wf_out[:] = wf_in[:]\n wf_out[rise:] -= wf_in[:-rise]\n wf_out[rise+flat:] -= wf_in[:-(rise+flat)]\n wf_out[2*rise+flat:] += wf_in[:-(2*rise+flat)]\n wf_out = np.cumsum(wf_out, out=wf_out, axis=0)\n\n\n\n #Plot every line\n","sub_path":"pygama/dsp/_processors/trap_filter.py","file_name":"trap_filter.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"88407438","text":"#!/usr/bin/env python\n#\n# Copyright 2020 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\"\"\"Helps generate enums.xml from ProductionSupportedFlagList.\n\nThis is only a best-effort attempt to generate enums.xml values for the\nLoginCustomFlags enum. You need to verify this script picks the right string\nvalue for the new features and double check the hash value by running\n\"AboutFlagsHistogramTest.*\".\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport re\nimport hashlib\nimport ctypes\nimport xml.etree.ElementTree as ET\nimport logging\nimport sys\n\n_CHROMIUM_SRC = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)\nsys.path.append(os.path.join(_CHROMIUM_SRC, 'third_party', 'catapult', 'devil'))\nfrom devil.utils import logging_common # pylint: disable=wrong-import-position\n\n_FLAG_LIST_FILE = os.path.join(_CHROMIUM_SRC, 'android_webview', 'java', 'src',\n 'org', 'chromium', 'android_webview', 'common',\n 'ProductionSupportedFlagList.java')\n_ENUMS_XML_FILE = os.path.join(_CHROMIUM_SRC, 'tools', 'metrics', 'histograms',\n 'enums.xml')\n\n# This script tries to guess the commandline switch/base::Feature name from the\n# generated Java constant (assuming the constant name follows typical\n# conventions), but sometimes the script generates the incorrect name.\n# In this case, you can teach the\n# script the right name is by editing this dictionary. 
The perk of editing\n# here instead of fixing enums.xml by hand is this script *should* generate the\n# correct hash value once you add the right name, so you can just rerun the\n# script to get the correct set of enum entries.\n#\n# Keys are the names autogenerated by this script's logic, values are the\n# base::Feature/switch string names as they would appear in Java/C++ code.\nKNOWN_MISTAKES = {\n # 'AutogeneratedName': 'CorrectName',\n 'WebViewAccelerateSmallCanvases': 'WebviewAccelerateSmallCanvases',\n 'EnableSharedImageForWebView': 'EnableSharedImageForWebview',\n}\n\n\ndef GetSwitchId(label):\n \"\"\"Generate a hash consistent with flags_ui::GetSwitchUMAId().\"\"\"\n digest = hashlib.md5(label).hexdigest()\n first_eight_bytes = digest[:16]\n long_value = int(first_eight_bytes, 16)\n signed_32bit = ctypes.c_int(long_value).value\n return signed_32bit\n\n\ndef _Capitalize(value):\n value = value[0].upper() + value[1:].lower()\n if value == 'Webview':\n value = 'WebView'\n return value\n\n\ndef FormatName(name, convert_to_pascal_case):\n \"\"\"Converts name to the correct format.\n\n If name is shouty-case (ex. 'SOME_NAME') like a Java constant, then:\n * it converts to pascal case (camel case, with the first letter capitalized)\n if convert_to_pascal_case == True (ex. 'SomeName')\n * it converts to hyphenates name and converts to lower case (ex.\n 'some-name')\n raises\n ValueError if name contains quotation marks like a Java literal (ex.\n '\"SomeName\"')\n \"\"\"\n has_quotes_re = re.compile(r'\".*\"')\n if has_quotes_re.match(name):\n raise ValueError('String literals are not supported (got {})'.format(name))\n name = re.sub(r'^[^.]+\\.', '', name)\n sections = name.split('_')\n\n if convert_to_pascal_case:\n sections = [_Capitalize(section) for section in sections]\n return ''.join(sections)\n\n sections = [section.lower() for section in sections]\n return '-'.join(sections)\n\n\ndef ConvertNameIfNecessary(name):\n \"\"\"Fixes any names which are known to be autogenerated incorrectly.\"\"\"\n if name in KNOWN_MISTAKES.keys():\n return KNOWN_MISTAKES.get(name)\n return name\n\n\nclass Flag(object):\n \"\"\"Simplified python equivalent of the Flag java class.\n\n See //android_webview/java/src/org/chromium/android_webview/common/Flag.java\n \"\"\"\n\n def __init__(self, name, is_base_feature):\n self.name = name\n self.is_base_feature = is_base_feature\n\n\nclass EnumValue(object):\n def __init__(self, label):\n self.label = label\n self.value = GetSwitchId(label)\n\n def ToXml(self):\n return ''.format(value=self.value,\n label=self.label)\n\n\ndef _GetExistingFlagLabels():\n with open(_ENUMS_XML_FILE) as f:\n root = ET.fromstring(f.read())\n all_enums = root.find('enums')\n login_custom_flags = all_enums.find('enum[@name=\"LoginCustomFlags\"]')\n return [item.get('label') for item in login_custom_flags]\n\n\ndef _RemoveDuplicates(enums, existing_labels):\n return [enum for enum in enums if enum.label not in existing_labels]\n\n\ndef ExtractFlagsFromJavaLines(lines):\n flags = []\n\n hanging_name_re = re.compile(\n r'(?:\\s*Flag\\.(?:baseFeature|commandLine)\\()?(\\S+),')\n pending_feature = False\n pending_commandline = False\n\n for line in lines:\n if 'baseFeature(' in line:\n pending_feature = True\n if 'commandLine(' in line:\n pending_commandline = True\n\n if pending_feature and pending_commandline:\n raise RuntimeError('Somehow this is both a baseFeature and commandLine '\n 'switch: ({})'.format(line))\n\n # This means we saw Flag.baseFeature() or Flag.commandLine() on 
this or a\n # previous line but haven't found that flag's name yet. Check if we can\n # find a name in this line.\n if pending_feature or pending_commandline:\n m = hanging_name_re.search(line)\n if m:\n name = m.group(1)\n try:\n formatted_name = FormatName(name, pending_feature)\n formatted_name = ConvertNameIfNecessary(formatted_name)\n flags.append(Flag(formatted_name, pending_feature))\n pending_feature = False\n pending_commandline = False\n except ValueError:\n logging.warning('String literals are not supported, skipping %s',\n name)\n return flags\n\n\ndef _GetMissingWebViewEnums():\n with open(_FLAG_LIST_FILE, 'r') as f:\n lines = f.readlines()\n flags = ExtractFlagsFromJavaLines(lines)\n\n enums = []\n for flag in flags:\n if flag.is_base_feature:\n enums.append(EnumValue(flag.name + ':enabled'))\n enums.append(EnumValue(flag.name + ':disabled'))\n else:\n enums.append(EnumValue(flag.name))\n\n existing_labels = set(_GetExistingFlagLabels())\n enums_to_add = _RemoveDuplicates(enums, existing_labels)\n return enums_to_add\n\n\ndef CheckMissingWebViewEnums(input_api, output_api):\n \"\"\"A presubmit check to find missing flag enums.\"\"\"\n sources = input_api.AffectedSourceFiles(\n lambda affected_file: input_api.FilterSourceFile(\n affected_file,\n files_to_check=(r'.*\\bProductionSupportedFlagList\\.java$', )))\n if not sources:\n return []\n\n enums_to_add = _GetMissingWebViewEnums()\n if not enums_to_add:\n return []\n\n script_path = '//android_webview/tools/PRESUBMIT.py'\n enums_path = '//tools/metrics/histograms/enums.xml'\n xml_strs = sorted([' ' + enum.ToXml() for enum in enums_to_add])\n\n return [\n output_api.PresubmitPromptWarning(\"\"\"\nIt looks like new flags have been added to ProductionSupportedFlagList but the\nlabels still need to be added to LoginCustomFlags enum in {enums_path}.\nIf you believe this\nwarning is correct, please update enums.xml by pasting the following lines under\nLoginCustomFlags and running `git-cl format` to correctly sort the changes:\n\n{xml_strs}\n\nYou can run this check again by running the {script_path} tool.\n\nIf you believe this warning is a false positive, you can silence this warning by\nupdating KNOWN_MISTAKES in {script_path}.\n\"\"\".format(xml_strs='\\n'.join(xml_strs),\n enums_path=enums_path,\n script_path=script_path))\n ]\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n logging_common.AddLoggingArguments(parser)\n args = parser.parse_args()\n logging_common.InitializeLogging(args)\n\n enums_to_add = _GetMissingWebViewEnums()\n\n xml_strs = sorted([' ' + enum.ToXml() for enum in enums_to_add])\n if not xml_strs:\n print('enums.xml is already up-to-date!')\n return\n\n message = \"\"\"\\\nThis is a best-effort attempt to generate missing enums.xml entries. 
Please\ndouble-check this picked the correct labels for your new features (labels are\ncase-sensitive!), add these to enums.xml, run `git-cl format`, and then follow\nthese steps as a final check:\n\nhttps://chromium.googlesource.com/chromium/src/+/master/tools/metrics/histograms/README.md#flag-histograms\n\nIf any labels were generated incorrectly, please edit this script and change\nKNOWN_MISTAKES.\n\"\"\"\n print(message)\n\n for xml_str in xml_strs:\n print(xml_str)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"android_webview/tools/generate_flag_labels.py","file_name":"generate_flag_labels.py","file_ext":"py","file_size_in_byte":8635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"186782669","text":"from django import forms\n\nfrom warehouse.models import Warehouse, BoardGameContainer\n\n\nclass WarehouseForm(forms.ModelForm):\n class Meta:\n model = Warehouse\n fields = ('name', 'desc')\n labels = {'name': 'nazwa', 'desc': 'opis'}\n placeholders = {'name': 'dodaj nazwę', 'desc': ''}\n widgets = {'name': forms.TextInput({'class': 'textinputclass'}),\n 'desc': forms.Textarea()}\n\n\nclass BoardGameContainerForm(forms.ModelForm):\n class Meta:\n model = BoardGameContainer\n fields = ('warehouse', 'commodity', 'total')\n labels = {'warehouse': 'magazyn',\n 'commodity': 'tytuł',\n 'total': 'liczba egzemplarzy'}\n","sub_path":"warehouse/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"213225362","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nfrom xoloitzcuintle import *\n\nclear()\nderechos()\n\nclass Parsero(object):\n if os.name == \"posix\":\n PROMPT = chr(27)+\"[1;32m\"+\"Asquelito$ \"+chr(27)+\"[0;37m\"\n ALERTA = chr(27)+\"[0;91m\"+\"Error: La expresión está mal denotada.\"+chr(27)+\"[0;37m\"+\"\\n\"\n else:\n PROMPT = \"Asquelito$ \"\n ALERTA = \"Error: La expresión está mal denotada.\\n\"\n\n def asquelito(self):\n while True:\n try:\n resultado = evaluar(self.PROMPT)\n if os.name == \"posix\":\n print(chr(27)+\"[1;37m\"\"{}\\n\".format(resultado)+chr(27)+\"[0;37m\")\n else:\n print(\"{}\\n\".format(resultado))\n except EOFError:\n break\n except:\n print(self.ALERTA)\n\nif __name__ == \"__main__\":\n Parsero().asquelito()\n","sub_path":"src/asquel.py","file_name":"asquel.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"455520951","text":"from twarc import Twarc2, expansions\nimport json\n\n# Replace your bearer token below\nclient = Twarc2(bearer_token=\"XXXXX\")\n\n\ndef main():\n # List of Tweet IDs you want to lookup\n tweet_ids = ['1404192093803741184', '1403738886275096605', '1397216898593525762']\n # The tweet_lookup function allows\n lookup = client.tweet_lookup(tweet_ids=tweet_ids)\n for page in lookup:\n # The Twitter API v2 returns the Tweet information and the user, media etc. 
separately\n # so we use expansions.flatten to get all the information in a single JSON\n result = expansions.flatten(page)\n for tweet in result:\n # Here we are printing the full Tweet object JSON to the console\n print(json.dumps(tweet))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"labs-code/python/standard-product-track/tweets_lookup.py","file_name":"tweets_lookup.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"239291999","text":"#!/usr/bin/env python\n#coding=utf-8\n\nimport MySQLdb\n\n#打开数据库连接\ndb = MySQLdb.connect(\"localhost\",\"testuser\",\"test123!\",\"TESTDB\",charset=\"utf8\")\n\n#使用cursor方法获取操作游标\ncursor=db.cursor()\n\n#如果数据库表已经存在使用execute()方法删除表\ncursor.execute(\"DROP TABLE IF EXISTS EMPLOYEE\")\n\n#创建数据表SQL语句\nsql = \"\"\"CREATE TABLE EMPLOYEE (\n\tFIRST_NAME CHAR(20) NOT NULL,\n\tLAST_NAME CHAR(20),\n\tAGE INT,\n\tSEX CHAR(1),\n\tINCOME FLOAT)\"\"\"\n\ncursor.execute(sql)\n\n#关闭数据库连接\ndb.close()\n","sub_path":"mysql_createtable.py","file_name":"mysql_createtable.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"47460516","text":"import sys\ntry:\n with open('file.txt', 'a') as fh:\n file = fh.read()\n print(file)\n\nexcept FileNotFoundError:\n print('Data is missing')\n\nexcept PermissionError:\n print('This is not allowed')\n\nexcept Exception as err:\n print('Some other error occured:', str(err))\n\ntry:\n 1/0\nexcept:\n err = sys.exc_info()\n for e in err:\n print(e)\n\n","sub_path":"myfirstwebapp/try_test.py","file_name":"try_test.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"634634094","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ups', '0010_auto_20150101_1033'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='custorderqueryrow',\n name='headers',\n field=models.TextField(default=b''),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='custorderqueryrow',\n name='purchaseDate',\n field=models.DateTimeField(default=datetime.datetime(2015, 1, 1, 15, 50, 2, 264700, tzinfo=utc), blank=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='pickticket',\n name='DOC_DATE',\n field=models.CharField(default=b'01/01/15 15:50:02', max_length=17),\n preserve_default=True,\n ),\n ]\n","sub_path":"ups/migrations/0011_auto_20150101_1050.py","file_name":"0011_auto_20150101_1050.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"653305936","text":"# -*- coding: utf-8 -*-\n#---------------------------------------------------------------------------\n# GrapheneSaveDialog.py\n# Version 0.1, October, 2011\n#\n# Part of the single-wall graphene builder.\n# Gets PDF and PSF files names.\n\"\"\"\n Copyright 2011, 2012: José O. Sotero Esteva, Melissa López Serrano, \n\n Computational Science Group, Department of Mathematics, \n University of Puerto Rico at Humacao \n .\n\n (On last names: Most hispanic people, Puerto Ricans included, use two surnames; \n one from the father and one from the mother. 
We have separated first names from \n surnames with two spaces.)\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License version 3 as published by\n the Free Software Foundation.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program (gpl.txt). If not, see .\n\n Acknowledgements: The main funding source for this project has been provided\n by the UPR-Penn Partnership for Research and Education in Materials program, \n USA National Science Foundation grant number DMR-0934195. \n\"\"\"\n\nimport os, sys\nfrom PyQt4 import QtGui, QtCore\nfrom ui_grapheneSaveDialog import Ui_GrapheneSaveDialog\n\n# find CNT class and import\nsys.path.append(os.path.dirname(os.path.realpath(__file__))+'/../../conf')\nfrom Wolffia_conf import NANOCAD_MOLECULES\n\nfrom Graphene import Graphene\n\n\nclass GrapheneSaveDialog(QtGui.QDialog):\n\t# Class Fields:\n\t# ui: stores reference tu user interface\n\t# pdbFile: string, PDF filename\n\t# graphene: Mixture, a Graphene\n\n\tdef __init__(self,graphene,files=None, parent=None):\n\t\tsuper(GrapheneSaveDialog, self).__init__(parent, modal = 1)\n\n\t\tself.ui = Ui_GrapheneSaveDialog()\n\t\tself.ui.setupUi(self)\n\n\t\tif files != None:\n\t\t\tself.ui.pdbFilename.setText(files[0])\n\t\t\tself.ui.psfFilename.setText(files[1])\n\t\tself.graphene = graphene\n\n\tdef getFileNames(self):\n\t\tif self.ui.pdbFilename.text() == '':\n\t\t\treturn None\n\t\telse:\n\t\t\treturn [str(self.ui.pdbFilename.text()), str(self.ui.psfFilename.text())]\n\n\t@QtCore.pyqtSlot()\n\tdef on_okButton_pressed(self):\n\t\tpdbFile = str(self.ui.pdbFilename.text())\n\t\tif pdbFile == '':\n\t\t\tQtGui.QMessageBox.warning(self,\n \"Error\",\n \"You must enter at least a PDF filename.\")\n\t\telse:\n\t\t\t#print \"CNT(\", self.n, \", \", self.m, \", \", self.grapheneLength \n\t\t\t#graphene = Graphene(self.n, self.m, self.grapheneLength)\n\t\t\tself.graphene.writePDB(pdbFile)\n\t\t\tpsfFile = str(self.ui.psfFilename.text())\n\t\t\tif psfFile != '':\n\t\t\t\tself.graphene.writePSF(psfFile)\n\t\t\tself.close()\n\n\t@QtCore.pyqtSlot()\n\tdef on_browsePDBButton_pressed(self):\n\t\tfn = str(QtGui.QFileDialog.getSaveFileName(self, 'Save file', os.getcwd(),\"PDB (*.pdb *.PDB)\"))\n\t\tif fn.find(\".pdb\") != len(fn) - 4 and fn.find(\".PDB\") != len(fn) - 4 :\n\t\t\tfn += \".pdb\"\n\t\tself.ui.pdbFilename.setText(fn)\n\n\t@QtCore.pyqtSlot()\n\tdef on_browsePSFButton_pressed(self):\n\t\tfn = str(QtGui.QFileDialog.getSaveFileName(self, 'Save file', os.getcwd(),\"PSF (*.psf *.PSF)\"))\n\t\tif fn.find(\".psf\") != len(fn) - 4 and fn.find(\".PSF\") != len(fn) - 4 :\n\t\t\tfn += \".psf\"\n\t\tself.ui.psfFilename.setText(fn)\n\n\t@QtCore.pyqtSlot()\n\tdef on_cancelButton_pressed(self):\n\t\t#self.ui.pdbFilename.setText('')\n\t\t#self.ui.psfFilename.setText('')\n\t\tself.close()\n\n","sub_path":"interface/grapheneEditor/GrapheneSaveDialog.py","file_name":"GrapheneSaveDialog.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"151552155","text":"# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nimport sys\r\nimport glob\r\nimport argparse\r\nimport matplotlib.pyplot as 
plt\r\n\r\nfrom keras import __version__\r\n\r\nfrom keras.applications.inception_v3 import InceptionV3, preprocess_input\r\n# from keras.applications.inception_v3_matt import InceptionV3, preprocess_input\r\n\r\nfrom keras.models import Model\r\nfrom keras.layers import Dense, GlobalAveragePooling2D\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.optimizers import SGD\r\nfrom PIL import ImageFile\r\nImageFile.LOAD_TRUNCATED_IMAGES = True\r\n\r\n\r\n\r\n\r\ndef get_nb_files(directory):\r\n \"\"\"Get number of files by searching directory recursively\"\"\"\r\n if not os.path.exists(directory):\r\n return 0\r\n cnt = 0\r\n for r, dirs, files in os.walk(directory):\r\n for dr in dirs:\r\n cnt += len(glob.glob(os.path.join(r, dr + \"/*\")))\r\n return cnt\r\n# get_nb_files('/home/ubuntu/keras/animal5/train')\r\n\r\n# 数据准备\r\nIM_WIDTH, IM_HEIGHT = 299, 299 #InceptionV3指定的图片尺寸\r\nFC_SIZE = 1024 # 全连接层的节点个数\r\nNB_IV3_LAYERS_TO_FREEZE = 172 # 冻结层的数量\r\n\r\n\r\n#train_dir = 'C:\\Sunaoxue\\IT_Project/NSM/Data/Train' # 训练集数据\r\n#val_dir = 'C:\\Sunaoxue\\IT_Project/NSM/Data/Valid' # 验证集数据\r\ntrain_dir = '/root/DD/NSM/Train_0530' # 训练集数据\r\nval_dir = '/root/DD/NSM/Valid_0530' # 验证集数据\r\nnb_classes= 25\r\nnb_epoch = 50\r\nbatch_size = 32\r\n\r\nnb_train_samples = get_nb_files(train_dir) # 训练样本个数\r\nnb_classes = len(glob.glob(train_dir + \"/*\")) # 分类数\r\nnb_val_samples = get_nb_files(val_dir) #验证集样本个数\r\nnb_epoch = int(nb_epoch) # epoch数量\r\nbatch_size = int(batch_size)\r\n\r\n# 图片生成器\r\ntrain_datagen = ImageDataGenerator(\r\n preprocessing_function=preprocess_input,\r\n rotation_range=30,\r\n width_shift_range=0.2,\r\n height_shift_range=0.2,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True\r\n)\r\ntest_datagen = ImageDataGenerator(\r\n preprocessing_function=preprocess_input,\r\n rotation_range=30,\r\n width_shift_range=0.2,\r\n height_shift_range=0.2,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True\r\n)\r\n\r\n# 训练数据与测试数据\r\ntrain_generator = train_datagen.flow_from_directory(\r\ntrain_dir,\r\ntarget_size=(IM_WIDTH, IM_HEIGHT),\r\nbatch_size=batch_size,class_mode='categorical')\r\n\r\nvalidation_generator = test_datagen.flow_from_directory(\r\nval_dir,\r\ntarget_size=(IM_WIDTH, IM_HEIGHT),\r\nbatch_size=batch_size,class_mode='categorical')\r\n\r\n# 添加新层\r\ndef add_new_last_layer(base_model, nb_classes):\r\n \"\"\"\r\n 添加最后的层\r\n 输入\r\n base_model和分类数量\r\n 输出\r\n 新的keras的model\r\n \"\"\"\r\n x = base_model.output\r\n x = GlobalAveragePooling2D()(x)\r\n x = Dense(FC_SIZE, activation='relu')(x) #new FC layer, random init\r\n predictions = Dense(nb_classes, activation='softmax')(x) #new softmax layer\r\n model = Model(input=base_model.input, output=predictions)\r\n return model\r\n\r\n# 冻上base_model所有层,这样就可以正确获得bottleneck特征\r\ndef setup_to_transfer_learn(model, base_model):\r\n \"\"\"Freeze all layers and compile the model\"\"\"\r\n for layer in base_model.layers:\r\n layer.trainable = False\r\n model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])\r\n\r\n# 定义网络框架\r\nbase_model = InceptionV3(weights='imagenet', include_top=False) # 预先要下载no_top模型\r\nmodel = add_new_last_layer(base_model, nb_classes) # 从基本no_top模型上添加新层\r\nsetup_to_transfer_learn(model, base_model) # 冻结base_model所有层\r\n\r\n# 模式一训练\r\nhistory_tl = 
model.fit_generator(\r\ntrain_generator,\r\nnb_epoch=nb_epoch,\r\nsamples_per_epoch=nb_train_samples,\r\nvalidation_data=validation_generator,\r\nnb_val_samples=nb_val_samples,\r\nclass_weight='auto')\r\n'''\r\n\r\n\r\n# Freeze the layers below NB_IV3_LAYERS\r\ndef setup_to_finetune(model):\r\n \"\"\"Freeze the bottom NB_IV3_LAYERS and retrain the remaining top layers.\r\n\r\n note: NB_IV3_LAYERS corresponds to the top 2 inception blocks in the inceptionv3 arch\r\n\r\n Args:\r\n model: keras model\r\n \"\"\"\r\n for layer in model.layers[:NB_IV3_LAYERS_TO_FREEZE]:\r\n layer.trainable = False\r\n for layer in model.layers[NB_IV3_LAYERS_TO_FREEZE:]:\r\n layer.trainable = True\r\n model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])\r\n\r\n# Set up the network structure\r\nsetup_to_finetune(model)\r\n\r\n# Phase two: fine-tuning\r\nhistory_ft = model.fit_generator(\r\ntrain_generator,\r\nsamples_per_epoch=nb_train_samples,\r\nnb_epoch=nb_epoch,\r\nvalidation_data=validation_generator,\r\nnb_val_samples=nb_val_samples,\r\nclass_weight='auto')\r\n'''\r\n\r\n# Save the model\r\nmodel.save('Nsm_0602.h5')\r\n","sub_path":"Nsm_test_V3.py","file_name":"Nsm_test_V3.py","file_ext":"py","file_size_in_byte":4730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"488942416","text":"import os\nimport sys\n\ndef fun_one():\n    print(\"Hello Program\")\n\ndef fun_two():\n    print(\"Second_function\")\n\nmy_list = [1,2,3,4,5,6]\n\nprint(\"Hello\" ,\"hi\")\nx= 3*5 + 23*6\n\ncheck = True\n\nif check == True:\n    print(\"my program is True\")\n\nfun_one();\nfun_two();\n\n\n","sub_path":"PEP guidliness.py","file_name":"PEP guidliness.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"578474008","text":"\"\"\"\n@author: Shuyin Chen\n@contact: shuyin_chen@shannonai.com\n\n@version: 1.0\n@file: set seed\n@time: 2019/11/30 14:50\n\"\"\"\n\nimport os\nimport math\nimport time\nimport shutil\nfrom tqdm import tqdm\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim import AdamW\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data import RandomSampler, BatchSampler, DataLoader, SequentialSampler\n\nfrom torch.nn.parallel import DistributedDataParallel\nfrom tensorboardX import SummaryWriter\n\nfrom transformers import get_linear_schedule_with_warmup\n\nfrom bert_ner.utils.logger import logger\nfrom bert_ner.dataset_readers.load_data import SequenceLabelingDataset\nfrom bert_ner.utils.grouped_batch_sampler import GroupedBatchSampler, create_lengths_groups\nfrom bert_ner.evaluators.evaluator_old import Evaluator\nfrom bert_ner.losses.loss import ShannonLoss\n\n\nclass Trainer:\n    \"\"\"\n    Trainer\n    \"\"\"\n    def __init__(self,\n                 params: dict,\n                 model: nn.Module,\n                 tokenizer: nn.Module):\n        logger.info('Initializing Trainer')\n        self.params = params\n        self.data_dir = params.data_dir\n        self.output_dir = params.output_dir\n        self.multi_gpu = params.multi_gpu\n        self.n_gpu = params.n_gpu\n        self.fp16 = params.fp16\n        self.do_eval = params.do_eval\n        self.is_master = params.is_master\n        self.early_stop_patience = params.early_stop_patience\n\n        self.model = model\n        self.tokenizer = tokenizer  # only for saving model\n        self.model_config = model.config\n\n        self.loss = ShannonLoss(self.params.loss_type)\n\n\n        logger.info(f'Loading data from {self.data_dir}')\n        train_dataset = self.load_mmap_dataset(evaluate=False)\n        if 
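The script above targets the Keras 1.x API (`fit_generator`, `samples_per_epoch`, `Model(input=..., output=...)`), all of which later releases removed; note also that the triple-quoted block encloses the fine-tuning phase, so only phase one actually runs. A minimal sketch of the same two-phase transfer-learning recipe against the current `tf.keras` API; the class count and freeze index mirror the constants above, the rest is illustrative:

```python
import tensorflow as tf

base = tf.keras.applications.InceptionV3(weights='imagenet', include_top=False)
x = tf.keras.layers.GlobalAveragePooling2D()(base.output)
x = tf.keras.layers.Dense(1024, activation='relu')(x)
out = tf.keras.layers.Dense(25, activation='softmax')(x)
model = tf.keras.Model(inputs=base.input, outputs=out)

# Phase 1: transfer learning, train only the new head.
base.trainable = False
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# model.fit(train_ds, validation_data=val_ds, epochs=50)

# Phase 2: fine-tuning, unfreeze the top inception blocks with a small learning rate.
for layer in model.layers[172:]:
    layer.trainable = True
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=1e-4, momentum=0.9),
              loss='categorical_crossentropy', metrics=['accuracy'])
# model.fit(train_ds, validation_data=val_ds, epochs=50)
```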
self.do_eval:\n self.evaluator = Evaluator(params, model)\n self.eval_loss_epoch = 0\n self.eval_loss_globel = 1e8\n self.eval_span_f1 = 0.\n self.cur_eval_span_f1 = 0.\n self.patience = 0\n self.best_k = -1\n self.save_best_checkpoints = True\n self.last_k = self.params.last_k_checkpoints if self.params.last_k_checkpoints > 0 else self.params.n_epoch\n self.eval_result_path = os.path.join(self.output_dir, 'eval_result.txt')\n with open(self.eval_result_path, 'w') as eval_result_f:\n eval_result_f.write(\"eval_result: \" + '\\n')\n\n\n if params.n_gpu <= 1:\n # sampler = RandomSampler(train_dataset)\n sampler = SequentialSampler(train_dataset)\n else:\n sampler = DistributedSampler(train_dataset)\n\n if params.group_by_size:\n groups = create_lengths_groups(lengths=train_dataset.lengths, k=512)\n sampler = GroupedBatchSampler(sampler=sampler, group_ids=groups, batch_size=params.train_batch_size)\n else:\n sampler = BatchSampler(sampler=sampler, batch_size=params.train_batch_size, drop_last=False)\n\n self.train_dataloader = DataLoader(dataset=train_dataset,\n batch_sampler=sampler,\n num_workers=self.params.num_workers)\n\n self.epoch = 0\n self.last_loss = 0\n self.total_loss_epoch = 0\n self.total_loss_globel = 0\n self.n_step_epoch = 0\n self.n_step_globel = 0\n self.n_sequences_epoch = 0\n self.last_log = 0\n\n\n logger.info('--- Initializing model optimizer')\n assert params.gradient_accumulation_steps >= 1\n self.num_steps_epoch = len(self.train_dataloader)\n self.t_total = int(\n self.num_steps_epoch / params.gradient_accumulation_steps * params.n_epoch) + 1\n\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if\n not any(nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': params.weight_decay},\n {'params': [p for n, p in model.named_parameters() if\n any(nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': 0.0}\n ]\n\n if self.is_master:\n logger.info(\"------ Number of trainable parameters (model): %i\" % sum([p.numel() for p in self.model.parameters() if p.requires_grad]))\n logger.info(\"------ Number of parameters (model): %i\" % sum([p.numel() for p in self.model.parameters()]))\n\n self.optimizer = AdamW(optimizer_grouped_parameters,\n lr=params.learning_rate,\n eps=params.adam_epsilon,\n betas=(0.9, 0.98))\n\n self.warmup_steps = math.ceil(self.t_total * params.warmup_prop)\n self.scheduler = get_linear_schedule_with_warmup(self.optimizer,\n num_warmup_steps=self.warmup_steps,\n num_training_steps=self.t_total)\n\n if self.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n logger.info(f\"Using fp16 training: {self.params.fp16_opt_level} level\")\n self.model, self.optimizer = amp.initialize(self.model,\n self.optimizer,\n opt_level=self.params.fp16_opt_level)\n\n if self.multi_gpu:\n logger.info(\"Using nn.parallel.DistributedDataParallel for distributed training.\")\n self.model = DistributedDataParallel(self.model,\n device_ids=[params.local_rank],\n output_device=params.local_rank,\n find_unused_parameters=True)\n\n if self.is_master:\n logger.info('--- Initializing Tensorboard')\n self.tensorboard = SummaryWriter(log_dir=os.path.join(self.output_dir, 'log', 'train'))\n self.tensorboard.add_text(tag='config/training', text_string=str(self.params), global_step=0)\n self.tensorboard.add_text(tag='config/model', text_string=str(self.model_config), 
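The optimizer setup above is the standard BERT fine-tuning pattern: biases and LayerNorm weights are excluded from weight decay, and AdamW is driven by a linear warmup-then-decay schedule. The same pattern in isolation (the tiny stand-in model and the step counts are placeholders):

```python
import torch.nn as nn
from torch.optim import AdamW
from transformers import get_linear_schedule_with_warmup

model = nn.Linear(10, 2)  # stand-in for the real network
no_decay = ('bias', 'LayerNorm.weight')
groups = [
    {'params': [p for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
    {'params': [p for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
]
optimizer = AdamW(groups, lr=3e-5, eps=1e-8, betas=(0.9, 0.98))
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=100, num_training_steps=1000)
# call scheduler.step() once per optimizer step, right after optimizer.step()
```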
global_step=0)\n\n\n    def load_mmap_dataset(self, evaluate=False):\n        \"\"\"load_mmap_dataset\"\"\"\n        phase = \"dev\" if evaluate else \"train\"\n        return SequenceLabelingDataset(directory=os.path.join(self.data_dir, \"bin\"), prefix=phase)\n\n\n    def train(self):\n        \"\"\"\n        The real training loop.\n        \"\"\"\n        if self.is_master:\n            logger.info(\"***** Running training *****\")\n            logger.info(\"  Num examples = %d\", self.num_steps_epoch)\n            logger.info(\"  Num Epochs = %d\", self.params.n_epoch)\n            logger.info(\"  Num GPUs = %d\", self.params.n_gpu)\n            logger.info(\"  Total warmup steps = %d\", self.warmup_steps)\n            logger.info(\"  Total optimization steps = %d\", self.t_total)\n            logger.info(\"  Gradient Accumulation steps = %d\", self.params.gradient_accumulation_steps)\n            logger.info(\"  Instantaneous batch size per GPU = %d\", self.params.train_batch_size)\n            logger.info(\"  Total train batch size (w. parallel, distributed & accumulation) = %d\",\n                        self.params.train_batch_size * self.params.gradient_accumulation_steps * self.params.n_gpu)\n\n        self.last_log = time.time()\n        self.model.train()\n        self.model.zero_grad()\n\n        for _ in range(self.params.n_epoch):\n            if self.is_master: logger.info(f'--- Starting epoch {self.epoch}/{self.params.n_epoch-1}')\n            # if self.multi_gpu:\n            #     torch.distributed.barrier()\n\n            iter_bar = tqdm(self.train_dataloader, desc=\"-Iter\", disable=self.params.local_rank not in [-1, 0])\n            for batch in iter_bar:\n                if self.params.n_gpu > 0:\n                    batch = tuple(t.to(f'cuda:{self.params.local_rank}') for t in batch)\n                inputs = {'input_ids': batch[0],\n                          'labels': batch[1],\n                          'label_mask': batch[2],\n                          'token_type_ids': batch[4],\n                          'attention_mask': batch[3],\n                          }\n                self.step(inputs)\n\n                iter_bar.update()\n                current_lr = self.scheduler.get_lr()[0]\n                iter_bar.set_postfix({'lr': f'{current_lr:.5f}',\n                                      'loss_cur': f'{self.last_loss:.3f}',\n                                      'loss_epo': f'{self.total_loss_epoch * self.params.gradient_accumulation_steps / self.n_step_epoch:.3f}',\n                                      'loss_glo': f'{self.total_loss_globel * self.params.gradient_accumulation_steps / self.n_step_globel:.3f}'})\n            iter_bar.close()\n\n            if self.is_master:\n                logger.info(f'--- Ending epoch {self.epoch} / {self.params.n_epoch-1}')\n            self.end_epoch()\n\n            if self.early_stop_patience > 0 and self.patience > self.early_stop_patience:\n                print(\"training stopped because of early stopping!!!\")\n                break\n\n\n    def step(self, inputs):\n        \"\"\"\n        One optimization step: forward pass of the model, backward on the loss (for gradient accumulation),\n        and possibly a parameter update (depending on the gradient accumulation).\n\n        Input:\n        ------\n        input_ids: `torch.tensor(bs, seq_length)` - The token ids.\n        labels: `torch.tensor(bs, seq_length)`\n        label_mask: `torch.tensor(bs, seq_length)`\n        attention_mask: `torch.tensor(bs, seq_length)` - The attention mask for self attention.\n        token_type_ids: `torch.tensor(bs, seq_length)`\n        \"\"\"\n        outputs = self.model(**inputs)\n        logits = outputs[0]\n        labels = inputs['labels']\n        mask = inputs['label_mask']\n\n        # TODO: note that for bert_crf_tagger the loss must be computed inside the model\n        loss = self.calculate_loss(logits, labels, mask)\n        assert loss.item() >= 0\n\n        self.n_sequences_epoch += inputs['input_ids'].size(0)\n        self.optimize(loss)\n        self.end_step()\n\n\n    def calculate_loss(self, logits=None, labels=None, mask=None):\n        \"\"\"\n        calculate loss using self.loss\n        \"\"\"\n        if mask is not None:\n            loss_mask = (mask == 1).float()\n        else:\n            loss_mask = None\n        loss = self.loss(logits, labels, loss_mask)\n        return loss\n\n\n    def optimize(self, loss):\n        \"\"\"\n        
Normalization on the loss (gradient accumulation or distributed training), followed by\n backward pass on the loss, possibly followed by a parameter update (depending on the gradient accumulation).\n Also update the metrics for tensorboard.\n \"\"\"\n # Check for NaN\n if (loss != loss).data.any():\n logger.error('NaN detected')\n exit()\n\n if self.multi_gpu:\n loss = loss.mean()\n if self.params.gradient_accumulation_steps > 1:\n loss = loss / self.params.gradient_accumulation_steps\n\n if self.fp16:\n from apex import amp\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n self.last_loss = loss.item()\n self.total_loss_epoch += loss.item()\n self.total_loss_globel += loss.item()\n\n if (self.n_step_epoch + 1) % self.params.gradient_accumulation_steps == 0:\n if self.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.params.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.params.max_grad_norm)\n self.optimizer.step()\n self.optimizer.zero_grad()\n self.scheduler.step()\n\n self.n_step_epoch += 1\n self.n_step_globel += 1\n\n\n def end_step(self):\n \"\"\"\n write to tensorboard and save checkpoint.\n \"\"\"\n if self.params.log_interval > 0 and self.n_step_globel % self.params.log_interval == 0:\n self.log_tensorboard()\n self.last_log = time.time()\n if self.params.checkpoint_interval > 0 and self.n_step_globel % self.params.checkpoint_interval == 0:\n self.save_checkpoint()\n\n\n def log_tensorboard(self):\n \"\"\"\n Log into tensorboard. Only by the master process.\n \"\"\"\n if not self.is_master:\n return\n\n for param_name, param in self.model.named_parameters():\n self.tensorboard.add_scalar(tag='parameter_mean/' + param_name, scalar_value=param.data.mean(), global_step=self.n_step_globel)\n self.tensorboard.add_scalar(tag='parameter_std/' + param_name, scalar_value=param.data.std(), global_step=self.n_step_globel)\n if param.grad is None:\n continue\n self.tensorboard.add_scalar(tag=\"grad_mean/\" + param_name, scalar_value=param.grad.data.mean(), global_step=self.n_step_globel)\n self.tensorboard.add_scalar(tag=\"grad_std/\" + param_name, scalar_value=param.grad.data.std(), global_step=self.n_step_globel)\n\n self.tensorboard.add_scalar(tag=\"losses/cum_avg_loss_epoch\", scalar_value=self.total_loss_epoch / self.n_step_epoch, global_step=self.n_step_globel)\n self.tensorboard.add_scalar(tag=\"losses/loss\", scalar_value=self.last_loss, global_step=self.n_step_globel)\n self.tensorboard.add_scalar(tag=\"learning_rate/lr\", scalar_value=self.scheduler.get_lr()[0], global_step=self.n_step_globel)\n\n\n def make_dirs(self, path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n\n def save_checkpoint(self, save_for_best=False):\n \"\"\"\n Save the best and last K checkpoints.\n Only by the master process.\n \"\"\"\n if not self.is_master:\n return\n\n logger.info(f\"******* Saving checkpoints for epoch {self.epoch} *******\")\n model_to_save = self.model.module if hasattr(self.model, 'module') else self.model\n\n if save_for_best == True:\n best_checkpoint_path = os.path.join(self.output_dir, 'checkpoints_best')\n self.make_dirs(best_checkpoint_path)\n model_to_save.save_pretrained(best_checkpoint_path)\n self.tokenizer.save_pretrained(best_checkpoint_path)\n logger.info(\"Saving ***best*** checkpoint to %s\", best_checkpoint_path)\n\n cur_checkpoint_path = os.path.join(self.output_dir, f'checkpoint_{self.epoch}')\n 
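Stripped of the fp16 and multi-GPU branches, `optimize` above is the textbook gradient-accumulation loop: divide each micro-batch loss by the accumulation factor, backpropagate on every call, and clip/step/zero only on every N-th call. A self-contained sketch (N, the model, and the data are placeholders):

```python
import torch

N = 4  # gradient accumulation steps
model = torch.nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for step, batch in enumerate(torch.randn(16, 8).split(2)):
    loss = model(batch).mean() / N   # normalize so N micro-batches act as one batch
    loss.backward()                  # gradients accumulate in .grad
    if (step + 1) % N == 0:
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        optimizer.zero_grad()
```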
self.make_dirs(cur_checkpoint_path)\n model_to_save.save_pretrained(cur_checkpoint_path)\n self.tokenizer.save_pretrained(cur_checkpoint_path)\n logger.info(\"Saving last checkpoint to %s\", cur_checkpoint_path)\n\n if self.epoch >= self.last_k:\n past_checkpoint_path = os.path.join(self.output_dir, f'checkpoint_{self.epoch-self.last_k}')\n shutil.rmtree(past_checkpoint_path, ignore_errors=True)\n logger.info(\"!!!Removing!!! model checkpoint %s\", past_checkpoint_path)\n\n if self.do_eval:\n with open(self.eval_result_path, 'a') as eval_result_f:\n result = f\"epoch: {self.epoch}\\t\"\n result += f\"loss: {self.eval_loss_epoch}\\t\"\n result += f\"span f1: {self.cur_eval_span_f1}\\t\"\n result += f\"best={self.best_k}\\t\"\n eval_result_f.write(result + '\\n')\n\n if self.epoch == self.params.n_epoch-1:\n eval_result_f.write(f\"\\nbest epoch: {self.best_k}\\t best span f1: {self.eval_span_f1}\\n\")\n\n label_map_path = os.path.join(self.output_dir, 'label_map.txt')\n with open(label_map_path, 'w') as label_map_f:\n for label in self.evaluator.label_list:\n label_map_f.write(label + '\\n')\n\n\n\n def end_epoch(self):\n \"\"\"\n Finally arrived at the end of epoch (full pass on dataset).\n Do some tensorboard logging and checkpoint saving.\n \"\"\"\n if self.is_master:\n logger.info(f'{self.n_sequences_epoch} sequences have been trained during this epoch.')\n\n if self.do_eval:\n save_for_best = False\n print(\"********** Eval start **********\")\n self.eval_loss_epoch, results = self.evaluator.eval()\n self.cur_eval_span_f1 = results['span-f1']\n print(\"********** Eval end **********\")\n\n print(\"cur_eval_span_f1: \", self.cur_eval_span_f1)\n print(\"self.eval_span_f1: \", self.eval_span_f1)\n\n if self.cur_eval_span_f1 > self.eval_span_f1:\n self.patience = 0\n self.eval_span_f1 = self.cur_eval_span_f1\n self.best_k = self.epoch\n save_for_best = True\n\n self.save_checkpoint(save_for_best=save_for_best)\n self.patience += 1\n\n # print(\"eval_loss_epoch: \", self.eval_loss_epoch)\n # print(\"eval_loss_globel: \", self.eval_loss_globel)\n\n # if self.eval_loss_epoch < self.eval_loss_globel:\n # self.eval_loss_globel = self.eval_loss_epoch\n # self.save_checkpoint()\n\n self.tensorboard.add_scalar(tag='epoch/loss', scalar_value=self.total_loss_epoch/self.n_step_epoch, global_step=self.epoch)\n\n self.epoch += 1\n self.n_sequences_epoch = 0\n self.n_step_epoch = 0\n self.total_loss_epoch = 0","sub_path":"bert_ner/trainers/trainer_old.py","file_name":"trainer_old.py","file_ext":"py","file_size_in_byte":17828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"244716148","text":"import shutil\nimport os\n\n\nclass Paste:\n def __init__(self, **kwargs):\n self.dst_dir = kwargs.get('paste_dir', None)\n\n @staticmethod\n def is_valid_path(path):\n if os.path.isdir(path):\n return True\n else:\n return False\n\n @staticmethod\n def is_valid_file(file_name):\n if os.path.isfile(file_name):\n return True\n else:\n return False\n\n def paste_dir(self):\n if self.is_valid_path(self.dst_dir):\n temp = os.path.join(os.getenv('HOMEPATH'), '.tmp')\n shutil.move(temp, self.dst_dir)\n elif self.is_valid_file(self.dst_dir):\n self.paste_file()\n\n def paste_file(self):\n if self.is_valid_file(self.dst_dir):\n temp = os.path.join(os.getenv('HOMEPATH'), '.txt')\n shutil.move(temp, self.dst_dir)\n elif self.is_valid_file(self.dst_dir):\n 
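`save_checkpoint` above keeps a sliding window of epoch checkpoints: it writes `checkpoint_{epoch}`, and once `epoch >= last_k` it deletes `checkpoint_{epoch - last_k}`. The rotation on its own, with illustrative paths:

```python
import os
import shutil

def rotate_checkpoints(output_dir, epoch, last_k):
    """Create checkpoint_{epoch} and drop the directory that falls out of the window."""
    os.makedirs(os.path.join(output_dir, f'checkpoint_{epoch}'), exist_ok=True)
    if epoch >= last_k:
        stale = os.path.join(output_dir, f'checkpoint_{epoch - last_k}')
        shutil.rmtree(stale, ignore_errors=True)
```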
self.paste_dir()\n","sub_path":"new_paste.py","file_name":"new_paste.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"478741536","text":"import os, shutil, pkg_resources, subprocess, sys\n\ndef parse_fid(f):\n    #filename is FILENAME.bam, i.e. the fid is everything before the extension\n    parsed = str(f).split(\".\")\n    return parsed[0]\n\n#ensure that all files in starting directory only have one period, for the filename's extension, i.e. FILENAME.bam\ndef correct_format(f):\n    return str(f).count('.') < 2\n\n#checks that directory exists and mito directory contains the \"steps\" folder\ndef is_valid_directories(directory, tools, refs, steps, softwares):\n    if not os.path.isdir(str(directory)):\n        raise ValueError('Building the pipeline requires a file/directory to run on')\n    if not os.path.isdir(str(refs)) and (\"gatk\" in steps or \"removenumts\" in steps):\n        raise ValueError('GATK and RemoveNuMTs steps require a directory for the reference genomes')\n    \n#checks that the file format follows our naming convention\ndef check_file_format(directory):\n    for f in os.listdir(directory):\n        #ignore hidden files\n        if not f.startswith('.') and not correct_format(f) and \"bai\" not in f:\n            raise ValueError(\n                \"All files saved in user-specified directory must follow the format 'FILENAME.bam' with NO periods allowed in FILENAME\")\n\n#check that all tools required in steps are in the tools directory\ndef check_tools_exist(tools_dir, steps, dependencies):\n    for step in steps:\n        for dep in dependencies[step]:\n            if not found_loc(dep, tools_dir):\n                raise ValueError('Can\\'t find ' + dep + ' in ' + tools_dir + \". Please download using -d option or make sure your tools directory has a folder called \" + dep)\n\n#function to check annovar dependencies, can't make it general since the files are specific\ndef is_annovar_downloaded(software, tools_dir):\n    return os.path.isfile(tools_dir + \"/\" + software)\n    \ndef found_loc(software, tools_dir):\n    #if available from command line\n    if (software == 'samtools' or software == 'bwa'):\n        if shutil.which(software):\n            return True\n        else:\n            raise ValueError(software + \" is not able to be run from the command line. 
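Note that the `Paste` helper above stages data under two different names (`.tmp` for directories, `.txt` for files) and each method's `elif` re-tests the destination before delegating to the other method, so a bad destination can bounce between the two. A tightened sketch, under the assumption that a single staging path was intended (the `paste` method name is mine):

```python
import os
import shutil

class Paste:
    def __init__(self, **kwargs):
        self.dst = kwargs.get('paste_dir')
        home = os.getenv('HOMEPATH', os.path.expanduser('~'))
        self.staging = os.path.join(home, '.tmp')  # assumed single staging location

    def paste(self):
        # Dispatch once on the destination type instead of cross-calling per method.
        if os.path.isdir(self.dst) or os.path.isfile(self.dst):
            shutil.move(self.staging, self.dst)
        else:
            raise FileNotFoundError(self.dst)
```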
Please refer to documentation on instructions for how to set up \" + software + \" or 'module load' it if your server uses Lmod\")\n    elif 'GenomeAnalysisTK' in software:\n        return is_downloaded(software, tools_dir + \"/gatk\")\n    elif 'snpEff' in software:\n        return is_downloaded(software, tools_dir + \"/snpEff\")\n    elif 'annovar' in software:\n        return is_annovar_downloaded(software, tools_dir + \"/annovar\")\n    else:\n        return is_downloaded(software, tools_dir + \"/\" + software)\n\ndef get_dir_name(software, dir):\n    for name in os.listdir(dir):\n        if os.path.isdir(dir + \"/\" + name) and software in name:\n            return dir + \"/\" + name\n    return None\n\ndef is_exe(fpath):\n    return os.path.isfile(fpath)\n    # and os.access(fpath, os.X_OK)\n\n#checks if there is an executable called on the path\\\n#software should be the software executable name\ndef is_downloaded(software, dir):\n    return is_exe(dir + \"/\" + software)\n\n#creates subdirectories for all the requested steps within the specified output directory\ndef make_subdirectories(output, task_names, steps, slurm):\n    #create output folder that holds the mitopipeline output in the tool's directory\n    if not os.path.isdir(output):\n        os.makedirs(output)\n    #TODO: fill in subdirectories for parts within each step\n    subdirectories = {'removenumts': ['fastqs', 'pileups', 'numt_removal_stor', 'counts'],\n                        'splitgap': [],\n                        'clipping': [],\n                        'extractmito': [],\n                        'downsample': [],\n                        'gatk': ['gatk_stor'],\n                        'annovar': [],\n                        'haplogrep': [],\n                        'snpeff': [],\n                        }\n    for step in steps:\n        folder_name = 0\n        task_folder = output + \"/\" + task_names[step][folder_name]\n        if not os.path.isdir(task_folder):\n            os.makedirs(task_folder)\n        for sub in subdirectories[step]:\n            task_subfolder = task_folder + \"/\" + sub\n            if not os.path.isdir(task_subfolder):\n                os.makedirs(task_subfolder)\n    if slurm:\n        os.makedirs(output + \"/slurm\")\n\n#returns either all of the softwares after gatk or the latest step before\ndef get_wrapper_tasks(task_names, steps, softwares):\n    folder_name = 0\n    tasks = list(task_names[step][folder_name] for step in steps if step in softwares)\n    if not tasks:\n        for task_name in reversed(list(task_names.keys())):\n            #return the latest task that is not a software step\n            if task_name not in softwares and task_name in steps:\n                #return the name of function in template instead of the step name\n                return [task_names[task_name][folder_name]]\n    #if snpeff and/or annovar are in tasks\n    elif len(tasks) > 1 and \"GATK\" in tasks:\n        tasks.remove(\"GATK\")\n    return tasks\n    \ndef which(file):\n    if shutil.which(file):\n        return True\n    for path in os.environ[\"PATH\"].split(os.pathsep):\n        if is_exe(os.path.join(path, file)):\n            return True\n    return False\n\ndef execute(cmd):\n    popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)\n    for stdout_line in iter(popen.stdout.readline, \"\"):\n        yield stdout_line\n    popen.stdout.close()\n    return_code = popen.wait()\n    if return_code:\n        raise subprocess.CalledProcessError(return_code, cmd)\n\n\ndef query_yes_no(question, default=\"yes\"):\n    \"\"\"Ask a yes/no question via raw_input() and return their answer.\n\n    \"question\" is a string that is presented to the user.\n    \"default\" is the presumed answer if the user just hits <Enter>.\n    It must be \"yes\" (the default), \"no\" or None (meaning\n    an answer is required of the user).\n\n    The \"answer\" return value is True for \"yes\" or False for \"no\".\n    \"\"\"\n    valid = {\"yes\": True, \"y\": True, \"ye\": True,\n             \"no\": False, \"n\": False}\n    if default is None:\n        
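On Python 3.3+, `shutil.which` already performs the PATH walk that `which` re-implements below it; the manual loop only adds value because `is_exe` here deliberately skips the executable-bit check (the `os.access` test is commented out), which suits jar-style tools. Reduced to its essentials:

```python
import os
import shutil

def which(file):
    """True if file resolves on PATH, or merely exists there without the exec bit."""
    if shutil.which(file):
        return True
    return any(os.path.isfile(os.path.join(p, file))
               for p in os.environ.get('PATH', '').split(os.pathsep))
```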
prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n\nclass cd:\n \"\"\"Context manager for changing the current working directory\"\"\"\n\n def __init__(self, newPath):\n self.newPath = os.path.expanduser(newPath)\n\n def __enter__(self):\n self.savedPath = os.getcwd()\n os.chdir(self.newPath)\n\n def __exit__(self, etype, value, traceback):\n os.chdir(self.savedPath)\n","sub_path":"mitopipeline/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":6991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"576977006","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom multiprocessing import Pool\nimport requests\nimport pandas as pd\nimport math\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport plotly\nfrom plotly.subplots import make_subplots\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom tqdm import tqdm\nimport imageio\nimport json\nimport locale\nimport src.france.berryllium_france_data_management as data\nimport numpy as np\nimport cv2\n\nlocale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8')\ncolors = px.colors.qualitative.D3 + plotly.colors.DEFAULT_PLOTLY_COLORS + px.colors.qualitative.Plotly + px.colors.qualitative.Dark24 + px.colors.qualitative.Alphabet\nshow_charts = False\nPATH_STATS = \"../../data/france/stats/\"\nPATH = \"../../\"\nnow = datetime.now()\nimport pandas as pd\n\n\n# In[2]:\n\n\nconfirmed_cases_url = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv\"\n \n# Dataset is now stored in a Pandas Dataframe\n\nrecovered_cases_url = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv\"\n\n# Dataset is now stored in a Pandas Dataframe\n\ndeath_cases_url = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv\"\n \n# Dataset is now stored in a Pandas Dataframe\n\n\n# In[3]:\n\n\ndef get_n_melt_data(data_url,case_type):\n df = pd.read_csv(data_url)\n melted_df = df.melt(id_vars=['Province/State', 'Country/Region', 'Lat', 'Long'])\n melted_df.rename(columns={\"variable\":\"Date\",\"value\":case_type},inplace=True)\n return melted_df\n\ndef merge_data(confirm_df,recovered_df,deaths_df):\n\tnew_df = confirm_df.join(recovered_df['Recovered']).join(deaths_df['Deaths'])\n\treturn new_df\n\n\n# In[4]:\n\n\nconfirm_df = get_n_melt_data(confirmed_cases_url,\"Confirmed\")\nrecovered_df = get_n_melt_data(recovered_cases_url,\"Recovered\")\ndeaths_df = get_n_melt_data(death_cases_url,\"Deaths\")\n\n\n# In[5]:\n\n\nconfirm_df.tail()\n\n\n# In[6]:\n\n\ndf = merge_data(confirm_df,recovered_df,deaths_df)\n\n\n# In[7]:\n\n\ndf.head()\n\n\n# In[8]:\n\n\ndf = df[['Country/Region','Date','Confirmed','Recovered','Deaths']]\n\n\n# In[9]:\n\n\ndf.tail()\n\n\n# In[10]:\n\n\ndf = df.groupby(\"Date\")[['Confirmed','Recovered', 'Deaths']].sum()\n\n\n# 
In[11]:\n\n\ndf.tail()\n\n\n# In[12]:\n\n\ndf_per_day = df.groupby(\"Date\")[['Confirmed','Recovered', 'Deaths']].sum()\ndf_per_day.tail()\n\n\n# In[13]:\n\n\ndf_per_day.plot(kind='line',figsize=(20,5))\n\n\n# In[14]:\n\n\n#Facebook Forecasting Library\nimport fbprophet\n\n\n# In[15]:\n\n\n# Model Initialize\nfrom fbprophet import Prophet\nm = Prophet()\n\n\n# In[16]:\n\n\nm.add_seasonality(name=\"monthly\",period=30.5,fourier_order=5)\n\n\n# In[17]:\n\n\n# Split Dataset\ndf\n\n\n# In[18]:\n\n\nWorld_cases = df.reset_index()\n\n\n# In[19]:\n\n\nWorld_cases.head()\n\n\n# In[20]:\n\n\nWorld_cases.tail()\n\n\n# In[21]:\n\n\nconfirmed_cases = World_cases[[\"Date\",\"Confirmed\"]]\nrecovered_cases = World_cases[[\"Date\",\"Recovered\"]]\n\n\n# In[22]:\n\n\nconfirmed_cases.shape\n\n\n# In[23]:\n\n\nconfirmed_cases.rename(columns={\"Date\":\"ds\",\"Confirmed\":\"y\"},inplace=True)\n\n\n# In[34]:\n\n\ntrain = confirmed_cases[:6]\ntest = confirmed_cases[6:]\n\n\n# In[25]:\n\n\ntrain \n\n\n# In[26]:\n\n\ntest\n\n\n# In[27]:\n\n\n# Fit Model\nm.fit(train)\n\n\n# In[28]:\n\n\n# Future Date\nfuture_dates = m.make_future_dataframe(periods=200)\n\n\n# In[29]:\n\n\nfuture_dates\n\n\n# In[30]:\n\n\n# Prediction\nprediction = m.predict(future_dates)\n\n\n# In[31]:\n\n\n# Plot Prediction\nm.plot(prediction)\n\n\n# In[32]:\n\n\n# Find Point/Dates For Change\nfrom fbprophet.plot import add_changepoints_to_plot\n\n\n# In[33]:\n\n\nfig = m.plot(prediction)\nc = add_changepoints_to_plot(fig.gca(),m,prediction)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"src/Berryllium_Forecasting/Berryllium_world_pred.py","file_name":"Berryllium_world_pred.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"393689796","text":"# Copyright (c) 2017-2020 Digital Asset (Switzerland) GmbH and/or its affiliates. 
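The forecasting cells above follow the standard Prophet workflow: rename the series to the mandatory `ds`/`y` columns, fit, extend the frame with `make_future_dataframe`, and predict. The whole loop in miniature (synthetic data; newer releases ship the package as `prophet` rather than `fbprophet`):

```python
import pandas as pd
from fbprophet import Prophet

df = pd.DataFrame({'ds': pd.date_range('2020-01-22', periods=60),
                   'y': range(60)})        # synthetic cumulative counts
m = Prophet()
m.add_seasonality(name='monthly', period=30.5, fourier_order=5)
m.fit(df)
future = m.make_future_dataframe(periods=30)
forecast = m.predict(future)
print(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail())
```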
All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"\nPretty print format that orders templates by ID, and attempts to render a more compact\nrepresentation.\n\"\"\"\n\nimport re\n\nfrom collections import defaultdict\nfrom typing import Iterable, Optional\n\nfrom .client_app import LedgerCapture, LedgerCaptureEntry\nfrom ...util.termcap import termsize\nfrom ...model.types import Type, RecordType\n\nBOX_B3 = '|'\nBOX_C4 = '-'\nBOX_DA = '+'\n\n\n# a list of template parameter names that are long and make it tough to understand\n# what is going on when written out as a single line\n_COMMON_PREFIXES = ['invitedAs']\n\n\nclass _TemplateKey:\n \"\"\"\n Type that identifies templates for display purposes.\n \"\"\"\n @staticmethod\n def unknown(identifier: str) -> '_TemplateKey':\n return _TemplateKey(identifier, None)\n\n @staticmethod\n def for_record_type(record_type: RecordType) -> '_TemplateKey':\n return _TemplateKey(str(record_type.name.con), record_type)\n\n def __init__(self, template_name: str, data_type: Optional[Type]):\n self.template_name = template_name\n self.data_type = data_type\n\n\ndef format_error(error: str) -> Iterable[str]:\n return [error]\n\n\ndef format_entries(capture: LedgerCapture,\n parties: Iterable[str],\n entries: Optional[Iterable[LedgerCaptureEntry]]=None,\n color: bool=True,\n columns: Optional[int]=None) -> Iterable[str]:\n if columns is None:\n _, columns = termsize()\n if columns is None:\n columns = 120\n\n sort = ByPartySort(parties)\n\n entries_by_template = defaultdict(list)\n if entries is None:\n entries = capture\n\n for entry in entries:\n entries_by_template[entry.template_id].append(entry)\n\n grouped_entries = []\n for template_id, entries in entries_by_template.items():\n # try to resolve a template for the template ID\n template_type = None\n if capture.store is not None:\n candidates = capture.store.resolve_template_type(template_id)\n if len(candidates) == 1:\n template_type = next(iter(candidates.values()))\n\n # if we can successfully resolve metadata and render instances of this template nicely,\n # then use that rendering; otherwise resort to a rougher display\n key = _TemplateKey.for_record_type(template_type) if template_type is not None \\\n else _TemplateKey.unknown(template_id)\n grouped_entries.append((key, entries))\n\n grouped_entries.sort(key=lambda t: t[0].template_name)\n\n entry_count = sum(len(entries) for entries in entries_by_template.values())\n\n yield \"{} total contracts over {} templates\".format(entry_count, len(entries_by_template))\n yield from party_header(parties)\n for template, entries in grouped_entries:\n # for each column for all entries in the data set, determine the most\n # compact representation here\n yield ''\n\n renderer = _TemplateEntryRenderer(template, parties)\n renderer.measure(entries)\n\n yield from renderer.render_header(color=color, columns=columns)\n yield from renderer.render_entries(sorted(entries, key=sort.key))\n\n\ndef party_header(parties):\n parties = tuple(parties)\n party_count = len(parties)\n\n for index, party in enumerate(parties):\n ascii_art = (BOX_B3 * index) + BOX_DA + (BOX_C4 * (party_count - index - 1))\n yield f\"{ascii_art} party '{party}'\"\n yield (('|' * party_count))\n\n\nclass ByPartySort:\n def __init__(self, parties):\n self.parties = parties\n\n def key(self, entry):\n party_vis = [1 if entry.parties.get(party) is not None else 0 for party in self.parties]\n return sum(party_vis), ''.join(map(str, reversed(party_vis)))\n\n\ndef 
split_header_name(name, max_length=None):\n \"\"\"\n Splits a name in a header so that it is as short as possible while still being readable.\n \"\"\"\n candidate = [name]\n for prefix in _COMMON_PREFIXES:\n if name.startswith(prefix):\n candidate = [prefix, name[len(prefix):]]\n break\n\n if max_length is not None and any(len(line) > max_length for line in candidate):\n # this is still a little long; try slicing on all uppercase letters\n split_half_index = 1 if len(candidate) >= 1 else 0\n\n substitution = candidate[0:split_half_index]\n if split_half_index < len(candidate):\n next_upper = re.match('[A-Z]', candidate[split_half_index])\n if next_upper is not None:\n idx = next_upper.start(0)\n if idx > 0:\n substitution.append(candidate[split_half_index][0:idx])\n for line in candidate[split_half_index:]:\n substitution.extend(re.findall('[A-Z][^A-Z]*', line))\n return substitution\n\n return candidate\n\n\nclass _TemplateEntryRenderer:\n \"\"\"\n A template-specific renderer.\n \"\"\"\n\n def __init__(self, template: _TemplateKey, parties):\n self.max_widths = {}\n if template.data_type is not None:\n self.headers = [_Header(name, param, parties)\n for name, param in template.data_type.as_args_list()]\n self.template_name = template.template_name\n else:\n self.headers = [_Header(\".cdata\", None, parties)]\n self.template_name = template.template_name\n\n self.headers.insert(0, _Header(\".time\", None, parties))\n self.headers.insert(0, _Header(\".cid\", None, parties))\n self.headers.insert(0, _Header(\".party\", None, parties, colsize=len(parties)))\n self.entry_count = 0\n\n def measure(self, entries):\n for entry in entries:\n self.entry_count += 1\n for header in self.headers:\n header.measure_cell(entry)\n\n def render_header(self, color, columns=None):\n \"\"\"\n Return an iterable over header rows.\n \"\"\"\n if self.entry_count == 1:\n template_header_row = '{!s} (1 contract)'.format(self.template_name)\n else:\n template_header_row = '{!s} ({!s} contracts)'.format(self.template_name, self.entry_count)\n\n if columns is not None and len(template_header_row) < (columns - 1):\n yield template_header_row + ' ' + ''.rjust(columns - len(template_header_row) - 1, '-')\n else:\n yield template_header_row\n\n header_row_height = max(len(header.header_lines) for header in self.headers)\n for i in range(header_row_height):\n yield ' '.join(header.header_cell(i, header_row_height) for header in self.headers)\n\n def render_entries(self, entries):\n \"\"\"\n Return an iterable that renders entries in order of their visibility to parties.\n \"\"\"\n for entry in entries:\n yield ' '.join(header.render_cell(entry) for header in self.headers)\n\n\nclass _Header:\n def __init__(self, name, param, parties, colsize=None):\n self.name = name\n\n if name == '.cid':\n self.header_lines = ['#cid']\n self._value_from_entry = lambda entry: entry.contract_id\n elif name == '.time':\n self.header_lines = ['#time']\n self._value_from_entry = lambda entry: entry.time\n elif name == '.party':\n self.header_lines = ['']\n self._value_from_entry = lambda entry: render_parties(parties, entry)\n elif name == '.cdata':\n self.header_lines = ['#cdata (metadata missing)']\n self._value_from_entry = lambda entry: entry.contract_args\n else:\n self.header_lines = split_header_name(name, 8 if hasattr(param, 'name') and param.name == 'Bool' else None)\n self._value_from_entry = lambda entry: entry.contract_args[name] if entry.contract_args is not None else None\n\n self.colsize = colsize if colsize is not None else 
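The fallback path in `split_header_name` slices off any leading lowercase run via the `re.match('[A-Z]', ...)` offset, then breaks the remainder on capital letters with `re.findall('[A-Z][^A-Z]*', line)`. The pattern's behaviour, which is what makes the prefix handling necessary:

```python
import re

print(re.findall('[A-Z][^A-Z]*', 'InvitedAsObserver'))
# ['Invited', 'As', 'Observer']

# A leading lowercase run is silently dropped by the pattern, hence the
# separate re.match('[A-Z]', ...) slicing in the code above:
print(re.findall('[A-Z][^A-Z]*', 'invitedAsObserver'))
# ['As', 'Observer']
```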
max(len(header) for header in self.header_lines)\n\n    def header_cell(self, index, count):\n        offset = count - len(self.header_lines)\n        actual_index = index - offset\n        return (self.header_lines[actual_index] if actual_index >= 0 else '').ljust(self.colsize, ' ')\n\n    def measure_cell(self, data):\n        data_size = len(str(self._value_from_entry(data)))\n        self.colsize = max(self.colsize, data_size)\n\n    def render_cell(self, entry):\n        return str(self._value_from_entry(entry)).ljust(self.colsize, ' ')\n\n    def __repr__(self):\n        return '<_Header(name={!r}, colsize={!r})>'.format(self.name, self.colsize)\n\n\ndef render_parties(all_parties, entry):\n    return ''.join(_render_party_bool(entry.parties.get(party)) for party in all_parties)\n\n\ndef _render_party_bool(value):\n    if value is not None:\n        return 'C' if value else 'A'\n    return ' '\n","sub_path":"python/dazl/pretty/table/fmt_pretty.py","file_name":"fmt_pretty.py","file_ext":"py","file_size_in_byte":8887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"337049342","text":"#!/usr/bin/env python3\r\n# written by Karthik Subramanya Karvaje\r\n# date: 1st July 2019\r\n\r\n''' This code will keep the position\r\nof the shaft at a constant level\r\nwith respect to the ground no matter\r\nwhat orientation the actuator has'''\r\n\r\nimport hebi\r\nimport math\r\nfrom time import sleep, time\r\n\r\nlookup = hebi.Lookup()\r\n\r\n# Wait 2 seconds for the module list to populate\r\nsleep(2.0)\r\n\r\nfamily_name = \"X5-9\"\r\nmodule_name = \"Actuator1\"\r\n\r\ngroup = lookup.get_group_from_names([family_name], [module_name])\r\n\r\nif group is None:\r\n    print('Group not found! Check that the family and name of a module on the network')\r\n    print('matches what is given in the source file.')\r\n    exit(1)\r\n\r\n# This is by default 100 Hz.\r\n#group.feedback_frequency = 1000.0\r\ngroup_command = hebi.GroupCommand(group.size)\r\ngroup_feedback = hebi.GroupFeedback(group.size)\r\n\r\n# Start logging in the background\r\ngroup.start_log('logs')\r\n\r\nprint(' Move the module to make the output move...')\r\n\r\nduration = 60.0 # [sec]\r\nstart = time()\r\nt = time() - start\r\n\r\nwhile t < duration:\r\n    # Even though we don't use the feedback, getting feedback conveniently\r\n    # limits the loop rate to the feedback frequency\r\n    group.get_next_feedback(reuse_fbk=group_feedback)\r\n    t = time() - start\r\n\r\n    acc=group_feedback.accelerometer\r\n    myacc=acc[0].tolist()\r\n    g=myacc[1]\r\n    h=myacc[0]\r\n    #print(g)\r\n    if -9.8 < g < 9.8:\r\n        # shaft tilt angle from the y-axis gravity component\r\n        theta = math.acos(g/9.8)\r\n        if h >= 0:\r\n            group_command.position = -theta\r\n            group.send_command(group_command)\r\n        if h < 0:\r\n            group_command.position = theta\r\n            group.send_command(group_command)\r\n    else:\r\n        print ('g>9.8')\r\n\r\n# Stop logging. 
`log_file` contains the contents of the file\r\nlog_file = group.stop_log()\r\n","sub_path":"position_feedback_control_code.py","file_name":"position_feedback_control_code.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"121754999","text":"if __name__ == '__main__':\n    arr = [int(_) for _ in input().split()]\n    res = []\n    for i in arr:\n        if i != 0:\n            res.append(i)\n    for i in range(len(arr) - len(res)):\n        res.append(0)\n    for i in res:\n        print(i, end=' ')\n\n","sub_path":"test/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"187554025","text":"#!/usr/bin/python -tt\n#**********************\n#* Author: Jigar S. Rudani\n#* Program Name: NaiveBayesClassificationFramework.py\n#* Version: 1.0\n#*\n#***********************\n__author__ = 'JigarRudani'\nfrom collections import defaultdict\n\ndef naivebayes_classifier(trainingLst, testLst):\n\n    _total_plus_one = 0\n    _total_minus_one = 0\n    _count_of_each_element = defaultdict()\n    _count_element_for_plusone_label = defaultdict()\n    _count_element_for_minusone_label = defaultdict()\n    _conditinal_prob_for_minusone_label = defaultdict()\n    _conditinal_prob_for_plusone_label = defaultdict()\n    _attribute_train_dict = defaultdict()\n    _attribute_test_dict = defaultdict()\n\n    # Get the unique label from both the Training and Test data\n    _attribute_train_dict, _attribute_test_dict = _get_unique_label_for_each_attribute(trainingLst, testLst)\n\n    # Initialise all data structures to default value of 0 which includes all the attributes along with its unique values\n    _count_of_each_element = _get_combine_list(_attribute_train_dict,_attribute_test_dict)\n    _count_element_for_plusone_label = _get_combine_list(_attribute_train_dict,_attribute_test_dict)\n    _count_element_for_minusone_label = _get_combine_list(_attribute_train_dict,_attribute_test_dict)\n\n    _total_observation = len(trainingLst)\n\n    # Traverse Training list to count the number of each unique Label\n    for rows in trainingLst:\n        if (rows[0] == -1):\n            _total_minus_one += 1\n        else:\n            _total_plus_one += 1\n\n    # Count the number of each unique label of each attribute for each label +1 and -1\n    for rows in trainingLst:\n        for element in range(1, len(trainingLst[0])):\n            if (rows[element] != 0):\n                index = _get_index_of_key_from_dict(_count_of_each_element[element],rows[element])\n                if (index != -1):\n                    _count_of_each_element[element][index][rows[element]] += 1\n                if (rows[0] == -1):\n                    index = _get_index_of_key_from_dict(_count_element_for_minusone_label[element], rows[element])\n                    if (index != -1):\n                        _count_element_for_minusone_label[element][index][rows[element]] += 1\n                elif (rows[0] == 1):\n                    index = _get_index_of_key_from_dict(_count_element_for_plusone_label[element],rows[element])\n                    if (index != -1):\n                        _count_element_for_plusone_label[element][index][rows[element]] += 1\n\n    # Calculate the probability of each label\n    prob_minus_one = float(_total_minus_one)/float(_total_observation)\n    prob_plus_one = float(_total_plus_one)/float(_total_observation)\n    #print(\"Probability of +1 and -1 is %f %f\" % (prob_minus_one, prob_plus_one))\n\n    _conditinal_prob_for_minusone_label[0] = prob_minus_one\n    _conditinal_prob_for_plusone_label[0] = prob_plus_one\n\n    # Calculate the probability of each unique Label of each attribute\n    for attribute_key, attribute_value in _count_of_each_element.items():\n        isLaplacian_Negative 
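The `play.py` snippet above moves every zero to the end of the list while keeping the other elements in their original order. Because Python's sort is stable, the same effect is a one-liner with a boolean key:

```python
arr = [0, 1, 0, 3, 12]
res = sorted(arr, key=lambda x: x == 0)  # False sorts before True; order otherwise kept
print(res)  # [1, 3, 12, 0, 0]
```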
= _get_laplacian_flag(_count_element_for_minusone_label[attribute_key])\n isLaplacian_Positive = _get_laplacian_flag(_count_element_for_plusone_label[attribute_key])\n _conditinal_prob_for_minusone_label[attribute_key] = _get_probablity(_count_element_for_minusone_label[attribute_key], isLaplacian_Negative, _total_minus_one)\n _conditinal_prob_for_plusone_label[attribute_key] = _get_probablity(_count_element_for_plusone_label[attribute_key], isLaplacian_Positive, _total_plus_one)\n\n # Return the Conditional Probablity model for each Label\n return (_conditinal_prob_for_minusone_label, _conditinal_prob_for_plusone_label)\n\ndef predict_label(_attribute_list, _attribute_uniq_val_list, _conditinal_prob_for_minusone_label, _conditinal_prob_for_plusone_label):\n\n _predicted_attr_label_list = []\n\n # Predict the Label for _attribute_list data with model trained by Training data\n for attr_rows,uniq_rows in zip(_attribute_list,_attribute_uniq_val_list):\n _prob_result_minus_one = _conditinal_prob_for_minusone_label[0]\n _prob_result_plus_one = _conditinal_prob_for_plusone_label[0]\n for attr_element, uniq_element in zip(attr_rows, uniq_rows):\n index = _get_index_of_key_from_dict(_conditinal_prob_for_minusone_label[attr_element], uniq_element)\n if (index != -1):\n _prob_result_minus_one *= float(_conditinal_prob_for_minusone_label[attr_element][index][uniq_element])\n\n index = _get_index_of_key_from_dict(_conditinal_prob_for_plusone_label[attr_element], uniq_element)\n if (index != -1):\n _prob_result_plus_one *= float(_conditinal_prob_for_plusone_label[attr_element][index][uniq_element])\n\n if (_prob_result_plus_one > _prob_result_minus_one):\n _predicted_attr_label_list.append(1)\n else:\n _predicted_attr_label_list.append(-1)\n\n # Return predicted label for attribute list\n return _predicted_attr_label_list\n\ndef _get_unique_label_for_each_attribute(trainingLst, testLst):\n\n _attribute_train_dict = defaultdict()\n _attribute_test_dict = defaultdict()\n\n # Get the unique label from Test data and prepare dictionary --> attribute : [{uniq1:label},{uniq2:label}...,{uniqn:label}}]\n for attribute in range(1, len(testLst[0])):\n dummylist = []\n for rows in testLst:\n if ({rows[attribute]:rows[0]} not in dummylist):\n if (rows[0] == 1):\n dummylist.append({rows[attribute]:rows[0]})\n elif (rows[0] == -1):\n dummylist.append({rows[attribute]:rows[0]})\n _attribute_test_dict[attribute] = dummylist\n\n # Get the unique label from Training data and prepare dictionary --> attribute : [uniq1,uniq2...,uniqn]\n for attribute in range(1, len(trainingLst[0])):\n dummylist = []\n for rows in trainingLst:\n if (rows[attribute] not in dummylist):\n dummylist.append(rows[attribute])\n _attribute_train_dict[attribute] = dummylist\n\n # Return the structure prepared\n return (_attribute_train_dict, _attribute_test_dict)\n\ndef _get_combine_list(_attribute_train_dict,_attribute_test_dict):\n\n _combine_attribute_list_prepared = defaultdict()\n\n # Combine each attribute unique values from both the training and test data\n for train_key, train_val in _attribute_train_dict.items():\n dummy_list = []\n for test_key, test_val in _attribute_test_dict.items():\n if (test_key == train_key):\n for each_item in train_val:\n for items in test_val:\n for key in items.keys():\n if (key == each_item):\n if({key:0} not in dummy_list):\n dummy_list.append({key:0})\n elif({each_item:0} not in dummy_list):\n dummy_list.append({each_item:0})\n elif({key:0} not in dummy_list):\n dummy_list.append({key:0})\n break\n 
_combine_attribute_list_prepared[train_key] = dummy_list\n break\n return _combine_attribute_list_prepared\n\ndef _get_index_of_key_from_dict(mapList, search_key):\n\n i = 0\n for items in mapList:\n for keys in items.keys():\n if(keys == search_key):\n return i\n else:\n i += 1\n return -1\n\ndef _get_laplacian_flag(attribute_value_list):\n\n # Length of list is 1 then check for key == 0\n if (len(attribute_value_list) == 1):\n if (0 in attribute_value_list[0].keys()):\n return True\n elif (0 in attribute_value_list[0].values()):\n return True\n else:\n for items in attribute_value_list:\n for key, values in items.items():\n if (values == 0 and key != 0):\n return True\n return False\n\ndef _get_probablity(attribute_value_list, isLaplacian, _total_count):\n\n prob_list = []\n\n # Get the total count of unique label for each attribute\n if ({0: 0} in attribute_value_list):\n _total_count_uniq_label = len(attribute_value_list) - 1\n else:\n _total_count_uniq_label = len(attribute_value_list)\n\n # calculate the conditional probabality for each attribute uniq values\n for attribute in attribute_value_list:\n for key, value in attribute.items():\n if (key != 0):\n # Check if isLaplacian flag is True or not.\n # It True then increment the count of each attributes unique value and also increment count of dividing factor by count of uniq val\n if (isLaplacian):\n prob = float(value + 1)/float(_total_count + _total_count_uniq_label)\n else:\n prob = float(value)/float(_total_count)\n # Store the output in the list in the form of {uniq_val : prob_value}\n prob_list.append({key:prob})\n return prob_list","sub_path":"code/NaiveBayesClassificationFramework.py","file_name":"NaiveBayesClassificationFramework.py","file_ext":"py","file_size_in_byte":9148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"217599233","text":"from django.db import models\nfrom datetime import date\n\ngender_list = (('M', 'Male'), ('F', 'Female'))\nclass Person(models.Model):\n name = models.CharField(max_length=50, blank=False)\n gender = models.CharField(max_length=1, null=True, blank=False, choices=gender_list)\n birthday = models.DateField(default=date.today)\n email = models.EmailField(max_length=70, blank=False, unique=True, null=True)\n desc = models.TextField(max_length=100, blank=True)\n photo = models.FileField(upload_to='images/',null=True, blank=True)\n\n\n def __str__(self):\n return '%s'%(self.name)","sub_path":"mysite/first/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"47592566","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Migrate.\n\"\"\"\nimport sqlalchemy as sa\n\nfrom omoide import commands\nfrom omoide import constants\nfrom omoide import infra\nfrom omoide.database import operations\n\n\n# pylint: disable=too-many-locals\ndef act(command: commands.MigrateCommand,\n filesystem: infra.Filesystem,\n stdout: infra.STDOut,\n echo: bool = False) -> int:\n \"\"\"Migrate.\"\"\"\n total_new_migrations = 0\n walk = infra.walk_storage_from_command(command, filesystem)\n\n for branch, leaf, leaf_folder in walk:\n migration_file = filesystem.join(leaf_folder,\n constants.MIGRATION_FILE_NAME)\n\n if not filesystem.exists(migration_file):\n stdout.print(f'\\t[{branch}][{leaf}] Nothing to migrate')\n continue\n\n local_db_file = filesystem.join(leaf_folder,\n constants.LEAF_DB_FILE_NAME)\n\n if filesystem.exists(local_db_file) and not 
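`_get_probablity` above applies add-one (Laplace) smoothing whenever some attribute value never co-occurs with a label: every count is incremented by one and the denominator grows by the number of distinct values V, giving P(x|y) = (count(x,y) + 1) / (count(y) + V) instead of a hard zero. Numerically:

```python
def laplace_prob(count_xy, count_y, n_values):
    """Add-one smoothed conditional probability P(x | y)."""
    return (count_xy + 1) / (count_y + n_values)

# A value never seen with label y keeps a small non-zero probability:
print(laplace_prob(0, 20, 3))   # 1/23 ~ 0.0435 instead of 0.0
```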
command.force:\n stdout.cyan(\n f'\\t[{branch}][{leaf}] Migration database already exist'\n )\n continue\n\n if filesystem.exists(local_db_file):\n filesystem.delete_file(local_db_file)\n stdout.yellow(\n f'\\t[{branch}][{leaf}] Deleted {constants.LEAF_DB_FILE_NAME}'\n )\n\n engine = operations.restore_database_from_scratch(\n folder=leaf_folder,\n filename=constants.LEAF_DB_FILE_NAME,\n filesystem=filesystem,\n echo=echo,\n )\n\n content = filesystem.read_file(migration_file)\n migrations = content.split(';')\n\n with engine.connect() as conn:\n trans = conn.begin()\n try:\n for migration in migrations:\n conn.execute(sa.text(migration))\n trans.commit()\n except Exception:\n trans.rollback()\n raise\n\n total_new_migrations += len(migrations)\n stdout.yellow(f'\\t[{branch}][{leaf}] Saved migrations')\n\n return total_new_migrations\n","sub_path":"omoide/migration_engine/operations/migrate/migrate.py","file_name":"migrate.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"161043808","text":"import os\n\nfrom util import Client\n\n# when running the agent locally, assume that the environment is accesible at localhost:5000\n# when running a containerised agent, assume that the environment is accesible at $RANGL_ENVIRONMENT_URL (typically http://environment:5000)\nremote_base = os.getenv(\"RANGL_ENVIRONMENT_URL\", \"http://localhost:5000/\")\n\nclient = Client(remote_base)\n\nenv_id = \"reference-environment-v0\"\nseed = int(os.getenv(\"RANGL_SEED\", 123456))\ninstance_id = client.env_create(env_id, seed)\n\n\n\nclient.env_monitor_start(\n instance_id,\n directory=f\"monitor/{instance_id}\",\n force=True,\n resume=False,\n video_callable=False,\n)\n\nclient.env_reset(instance_id)\nwhile True:\n action = client.env_action_space_sample(instance_id)\n observation, reward, done, info = client.env_step(instance_id, action)\n print(instance_id, reward)\n if done:\n print(instance_id)\n break\n\nclient.env_monitor_close(instance_id)\nprint(\"done\", done)\n\n# make sure you print the instance_id as the last line in the script\nprint(instance_id)\n","sub_path":"random_agent_submission/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"413939394","text":"import re\r\nimport webbrowser\r\nimport smtplib\r\nimport time\r\nfrom time import strftime\r\nfrom email.mime.text import MIMEText\r\nfrom selenium import webdriver\r\n\r\ndef main():\r\n\ts_yr = input(\"Session year: \")\r\n\ts_cd = input(\"Winter or Summer? (W\\S): \")\r\n\tcname = input(\"Enter 4 char course identifier: \")\r\n\tc_lvl = input(\"Enter 3 num course digit: \")\r\n\tc_id = input(\"Enter Course section: \")\r\n\treserved = input(\"Reserved? 
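Each leaf database above is rebuilt from scratch and its migration statements applied inside a single transaction, so a failing statement rolls the whole file back instead of leaving it half-migrated. The core pattern as a standalone sketch (the in-memory SQLite engine and the statements are placeholders):

```python
import sqlalchemy as sa

engine = sa.create_engine('sqlite:///:memory:')
statements = ["CREATE TABLE items (id INTEGER)", "INSERT INTO items VALUES (1)"]

with engine.connect() as conn:
    trans = conn.begin()
    try:
        for stmt in statements:
            conn.execute(sa.text(stmt))
        trans.commit()     # all statements commit together, or none do
    except Exception:
        trans.rollback()
        raise
```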
(y/n): \")\r\n\temail = input(\"Please enter your email address:\\n\")\r\n\r\n\turl = 'https://courses.students.ubc.ca/cs/main?sessyr=' + s_yr + '&sesscd=' + s_cd\r\n\t\r\n\tprint('Looking for available seats...')\r\n\tseats = requestPage(cname, c_lvl, c_id)\r\n\troom = findSeats(seats, reserved)\r\n\tif room > 0:\r\n\t\tsendMail(email, room)\r\n\telse:\t\r\n\t\tprint('No seats available yet, you will receive an email when there\\'s room')\r\n\t\treloadPage(cname, c_lvl, c_id, reserved, email)\r\n\r\ndef sendMail(email, room):\r\n\taddr_to = email\r\n\taddr_from = 'UBC'\r\n\r\n\tusername = 'XXX@gmail.com'\r\n\tpassword = 'XXX'\r\n\r\n\twhen = strftime(\"%m/%d/%Y %H:%M\")\r\n\tmsg = MIMEText(\"Hi there! \\n\\n{0} seats are available in your class! (as of {1})\\n\".format(room, when))\r\n\tmsg['To'] = addr_to\r\n\tmsg['From'] = addr_from\r\n\tmsg['Subject'] = \"A seat is available in your class!\"\r\n\r\n\ttry:\r\n\t\tserver = smtplib.SMTP(\"smtp.gmail.com\", 587)\r\n\t\tserver.ehlo()\r\n\t\tserver.starttls()\r\n\t\tserver.login(username, password)\r\n\t\tserver.sendmail(addr_from, addr_to, msg.as_string())\r\n\t\tserver.quit()\r\n\texcept smtplib.SMTPException:\r\n\t\tprint('Failed to send email')\r\n\r\ndef reloadPage(cname, c_lvl, c_id, reserved, email):\r\n\temail = email\r\n\tcname = cname\r\n\tc_lvl= c_lvl\r\n\tc_id = c_id\r\n\trestricted = reserved\r\n\twhile True:\r\n\t\tseats = requestPage(cname, c_lvl, c_id)\r\n\t\troom = findSeats(seats, restricted)\r\n\t\tif room > 0:\r\n\t\t\tprint(str(room) + ' seats are available!')\r\n\t\t\tsendMail(email, room)\r\n\t\t\tbreak\r\n\t\ttime.sleep(30)\r\n\r\n\r\ndef requestPage(cname, c_lvl, c_id):\r\n\tdriver = webdriver.Firefox()\r\n\tdriver.get('https://courses.students.ubc.ca/cs/main?sessyr=2014&sesscd=W')\r\n\tdriver.implicitly_wait(5)\r\n\tdriver.find_element_by_xpath('//*[@id=\"ubc7-unit-navigation\"]/ul/li[1]/div/a').click()\r\n\tdriver.implicitly_wait(5)\r\n\tdriver.find_element_by_xpath('//*[@id=\"ubc7-unit-navigation\"]/ul/li[1]/div/ul/li[2]/a').click()\r\n\tdriver.implicitly_wait(5)\r\n\tdriver.find_element_by_link_text(cname).click()\r\n\tdriver.implicitly_wait(5)\r\n\tdriver.find_element_by_link_text('{0} {1}'.format(cname, c_lvl)).click()\r\n\tdriver.find_element_by_link_text('{0} {1} {2}'.format(cname, c_lvl, c_id)).click()\r\n\r\n\tpagehtml= str(driver.page_source)\r\n\tdriver.quit()\r\n\tf = open('source', 'w')\r\n\tf.write(pagehtml)\r\n\tf.close()\r\n\tseatsRE = 'Total Seats Remaining:(\\d+)' + '.+' + \\\r\n\t\t\t\t'General Seats Remaining:(\\d+)' + '.+' + \\\r\n\t\t\t\t'Restricted Seats Remaining\\*:(\\d+)'\r\n\tseats = re.search(seatsRE, pagehtml, re.DOTALL)\r\n\treturn seats\r\n\r\n\r\ndef findSeats(seats, reserved):\r\n\ttotalseats = int(seats.group(1))\r\n\tgeneralseats = int(seats.group(2))\r\n\treservedseats = int(seats.group(3))\r\n\r\n\troom = 0\r\n\tif totalseats > 0:\r\n\t\tif generalseats > 0:\r\n\t\t\troom = generalseats\r\n\t\telif reservedseats > 0:\r\n\t\t\tif reserved == 'y':\r\n\t\t\t\troom = reservedseats\r\n\treturn room\r\n\r\n# Entry Point\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"regCourse.py","file_name":"regCourse.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"372649535","text":"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nTest The deprecation methods\n\"\"\"\n\nimport unittest\nimport warnings\nfrom test import QiskitNatureTestCase\nfrom ddt import data, ddt\nfrom qiskit_nature.deprecation import (\n DeprecatedType,\n warn_deprecated,\n warn_deprecated_same_type_name,\n deprecate_function,\n deprecate_method,\n deprecate_arguments,\n)\n\n# pylint: disable=bad-docstring-quotes\n\n\n@deprecate_function(\"0.1.1\", DeprecatedType.FUNCTION, \"some_function1\", \"and more information\", 2)\ndef func1(arg1):\n \"\"\"function 1\"\"\"\n del arg1\n pass\n\n\n@deprecate_function(\"0.2.0\", DeprecatedType.FUNCTION, \"some_function2\")\ndef func2(arg2):\n \"\"\"function 2\"\"\"\n del arg2\n pass\n\n\n@deprecate_arguments(\"0.1.2\", {\"old_arg\": \"new_arg\"})\ndef func3(new_arg=None, old_arg=None):\n \"\"\"function 3\"\"\"\n del new_arg, old_arg\n pass\n\n\nclass DeprecatedClass1:\n \"\"\"Deprecated Test class 1\"\"\"\n\n def __init__(self):\n warn_deprecated(\n \"0.3.0\", DeprecatedType.CLASS, \"DeprecatedClass1\", DeprecatedType.CLASS, \"NewClass\"\n )\n\n\nclass DeprecatedClass2:\n \"\"\"Deprecated Test class 2\"\"\"\n\n def __init__(self):\n warn_deprecated_same_type_name(\n \"0.3.0\", DeprecatedType.CLASS, \"DeprecatedClass2\", \"from package test2\"\n )\n\n\nclass TestClass:\n \"\"\"Test class with deprecation\"\"\"\n\n @deprecate_method(\n \"0.1.0\", DeprecatedType.METHOD, \"some_method1\", \"and additional information\", 1\n )\n def method1(self):\n \"\"\"method 1\"\"\"\n pass\n\n @deprecate_method(\"0.2.0\", DeprecatedType.METHOD, \"some_method2\")\n def method2(self):\n \"\"\"method 2\"\"\"\n pass\n\n @deprecate_arguments(\"0.1.2\", {\"old_arg\": \"new_arg\"})\n def method3(self, new_arg=None, old_arg=None):\n \"\"\"method3\"\"\"\n del new_arg, old_arg\n pass\n\n\n@ddt\nclass TestDeprecation(QiskitNatureTestCase):\n \"\"\"Test deprecation methods\"\"\"\n\n @data(\n (\n \"func1\",\n \"The func1 function is deprecated as of version 0.1.1 \"\n \"and will be removed no sooner than 3 months after the release. \"\n \"Instead use the some_function1 function and more information.\",\n ),\n (\n \"func2\",\n \"The func2 function is deprecated as of version 0.2.0 \"\n \"and will be removed no sooner than 3 months after the release. \"\n \"Instead use the some_function2 function.\",\n ),\n )\n def test_function_deprecation(self, config):\n \"\"\"test function deprecation\"\"\"\n\n function_name, msg_ref = config\n\n # emit deprecation the first time it is used\n with warnings.catch_warnings(record=True) as c_m:\n warnings.simplefilter(\"always\")\n globals()[function_name](None)\n msg = str(c_m[0].message)\n self.assertEqual(msg, msg_ref)\n\n # trying again should not emit deprecation\n with warnings.catch_warnings(record=True) as c_m:\n warnings.simplefilter(\"always\")\n globals()[function_name](None)\n self.assertListEqual(c_m, [])\n\n def test_class_deprecation1(self):\n \"\"\"test class deprecation 1\"\"\"\n\n msg_ref = (\n \"The DeprecatedClass1 class is deprecated as of version 0.3.0 \"\n \"and will be removed no sooner than 3 months after the release. 
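These deprecation tests lean on one capture idiom throughout: `warnings.catch_warnings(record=True)` collects emitted warnings in a list, and `simplefilter("always")` defeats the default once-per-location filter, so the assertions observe exactly what the deprecation machinery emits (once on first use, then silence). The idiom on its own:

```python
import warnings

def noisy():
    warnings.warn("old API", DeprecationWarning)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")  # record every emission, not just the first
    noisy()

assert str(caught[0].message) == "old API"
assert caught[0].category is DeprecationWarning
```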
\"\n \"Instead use the NewClass class.\"\n )\n\n # emit deprecation the first time it is used\n with warnings.catch_warnings(record=True) as c_m:\n warnings.simplefilter(\"always\")\n DeprecatedClass1()\n msg = str(c_m[0].message)\n self.assertEqual(msg, msg_ref)\n\n # trying again should not emit deprecation\n with warnings.catch_warnings(record=True) as c_m:\n warnings.simplefilter(\"always\")\n DeprecatedClass1()\n self.assertListEqual(c_m, [])\n\n def test_class_deprecation2(self):\n \"\"\"test class deprecation 2\"\"\"\n\n msg_ref = (\n \"The DeprecatedClass2 class is deprecated as of version 0.3.0 \"\n \"and will be removed no sooner than 3 months after the release. \"\n \"Instead use the DeprecatedClass2 class from package test2.\"\n )\n\n # emit deprecation the first time it is used\n with warnings.catch_warnings(record=True) as c_m:\n warnings.simplefilter(\"always\")\n DeprecatedClass2()\n msg = str(c_m[0].message)\n self.assertEqual(msg, msg_ref)\n\n # trying again should not emit deprecation\n with warnings.catch_warnings(record=True) as c_m:\n warnings.simplefilter(\"always\")\n DeprecatedClass2()\n self.assertListEqual(c_m, [])\n\n @data(\n (\n \"method1\",\n \"The method1 method is deprecated as of version 0.1.0 \"\n \"and will be removed no sooner than 3 months after the release. \"\n \"Instead use the some_method1 method and additional information.\",\n ),\n (\n \"method2\",\n \"The method2 method is deprecated as of version 0.2.0 \"\n \"and will be removed no sooner than 3 months after the release. \"\n \"Instead use the some_method2 method.\",\n ),\n )\n def test_method_deprecation(self, config):\n \"\"\"test method deprecation\"\"\"\n\n method_name, msg_ref = config\n method = getattr(TestClass(), method_name)\n\n # emit deprecation the first time it is used\n with warnings.catch_warnings(record=True) as c_m:\n warnings.simplefilter(\"always\")\n method()\n msg = str(c_m[0].message)\n self.assertEqual(msg, msg_ref)\n\n # trying again should not emit deprecation\n with warnings.catch_warnings(record=True) as c_m:\n warnings.simplefilter(\"always\")\n method()\n self.assertListEqual(c_m, [])\n\n def test_function_arguments_deprecation(self):\n \"\"\"test function arguments deprecation\"\"\"\n\n msg_ref = (\n \"func3: the old_arg argument is deprecated as of version 0.1.2 \"\n \"and will be removed no sooner than 3 months after the release. \"\n \"Instead use the new_arg argument.\"\n )\n # both arguments at the same time should raise exception\n with self.assertRaises(TypeError):\n func3(new_arg=\"2222\", old_arg=\"hello\")\n\n with warnings.catch_warnings(record=True) as c_m:\n warnings.simplefilter(\"always\")\n func3(old_arg=\"hello\")\n msg = str(c_m[0].message)\n self.assertEqual(msg, msg_ref)\n\n def test_method_arguments_deprecation(self):\n \"\"\"test method arguments deprecation\"\"\"\n\n obj = TestClass()\n\n msg_ref = (\n \"method3: the old_arg argument is deprecated as of version 0.1.2 \"\n \"and will be removed no sooner than 3 months after the release. 
\"\n \"Instead use the new_arg argument.\"\n )\n # both arguments at the same time should raise exception\n with self.assertRaises(TypeError):\n obj.method3(new_arg=\"2222\", old_arg=\"hello\")\n\n with warnings.catch_warnings(record=True) as c_m:\n warnings.simplefilter(\"always\")\n obj.method3(old_arg=\"hello\")\n msg = str(c_m[0].message)\n self.assertEqual(msg, msg_ref)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/test_deprecation.py","file_name":"test_deprecation.py","file_ext":"py","file_size_in_byte":7853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"438454047","text":"#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport sys\n\nwarn = None\n\ndef unpack_brackets(s):\n pairs = [\"<>\", \"[]\", \"()\"]\n if len(s) < 2:\n return None, s\n for a, b in pairs:\n if s[0] == a and s[-1] == b:\n return a, s[1:-1]\n return None, s\n\ndef parse_inner_arg(arg, ret):\n if arg.endswith(','):\n return dict(type='comma-list', inner=parse_inner_arg(arg[:-1], ret))\n elif arg.endswith('_'):\n return dict(type='space-list', inner=parse_inner_arg(arg[:-1], ret))\n elif arg.startswith('#'):\n typ, arg = 'channel', arg[1:]\n elif ':' in arg:\n typ, arg = arg.split(':', 1)\n else:\n typ = 'str'\n\n data = {}\n if '(' in typ and typ.endswith(')'):\n typ, typarg = typ.split('(', 1)\n typarg = typarg[:-1]\n data['type-argument'] = typarg\n # make sure the type is a known argument type\n if not typ in ['flag', 'literal']:\n warn('type does not take argument: {}'.format(typ))\n\n # make sure the type is known\n if not typ in ['str', 'int', 'flag', 'literal', 'channel']:\n warn('unknown type: {}'.format(typ))\n\n data['type'] = typ\n ret['name'] = check_name(arg)\n return data\n\ndef parse_arg(arg_orig):\n b, arg = unpack_brackets(arg_orig)\n ret = {}\n if not b:\n # literal\n return (['left', 'right'], {'type': 'literal', 'type-argument': arg})\n elif b == '<':\n typ = parse_inner_arg(arg, ret)\n ret.update(typ)\n return (['left', 'right'], ret)\n elif b == '[':\n ret['type'] = 'optional'\n ret['inner'] = parse_inner_arg(arg, ret)\n return (['left'], ret)\n elif b == '(':\n ret['type'] = 'optional'\n ret['inner'] = parse_inner_arg(arg, ret)\n return (['right'], ret)\n else:\n warn('cannot parse argument: {}'.format(arg_orig))\n\ndef check_name(name):\n name = name.strip()\n if not name: # names should have length\n warn('zero-length name')\n if name.lower() != name: # names should be lower-case\n warn('name not lowcased: {}'.format(name))\n if len(name.split()) > 1: # names should have no whitespace\n warn('name has whitespace: {}'.format(name))\n # names should be [a-z][0-9] and - only\n if not all(c.isalpha() or c.isdigit() or c == '-' for c in name):\n warn('name has invalid characters: {}'.format(name))\n return name\n\ndef check_verb(verb):\n if not verb.upper() == verb: # verbs should be upper case\n warn('verb not upcased: {}'.format(verb))\n if verb.isnumeric():\n # numerics must be 000 formatted\n if verb != '{:03d}'.format(int(verb)):\n warn('invalid numeric format: {}'.format(verb))\n verb = int(verb)\n # numerics must be within this range\n if verb <= 0 or verb > 999:\n warn('invalid numeric code: {}'.format(verb))\n return verb\n\ndef parse_format(fmt, data):\n data['format'] = fmt\n\n # do our own tokenizing, to force balanced parens but handle : outside\n tokens = []\n expectstack = []\n expectmap = {'(': ')', '[': ']', '<': '>'}\n gather = ''\n split_on_space = True\n for c in fmt:\n if c in 
expectmap:\n expectstack.append(expectmap[c])\n if expectstack and c == expectstack[-1]:\n expectstack.pop()\n if c == ':' and not expectstack:\n split_on_space = False\n continue\n if split_on_space and c.isspace():\n if gather:\n tokens.append(gather)\n gather = ''\n else:\n gather += c\n if gather:\n tokens.append(gather)\n if expectstack:\n warn('unbalanced brackets, expecting: {}'.format(expectstack))\n \n # there should be at least a verb\n if not tokens:\n warn('no verb found')\n \n verb = tokens[0]\n args = tokens[1:]\n data['verb'] = check_verb(verb)\n if isinstance(data['verb'], int):\n data['type'] = 'numeric'\n else:\n data['type'] = 'text'\n\n associativity = set(['left', 'right'])\n data['arguments'] = []\n argnames = []\n for a in args:\n assoc, arg = parse_arg(a)\n associativity = associativity.intersection(assoc)\n if 'name' in arg:\n # arguments must be unique\n if arg['name'] in argnames:\n warn('non-unique argument name: {}'.format(arg['name']))\n argnames.append(arg['name'])\n data['arguments'].append(arg)\n\n # rectify associativities\n if not associativity:\n warn('mixed associativities')\n associativity = list(associativity)\n associativity.sort()\n data['associativity'] = associativity[0]\n\n # numerics all have targets\n if data['type'] == 'numeric':\n if len(data['arguments']) < 1 or data['arguments'][0].get('name') != 'target' or data['arguments'][0].get('type') != 'str':\n warn('numerics need a target argument')\n\n # a bunch of literals next to each other is always an error\n last_type = None\n for arg in data['arguments']:\n if arg['type'] == 'literal' and last_type == 'literal':\n warn('two successive literals, you need a :')\n break\n last_type = arg['type']\n\nsection_names = []\ndef check_section(title, data):\n required = ['name']\n \n # must have these fields\n for k in required:\n if not k in data:\n warn('required field `{}` missing'.format(k))\n return None\n\n # validate name\n data['name'] = check_name(data['name'])\n \n # section names must be unique\n if data['name'] in section_names:\n warn('non-unique section name: {}'.format(data['name']))\n section_names.append(data['name'])\n\n # add title\n data['title'] = title\n \n return data\n\nmessage_names = []\nmessage_verbs = {}\ndef check_message(fmt, data):\n required = ['name']\n \n # must have these fields\n for k in required:\n if not k in data:\n warn('required field `{}` missing'.format(k))\n return None\n\n # fill in computed details from format\n parse_format(fmt, data)\n\n # validate name\n data['name'] = check_name(data['name'])\n\n # message names must be unique\n if data['name'] in message_names:\n warn('non-unique message name: {}'.format(data['name']))\n message_names.append(data['name'])\n\n # message verbs must be unique\n if data['verb'] in message_verbs:\n warn('non-unique verb: {}'.format(data['verb']))\n message_verbs[data['verb']] = data['name']\n\n # related is a comma-separated list\n if 'related' in data:\n data['related'] = [check_verb(r.strip()) for r in data['related'].split(',')]\n\n # only refer to section by name\n data['section'] = data['section']['name']\n\n return data\n\ndef check_version(ver, data):\n if not '.' 
in ver:\n warn('invalid version format')\n return (0, 0)\n maj, min = ver.split('.', 1)\n if not maj.isnumeric() or not min.isnumeric():\n warn('invalid version format')\n return (0, 0)\n return (int(maj), int(min))\n\ndef check_whole(data):\n # make sure all related verbs actually exist\n # and resolve them into names\n for msg in data['messages']:\n resolved_rel = []\n for rel in msg.get('related', []):\n if not rel in message_verbs:\n warn('unknown related verb for {}: {}'.format(msg['verb'], rel))\n else:\n resolved_rel.append(message_verbs[rel])\n if resolved_rel:\n msg['related'] = resolved_rel\n \n return data\n\ndef create_description(f, fname):\n lineno = 0\n lastheaderno = 0\n room_for_header = True\n sections = []\n messages = []\n version = None\n\n header = None\n gather = {}\n\n fields = {\n 'Version': [],\n 'Section': ['name', 'url'],\n 'Message': ['name', 'related', 'documentation'],\n }\n \n warnings = 0\n def local_warn(s):\n nonlocal warnings\n warnings += 1\n if 'verb' in gather:\n print('{}:{}: (verb {}) {}'.format(fname, lastheaderno, gather['verb'], s))\n else:\n print('{}:{}: {}'.format(fname, lastheaderno, s))\n\n global warn\n warn = local_warn\n\n def emit():\n nonlocal header, gather, sections, messages, version\n if header is not None:\n if header[0] == 'Version':\n if version:\n warn('only one version allowed')\n version = check_version(header[1], gather)\n elif header[0] == 'Section':\n section = check_section(header[1], gather)\n if section:\n sections.append(section)\n elif header[0] == 'Message':\n if sections:\n gather['section'] = sections[-1]\n message = check_message(header[1], gather)\n if message:\n messages.append(message)\n else:\n # every message must have a section\n warn('message has no section')\n header = None\n gather = {}\n\n for l in f.readlines():\n lineno += 1\n \n if l.strip().startswith('#'):\n # comment\n continue\n\n if not l.strip():\n # blank\n room_for_header = True\n continue\n\n if not ':' in l:\n warn('no `:` found')\n continue\n\n key, val = l.split(':', 1)\n key = key.strip()\n val = val.strip()\n\n if key in fields:\n # new header\n emit()\n lastheaderno = lineno\n header = (key, val)\n if not room_for_header:\n warn('need whitespace before new header')\n elif header is not None and key in fields[header[0]]:\n gather[key] = val\n else:\n warn('invalid key in this location: {}'.format(key))\n room_for_header = False\n emit()\n\n if not version:\n warn('no version found')\n version = (0, 0)\n\n data = {}\n data['major-version'], data['minor-version'] = version\n data['sections'] = sections\n data['messages'] = messages\n\n return check_whole(data)\n\np = argparse.ArgumentParser()\np.add_argument('input', type=argparse.FileType('r'))\np.add_argument('output', type=argparse.FileType('w'))\n\nif __name__ == \"__main__\":\n args = p.parse_args()\n data = create_description(args.input, args.input.name)\n if data is None:\n sys.exit(1)\n json.dump(data, args.output, indent=2)\n args.output.write('\\n')\n","sub_path":"codegen/compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":10611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"526816486","text":"import time\nimport base64\nimport re\nimport os\nimport urllib2\nimport json\n\n\nclass tool_error:\n\n message = \"\"\n\n def __init__(self, msg):\n self.message = msg\n\n def __bool__(self):\n return False\n __nonzero__ = __bool__\n\n def __repr__(self):\n return self.message\n __str__ = __repr__\n\n\nhaxdb = 
None\ndb = None\nconfig = None\n\n\ndef init(app_config, app_db, app_haxdb):\n global haxdb, db, config\n haxdb = app_haxdb\n db = app_db\n config = app_config\n\n\ndef send_email(receiver, subject, msg):\n\n sender = \"%s <%s>\" % (config[\"EMAIL\"][\"NAME\"], config[\"EMAIL\"][\"FROM\"])\n header = \"From: %s\\n\" % sender\n header += \"To: %s\\n\" % receiver\n header += \"Subject: %s\\n\" % subject\n header += \"\\r\\n\\r\\n\"\n msg = header + msg\n\n import smtplib\n try:\n host = config[\"EMAIL\"][\"HOST\"]\n port = config[\"EMAIL\"][\"PORT\"]\n server = smtplib.SMTP(host, port, None, 10)\n server.starttls()\n server.login(config[\"EMAIL\"][\"USER\"], config[\"EMAIL\"][\"PASS\"])\n server.sendmail(sender, receiver, msg)\n server.quit()\n except smtplib.SMTPRecipientsRefused:\n return tool_error(\"INVALID EMAIL ADDRESS\")\n except smtplib.SMTPException:\n return tool_error(\"FAILED TO SEND EMAIL\")\n return True\n","sub_path":"mods/core_auth_email/mod_tools.py","file_name":"mod_tools.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"187352810","text":"lst = [int(i) for i in input().split()]\nx = int(input())\nk = 0\n\nfor i in range(len(lst)):\n if lst[i] == x:\n print(i, end = ' ')\n k = 1\nif k == 0:\n print('Отсутствует')\n\n \n","sub_path":"2.6.9.py","file_name":"2.6.9.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"561896012","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 27 10:08:25 2018\n\n@author: cdeline\n\nUsing pytest to create unit tests for readepw.\n\nto run unit tests, run pytest from the command line in the bifacial_radiance directory\n\n\"\"\"\n\nimport bifacial_radiance\nimport os\n\n# try navigating to tests directory so tests run from here.\ntry:\n os.chdir('tests')\nexcept:\n pass\n\n# test the readepw on a dummy Boulder EPW file in the /tests/ directory\nTESTDATA_FILENAME = 'USA_CO_Boulder.724699_TMY2.epw'\n\ndef test_readepw_metadata(): \n # Is this returning correct metadata?\n (EPW_DATA, EPW_METADATA) = bifacial_radiance.readepw(filename = TESTDATA_FILENAME) # this is done outside of an assert, but maybe that's ok?\n\n assert EPW_METADATA == {'Name': 'BOULDER',\n 'State': 'USA',\n 'TZ': -7.0,\n 'USAF': 724699,\n 'altitude': 1634.0,\n 'latitude': 40.02,\n 'longitude': -105.25} \n\n\ndef test_readepw_data_length():\n # Is this returning the correct amount of data? 
34 x 8760\n (EPW_DATA, EPW_METADATA) = bifacial_radiance.readepw(filename = TESTDATA_FILENAME) # this is done outside of an assert, but maybe that's ok?\n assert EPW_DATA.__len__() == 8760\n assert EPW_DATA.columns.__len__() == 34\n \ndef test_readepw_data_values():\n # Is this returning the correct data maxima?\n (EPW_DATA, EPW_METADATA) = bifacial_radiance.readepw(filename = TESTDATA_FILENAME) # this is done outside of an assert, but maybe that's ok?\n assert EPW_DATA['Dry bulb temperature in C'].max() == 36.7\n assert EPW_DATA['Global horizontal radiation in Wh/m2'].max() == 1029","sub_path":"tests/test_readepw.py","file_name":"test_readepw.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"506468717","text":"import eel\nimport sqlite3\nimport subprocess\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.common.keys import Keys\n\n\n\nconnection = sqlite3.connect('database.db',check_same_thread=False)\n\n#better way is just to create a bloody primary key\nconnection.execute('''CREATE TABLE IF NOT EXISTS SUBJECT\n (\n NAME TEXT NOT NULL,\n URL TEXT NOT NULL,\n PRIMARY KEY('NAME')\n );''')\n\n#connection.execute('''ALTER TABLE SUBJECT ADD UNIQUE INDEX(NAME, URL);''')\n#foreign key reference is useless\nconnection.execute('''CREATE TABLE IF NOT EXISTS TIMING\n (\n DAY INT NOT NULL,\n STIME TEXT NOT NULL,\n ETIME TEXT NOT NULL,\n SUBJECT TEXT NOT NULL,\n UNIQUE(DAY, STIME, ETIME, SUBJECT)\n FOREIGN KEY('SUBJECT') REFERENCES 'SUBJECT'('NAME') ON DELETE CASCADE\n );''')\n\n#only 1 value ffs\nconnection.execute('''CREATE TABLE IF NOT EXISTS ACCOUNT\n (\n EMAIL TEXT NOT NULL,\n PASSWORD TEXT NOT NULL\n );''')\n\nconnection.commit()\n\neel.init('web')\n\n\n@eel.expose\ndef login_to_google():\n x=subprocess.Popen('cd c:\\\\Program Files\\\\Google\\\\Chrome\\\\Application & .\\chrome.exe --remote-debugging-port=8989 --user-data-dir=\"C:\\\\Users\\\\shara\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\User Data\\\\Selenium\"',shell=True)\n opt=Options()\n opt.add_argument(\"start-maximized\")\n opt.add_experimental_option(\"debuggerAddress\",\"localhost:8989\")\n driver=webdriver.Chrome(executable_path=\"chromedriver.exe\",options=opt)\n driver.get(\"https://accounts.google.com/signin/v2/identifier?ltmpl=meet&continue=https%3A%2F%2Fmeet.google.com%3Fhs%3D193&&o_ref=https%3A%2F%2Fmeet.google.com%2F_meet%2Fwhoops%3Fsc%3D232%26alias%3Dmymeetingraheel&_ga=2.262670348.1240836039.1604695943-1869502693.1604695943&flowName=GlifWebSignIn&flowEntry=ServiceLogin\")\n driver.minimize_window() \n driver.maximize_window() \n # driver.minimize_window() \n # driver.maximize_window() \n\n\n\n@eel.expose\ndef getSubject():\n val = connection.execute('SELECT * FROM SUBJECT')\n ret = [] \n for x in val:\n obj = {}\n obj['name'] = x[0]\n obj['url'] = x[1]\n ret.append(obj)\n eel.updateSubject(ret)\n\n@eel.expose\ndef addSubject(subject):\n connection.execute('INSERT OR IGNORE INTO SUBJECT VALUES (\\'{}\\',\\'{}\\');'.format(subject['name'],subject['url']))\n connection.commit()\n getSubject()\n\n@eel.expose\ndef deleteSubject(subject):\n connection.execute('DELETE FROM SUBJECT WHERE NAME = \\'{}\\' AND URL = \\'{}\\';'.format(subject['name'],subject['url']))\n connection.commit()\n deleteAllTimingOfaSubject(subject['name'])\n getSubject()\n \n@eel.expose\ndef updateSubject(new, old):\n 
connection.execute('UPDATE SUBJECT SET NAME = \\'{}\\' , URL = \\'{}\\' WHERE NAME = \\'{}\\' AND URL = \\'{}\\''.format(new['name'], new['url'], old['name'], old['url']))\n connection.commit()\n editAllTimingOfaSubject(new['name'], old['name'])\n getSubject()\n\n\ncurrDay = 1\n\n@eel.expose\ndef getTiming(day):\n global currDay\n val = connection.execute('SELECT * FROM TIMING WHERE DAY = {};'.format(day))\n ret = []\n for x in val:\n obj = {}\n obj['day'] = x[0]\n obj['start_time'] = x[1]\n obj['end_time'] = x[2]\n obj['subject'] = x[3]\n ret.append(obj)\n currDay = day\n eel.updateTiming(ret)\n\n\n@eel.expose\ndef addTiming(timing):\n global currDay\n connection.execute('INSERT OR IGNORE INTO TIMING VALUES ({},\\'{}\\',\\'{}\\',\\'{}\\');'.format(timing['day'],timing['start_time'],timing['end_time'], timing['subject']))\n connection.commit()\n currDay = timing['day']\n getTiming(timing['day'])\n\n@eel.expose\ndef deleteTiming(timing):\n global currDay\n connection.execute('DELETE FROM TIMING WHERE DAY = \\'{}\\' AND STIME = \\'{}\\' AND ETIME = \\'{}\\' AND SUBJECT = \\'{}\\';'.format(timing['day'],timing['start_time'],timing['end_time'], timing['subject']))\n connection.commit()\n currDay = timing['day']\n getTiming(timing['day'])\n \n \n@eel.expose\ndef updateTiming(new, old):\n global currDay\n connection.execute('UPDATE TIMING SET DAY = \\'{}\\' , STIME = \\'{}\\', ETIME = \\'{}\\' , SUBJECT = \\'{}\\' WHERE DAY = \\'{}\\' AND STIME = \\'{}\\' AND ETIME = \\'{}\\' AND SUBJECT = \\'{}\\'; '.format(new['day'],new['start_time'],new['end_time'], new['subject'],old['day'],old['start_time'],old['end_time'], old['subject']))\n connection.commit()\n currDay = new['day']\n getTiming(new['day'])\n \n\ndef deleteAllTimingOfaSubject(subject):\n connection.execute('DELETE FROM TIMING WHERE SUBJECT = \\'{}\\''.format(subject))\n connection.commit()\n getTiming(currDay)\n\n\ndef editAllTimingOfaSubject(new, old):\n connection.execute('UPDATE TIMING SET SUBJECT = \\'{}\\' WHERE SUBJECT = \\'{}\\'; '.format(new, old))\n connection.commit()\n getTiming(currDay)\n\n\neel.start('index.html')\n\n\n","sub_path":"webLauncher.py","file_name":"webLauncher.py","file_ext":"py","file_size_in_byte":4946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"328723364","text":"import random\nimport itertools\n\nimport sys\nsys.path.insert(0, '../graph_shortest_path')\nfrom routing import Queue\n\nclass User:\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return f'{self.name}'\n\nclass SocialGraph:\n def __init__(self):\n self.lastID = 0\n self.users = {}\n self.friendships = {}\n\n def addFriendship(self, userID, friendID):\n \"\"\"\n Creates a bi-directional friendship\n \"\"\"\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)\n\n def addUser(self, name):\n \"\"\"\n Create a new user with a sequential integer ID\n \"\"\"\n self.lastID += 1 # automatically increment the ID to assign the new user\n self.users[self.lastID] = User(name)\n self.friendships[self.lastID] = set()\n\n def populateGraph(self, numUsers, avgFriendships):\n \"\"\"\n Takes a number of users and an average number of friendships\n as arguments\n\n Creates that number of users and randomly distributed friendships\n between those users.\n\n The number of users must be greater than the 
average number of friendships.\n \"\"\"\n # Reset graph\n self.lastID = 0\n self.users = {}\n self.friendships = {}\n\n # Add users\n for i in range(0, numUsers):\n self.addUser(f'user{i + 1}')\n\n # Create friendships\n num_of_friendships = (numUsers * avgFriendships) // 2\n while num_of_friendships > 0:\n for user in self.users:\n if num_of_friendships <= 0:\n break\n possible_friends = self.users.copy()\n possible_friends.pop(user)\n rand_num_of_friends = random.randint(0, num_of_friendships)\n rand_pos_friends = list(itertools.combinations(possible_friends, rand_num_of_friends))\n random.shuffle(rand_pos_friends)\n\n if len(rand_pos_friends) > 0:\n rand_pos_friends = rand_pos_friends[0]\n\n for pos_friend in rand_pos_friends:\n if pos_friend > user: \n if pos_friend not in self.friendships[user]:\n self.addFriendship(user, pos_friend)\n num_of_friendships -= 1\n\n def getAllSocialPaths(self, userID):\n \"\"\"\n Takes a user's userID as an argument\n\n Returns a dictionary containing every user in that user's\n extended network with the shortest friendship path between them.\n\n The key is the friend's ID and the value is the path.\n \"\"\"\n visited = {} # Note that this is a dictionary, not a set\n queue = Queue()\n queue.enqueue([userID])\n while queue.len() > 0:\n user_path = queue.dequeue()\n user = user_path[-1]\n if user not in visited:\n visited[user] = user_path\n for child in self.friendships[user]:\n new_user_path = list(user_path)\n new_user_path.append(child)\n queue.enqueue(new_user_path)\n return visited\n\n\nif __name__ == '__main__':\n sg = SocialGraph()\n sg.populateGraph(10, 2)\n print(sg.friendships)\n connections = sg.getAllSocialPaths(1)\n print(connections)\n\n\n# Questions from the README answered in the README file itself.\n","sub_path":"graph_social_network/social.py","file_name":"social.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"579039705","text":"from rest_framework import routers\n\nfrom . import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'bulletin', views.BulletinViewSet, base_name='bulletin')\nrouter.register(r'summary', views.SummaryViewSet, base_name='summary')\nrouter.register(r'bulletin_unlimited', views.BulletinUnlimitedViewSet, base_name='bulletin_unlimited')\n\nrouter2 = routers.DefaultRouter()\nrouter2.register(r'bulletin', views.BulletinViewSet, base_name='bulletin')\nrouter2.register(r'bulletin_unlimited', views.BulletinUnlimitedViewSet, base_name='bulletin_unlimited')\n","sub_path":"scripts/marketbox-medical-svr/django_server/apps/bulletin/routers.py","file_name":"routers.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"380232144","text":"from pandas import DataFrame\n\nclass GeneNumber(object):\n\n\t# An init function! Nicely done.\n\tdef __init__(self, data):\n\t\tself.dataframe = DataFrame(data)\n\n\t# read a text file and return a data frame. Records should be separated by TAB\n\t# There should not be duplicate column names\n\tdef import_file(self, filename):\n\t\t# this function is used to convert a string to a float\n\t\t# Hm. 
Not clear why this is necessary?\n\t\tdef convert(x):\n\t\t\ttry:\n\t\t\t\tx = float(x)\n\t\t\texcept ValueError:\n\t\t\t\tpass\n\t\t\treturn(x)\n\n\t\ttable = []\n\t\tfor line in open(filename):\n\t\t\tif(line.strip()):\t# If not empty line\n\t\t\t\tline = line.rstrip('\\n').split('\\t')\n\t\t\t\tline = list(map(convert, line))\n\t\t\t\ttable.append(line)\n\t\tself.dataframe = DataFrame(table[1:],columns=table[0])\n\t\treturn\n\n\t# convert a list of genes into a set to remove the duplicates, and get the length\n\tdef countGene(self):\n\t\t# Nice use of a set here!\n\t\tcount = len(set(self.dataframe['Gene Accession Number']))\n\t\tprint(\"The number of genes is \" + str(count))\n\t\treturn\n\n\n# Read about why using if __name__ == '__main__': is good practice...\nlst = GeneNumber([])\nlst.import_file(\"data_set_HL60_U937_NB4_Jurkat.txt\")\nlst.countGene()\n","sub_path":"archives/2012/Kai/Kai_Zhang_Code/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"524107258","text":"import arcpy\nimport pythonaddins\n\nfrom os import sys, path\nsys.path.append(path.dirname(path.abspath(__file__)))\n\nimport settings\nimport utils\n\n\nclass CheckSuperposition(object):\n \"\"\"Implementation for supertool_addin.button4 (Button)\"\"\"\n\n def __init__(self):\n self.enabled = True\n self.checked = False\n\n def onClick(self):\n utils.remove_layer_if_exists(settings.INTERSECTED_LAYER_NAME)\n\n layer_1, _ = utils.get_layer_by_name(settings.BASE_LAYER)\n layer_2, _ = utils.get_layer_by_name(settings.LAYER_NAME)\n\n if layer_1 and layer_2:\n arcpy.Intersect_analysis(\n [layer_1.name, layer_2.name],\n settings.INTERSECTED_LAYER_NAME, 'ALL', '', '')\n\n intersected_layer, _ = utils.get_layer_by_name(settings.INTERSECTED_LAYER_NAME)\n\n if utils.exists_superposition(intersected_layer):\n utils.hide_or_show_layers(\n [settings.LAYER_NAME, settings.BASE_LAYER],\n visible=False)\n\n message = 'EXISTE SUPERPOSICION PARCIAL'\n\n else:\n message = 'NO EXISTE SUPERPOSICION'\n\n return pythonaddins.MessageBox(message, 'RESULTADO', 0)\n\n\nclass DrawPolygon(object):\n \"\"\"Implementation for supertool_addin.button (Button)\"\"\"\n\n def __init__(self):\n self.enabled = True\n self.checked = False\n\n def onClick(self):\n pythonaddins.GPToolDialog(\n settings.TOOL_BOX_FILE_PATH,\n settings.TOOL_BOX_NAME)\n\n\nclass RemovePolygon(object):\n \"\"\"Implementation for supertool_addin.button3 (Button)\"\"\"\n\n def __init__(self):\n self.enabled = True\n self.checked = False\n\n def onClick(self):\n layer, data_frame = utils.get_layer_by_name(settings.LAYER_NAME)\n if layer:\n arcpy.DeleteRows_management(settings.LAYER_NAME)\n\n intersected_layer, data_frame = utils.get_layer_by_name(settings.INTERSECTED_LAYER_NAME)\n if intersected_layer:\n arcpy.mapping.RemoveLayer(data_frame, intersected_layer)\n\n message = 'LA CAPA \"%s\" HA SIDO LIMPIADA' % settings.LAYER_NAME\n utils.hide_or_show_layers(\n [settings.LAYER_NAME, settings.BASE_LAYER],\n visible=True)\n\n return pythonaddins.MessageBox(message, 'RESULTADO', 0)\n\n\nclass ShowNewPolygon(object):\n \"\"\"Implementation for supertool_addin.button2 (Button)\"\"\"\n\n def __init__(self):\n self.enabled = True\n self.checked = False\n\n def onClick(self):\n layer, data_frame = utils.get_layer_by_name(settings.LAYER_NAME)\n if layer:\n utils.zoom_to_layer(layer, 
data_frame)\n","sub_path":"Install/supertool_addin.py","file_name":"supertool_addin.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"365997735","text":"from knx.ip import KNXIPTunnel\nimport time\nimport logging\n\ndef main():\n logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)\n\n tunnel = KNXIPTunnel(\"192.168.1.128\",3671)\n tunnel.connect()\n \n while (True):\n # Toggle the value of group address 0/0/1\n tunnel.group_toggle(1)\n \n # display the values of group addresses 0/0/1 to 0/0/5\n for i in range(1,6):\n v=tunnel.group_read(i)\n print(\"{} = {}\".format(i,v))\n\n # delay\n time.sleep(12)\n \nif __name__ == '__main__':\n main()","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"444044601","text":"import chainer\n\nclass Model(object):\n def __init__(self, PATCH_SHAPE):\n self.optimizer = chainer.optimizers.Adam()\n #self.optimizer = chainer.optimizers.SGD(lr=0.0000001)\n self.functions = Functions(PATCH_SHAPE)\n self.optimizer.setup(self.functions.collect_parameters())\n self.PATCH_SHAPE = PATCH_SHAPE\n\n\n def train(self, x_data, y_data):\n x = chainer.Variable(x_data)\n y = chainer.Variable(y_data.reshape(y_data.size/3, 3))\n h = self.functions.forward(x)\n self.optimizer.zero_grads()\n error = chainer.functions.mean_squared_error(h, y)\n error.backward()\n self.optimizer.update()\n return error.data\n\n def predict(self, x_data):\n x = chainer.Variable(x_data)\n return self.functions.forward(x).data\n\n def to_gpu(self):\n self.functions.to_gpu()\n self.optimizer.setup(self.functions.collect_parameters())\n\nclass Functions(chainer.FunctionSet):\n def __init__(self, PATCH_SHAPE):\n super(Functions, self).__init__(\n conv1 = chainer.functions.Convolution2D(\n in_channels=6,\n out_channels=64,\n ksize=9,\n stride=1,\n pad=4),\n conv2 = chainer.functions.Convolution2D(\n in_channels=64,\n out_channels=128,\n ksize=PATCH_SHAPE[0],\n stride=1,\n pad=0),\n fc1 = chainer.functions.Linear(128, 64),\n fc2 = chainer.functions.Linear(64, 3)\n )\n def forward(self, x):\n h = self.fc2(\n chainer.functions.relu(\n self.fc1(\n chainer.functions.relu(\n self.conv2(\n chainer.functions.relu(\n self.conv1(\n x\n )))))))\n return h\n","sub_path":"models/conv3layer.py","file_name":"conv3layer.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"435782299","text":"import logging\nfrom couchdbkit import ResourceNotFound\nfrom casexml.apps.case.models import CommCareCase\nfrom corehq.apps.indicators.utils import get_indicator_domains, get_namespaces\nfrom corehq.apps.indicators.models import CaseIndicatorDefinition, FormIndicatorDefinition, CaseDataInFormIndicatorDefinition\nfrom couchforms.models import XFormInstance\nfrom pillowtop.listener import BasicPillow\n\npillow_logging = logging.getLogger(\"pillowtop\")\n\n\nclass IndicatorPillowBase(BasicPillow):\n only_use_fresh_docs = True\n couch_filter = 'fluff_filter/domain_type'\n\n @property\n def extra_args(self):\n return {\n 'domains': ' '.join(get_indicator_domains()),\n 'doc_type': self.document_class._doc_type,\n }\n\n def change_transform(self, doc_dict):\n domain = doc_dict.get('domain')\n if not domain:\n return\n\n namespaces = get_namespaces(domain)\n if namespaces and 'computed_' 
in doc_dict:\n self.process_indicators(doc_dict, domain, namespaces)\n\n def process_indicators(self, doc_dict, domain, namespaces):\n raise NotImplementedError(\"You need to implement process_indicators\")\n\n\nclass CaseIndicatorPillow(IndicatorPillowBase):\n document_class = CommCareCase\n\n def process_indicators(self, doc_dict, domain, namespaces):\n case_type = doc_dict.get('type')\n if not case_type:\n return\n\n case_indicators = []\n for namespace in namespaces:\n case_indicators.extend(CaseIndicatorDefinition.get_all(namespace, domain, case_type=case_type))\n\n if case_indicators:\n case_doc = CommCareCase.get(doc_dict['_id'])\n case_doc.update_indicators_in_bulk(case_indicators, logger=pillow_logging)\n\n xform_ids = doc_dict.get('xform_ids', [])\n for namespace in namespaces:\n for xform_id in xform_ids:\n try:\n xform_doc = XFormInstance.get(xform_id)\n if not xform_doc.xmlns:\n continue\n related_xform_indicators = CaseDataInFormIndicatorDefinition.get_all(namespace, domain,\n xmlns=xform_doc.xmlns)\n xform_doc.update_indicators_in_bulk(related_xform_indicators, logger=pillow_logging)\n except ResourceNotFound:\n pillow_logging.error(\"[INDICATOR %(namespace)s %(domain)s] Tried to form indicator %(xform_id)s \"\n \"from case %(case_id)s and failed.\" % {\n 'namespace': namespace,\n 'domain': domain,\n 'xform_id': xform_id,\n 'case_id': doc_dict['_id'],\n })\n\n\nclass FormIndicatorPillow(IndicatorPillowBase):\n document_class = XFormInstance\n\n def process_indicators(self, doc_dict, domain, namespaces):\n if not doc_dict.get('inital_processing_complete', False):\n # Make sure we don't update the indicators before the XFormPillows and CasePillows.\n return\n\n xmlns = doc_dict.get('xmlns')\n if not xmlns:\n pillow_logging.warning('[INDICATOR %(domain)s] Could not find XMLS while '\n 'processing indicator for %(xform_id)s' % {\n 'domain': domain,\n 'xform_id': doc_dict['_id'],\n })\n return\n\n indicators = []\n for namespace in namespaces:\n indicators.extend(FormIndicatorDefinition.get_all(namespace, domain, xmlns=xmlns))\n\n if indicators:\n xform_doc = XFormInstance.get(doc_dict['_id'])\n xform_doc.update_indicators_in_bulk(indicators, logger=pillow_logging)\n","sub_path":"corehq/apps/indicators/pillows.py","file_name":"pillows.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"535132373","text":"from ndpi_typestruct import *\n\nclass ndpi_workflow(Structure):\n pass\n\nclass ndpi_stats(Structure):\n _fields_ = [\n ('guessed_flow_protocols', c_uint32),\n ('raw_packet_count', c_uint64),\n ('ip_packet_count', c_uint64),\n ('total_wire_bytes', c_uint64),\n ('total_ip_bytes', c_uint64),\n ('total_discarded_bytes', c_uint64),\n ('protocol_counter', c_uint64 * ((ndpi.ndpi_wrap_ndpi_max_supported_protocols() + ndpi.ndpi_wrap_ndpi_max_num_custom_protocols()) + 1)),\n ('protocol_counter_bytes', c_uint64 * ((ndpi.ndpi_wrap_ndpi_max_supported_protocols() + ndpi.ndpi_wrap_ndpi_max_num_custom_protocols()) + 1)),\n ('protocol_flows', c_uint32 * ((ndpi.ndpi_wrap_ndpi_max_supported_protocols() + ndpi.ndpi_wrap_ndpi_max_num_custom_protocols()) + 1)),\n ('ndpi_flow_count', c_uint32),\n ('tcp_count', c_uint64),\n ('udp_count', c_uint64),\n ('mpls_count', c_uint64),\n ('pppoe_count', c_uint64),\n ('vlan_count', c_uint64),\n ('fragmented_count', c_uint64),\n ('packet_len', c_uint64 * 6),\n ('max_packet_len', c_uint16),\n]\n\nclass ndpi_workflow_prefs(Structure):\n _fields_ = [\n 
('decode_tunnels', c_uint8),\n ('quiet_mode', c_uint8),\n ('num_roots', c_uint32),\n ('max_ndpi_flows', c_uint32),\n ]\n\nclass ssh_ssl(Structure):\n _fields_ = [\n (\"ssl_version\", c_uint16),\n (\"client_info\", c_char * 64),\n (\"server_info\", c_char * 64),\n (\"server_organization\", c_char * 64),\n (\"ja3_client\", c_char * 33),\n (\"ja3_server\", c_char * 33),\n (\"server_cipher\", c_uint16),\n (\"client_unsafe_cipher\", c_int64), #ndpi_cipher_weakness enumerator\n (\"server_unsafe_cipher\", c_int64),\n ]\n\nclass ndpi_flow_info(Structure):\n _fields_ = [\n (\"hashval\", c_uint32),\n (\"src_ip\", c_uint32),\n (\"dst_ip\", c_uint32),\n (\"src_port\", c_uint16),\n (\"dst_port\", c_uint16),\n (\"detection_completed\", c_uint8),\n (\"protocol\", c_uint8),\n (\"bidirectional\", c_uint8),\n (\"check_extra_packets\", c_uint8),\n (\"vlan_id\", c_uint16),\n (\"ndpi_flow\", POINTER(ndpi_flow_struct)),\n (\"src_name\", c_char * 48),\n (\"dst_name\", c_char * 48),\n (\"ip_version\", c_uint8),\n (\"last_seen\", c_uint64),\n (\"src2dst_bytes\", c_uint64),\n (\"dst2src_bytes\", c_uint64),\n (\"src2dst_packets\", c_uint32),\n (\"dst2src_packets\", c_uint32),\n\n (\"detected_protocol\", ndpi_protocol),\n\n (\"info\", c_char * 96),\n (\"host_server_name\", c_char * 256),\n (\"bittorent_hash\", c_char * 41),\n (\"dhcp_fingerprint\", c_char * 48),\n (\"ssh_ssl\", ssh_ssl),\n (\"src_id\",c_void_p),\n (\"dst_id\", c_void_p),\n\n ]\n\nndpi_workflow_callback_ptr = CFUNCTYPE(None, POINTER(ndpi_workflow), POINTER(ndpi_flow_info), c_void_p)\n\nclass pcap_t(Structure):\n pass\n\nndpi_workflow._fields_ = [\n ('last_time', c_uint64),\n ('prefs', ndpi_workflow_prefs),\n ('stats', ndpi_stats),\n ('__flow_detected_callback', ndpi_workflow_callback_ptr),\n ('__flow_detected_udata', c_void_p),\n ('__flow_giveup_callback', ndpi_workflow_callback_ptr),\n ('__flow_giveup_udata', c_void_p),\n ('pcap_handle', POINTER(pcap_t)), #pcap_t\n ('ndpi_flows_root', POINTER(c_void_p)),\n ('ndpi_struct', POINTER(ndpi_detection_module_struct)),\n ('num_allocated_flows', c_uint32)\n]\n","sub_path":"2019/Puddu/ndpi_util_struct.py","file_name":"ndpi_util_struct.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"56687828","text":"import re\nimport unicodedata\nimport sys\n\n\nclass Utils:\n\n @staticmethod\n def slugify(string):\n sub = unicodedata.normalize('NFKD', string)\n return re.sub('[-\\\\s]+', '-', re.sub('[^\\\\w\\\\s-]', '', sub)\n .strip()\n .lower())\n\n @staticmethod\n def printFormat(object):\n print('+ ' + (\"-\" * 102) + ' +')\n\n if isinstance(object, list):\n for string in object:\n if string == '*-*':\n print('+ ' + (\"-\" * 102) + ' +')\n else:\n Utils.safePrint('| {:102} |'.format(\n string.replace('\\t', ' ' * 4)))\n else:\n Utils.safePrint('| {:102} |'.format(object.replace('\\t', ' ' * 4)))\n\n print('+ ' + (\"-\" * 102) + ' +')\n\n @staticmethod\n def safePrint(object):\n print(object, file=sys.stderr)\n\n @staticmethod\n def is_empty(object):\n if object:\n return False\n else:\n return True\n\n @staticmethod\n def argument(name, default, min, line):\n if (name in line):\n match = re.search(\n r\"^.*?[ \\t]+[-]{0,2}\" + name + \"[ \\t:=]+([\\d]+)\", line, re.MULTILINE)\n\n if (match != None and len(match.groups()) == 1):\n return max(1, int(match.group(1)))\n return default\n\n @staticmethod\n def isStringInt(string):\n try:\n int(string)\n return True\n except ValueError:\n return 
False\n","sub_path":"torrent9explorer/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"460204317","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, JsonResponse, HttpResponseForbidden\nfrom .models import *\nfrom .forms import UEditorForm\nfrom django.views.decorators.csrf import csrf_exempt\nfrom haystack.generic_views import SearchView\nimport hashlib\nfrom xmltodict import parse\nfrom django.template import Context, loader\nimport random\nimport requests\nfrom lxml import etree\nimport json\n\n\n# Create your views here.\ndef index(request):\n article_list = Article.objects.all().order_by(\"-id\")\n tags = Tag.objects.all()\n comments = ArticleComments.objects.all().order_by(\"-id\")[0:9]\n return render(request, 'blog_app/index.html', {\"article_list\": article_list, \"tags\": tags,\"comments\":comments})\n\n\ndef article(request, title):\n ueditor = UEditorForm()\n the_article = Article.objects.get(title=title)\n the_article.reading += 1\n the_article.save()\n comments = ArticleComments.objects.filter(article_id=the_article.id)\n contexts = {\"the_article\": the_article, \"comments\": comments, \"ueditor\": ueditor}\n return render(request, 'blog_app/article.html', contexts)\n\n\n@csrf_exempt\ndef add_comment(request):\n name = request.POST[\"name\"]\n if name:\n the_article_id = request.POST[\"article\"]\n contents = request.POST[\"Description\"]\n comment = ArticleComments()\n comment.name = name\n comment.article_id = the_article_id\n comment.contents = contents\n comment.save()\n return JsonResponse({\"isok\": 1})\n else:\n return JsonResponse({\"isok\": 0})\n\n\ndef classification(request, classify):\n article_list = Article.objects.filter(classify=classify)\n tags = Tag.objects.all()\n comments = ArticleComments.objects.all().order_by(\"-id\")[0:9]\n return render(request, \"blog_app/classification.html\",\n {\"article_list\": article_list, \"classify\": classify, \"tags\": tags,\"comments\":comments})\n\n\ndef tag(request, the_tag):\n article_list = Tag.objects.get(tag=the_tag).article_set.all()\n print(article_list)\n tags = Tag.objects.all()\n comments = ArticleComments.objects.all().order_by(\"-id\")[0:9]\n return render(request, \"blog_app/tag.html\", {\"article_list\": article_list, \"the_tag\": the_tag, \"tags\": tags,\"comments\":comments})\n\n\nclass MySearchView(SearchView):\n def get_context_data(self, *args, **kwargs):\n tags = Tag.objects.all()\n context = super().get_context_data(*args, **kwargs)\n context[\"tags\"] = tags\n return context\n\n\n@csrf_exempt\ndef wechat(request):\n if request.method == 'GET':\n WECHAT_TOKEN = \"yunzhongyi123\"\n signature = request.GET[\"signature\"]\n timestamp = request.GET[\"timestamp\"]\n nonce = request.GET[\"nonce\"]\n echostr = request.GET[\"echostr\"]\n tmp = [WECHAT_TOKEN, timestamp, nonce]\n tmp.sort()\n tmp = \"\".join(tmp).encode()\n real_signature = hashlib.sha1(tmp).hexdigest()\n if signature == real_signature:\n result = echostr\n elif request.method == 'POST':\n data_xml = request.body\n dict_xml = parse(data_xml)\n from_user = dict_xml[\"xml\"][\"FromUserName\"]\n to_user = dict_xml[\"xml\"][\"ToUserName\"]\n msg_type = dict_xml[\"xml\"][\"MsgType\"]\n\n if msg_type == \"event\":\n event_name = dict_xml[\"xml\"][\"Event\"]\n if event_name == \"subscribe\":\n re_content = \"欢迎,旅行者!\\n1️⃣输入“笑话”获取随机笑话;\\n2️⃣ 
输入“地区”+“天气”查询天气,如“北京天气”;\\n3️⃣输入“你好小芸”和小芸聊天,输入“再见小芸”离开聊天。\"\n t = loader.get_template('blog_app/reply_text.xml')\n c = {\n 'toUser': from_user,\n 'fromUser': to_user,\n 'content': re_content\n }\n result = t.render(Context(c))\n elif msg_type == \"voice\":\n text_content = dict_xml[\"xml\"][\"Recognition\"]\n text_content = text_content[:-1]\n result = text_handle(text_content, from_user, to_user)\n elif msg_type == \"text\":\n text_content = dict_xml[\"xml\"][\"Content\"]\n result = text_handle(text_content, from_user, to_user)\n else:\n re_content = \"本公众号只支持文字消息!\"\n t = loader.get_template('blog_app/reply_text.xml')\n c = {\n 'toUser': from_user,\n 'fromUser': to_user,\n 'content': re_content\n }\n result = t.render(Context(c))\n return HttpResponse(result)\n\n\ndef text_handle(text_content, from_user, to_user):\n if XiaoYun.objects.filter(from_user=from_user):\n if text_content == \"再见小芸\":\n xiaoyun = XiaoYun.objects.get(from_user=from_user)\n xiaoyun.delete()\n re_content = \"再见!\"\n else:\n payload = {\"info\": text_content, \"key\": \"4c64b71928574551b8e85b07fbf353cc\", \"userid\": from_user}\n r = requests.post(\"http://www.tuling123.com/openapi/api\", data=json.dumps(payload))\n if r.json()[\"code\"] == 100000:\n re_content = r.json()[\"text\"]\n elif r.json()[\"code\"] == 200000:\n re_content = r.json()[\"text\"] + \"\\n\" + r.json()[\"url\"]\n elif r.json()[\"code\"] == 302000:\n re_content = r.json()[\"text\"] + \":\\n\" + r.json()[\"list\"][0][\"article\"] + \"\\n\" + r.json()[\"list\"][0][\n \"detailurl\"] + \\\n \"\\n\\n\" + r.json()[\"list\"][1][\"article\"] + \"\\n\" + r.json()[\"list\"][1][\"detailurl\"] + \"\\n\\n\" + \\\n r.json()[\"list\"][2][\"article\"] + \"\\n\" + r.json()[\"list\"][2][\"detailurl\"]\n elif r.json()[\"code\"] == 308000:\n re_content = r.json()[\"text\"] + \":\\n\" + r.json()[\"list\"][0][\"name\"] + \"\\n\" + r.json()[\"list\"][0][\"info\"]+\"\\n\"+r.json()[\"list\"][0][\"detailurl\"]\n else:\n re_content = r.json()[\"text\"]\n else:\n re_content = \"你好,旅行者!\\n1️⃣输入“笑话”获取随机笑话;\\n2️⃣输入“地区”+“天气”查询天气,如“北京天气”;\\n3️⃣输入“你好小芸”和小芸聊天,输入“再见小芸”离开聊天。\"\n\n if text_content == \"博客\":\n re_content = \"www.hushuikun.com\"\n elif text_content == \"笑话\":\n jokes_count = Duanzi.objects.count()\n random_int = random.randint(1, jokes_count)\n joke = Duanzi.objects.get(id=random_int).content\n re_content = joke\n elif text_content.startswith(\"点歌\"):\n re_content = \"抱歉,点歌服务当前不可用~~\"\n #if text_content == \"点歌\":\n # re_content = \"你好,旅行者!\\n输入“点歌”+“歌曲名”或者“点歌”+“歌曲名”+“歌手名”点歌,如“点歌青花瓷”或者“点歌青花瓷周杰伦”\"\n #else:\n # headers = {\n # \"User-Agent\": \"Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46\"\n # \" (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1\"}\n # keyword = text_content.replace(\"点歌\", \"\")\n # payload = {\"qword\": keyword, \"ie\": \"utf-8\", \"page\": \"0\"}\n # r1 = requests.get(\"http://musicmini.baidu.com/app/search/searchList.php\",\n # params=payload, headers=headers)\n # html = r1.content.decode()\n # xml = etree.HTML(html)\n # # if xml.xpath(\"//*//*[@id='sc-table']/tr[3]/td/@append\")[0] == \"伴奏\":\n # # song_id = xml.xpath(\"//*[@id='sc-table']/tr[4]/th/i/input/@id\")[0]\n # if xml.xpath(\"//*[@id='sc-table']/tr[3]/td[1]/i/@title\")[0] == \"该歌曲来自MP3搜索\":\n # re_content = \"对不起,歌曲因版权原因暂时无法播放。\"\n # else:\n # song_id = xml.xpath(\"//*[@id='sc-table']/tr[3]/th/i/input/@id\")[0]\n # r2 = requests.get(\"http://ting.baidu.com/data/music/links?songIds=%s\" % song_id, headers=headers)\n # TITLE = 
r2.json()['data']['songList'][0]['songName']\n # MUSIC_Url = r2.json()['data']['songList'][0]['songLink']\n # artistName = r2.json()['data']['songList'][0]['artistName']\n # if r2.json()['data']['songList'][0]['albumName']:\n # albumName = r2.json()['data']['songList'][0]['albumName']\n # else:\n # albumName = \"\"\n # t = loader.get_template('blog_app/reply_music.xml')\n # c = {\n # 'toUser': from_user,\n # 'fromUser': to_user,\n # 'TITLE': TITLE,\n # 'MUSIC_Url': MUSIC_Url,\n # 'HQ_MUSIC_Url': MUSIC_Url,\n # 'DESCRIPTION': artistName + \" \" + albumName\n # }\n # result = t.render(Context(c))\n # return HttpResponse(result)\n elif text_content.endswith(\"天气\"):\n if text_content == \"天气\":\n re_content = \"你好,旅行者!\\n输入“地区”+“天气”查询天气,如“北京天气”。\"\n else:\n place = text_content.replace(\"天气\", \"\")\n payload = {\"location\": place, \"key\": \"ca72cd49f73343909bc11a7edb802dd8\"}\n r = requests.get(\"https://free-api.heweather.com/s6/weather/now\", params=payload)\n if r.json()[\"HeWeather6\"][0][\"status\"] == 'unknown city':\n re_content = \"未查询到输入的地区。\"\n elif r.json()[\"HeWeather6\"][0][\"status\"] == \"ok\":\n # 地区\n location = r.json()[\"HeWeather6\"][0][\"basic\"]['location']\n # 温度\n tmp = r.json()[\"HeWeather6\"][0][\"now\"]['tmp']\n # 天气状况\n cond_txt = r.json()[\"HeWeather6\"][0][\"now\"]['cond_txt']\n # 风向\n wind_dir = r.json()[\"HeWeather6\"][0][\"now\"]['wind_dir']\n # 风力\n wind_sc = r.json()[\"HeWeather6\"][0][\"now\"]['wind_sc']\n # 风速\n wind_spd = r.json()[\"HeWeather6\"][0][\"now\"]['wind_spd']\n # 相对湿度\n hum = r.json()[\"HeWeather6\"][0][\"now\"]['hum']\n # 降水量\n pcpn = r.json()[\"HeWeather6\"][0][\"now\"]['pcpn']\n # 大气压强\n pres = r.json()[\"HeWeather6\"][0][\"now\"]['pres']\n # 能见度\n if \"vis\" in r.json()[\"HeWeather6\"][0][\"now\"]:\n vis = r.json()[\"HeWeather6\"][0][\"now\"]['vis'] + \"公里\"\n else:\n vis = \"无数据\"\n # 云量\n cloud = r.json()[\"HeWeather6\"][0][\"now\"]['cloud']\n # 更新时间\n loc = r.json()[\"HeWeather6\"][0][\"update\"]['loc']\n re_content = \"地区:%s\\n温度:%s℃\\n天气状况:%s\\n风向:%s\\n风力:%s\\n风速:%s公里/小时\\n相对湿度:%s\\n\" \\\n \"降水量:%s\\n大气压强:%s\\n能见度:%s\\n云量:%s\\n更新时间:当地时间%s\" % (\n location, tmp, cond_txt, wind_dir, wind_sc,\n wind_spd, hum, pcpn, pres, vis, cloud, loc)\n elif text_content == \"你好小芸\":\n xiaoyun = XiaoYun()\n xiaoyun.from_user = from_user\n xiaoyun.save()\n re_content = \"你好!小芸来了。\"\n t = loader.get_template('blog_app/reply_text.xml')\n c = {\n 'toUser': from_user,\n 'fromUser': to_user,\n 'content': re_content\n }\n return t.render(Context(c))\n","sub_path":"blog_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"340758752","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nfrom keras.layers import Dense\nfrom keras.models import Sequential\n\n\n# In[2]:\n\n\nmodel = Sequential()\n\n\n# In[3]:\n\n\nmodel.add(Dense(units=2,activation='relu',input_dim=2))\nmodel.add(Dense(units=1,activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])\n\n\n# In[4]:\n\n\nprint(model.summary())\nprint(model.get_weights())\n\n\n# In[5]:\n\n\n# XOR\nx = np.array([[0,0],[0,1],[1,0],[1,1]])\ny = np.array([0,1,1,0])\n\nmodel.fit(x,y,epochs=1000,batch_size=4)\nprint(model.get_weights())\nprint(model.predict(x,batch_size=4))\n\n\n# In[6]:\n\n\n# OR\nx = np.array([[0,0],[0,1],[1,0],[1,1]])\ny = 
np.array([0,1,1,1])\n\nmodel.fit(x,y,epochs=1000,batch_size=4)\nprint(model.get_weights())\nprint(model.predict(x,batch_size=4))\n\n\n# In[7]:\n\n\n# AND\nx = np.array([[0,0],[0,1],[1,0],[1,1]])\ny = np.array([0,0,0,1])\n\nmodel.fit(x,y,epochs=1000,batch_size=4)\nprint(model.get_weights())\nprint(model.predict(x,batch_size=4))\n\n\n# In[8]:\n\n\n# iris\nfrom sklearn import datasets\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# In[9]:\n\n\niris = datasets.load_iris()\nx = pd.DataFrame(iris['data'], columns=iris['feature_names'])\nprint(\"target_names: \"+str(iris['target_names']))\ny = pd.DataFrame(iris['target'], columns=['target'])\niris_data = pd.concat([x,y], axis=1)\niris_data.head()\n\n\n# In[10]:\n\n\niris['target_names']\n\n\n# In[11]:\n\n\ntarget_name = {0:'setosa',1:'versicolor',2:'virginica'}\niris_data['target_name'] = iris_data['target'].map(target_name)\niris_data = iris_data[(iris_data['target_name'] == 'setosa')|(iris_data['target_name'] == 'versicolor')]\niris_data = iris_data[['sepal length (cm)','petal length (cm)','target_name']]\niris_data.head()\n\n\n# In[12]:\n\n\ntarget_class = {'setosa':1,'versicolor':-1}\niris_data['target_class'] = iris_data['target_name'].map(target_class)\ndel iris_data['target_name']\niris_data.head()\n\n\n# In[13]:\n\n\ndef sign(z):\n if z > 0:\n return 1\n else:\n return -1\n\n\n# In[14]:\n\n\nw = np.array([0.,0.,0.])\nerror = 1\niterator = 0\nwhile error != 0:\n error = 0\n for i in range(len(iris_data)):\n x,y = np.concatenate((np.array([1.]), np.array(iris_data.iloc[i])[:2])), np.array(iris_data.iloc[i])[2]\n if sign(np.dot(w,x)) != y:\n print(\"iterator: \"+str(iterator))\n iterator += 1\n error += 1\n sns.lmplot('sepal length (cm)','petal length (cm)',data=iris_data, fit_reg=False, hue ='target_class')\n \n # normal vector of the previous decision boundary\n if w[1] != 0:\n x_last_decision_boundary = np.linspace(0,w[1])\n y_last_decision_boundary = (w[2]/w[1])*x_last_decision_boundary\n plt.plot(x_last_decision_boundary, y_last_decision_boundary,'c--')\n w += y*x \n print(\"x: \" + str(x)) \n print(\"w: \" + str(w))\n # the x vector\n x_vector = np.linspace(0,x[1])\n y_vector = (x[2]/x[1])*x_vector\n plt.plot(x_vector, y_vector,'b')\n # direction vector of the decision boundary\n x_decision_boundary = np.linspace(-0.5,7)\n y_decision_boundary = (-w[1]/w[2])*x_decision_boundary - (w[0]/w[2])\n plt.plot(x_decision_boundary, y_decision_boundary,'r')\n # normal vector of the decision boundary\n x_decision_boundary_normal_vector = np.linspace(0,w[1])\n y_decision_boundary_normal_vector = (w[2]/w[1])*x_decision_boundary_normal_vector\n plt.plot(x_decision_boundary_normal_vector, y_decision_boundary_normal_vector,'g')\n plt.xlim(-0.5,7.5)\n plt.ylim(5,-3)\n plt.show()\n\n","sub_path":"20191003HW/20191003_1_perceptron and or xor iris/20191003_1_perceptron and or xor iris.py","file_name":"20191003_1_perceptron and or xor iris.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"70700102","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 11 17:51:43 2021\r\n\r\n@author: pyliu\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport math\r\nimport scipy as sp\r\n\r\nfrom calc_length import *\r\n\r\ndef calc_angle(a,b,c):\r\n \"\"\"\r\n Calculate internal angle at b between points a -> b -> c\r\n i.e. 
angle abc (smaller than pi radians)\r\n Angle has units of radians\r\n\r\n Parameters\r\n ----------\r\n a : FLOAT, N-dim vector\r\n coords of point 1\r\n b : FLOAT, N-dim vector\r\n coords of point 2\r\n c : FLOAT, N-dim vector\r\n coords of point 3\r\n\r\n Returns\r\n -------\r\n FLOAT, scalar\r\n internal angle abc (radians)\r\n\r\n \"\"\"\r\n #1) Convert to numpy for easy manipulation\r\n a = np.array(a)\r\n b = np.array(b)\r\n c = np.array(c)\r\n \r\n #2) use trig\r\n ab = b - a\r\n bc = c - b\r\n len_ab = calc_length(a,b)\r\n len_bc = calc_length(b,c)\r\n cos_theta = np.sum( np.array(ab)*np.array(bc) ) / (len_ab * len_bc)\r\n return np.arccos(cos_theta)","sub_path":"Wk10_STRANDS/calc_angle.py","file_name":"calc_angle.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"242459881","text":"\"\"\"\nGiven a string array words, find the maximum value of length(word[i]) * length(word[j]) where the two words do not share common letters. You may assume that each word will contain only lower case letters. If no such two words exist, return 0.\n\nExample 1:\n\nInput: [\"abcw\",\"baz\",\"foo\",\"bar\",\"xtfn\",\"abcdef\"]\nOutput: 16 \nExplanation: The two words can be \"abcw\", \"xtfn\".\nExample 2:\n\nInput: [\"a\",\"ab\",\"abc\",\"d\",\"cd\",\"bcd\",\"abcd\"]\nOutput: 4 \nExplanation: The two words can be \"ab\", \"cd\".\nExample 3:\n\nInput: [\"a\",\"aa\",\"aaa\",\"aaaa\"]\nOutput: 0 \nExplanation: No such pair of words.\n\"\"\"\n\n# solution 1\nclass Solution:\n def maxProduct(self, words: List[str]) -> int:\n lib = {word: set(word) for word in words} \n res = 0\n for i, word1 in enumerate(words[:-2]):\n for j, word2 in enumerate(words[i+1:]):\n # 判断两个集合是否有交集(判断是否有重复字符的方法)\n if not lib[word1] & lib[word2]: \n res = max(res, len(word1) * len(word2))\n return res\n\n# solution 2:Brute\nclass Solution:\n def withSameLetter(self, s1, s2):\n if len(s1) < 1 or len(s2) < 1: return False\n for s in s1:\n if s in s2: return True\n return False\n \n def maxProduct(self, words: List[str]) -> int:\n if len(words) < 2: return 0\n ans = 0\n for i in range(len(words)-1):\n for j in range(i, len(words)):\n if self.withSameLetter(words[i], words[j]): continue\n else:\n ans = max(ans, len(words[i]) * len(words[j]))\n return ans\n\n","sub_path":"python/0318.Maximum Product of Word Lengths.py","file_name":"0318.Maximum Product of Word Lengths.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"341574149","text":"#!/usr/bin/env python\r\n\r\n# this modules are necessary to run the raspberry pi and interact with the general purpose input/output (GPIO) pins\r\nimport RPi.GPIO as GPIO\r\n# this module is important for setting duration and pauses in the code\r\nimport time\r\n\r\n# Set up pins\r\nMotorPin1 = 17\r\nMotorPin2 = 18\r\nMotorEnable = 27\r\n\r\n# this is the message that will print when the program is run and gives instructions to the user\r\ndef print_message():\r\n\tprint (\"========================================\")\r\n\tprint (\"| RAP pipette |\")\r\n\tprint (\"| ------------------------------ |\")\r\n\tprint (\"| Motor pin 1 connect to GPIO0 |\")\r\n\tprint (\"| Motor pin 2 connect to GPIO1 |\")\r\n\tprint (\"| Motor enable connect to GPIO2 |\")\r\n\tprint (\"| |\")\r\n\tprint (\"| Controlling a motor |\")\r\n\tprint (\"| |\")\r\n\tprint (\"| O'Connor Lab|\")\r\n\tprint 
\r\n\tprint (\"========================================\\n\")\r\n\tprint ('Program is running...')\r\n\tprint ('Please press Ctrl+C to end the program...')\r\n\traw_input (\"Press Enter to begin\\n\") # this is a built-in python function that receives input from the user\r\n\r\ndef setup():\r\n\t# Set the GPIO modes to BCM Numbering\r\n\tGPIO.setmode(GPIO.BCM)\r\n\t# Set pins to output\r\n\tGPIO.setup(MotorPin1, GPIO.OUT)\r\n\tGPIO.setup(MotorPin2, GPIO.OUT)\r\n\tGPIO.setup(MotorEnable, GPIO.OUT, initial=GPIO.LOW) # sets the motor to off at the start\r\n\r\n# Define a motor function to spin the motor\r\n# direction should be \r\n# 1(clockwise), 0(stop), -1(counterclockwise)\r\n# if the wires ever get switched, clockwise will become counterclockwise and vice versa\r\ndef motor(direction):\r\n\t# Clockwise\r\n\tif direction == 1:\r\n\t\t# Set direction\r\n\t\tGPIO.output(MotorPin1, GPIO.HIGH)\r\n\t\tGPIO.output(MotorPin2, GPIO.LOW)\r\n\t\t# Enable the motor\r\n\t\tGPIO.output(MotorEnable, GPIO.HIGH)\r\n\t\tprint (\"Clockwise\")\r\n\t# Counterclockwise\r\n\tif direction == -1:\r\n\t\t# Set direction\r\n\t\tGPIO.output(MotorPin1, GPIO.LOW)\r\n\t\tGPIO.output(MotorPin2, GPIO.HIGH)\r\n\t\t# Enable the motor\r\n\t\tGPIO.output(MotorEnable, GPIO.HIGH)\r\n\t\tprint (\"Counterclockwise\")\r\n\t# Stop\r\n\tif direction == 0:\r\n\t\t# Disable the motor\r\n\t\tGPIO.output(MotorEnable, GPIO.LOW)\r\n\t\tprint (\"Stop\")\r\n\r\ndef main():\r\n\tprint_message() # prints the message written above\r\n\r\n\t# Define a dictionary to make the script more readable\r\n\t# CW as clockwise, CCW as counterclockwise, STOP as stop\r\n\tdirections = {'CW': 1, 'CCW': -1, 'STOP': 0}\r\n\titeration = 1\r\n\twhile iteration < 6:\r\n\r\n\t\tif iteration < 6:\r\n\t\t\t# Clockwise\r\n\t\t\tmotor(directions['CW'])\r\n\t\t\ttime.sleep(190)\r\n\t\t\t# Stop\r\n\t\t\tmotor(directions['STOP'])\r\n\t\t\ttime.sleep(30)\r\n\t\t\t# Counterclockwise\r\n\t\t\tmotor(directions['CCW'])\r\n\t\t\ttime.sleep(190)\r\n\t\t\t# Stop\r\n\t\t\tmotor(directions['STOP'])\r\n\t\t\ttime.sleep(30)\r\n\t\t\tprint (\"cycle: \" + str(iteration))\r\n\t\t\titeration = iteration + 1\r\n\t\telse:\r\n\t\t\tprint (\"Error\")\r\n\r\n# stops the motor and unassigns the GPIO pins\r\ndef destroy():\r\n\t# Stop the motor\r\n\tGPIO.output(MotorEnable, GPIO.LOW)\r\n\t# Release resource\r\n\tGPIO.cleanup() \r\n\r\n# If running this script directly, do:\r\nif __name__ == '__main__':\r\n\tsetup()\r\n\ttry:\r\n\t\tmain()\r\n\t\tprint (\"RAP Mix Completed\")\r\n\t# When 'Ctrl+C' is pressed, the child program destroy() will be executed\r\n\texcept KeyboardInterrupt:\r\n\t\tdestroy()","sub_path":"code/02_RAP.py","file_name":"02_RAP.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"60969933","text":"import collections\n\n\nclass Solution:\n def removeStones(self, a) -> int:\n n = len(a)\n dx = {}\n dy = {}\n for i, p in enumerate(a):\n if p[0] not in dx:\n dx[p[0]] = {i}\n else:\n dx[p[0]].add(i)\n if p[1] not in dy:\n dy[p[1]] = {i}\n else:\n dy[p[1]].add(i)\n color = [-1] * n\n\n def dfs(i, c):\n color[i] = c\n p = a[i]\n for x in dx[p[0]]:\n if color[x] == -1:\n dfs(x, c)\n for y in dy[p[1]]:\n if color[y] == -1:\n dfs(y, c)\n\n col = 0\n for i in range(n):\n if color[i] == -1:\n dfs(i, col)\n col += 1\n d = collections.Counter(color)\n return sum([d[c] - 1 for c in d])\n\n\ns = Solution()\nprint(s.removeStones([[0, 1], [1, 0]]))\n# print(s.removeStones([[0, 0], [0, 1], [1, 0], 
[1, 2], [2, 1], [2, 2]]))\n# print(s.removeStones([[0, 0], [0, 2], [1, 1], [2, 0], [2, 2]]))\n","sub_path":"leetcode/2020/most-stones-removed-with-same-row-or-column.py","file_name":"most-stones-removed-with-same-row-or-column.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"186870890","text":"import numpy as np\nimport os\nimport scipy.io as sio\nimport pandas as pd\nimport json\n\n \n\ndef extractSign():\n datadir=\"train_data\"\n sign={}\n for dir in os.listdir(datadir):\n fullpath=os.path.join(datadir,dir,\"candidates.mat\")\n scanDict=sio.loadmat(fullpath)\n observations=np.array(scanDict.get('candidates'))\n cand_ids=np.array([observation[3] for observation in observations[0,:] if observation[0]==1]).tolist()\n sign[fullpath]=cand_ids\n return sign \n\n\n \n \ndef saveNbOfObservations(): \n datadir=\"train_data\"\n \n paths=[]\n nb_obs=[]\n nb_positives=[]\n for dir in os.listdir(datadir):\n if os.path.isdir(dir):\n fullpath=os.path.join(datadir,dir,\"candidates.mat\")\n scanDict=sio.loadmat(fullpath)\n observations=np.array(scanDict.get('candidates'))\n cand_shape=observations.shape\n paths.append(dir)\n nb_obs.append(cand_shape[1])\n cand_ids=np.array([observation[3] for observation in observations[0,:] if observation[0]==1]).tolist()\n nb_positives.append(len(cand_ids))\n print(dir)\n \n df=pd.DataFrame({\"path\":paths, \"nb_observs\":nb_obs, \"nb_positives\":nb_positives})\n\n# print(\"Min nb of observations:\", df[\"nb_observs\"].min())\n# print(\"Max nb of observations\", df[\"nb_observs\"].max())\n# print(\"Average nb of observations\", df[\"nb_observs\"].max())\n\n return df \n \n \n \ndef candD(candidateData):\n return {\n 'label':candidateData[0],\n 'vol':candidateData[2],\n 'cand_id':candidateData[3]\n }\n\n\n#with open('positives.json', 'w') as outfile:\n# json.dump( extractSign(), outfile) \n# \n# \n#with open('nbObservations.json', 'w') as outfile:\n# json.dump( saveNbOfObservations(), outfile) \n# \n#frame=saveNbOfObservations() \n#frame.to_csv(\"info_on_data.csv\")\n\n\n#df=pd.read_csv(\"info_on_data.csv\")\n#print(df.head())\n#print(\"Total Nuymber of Observations \", df[\"nb_observs\"].sum())\n#print(\"Total Nuymber of Positive Examples \", df[\"nb_positives\"].sum())\n#print(df.describe())\n\n\nsign={}\ndatadir=\"train_data\"\n\nfor dir in os.listdir(datadir):\n pathToScan=os.path.join(datadir,dir)\n if os.path.isdir(pathToScan): \n fullpath=os.path.join(pathToScan,\"candidates.mat\")\n scanDict=sio.loadmat(fullpath)\n candids=np.array(scanDict.get('candidates'))\n positives=[]\n for cand in candids[0,:]:\n # getting the candidate:\n if cand[0]==0:\n positives.append(cand[3])\n \n sign[dir]=positives\n print(dir) \n \nwith open('positives.json', 'w') as outfile:\n json.dump(sign, outfile) \n\nprint(sign)\n\n#root=\n# data_folders = [\n#os.path.join(root, d) for d in sorted(os.listdir(root))\n#if os.path.isdir(os.path.join(root, d))]\n# if len(data_folders) != num_classes:\n#raise Exception(\n# 'Expected %d folders, one per class. Found %d instead.' 
% (\n# num_classes, len(data_folders)))\n# print(data_folders)\n# return data_folders","sub_path":"3_Approach2-Vanilla_Convolutional_Neural_Net/prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"644910798","text":"#!/usr/bin/env python\n# coding:utf-8\n\nfrom __future__ import division\nimport numpy as np\nimport chainer\nfrom multibox_coder import MultiboxCoder\nfrom transformsUtils.bbox.resize_bbox import resize_bbox\nfrom transformsUtils.image.resize import resize\nimport chainer.functions as F\n\nclass SSD(chainer.Chain):\n def __init__(self, extractor, multibox, steps, sizes, variance=(0.1, 0.2), mean=0):\n self.mean = mean\n self.use_preset('visualize')\n super(SSD, self).__init__()\n with self.init_scope():\n self.extractor = extractor\n self.multibox = multibox\n self.coder = MultiboxCoder(extractor.grids, multibox.aspect_ratios, steps, sizes, variance)\n\n @property\n def insize(self):\n return self.extractor.insize\n\n @property\n def n_fg_class(self):\n return self.multibox.n_class - 1\n\n def to_cpu(self):\n super(SSD, self).to_cpu()\n self.coder.to_cpu()\n\n def to_gpu(self, device=None):\n super(SSD, self).to_gpu(device)\n self.coder.to_gpu(device=device)\n\n def __call__(self, x):\n return self.multibox(self.extractor(x))\n\n def extract(self, x):\n h = x\n activations = {'input': x}\n ys, hs = self.extractor(h, retain_output=True)\n mb_locs, mb_confs = self.multibox(ys)\n activations['prob'] = mb_confs\n activations['bbox'] = mb_locs\n activations[\"layer\"] = hs\n return activations\n\n def extract_layer_feature(self, x, layer):\n ys, hs = self.extractor(x, retain_output=True)\n h = hs[layer]\n return h\n\n\n def _prepare(self, img):\n img = img.astype(self.xp.float32)\n img = resize(img, (self.insize, self.insize))\n #img = transforms.resize(img, (self.insize, self.insize))\n #img -= self.mean\n return img\n\n def set_preset(self, thresh):\n self.nms_thresh = 0.45\n self.score_thresh = thresh\n\n def use_preset(self, preset):\n if preset == 'visualize':\n self.nms_thresh = 0.35\n self.score_thresh = 0.4\n elif preset == 'evaluate':\n self.nms_thresh = 0.45\n self.score_thresh = 0.01\n else:\n raise ValueError('preset must be visualize or evaluate')\n\n def predict_ssd(self, x):\n sizes = list()\n for img in x:\n _, H, W = img.shape\n sizes.append((H, W))\n #x = chainer.Variable(x)\n mb_locs, mb_confs = self(x)\n mb_locs, mb_confs = mb_locs.data, mb_confs.data\n\n bboxes = list()\n labels = list()\n scores = list()\n for mb_loc, mb_conf, size in zip(mb_locs, mb_confs, sizes):\n bbox, label, score = self.coder.decode(mb_loc, mb_conf, self.nms_thresh, self.score_thresh)\n bbox = resize_bbox(bbox, (self.insize, self.insize), size)\n #bbox = transforms.resize_bbox(\n # bbox, (self.insize, self.insize), size)\n bboxes.append(chainer.cuda.to_cpu(bbox))\n labels.append(chainer.cuda.to_cpu(label))\n scores.append(chainer.cuda.to_cpu(score))\n\n return bboxes, labels, scores\n\n def predict(self, imgs):\n x = list()\n sizes = list()\n for img in imgs:\n _, H, W = img.shape\n img = self._prepare(img)\n x.append(self.xp.array(img))\n sizes.append((H, W))\n\n with chainer.using_config('train', False), \\\n chainer.function.no_backprop_mode():\n x = chainer.Variable(self.xp.stack(x))\n mb_locs, mb_confs = self(x)\n mb_locs, mb_confs = mb_locs.data, mb_confs.data\n\n bboxes = list()\n labels = list()\n scores = list()\n for mb_loc, mb_conf, size 
in zip(mb_locs, mb_confs, sizes):\n bbox, label, score = self.coder.decode(\n mb_loc, mb_conf, self.nms_thresh, self.score_thresh)\n bbox = resize_bbox(\n bbox, (self.insize, self.insize), size)\n #bbox = transforms.resize_bbox(\n # bbox, (self.insize, self.insize), size)\n bboxes.append(chainer.cuda.to_cpu(bbox))\n labels.append(chainer.cuda.to_cpu(label))\n scores.append(chainer.cuda.to_cpu(score))\n return bboxes, labels, scores\n\n","sub_path":"chainer/ssd_withRotate_Score/model/ssd.py","file_name":"ssd.py","file_ext":"py","file_size_in_byte":4291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"372589175","text":"import os\nimport binascii\nimport datetime\nimport logging\nfrom decimal import Decimal\n\nfrom rest_framework import status, viewsets, generics, mixins, exceptions\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import list_route\n\nfrom django.conf import settings\nfrom django.utils.timezone import utc\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\nfrom django.utils import timezone\n\nfrom apps.payments.models import Order\nfrom apps.payments.api.serializers import (\n OrderCreateSerializer, OrderSerializer)\nfrom apps.rates.models import Exchange\nfrom apps.transfers.api.views import get_transfer_backend\nfrom apps.transfers.models import Transfer\n\n\nlogger = logging.getLogger('money.payment')\n\n\ndef save_deferred_transfer(request, transfer_backend, order):\n transfer = transfer_backend.process(order=order)\n if '__transfer__' in request.session:\n del request.session['__transfer__']\n return transfer\n\n\nclass PaymentViewSet(viewsets.ViewSet):\n @list_route(methods=['post'])\n def order(self, request):\n transfer_after = request.data.get('forTransfer', False)\n\n order_serializer = OrderCreateSerializer(\n data=request.data, user=request.user, for_transfer=transfer_after)\n\n if order_serializer.is_valid():\n if transfer_after:\n transfer_data = request.data['transfer']\n try:\n transfer_backend_class = get_transfer_backend(transfer_data)\n if not transfer_backend_class:\n logger.error(\n 'Несуществующий метод перевода: %s' % (\n transfer_data.get('method'),))\n return Response(status=status.HTTP_400_BAD_REQUEST)\n except:\n logger.error(\n 'Невозможно импортировать модуль метода перевода: %s' % (\n transfer_data.get('method'),))\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n transfer_backend = transfer_backend_class(\n request.user, reason=Transfer.REASON.DEFERRED)\n transfer_serializer = transfer_backend.get_serializer(\n transfer_data, request,\n order_data=order_serializer.validated_data)\n\n if not transfer_after or transfer_serializer.is_valid():\n amount = Exchange.cost(\n order_serializer.validated_data['quantity'], 'RUB')\n order = Order.objects.create(\n user=request.user,\n quantity=order_serializer.validated_data['quantity'],\n amount=round(amount, 2),\n created=timezone.now(),\n note='Покупка умов',\n comment=order_serializer.validated_data.get('comment'),\n for_transfer=transfer_after\n )\n result = order_serializer.backend_instance.process(order)\n\n if transfer_after:\n transfer = save_deferred_transfer(\n request, transfer_backend, order)\n\n return Response(result, status=status.HTTP_201_CREATED)\n else:\n logger.error(\n 'Ошибка покупки для оплаты. 
Запрос: %s; Ошибки: %s' % (\n str(request.data), str(transfer_serializer.errors)\n ))\n return Response({\n 'transfer': transfer_serializer.errors\n }, status=status.HTTP_400_BAD_REQUEST)\n\n return Response({\n 'payment': order_serializer.errors\n }, status=status.HTTP_400_BAD_REQUEST)\n\n @list_route(methods=['post'])\n def retry(self, request):\n order = get_object_or_404(Order,\n number=request.data.get('number'),\n status=Order.STATUS.CANCELED)\n if order.user != request.user:\n raise exceptions.PermissionDenied()\n\n data = {\n 'quantity': request.data.get('quantity', order.quantity),\n 'comment': request.data.get('comment', order.comment),\n 'method': request.data.get('method',\n order.backend_name.replace(\n 'PaymentBackend', ''))\n }\n\n order_serializer = OrderCreateSerializer(\n data=data, user=request.user, for_transfer=order.for_transfer)\n\n if order_serializer.is_valid():\n amount = Exchange.cost(\n order_serializer.validated_data['quantity'], 'RUB')\n new_order = Order.objects.create(\n user=request.user,\n quantity=order_serializer.validated_data['quantity'],\n amount=round(amount, 2),\n created=timezone.now(),\n note=order.note,\n comment=order_serializer.validated_data.get('comment'),\n for_transfer=order.for_transfer\n )\n\n if order.for_transfer:\n transfers = Transfer.objects.filter(\n order=order,\n reason=Transfer.REASON.DEFERRED\n )\n for transfer in transfers:\n transfer.pk = None\n transfer.status = Transfer.STATUS.NEW\n transfer.order = new_order\n transfer.created = new_order.created\n transfer.amount = new_order.quantity\n transfer.cost = new_order.amount\n transfer.system_comment = None\n transfer.receiver_hash = binascii.hexlify(\n os.urandom(32)\n ).decode('utf-8')\n transfer.save()\n\n result = order_serializer.backend_instance.process(new_order)\n return Response(result, status=status.HTTP_201_CREATED)\n\n return Response({\n 'payment': order_serializer.errors\n }, status=status.HTTP_400_BAD_REQUEST)\n\n @list_route(methods=['get'])\n def info(self, request):\n try:\n order = Order.objects.get(\n number=request.query_params['id']\n )\n if order.status == Order.STATUS.NEW:\n raise\n except:\n raise Http404\n\n if order.user != request.user:\n raise exceptions.PermissionDenied()\n\n context = {\n 'request': request\n }\n order_serializer = OrderSerializer(order, context=context)\n return Response(order_serializer.data, status=status.HTTP_200_OK)\n","sub_path":"apps/payments/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"593430249","text":"#coding=utf-8\nimport json\nimport traceback\n\nimport requests\nimport time\nfrom bs4 import BeautifulSoup\n\n# html = open('itjuzi_gai.html','r').read()\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\n\n\ndef parseComListHtml(html):\n soup = BeautifulSoup(html, 'html.parser')\n\n comlistbox = soup.find('div', class_='company-list-box')\n\n leftbox = comlistbox.find('div', class_='company-list-left')\n\n comlist = {}\n\n leftlis = leftbox.find_all('li',)\n for li in leftlis:\n if li.get('data-id'):\n comleftdata = {\n 'com_id': li.get('data-id', ''),\n 'com_logo_archive': li.find('img').get('src', ''),\n 'com_name': li.find_all('a')[1].text,\n 'com_des': li.find('p', class_='des').text,\n }\n comlist[li.get('data-id')] = comleftdata\n\n infobox = comlistbox.find('div', class_='company-list-info')\n infolis = infobox.find_all('li', )\n for li 
in infolis:\n if li.get('data-id'):\n infodivs = li.find_all('div')\n latestround = infodivs[2].find_all('span')\n cominfodata = {\n 'com_cat_name': infodivs[0].text,\n 'com_sub_cat_name': infodivs[1].text,\n 'invse_total_money': infodivs[3].text,\n 'guzhi': infodivs[4].text,\n 'com_addr': infodivs[5].text,\n 'com_born_date': infodivs[6].text,\n 'com_status': infodivs[7].text,\n 'com_scale': infodivs[8].text,\n 'invse_date': latestround[0].text,\n 'invse_round_id': latestround[1].text,\n 'invse_detail_money': latestround[2].text,\n }\n comlist[li.get('data-id')].update(cominfodata)\n\n page = soup.find('div', id_='page-selection').find('li', class_='active').text\n\n return comlist, page\n\n\n\ndef parseComDetailHtml(html):\n soup = BeautifulSoup(html, 'html.parser')\n if soup.title:\n response = {}\n com_name = soup.title.text\n if com_name in (u'www.itjuzi.com', u'找不到您访问的页面', u'502 Bad Gateway', u'403', u'IT桔子 | 泛互联网创业投资项目信息数据库及商业信息服务商'):\n return None, com_name, None\n com_name = com_name.replace(u' - IT桔子', u'').split(',')[0]\n com_web = None\n a_s = soup.find('svg', class_='svg-icon link mr-2', )\n if a_s:\n com_web = a_s.parent.get('href')\n response['com_web'] = com_web\n full_name = soup.find('p', class_='seo-second-title margin-right50', )\n if full_name:\n full_name = full_name.text\n # Contact info\n contact_ul = soup.find('ul', class_='list-block aboutus')\n if contact_ul:\n for info in contact_ul.find_all('li'):\n if info.find('svg', class_='svg-icon phone align-middle'):\n response['mobile'] = info.text.replace('\\n', '').replace('\\t', '')\n if info.find('svg', class_='svg-icon email align-middle'):\n response['email'] = info.text.replace('\\n', '').replace('\\t', '')\n if info.find('svg', class_='svg-icon home'):\n response['detailaddress'] = info.text.replace('\\n', '').replace('\\t', '')\n\n com_sub_catele = soup.find('a', class_='tag d-inline-block mr-2 mb-2 sub_scope-tag tag-item')\n com_sub_cat = com_sub_catele.text if com_sub_catele else None\n newslist = []\n newsEle = soup.find(id='news')\n if newsEle:\n newsEle = newsEle.find_all('div', class_='list-group-item mobile-news-item d-flex align-items-center feedback-btn-parent justify-content-around border-0 juzi-list-item pt-4 pb-4')\n for new in newsEle:\n newdata = {}\n newdata['newsdate'] = new.find('span', class_='news-date d-inline-block').text\n newdata['linkurl'] = new.find('a').get('href')\n newdata['title'] = new.find('a').text\n newslist.append(newdata)\n\n response['news'] = newslist\n response['com_sub_cat_name'] = com_sub_cat\n\n '''\n # com_name = soup.title.text\n # if com_name in (u'www.itjuzi.com', u'找不到您访问的页面', u'502 Bad Gateway', u'403'):\n # return None, com_name\n # com_web = None\n # a_s = soup.find('i', class_='fa fa-link t-small', )\n # if a_s:\n # com_web = a_s.parent['href']\n # name = soup.find('h1', class_='seo-important-title', )\n # full_name = None\n # if name:\n # com_name = name.text.replace(u'\\t', u'')\n # com_name = com_name.split('\\n')[1]\n # full_name = name['data-fullname']\n # # Contact info\n # ll = ['mobile', 'email', 'detailaddress']\n # response = {}\n # contact_ul = soup.find('ul', class_='list-block aboutus')\n # if contact_ul:\n # for info in contact_ul.find_all('li'):\n # if info.find('i', class_='fa icon icon-phone-o'):\n # response['mobile'] = info.text.replace('\\n', '').replace('\\t', '')\n # if info.find('i', class_='fa icon icon-email-o'):\n # response['email'] = info.text.replace('\\n', '').replace('\\t', '')\n # if info.find('i', class_='fa icon icon-address-o'):\n # 
response['detailaddress'] = info.text.replace('\\n', '').replace('\\t', '')\n\n\n # # 融资信息\n # investents = soup.find(id='financing')\n # eventtable = investents.find('table')\n # eventtrlist = eventtable.find_all('tr')\n # eventlist = []\n # for eventtr in eventtrlist:\n # if eventtr.find(class_='date'):\n # date = eventtr.find(class_='date').text\n # round = eventtr.find(class_='round').text\n # money = eventtr.find(class_='finades').text\n #\n # link = eventtr.find(class_='finades').a['href']\n # type = link.split('/')[-2]\n # event_id = link.split('/')[-1]\n # data = {\n # 'date': date,\n # 'round': round,\n # 'money': money,\n # }\n # if type == 'merger':\n # data['investormerge'] = 2\n # data['merger_id'] = event_id\n # data['merger_with'] = eventtr.find('a', class_='line1 c-gray').text if eventtr.find('a',\n # class_='line1 c-gray') else ''\n # else:\n # data['investormerge'] = 1\n # data['invse_id'] = event_id\n # line1s = eventtr.find_all('a', class_='line1')\n # invsest_with = []\n # for line1 in line1s:\n # url = line1.get('href', None)\n # invst_name = line1.text\n # invsest_with.append({'url': url, 'invst_name': invst_name})\n # data['invsest_with'] = invsest_with\n # eventlist.append(data)\n # response['events'] = eventlist\n #\n # industryType = soup.find('a', class_='one-level-tag').text if soup.find('a', class_='one-level-tag') else ''\n # response['industryType'] = industryType\n #\n # # 团队信息\n # members = []\n # membersul = soup.find('ul', class_='list-unstyled team-list limited-itemnum')\n # if membersul:\n # lilist = membersul.find_all('li')\n # for li in lilist:\n # dic = {}\n # dic['姓名'] = li.find('a', class_='person-name').text.replace('\\n', '').replace('\\t', '') if li.find('a',\n # class_='person-name') else None\n # dic['职位'] = li.find('div', class_='per-position').text.replace('\\n', '').replace('\\t', '') if li.find(\n # 'div', class_='per-position') else None\n # dic['简介'] = li.find('div', class_='per-des').text.replace('\\n', '').replace('\\t', '') if li.find('div',\n # class_='per-des') else None\n # members.append(dic)\n # response['indus_member'] = members\n #\n # # 新闻\n # res = soup.find_all('ul', class_='list-unstyled news-list')\n # news = []\n # for ss in res:\n # # print ss.name\n # lilist = ss.find_all('li')\n # for li in lilist:\n # dic = {}\n # dic['newsdate'] = li.find('span', class_='news-date').text.replace('\\n', '').replace('\\t',\n # '') if li.find(\n # 'span', class_='news-date') else None\n # a = li.find('a', class_='line1')\n # dic['title'] = a.text.replace('\\n', '').replace('\\t', '')\n # dic['linkurl'] = a['href']\n # dic['newstag'] = li.find('span', class_='news-tag').text.replace('\\n', '').replace('\\t', '') if li.find(\n # 'span', class_='news-tag') else None\n # news.append(dic)\n # response['news'] = news\n # response['com_web'] = com_web\n #\n # # 工商信息\n # # recruit-info\n # recruit_info = soup.find('div', id='recruit-info')\n # if recruit_info:\n # tablistul = recruit_info.find('ul', class_='nav-tabs list-inline stock_titlebar')\n # tablistli = tablistul.find_all('li')\n # for tabli in tablistli:\n # tabhref = tabli.a['href'].replace('#', '')\n # if tabhref in ['indus_base', u'indus_base']: # 基本信息\n # indus_base = recruit_info.find('div', id=tabhref)\n # com_full_name = indus_base.find('th').text\n # infolisttd = indus_base.find_all('td')\n # infodic = {}\n # for info in infolisttd:\n # if info:\n # if info.find('span', class_='tab_title') and info.find('span', class_='tab_main'):\n # if info.find('span', 
class_='tab_title').text:\n # infodic[info.find('span', class_='tab_title').text] = info.find('span',\n # class_='tab_main').text.replace(\n # '\\n', '').replace('\\t', '')\n # infodic[u'公司名称:'] = com_full_name.replace('\\n', '').replace('\\t', '')\n # response[tabhref] = infodic\n #\n # if tabhref in ['indus_shareholder', u'indus_shareholder', 'indus_foreign_invest',\n # u'indus_foreign_invest', 'indus_busi_info', u'indus_busi_info']: # 股东信息、企业对外投资信息、工商变更信息\n # indus_shareholder = recruit_info.find('div', id=tabhref)\n # thead = indus_shareholder.find('thead')\n # if thead:\n # theadthlist = thead.find_all('th')\n # theadlist = []\n # for theaditem in theadthlist:\n # theadlist.append(theaditem.text)\n # tbody = indus_shareholder.find('tbody')\n # infolist = []\n # if tbody:\n # trlist = tbody.find_all('tr')\n # for tr in trlist:\n # infodic = {}\n # tdlist = tr.find_all('td')\n # for i in range(0, len(theadlist)):\n # try:\n # infodic[theadlist[i]] = tdlist[i].text.replace('\\n', '').replace('\\t', '') if \\\n # tdlist[i].text else None\n # except IndexError:\n # print('数组越界', len(theadlist), len(tdlist))\n # if infodic != {}:\n # infolist.append(infodic)\n # response[tabhref] = infolist\n '''\n return response, com_name, full_name\n else:\n return None, None, None\n\n# html = open('itjuzi_gai.html', 'r').read()\n# ss = parseComDetailHtml(html)\n\n# driver = webdriver.Chrome('/usr/local/bin/chromedriver')\n\ndef parseComFinanceByDriver(driver):\n\n financeData = []\n try:\n driver.find_element_by_xpath('//*[@id=\"financing\"]/table/tbody')\n except NoSuchElementException:\n return []\n path_id = 0\n while True:\n path_id += 1\n eventdata = {}\n try:\n tr_xpath = '//*[@id=\"financing\"]/table/tbody/tr[%s]' % path_id\n driver.find_element_by_xpath(tr_xpath)\n eventdata['date'] = driver.find_element_by_xpath(tr_xpath + '/td[1]').text\n eventdata['round'] = driver.find_element_by_xpath(tr_xpath + '/td[2]').text\n eventdata['money'] = driver.find_element_by_xpath(tr_xpath + '/td[3]').text\n\n eventurlele = driver.find_element_by_xpath(tr_xpath + '/td[5]/a')\n eventurl = eventurlele.get_attribute('href')\n if eventurl.split('/')[-2] == 'investevent':\n investormerge = 1\n eventdata['invse_id'] = eventurl.split('/')[-1]\n else:\n investormerge = 2\n eventdata['merger_id'] = eventurl.split('/')[-1]\n eventdata['investormerge'] = investormerge\n\n if investormerge == 1:\n invsest_with = []\n try:\n investor = driver.find_element_by_xpath(tr_xpath + '/td[4]/a')\n invsest_with.append({'url': investor.get_attribute(\"href\"), 'invst_name': investor.text})\n except (NoSuchElementException, Exception):\n i = 0\n while i < 10:\n i += 1\n try:\n investor = driver.find_element_by_xpath(tr_xpath + '/td[4]' + '/a[%s]' % i)\n invsest_with.append({'url': investor.get_attribute(\"href\"), 'invst_name': investor.text})\n except (NoSuchElementException, Exception):\n break\n eventdata['invsest_with'] = invsest_with\n else:\n investor = driver.find_element_by_xpath(tr_xpath + '/td[4]/a')\n eventdata['merger_with'] = investor.text\n\n financeData.append(eventdata)\n except (NoSuchElementException, Exception):\n break\n\n return financeData\n\n\n\n\n\n\n\ndef parseComMemberByDriver(driver):\n try:\n driver.find_element_by_xpath('//*[@id=\"app\"]/div[1]/div/div[3]/div[1]/div/div[4]/div[1]/div[3]/div[1]/ul')\n except NoSuchElementException:\n return []\n memberlist = []\n path_id = 0\n while True:\n path_id += 1\n memberdata = {}\n try:\n li_xpath = 
'//*[@id=\"app\"]/div[1]/div/div[3]/div[1]/div/div[4]/div[1]/div[3]/div[1]/ul/li[%s]' % path_id\n driver.find_element_by_xpath(li_xpath)\n memberdata['姓名'] = driver.find_element_by_xpath('//*[@id=\"app\"]/div[1]/div/div[3]/div[1]/div/div[4]/div[1]/div[3]/div[1]/ul/li[1]/a[2]').text\n memberdata['职位'] = driver.find_element_by_xpath('//*[@id=\"app\"]/div[1]/div/div[3]/div[1]/div/div[4]/div[1]/div[3]/div[1]/ul/li[1]/div[1]').text\n memberdata['简介'] = driver.find_element_by_xpath('//*[@id=\"app\"]/div[1]/div/div[3]/div[1]/div/div[4]/div[1]/div[3]/div[1]/ul/li[1]/div[2]').text\n memberlist.append(memberdata)\n except (NoSuchElementException, Exception):\n break\n return memberlist\n\ndef getComBasic(driver, com_id):\n driver.get('https://www.itjuzi.com/api/companies/%s?type=basic' % com_id)\n basicpage = driver.page_source.replace(\n '
<html xmlns=\"http://www.w3.org/1999/xhtml\"><head></head><body><pre style=\"word-wrap: break-word; white-space: pre-wrap;\">',\n        '').replace('</pre></body></html>
', '')\n basic = json.loads(basicpage)\n basicDic = {}\n if basic.get('data'):\n basicDic.update(basic['data']['basic'])\n basicDic['com_addr'] = basic['data']['basic']['com_prov']\n basicDic['com_sub_cat_name'] = basic['data']['basic']['com_sub_scope'][0]['name']\n basicDic['com_full_name'] = basic['data']['basic']['com_registered_name']\n basicDic['com_web'] = basic['data']['basic']['com_url']\n basicDic['invse_total_money'] = str(basic['data']['basic']['total_money']) + '万'\n return basicDic\n\ndef getComIndustryInfo(driver, com_id):\n driver.get('https://www.itjuzi.com/api/companies/%s?type=icp' % com_id)\n industryinfo = driver.page_source.replace(\n '
<html xmlns=\"http://www.w3.org/1999/xhtml\"><head></head><body><pre style=\"word-wrap: break-word; white-space: pre-wrap;\">',\n        '').replace('</pre></body></html>
', '')\n info = json.loads(industryinfo)\n\n indus_base = {}\n indus_baseData = info['data']['elecredit'].get('elecredit_basic')\n if indus_baseData:\n indus_base.update({ u'地址:': indus_baseData['dom'],\n u'公司类型:': indus_baseData['enttype'],\n u'公司名称:': indus_baseData['entname'],\n u'注册资本:': indus_baseData['regcap'] + '万人民币',\n u'法人代表:': indus_baseData['frname'],\n u'成立时间:': indus_baseData['esdate'], })\n indus_shareholderDara = info['data']['elecredit'].get('elecredit_shareholder')\n indus_shareholder = []\n if indus_shareholderDara:\n for userdata in indus_shareholderDara:\n indus_shareholder.append({u'出资比例': userdata['fundedratio'],\n u'出资日期': userdata['condate'],\n u'股东': userdata['shaname'],\n u'出资方式': userdata['conform'],\n u'认缴出资额': userdata['subconam'] + '万' + userdata['regcapcur'],})\n indus_busi_info = []\n indus_busi_infoData = info['data']['elecredit'].get('elecredit_alter')\n if indus_busi_infoData:\n for busiData in indus_busi_infoData:\n indus_busi_info.append({\n u'变更日期': busiData['altdate'] + busiData['altitem'],\n u'变更前': busiData['altbe'],\n u'变更后': busiData['altaf'],\n })\n indus_foreign_invest = []\n indus_foreign_investData = info['data']['elecredit'].get('elecredit_entinv')\n if indus_foreign_investData:\n for foreignData in indus_foreign_investData:\n indus_foreign_invest.append({\n u'出资比例': foreignData['fundedratio'],\n u'出资日期': foreignData['esdate'],\n u'出资方式': foreignData['conform'],\n u'认缴出资额': foreignData['subconam'] + '万' + foreignData['regcapcur'],\n u'公司名称': foreignData['entname'],\n })\n\n\n return {'com_id': com_id, 'indus_base': indus_base, 'indus_shareholder': indus_shareholder, 'indus_busi_info': indus_busi_info, 'indus_foreign_invest': indus_foreign_invest}\n\n\ndef parseComIndustryInfoByDriver(driver, com_id, proxy):\n cookies = driver.get_cookies()\n coostrlist = []\n for coo in cookies:\n coostrlist.append('%s=%s' % (coo['name'], coo['value']))\n cookie = ';'.join(coostrlist)\n acc_token = driver.execute_script(\"return localStorage.getItem('accessToken')\")\n headers = {\n 'Accept': 'application/json, text/plain, */*',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',\n 'Authorization': acc_token,\n 'Connection': '',\n 'Cookie': cookie,\n 'Host': 'www.itjuzi.com',\n 'Referer': 'https://www.itjuzi.com/company/%s' % com_id,\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36',\n }\n if proxy and len(proxy) > 0:\n proxies = {\n \"https\": \"http://%s\" % proxy,\n }\n else:\n proxies = None\n def getRequestRes():\n try:\n res = requests.get('https://www.itjuzi.com/api/companies/%s?type=icp' % com_id, headers=headers,\n proxies=proxies, timeout=20).content\n infores = json.loads(res)['data']['elecredit']\n return infores\n except Exception:\n print('获取icp失败--com_id:%s'%com_id)\n time.sleep(3)\n return getRequestRes()\n\n info = getRequestRes()\n\n indus_base = {}\n indus_baseData = info.get('elecredit_basic')\n if indus_baseData:\n indus_base.update({ u'地址:': indus_baseData['dom'],\n u'公司类型:': indus_baseData['enttype'],\n u'公司名称:': indus_baseData['entname'],\n u'注册资本:': str(indus_baseData['regcap']) + '万人民币' if indus_baseData['regcap'] else '',\n u'法人代表:': indus_baseData['frname'],\n u'成立时间:': indus_baseData['esdate'], })\n indus_shareholderDara = info.get('elecredit_shareholder')\n indus_shareholder = []\n if indus_shareholderDara:\n for userdata in indus_shareholderDara:\n indus_shareholder.append({u'出资比例': 
(str(userdata['fundedratio']) + '%') if userdata['fundedratio'] and userdata['fundedratio'] != 0 else None,\n u'出资日期': userdata['condate'],\n u'股东': userdata['shaname'],\n u'出资方式': userdata['conform'],\n u'认缴出资额': (str(userdata['subconam']) + '万' if userdata['subconam'] else '' ) + (userdata['regcapcur'] if userdata['regcapcur'] else ''),})\n indus_busi_info = []\n indus_busi_infoData = info.get('elecredit_alter')\n if indus_busi_infoData:\n for busiData in indus_busi_infoData:\n indus_busi_info.append({\n u'变更日期': busiData['altdate'] + busiData['altitem'],\n u'变更前': busiData['altbe'],\n u'变更后': busiData['altaf'],\n })\n indus_foreign_invest = []\n indus_foreign_investData = info.get('elecredit_entinv')\n if indus_foreign_investData:\n for foreignData in indus_foreign_investData:\n indus_foreign_invest.append({\n u'出资比例': (str(foreignData['fundedratio']) + '%') if foreignData['fundedratio'] and foreignData['fundedratio'] != 0 else None,\n u'出资日期': foreignData['esdate'],\n u'出资方式': foreignData['conform'],\n u'认缴出资额': (str(foreignData['subconam']) + '万') if foreignData['subconam'] else '' + foreignData['regcapcur'] if foreignData['regcapcur'] else '',\n u'公司名称': foreignData['entname'],\n })\n\n\n return {'com_id': com_id, 'indus_base': indus_base, 'indus_shareholder': indus_shareholder, 'indus_busi_info': indus_busi_info, 'indus_foreign_invest': indus_foreign_invest}\n\n","sub_path":"python/emptygit/webdriver/parseItjuziHtml.py","file_name":"parseItjuziHtml.py","file_ext":"py","file_size_in_byte":23916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"342340043","text":"\"\"\"\n418\nmedium\nsentence screen fitting\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def wordsTyping(self, sentence: List[str], rows: int, cols: int) -> int:\n\n # DP\n\n s = \" \".join(sentence) + \" \"\n p = 0\n for i in range(rows):\n p += cols\n if s[(p-1) % len(s)] == \" \":\n continue\n elif s[p % len(s)] == \" \":\n p += 1\n else:\n while s[(p-1) % len(s)] != \" \":\n p -= 1\n return p // len(s)\n\n\n\nsentence = [\"hello\",\"world\"]\nrows = 2\ncols = 8\n\nsentence = [\"a\", \"bcd\", \"e\"]\nrows = 3\ncols = 6\n\n\nsol = Solution()\nprint(sol.wordsTyping(sentence, rows, cols))","sub_path":"Q418-v2.py","file_name":"Q418-v2.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"81337648","text":"import numpy as np\nimport cv2\n\nimg = cv2.imread('origin.png')\nimg_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nimg_blur = cv2.GaussianBlur(img_gray,(11,11),5)#gaussian filter,size=11x11,sigma=5\n#adaptive threshold method with gaussian_c as its threshold\nimg_thre = cv2.adaptiveThreshold(img_blur, 255 , cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,13,2)\n#close operation to close the edge\nkernel = np.ones((3, 3), np.uint8)\nimg_close = cv2.morphologyEx(img_thre,cv2.MORPH_CLOSE,kernel,iterations=5)\nimg_countour = img_close.copy()\n#get contours\ncontours , hierarchy = cv2.findContours(img_countour,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n#initialize\n_threshold = {'len':5,'area':100,'extent':0.9}\ncount_ellipse = 0\narea_ellipse = []\ncount_rect = 0\nperimeter_rect = []\n\nfor cnt in contours:\n #fit ellipse\n if len(cnt) < _threshold['len']:#length filter\n continue\n\n area = cv2.contourArea(cnt)\n if area < _threshold['area']:#area filter\n continue\n\n rect = cv2.minAreaRect(cnt)#get bounding box\n w,h = rect[1]#the size of bbox\n area_rect = 
w*h#the area of bbox\n extent = float(area)/area_rect#extent,used for distinguishing between ellipse and rectangles\n\n if extent > _threshold['extent']:#it is a rectangle\n perimeter = 2*(w+h)#perimeter\n perimeter_rect.append(perimeter)\n count_rect += 1\n\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n img = cv2.drawContours(img,[box],0,(0,0,255),2)\n else:#it is a ellipse\n count_ellipse += 1\n area_ellipse.append(area)\n ellipse = cv2.fitEllipse(cnt)\n cv2.ellipse(img, ellipse, (0,255,0), 2)\n\nprint('the number of coins:',count_ellipse)\nprint('area of all coins',area_ellipse)\nprint('the number of boxes:',count_rect)\nprint('perimeter of all boxes:',perimeter_rect)\n\ncv2.imshow('gassianBlur',img_blur)\ncv2.imshow('adaptiveThreshold',img_thre)\ncv2.imshow('close',img_close)\ncv2.imshow('final result', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"counting_coins_and_boxes/count_object.py","file_name":"count_object.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"306689990","text":"from sys import maxsize\n\nclass Contact:\n def __init__(self, firstname=None, lastname=None, middlename=None, id=None, homephone=None,\n mobilephone=None, workphone=None, fax=None, all_phones_from_home_page=None,\n email1=None, email2=None, email3=None, all_emails_from_hp=None, address_from_hp=None):\n self.firstname = firstname\n self.lastname = lastname\n self.middlename = middlename\n self.homephone = homephone\n self.mobilephone = mobilephone\n self.workphone = workphone\n self.fax = fax\n self.id = id\n self.all_phones_from_home_page = all_phones_from_home_page\n self.email1 = email1\n self.email2 = email2\n self.email3 = email3\n self.all_emails_from_hp = all_emails_from_hp\n self.address_from_hp = address_from_hp\n\n def __repr__(self):\n return f'{self.id}, {self.firstname}, {self.lastname}, {self.mobilephone}, {self.homephone}, {self.workphone}'\n\n def __eq__(self, other):\n return (self.id is None or other.id is None or self.id == other.id)\\\n and self.firstname == other.firstname\\\n and self.lastname == other.lastname\n\n def id_or_max(self):\n if self.id:\n return int(self.id)\n else:\n return maxsize\n","sub_path":"model/contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"595558912","text":"#!/bin/python3\n\nimport numpy as np\n\nclass HopfieldNetwork:\n\n def store(self, examples):\n X = np.asmatrix(examples)\n auxW = [[ (1 / X.shape[1]) * sum([X.item(k, i)*X.item(k, j) for k in range(X.shape[0])]) if i != j else 0 for j in range(X.shape[1]) ] for i in range(X.shape[1])]\n aaux = np.asmatrix(auxW).reshape(X.shape[1], X.shape[1])\n self.w = (1 / X.shape[1]) * np.matmul(X.T, X) - np.eye(X.shape[1])\n self.w = aaux\n\n def recognize(self, x, iterations=1000):\n # S = np.asmatrix(np.tile(x, self.w.shape[0])).reshape(self.w.shape[1], self.w.shape[1])\n # for it in iterations:\n # S = np.sign(np.dot(self.w, S))\n S = np.asmatrix(x).T\n oldS = np.asmatrix(np.zeros(S.shape[1])).T\n history = []\n it = 0\n # for i in range(iterations):\n while (oldS - S).any() != 0 and it < iterations:\n # while it < iterations:\n oldS = S\n history.append(oldS)\n S = np.sign(np.matmul(self.w, S))\n it+=1\n return S, 
history","sub_path":"TP4/hopfield.py","file_name":"hopfield.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"184685091","text":"# %%\n# Importing libraries\n\nimport os\nimport glob\nimport pickle\nimport re\n\n# Our numerical workhorses\nimport numpy as np\nimport pandas as pd\n\n# Import the project utils\nimport sys\nsys.path.insert(0, '../../analysis/')\nimport mwc_induction_utils as mwc\n\n# Import matplotlib stuff for plotting\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\n# Seaborn, useful for graphics\nimport seaborn as sns\n\nmwc.set_plotting_style()\n\n\n# %% Define function to compute the fold-change accounting for multiple\n# promoters\ndef fold_change_double_R_adjust(R, iptg, epsilon_r, f,\n ea=-np.log(139), ei=-np.log(0.53), epsilon=4.5,\n Ns=2, Nns=4.6E6):\n '''\n Computes the fold-change for a promoter that spends f fraction of the time\n with two copies of the promoter and (1-f) with one promoter.\n Parameters\n ----------\n R : array-like.\n Number of repressors per cell.\n iptg : array-like.\n Inducer concentration.\n epsilon_r : float.\n Repressor-DNA binding energy.\n f : float.\n Fraction of the cell cycle that the cell spends with multiple copies of\n the promoter.\n ea, ei : float.\n -log(Ka) and -log(Ki) respectively.\n epsilon : float.\n Energy difference between active and inactive state of repressor.\n Ns : int.\n Number of promoters that the cell has f fraction of the time.\n Nns : int.\n Number of non-specific binding sites\n\n Returns\n -------\n fold_change : array-like.\n fold-change in gene expression.\n '''\n # Compute the number of active repressors\n Ract = mwc.pact_log(iptg=iptg, ea=ea, ei=ei, epsilon=epsilon) * R\n\n Reff = Ract / (1 + f)\n\n # Define the coefficients of the polynomial\n a = -2 * Nns * np.exp(-epsilon_r)\n b = np.exp(-epsilon_r) * (2 * Reff - Ns) - 2 * Nns\n c = 2 * Reff\n\n # Initialize array to save\n lam = np.empty_like(Ract)\n # Find the value of lambda\n for i, r in enumerate(Ract):\n lam[i] = np.max(np.roots([a, b[i], c[i]]))\n\n # Compute and return fold-change\n single_fc = 1 / (1 + Reff / Nns * np.exp(-epsilon_r))\n double_fc = 1 / (1 + lam * np.exp(-epsilon_r))\n fold_change = 1 / (1 + f) * ((1 - f) * single_fc + 2 * f * double_fc)\n\n return fold_change\n\n# %%\n# Read the data\n# Define working directory\ndatadir = '../../../data/'\n# List files to be read\nfiles = ['flow_master.csv', 'merged_Oid_data_foldchange.csv']\n# Read flow cytometry data\ndf_Oid = pd.read_csv(datadir + files[1], comment='#')\n# make an extra column to have consistent labeling\ndf_Oid['fold_change_A'] = df_Oid.fold_change\n# Remove manually the outlier with an unphysical fold-change\ndf_Oid = df_Oid[df_Oid.fold_change_A <= 1]\n# Read the flow cytometry data\ndf = pd.read_csv(datadir + files[0], comment='#')\n# Attach both data frames into a single one\ndf = pd.concat([df, df_Oid])\n# Drop rows containing NA values\ndf.dropna(axis=1, inplace=True)\n\n# Now we remove the autofluorescence and delta values\ndf = df[(df.rbs != 'auto') & (df.rbs != 'delta')]\n\n# %%\n# Load MCMC flatchain\n\n# Load the flat-chain\nwith open('../../../data/mcmc/main_text_KaKi.pkl', 'rb') as file:\n unpickler = pickle.Unpickler(file)\n gauss_flatchain = unpickler.load()\n gauss_flatlnprobability = unpickler.load()\n\n# map value of the parameters\nmax_idx = np.argmax(gauss_flatlnprobability, axis=0)\nea, ei, sigma = gauss_flatchain[max_idx]\n\nka_fc = 
np.exp(-gauss_flatchain[:, 0])\nki_fc = np.exp(-gauss_flatchain[:, 1])\n\n# %%\n# Plot the theory vs data for all 4 operators with the credible region\n\n# Define the IPTG concentrations to evaluate\nIPTG = np.logspace(-7, -2, 100)\nIPTG_lin = np.array([0, 1E-7])\n\n# Define parameters for the multi-promoter model\nf = 1 / 3\nNs = 2\n# Set the colors for the strains\ncolors = sns.color_palette('colorblind', n_colors=7)\ncolors[4] = sns.xkcd_palette(['dusty purple'])[0]\n\n# Define the operators and their respective energies\noperators = ['O1', 'O2', 'O3', 'Oid']\nenergies = {'O1': -15.3, 'O2': -13.9, 'O3': -9.7, 'Oid': -17.0}\n\n# Initialize the plot to set the size\nfig, ax = plt.subplots(2, 2, figsize=(11, 8))\nax = ax.ravel()\n\n# Loop through operators\nfor i, op in enumerate(operators):\n print(op)\n data = df[df.operator == op]\n # loop through RBS mutants\n for j, rbs in enumerate(df.rbs.unique()):\n # Check if the RBS was measured for this operator\n if rbs in data.rbs.unique():\n # plot the theory using the parameters from the fit.\n # SINGLE PROMOTER\n # Log scale\n ax[i].plot(IPTG, mwc.fold_change_log(IPTG * 1E6,\n ea=ea,\n ei=ei,\n epsilon=4.5,\n R=df[(df.rbs == rbs)].repressors.unique(),\n epsilon_r=energies[op]),\n color=colors[j])\n # Linear scale\n ax[i].plot(IPTG_lin, mwc.fold_change_log(IPTG_lin * 1E6,\n ea=ea, ei=ei, epsilon=4.5,\n R=df[(df.rbs == rbs)].repressors.unique(),\n epsilon_r=energies[op]),\n color=colors[j], linestyle='--')\n # MULTIPLE PROMOTERS\n # Log scale\n ax[i].plot(IPTG, fold_change_double_R_adjust(iptg=IPTG * 1E6, f=f,\n Ns=Ns, ea=ea, ei=ei,\n epsilon=4.5,\n R=df[(df.rbs == rbs)].repressors.unique() * 2,\n epsilon_r=energies[op]),\n color=colors[j], linestyle=':')\n # Linear scale\n ax[i].plot(IPTG_lin, fold_change_double_R_adjust(iptg=IPTG_lin *\n 1E6, f=f, Ns=Ns,\n ea=ea, ei=ei,\n epsilon=4.5,\n R=df[(df.rbs == rbs)].repressors.unique() * 2,\n epsilon_r=energies[op]),\n color=colors[j], linestyle='--')\n # MULTIPLE PROMOTERES\n\n # Plot mean and standard error of the mean for the flow data\n if op != 'Oid':\n # compute the mean value for each concentration\n fc_mean = data[data.rbs==rbs].groupby('IPTG_uM').fold_change_A.mean()\n # compute the standard error of the mean\n fc_err = data[data.rbs==rbs].groupby('IPTG_uM').fold_change_A.std() / \\\n np.sqrt(data[data.rbs==rbs].groupby('IPTG_uM').size())\n\n # plot the experimental data\n ax[i].errorbar(np.sort(data[data.rbs==rbs].IPTG_uM.unique()) / 1E6, fc_mean,\n yerr=fc_err, fmt='o',\n label=df[df.rbs==rbs].repressors.unique()[0] * 2,\n color=colors[j])\n # Plot the raw data for Oid\n else:\n ax[i].plot(data[data.rbs==rbs].IPTG_uM / 1E6,\n data[data.rbs==rbs].fold_change_A, marker='o', lw=0,\n color=colors[j])\n\n # Add operator and binding energy labels.\n ax[i].text(0.8, 0.09, r'{0}'.format(op), transform=ax[i].transAxes,\n fontsize=13)\n ax[i].text(0.67, 0.02,\n r'$\\Delta\\varepsilon_{RA} = %s\\,k_BT$' %energies[op],\n transform=ax[i].transAxes, fontsize=13)\n ax[i].set_xscale('symlog', linthreshx=1E-7, linscalex=0.5)\n ax[i].set_xlabel('IPTG (M)', fontsize=15)\n ax[i].set_ylabel('fold-change', fontsize=16)\n ax[i].set_ylim([-0.01, 1.1])\n ax[i].set_xlim(left=-5E-9)\n ax[i].tick_params(labelsize=14)\n\nax[0].legend(loc='upper left', title='repressors / cell')\n# add plot letter labels\nplt.figtext(0.0, .95, '(A)', fontsize=20)\nplt.figtext(0.50, .95, '(B)', fontsize=20)\nplt.figtext(0.0, .46, '(C)', fontsize=20)\nplt.figtext(0.50, .46, '(D)', 
fontsize=20)\nplt.tight_layout()\nplt.savefig('../../../figures/extras/figSxx_double_promoter_double_R.pdf',\n bbox_inches='tight')\n","sub_path":"code/figures/extras/figSxx_double_promoter_double_R.py","file_name":"figSxx_double_promoter_double_R.py","file_ext":"py","file_size_in_byte":8093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"106410267","text":"#!/usr/bin/env python\n\nimport logging\nimport coloredlogs\n\n\"\"\"\n Logger object.\n\"\"\"\n\n\ndef init(debug=False) -> logging.Logger:\n \"\"\"\n Init logger object\n :param debug: bool is debug\n :return: logger object;\n \"\"\"\n\n logger = logging.getLogger('aliceair')\n level = logging.DEBUG if debug else logging.INFO\n\n coloredlogs.install(fmt='%(asctime)s - %(levelname)s - %(message)s', logger=logger, level=level)\n return logger","sub_path":"air_monitor/utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"315390332","text":"# -*- coding: utf-8 -*-\n\"\"\"Format adapter for the terminaltables module.\"\"\"\n\nimport terminaltables\nimport itertools\n\nfrom cli_helpers.utils import filter_dict_by_key\nfrom .preprocessors import (convert_to_string, override_missing_value,\n style_output)\n\nsupported_formats = ('ascii', 'double', 'github')\npreprocessors = (override_missing_value, convert_to_string, style_output)\n\n\ndef adapter(data, headers, table_format=None, **kwargs):\n \"\"\"Wrap terminaltables inside a function for TabularOutputFormatter.\"\"\"\n keys = ('title', )\n\n table_format_handler = {\n 'ascii': terminaltables.AsciiTable,\n 'double': terminaltables.DoubleTable,\n 'github': terminaltables.GithubFlavoredMarkdownTable,\n }\n\n table = table_format_handler[table_format]\n\n t = table([headers] + list(data), **filter_dict_by_key(kwargs, keys))\n\n dimensions = terminaltables.width_and_alignment.max_dimensions(\n t.table_data,\n t.padding_left,\n t.padding_right)[:3]\n for r in t.gen_table(*dimensions):\n yield ''.join(r)\n","sub_path":"cli_helpers/tabular_output/terminaltables_adapter.py","file_name":"terminaltables_adapter.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"399345871","text":"\"\"\"Token definitions for LPLR.\"\"\"\n\nimport re\nfrom typing import Pattern\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass TokenDef:\n \"\"\"Token definition with the proper regex.\"\"\"\n\n name: str\n pattern: Pattern[str]\n\n\n@dataclass()\nclass Token:\n \"\"\"A token.\"\"\"\n\n name: str\n literal: str\n\n def to_dict(self):\n \"\"\"Return a dict representation of the Token.\"\"\"\n return {\"token_name\": self.name, \"token_value\": self.value}\n\n\n\"\"\"Lexeme/token definitions\"\"\"\n_tok_defs = [\n TokenDef(\"GET_KEYWORD\", re.compile(\"get\")),\n TokenDef(\"FROM_KEYWORD\", re.compile(\"from\")),\n TokenDef(\"SEND_KEYWORD\", re.compile(\"send\")),\n TokenDef(\"OUTPUT_KEYWORD\", re.compile(\"output\")),\n TokenDef(\"ZIP_KEYWORD\", re.compile(\"zip\")),\n TokenDef(\"LBRACKET\", re.compile(\"\\[\")),\n TokenDef(\"RBRACKET\", re.compile(\"\\]\")),\n # TokenDef('LPARENTESES',re.compile('\\(')),\n # TokenDef('RPARENTESES',re.compile('\\)')),\n TokenDef(\"WHITESPACE\", re.compile(\"[ \\t]+\")),\n TokenDef(\"NEWLINE\", re.compile(\"[\\n]\")),\n TokenDef(\"SLASH\", re.compile(r\"\\/\")),\n TokenDef(\"BSLASH\", 
re.compile(r\"\\\\\")),\n TokenDef(\"VALUE\", re.compile(r\"\\w+\")),\n TokenDef(\"DOT\", re.compile(\"\\.\")),\n TokenDef(\"COMMA\", re.compile(\"\\,\")),\n TokenDef(\"AT\", re.compile(\"@\")),\n]\n","sub_path":"main/lplr_tokens/lplr_token.py.46db0bc0234b7c8a28221411d61ff04d.py","file_name":"lplr_token.py.46db0bc0234b7c8a28221411d61ff04d.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"332183795","text":"import tweepy\r\nfrom tkinter import *\r\nimport User_info as user\r\nimport analysis as anlys\r\nimport user_analysis as user_anlys\r\nimport csv\r\nimport os.path\r\nfrom os import path\r\nimport matplotlib\r\nimport matplotlib.figure\r\nimport matplotlib.patches\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\n\r\n#GLOBAL DECLARATION\r\n\r\nui = user.User_info()\r\nanalysis = anlys.SentimentAnalysis()\r\nuser_analysis = user_anlys.UserAnalysis()\r\n\r\nwin = Tk()\r\n\r\n\r\n\r\n# AUTHENTICATION\r\n\r\nconsumer_key = \"OGSG8sCDSMsbA20Nxhv90s1EU\"\r\nconsumer_secret = \"CpmqIGZELFR8yIF451hVjIyDuPiEXBLym8uSC2Kuq4J6bSXWvi\"\r\naccess_token = \"1144135588380962817-9ZW7uIfOMJfLQ2yevlcUSum4BvVv3H\"\r\naccess_token_secret = \"jkjJm9tNlo6Y5riEdHvkdX9tAOgjNUyzP0SC6mPoEqJNq\"\r\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\nauth.set_access_token(access_token, access_token_secret)\r\napi = tweepy.API(auth)\r\n\r\n#FUNCTION\r\n\r\ndef search_keyword():\r\n global frame4,frame3,select;\r\n keyword_text = keyword.get()\r\n print(\"keyword : \",keyword_text)\r\n user_data = ui.Import_user_data(keyword_text)\r\n\r\n frame3 = Frame(win)\r\n frame3.pack()\r\n scroll = Scrollbar(frame3, orient=VERTICAL)\r\n select = Text(frame3, yscrollcommand=scroll.set, height=6)\r\n scroll.config(command=select.yview)\r\n scroll.pack(side=RIGHT, fill=Y)\r\n select.pack(side=LEFT, fill=BOTH, expand=1)\r\n select.insert(END,user_data[0])\r\n select.insert(END,user_data[10])\r\n\r\n if (path.exists('%s.csv' % (keyword_text))):\r\n #print(\"under if\")\r\n csvFile = open('%s.csv' % (keyword_text), 'w')\r\n csvWriter = csv.writer(csvFile, delimiter=',', quotechar='\"')\r\n csvWriter.writerow(user_data)\r\n else:\r\n csvFile = open('%s.csv' % (keyword_text), 'a')\r\n csvWriter = csv.writer(csvFile, delimiter=',', quotechar='\"')\r\n csvWriter.writerow([\"user_name\",\r\n \"user_username\",\r\n \"user_followers_count\",\r\n \"user_listed_count\",\r\n \"user_following\",\r\n \"user_favorites\",\r\n \"user_verified\",\r\n \"user_default_profile\",\r\n \"user_location\",\r\n \"user_time_zone\",\r\n \"user_statuses_count\",\r\n \"user_description\",\r\n \"user_geo_enabled\",\r\n \"user_contributors_enabled\"])\r\n csvWriter.writerow(user_data)\r\n print(\"Wrote tweets by %s to CSV.\" % keyword_text)\r\n #print(keyword_text)\r\n frame4 = Frame(win)\r\n frame4.pack()\r\n btnplot = Button(frame4, text=\" Plot Graph \", command=tweet_analysis)\r\n btnplot.pack(side=LEFT)\r\n\r\ndef tweet_analysis():\r\n global frame4,positive_user,negative_user;\r\n keyword_text = keyword.get()\r\n\r\n polarity,positive, wpositive, spositive, negative, wnegative, snegative, neutral, searchTerm,positive_user,negative_user = analysis.DownloadData(keyword_text)\r\n Text_box = Text(frame4, height=2, width=30)\r\n Text_box.pack()\r\n Text_box.insert(END,\"Polarity of the keyword : \\n\")\r\n Text_box.insert(END,str(polarity))\r\n # Create a Tkinter variable\r\n positive = StringVar(frame4)\r\n negative = 
StringVar(frame4)\r\n\r\n # Dictionary with options\r\n positive.set(positive_user[0]) # set the default option\r\n negative.set(negative_user[0])\r\n\r\n popupMenupos = OptionMenu(frame4, positive, *positive_user)\r\n\r\n Label(frame4, text=\"Positive Influencer\").pack()\r\n popupMenupos.pack()\r\n popupMenuneg = OptionMenu(frame4, negative, *negative_user)\r\n Label(frame4, text=\"Negative Influencer\").pack()\r\n popupMenuneg.pack()\r\n plotPieChart(positive, wpositive, spositive, negative, wnegative, snegative, neutral, searchTerm)\r\n\r\n\r\ndef plotPieChart(positive, wpositive, spositive, negative, wnegative, snegative, neutral, searchTerm, noOfSearchTerms=200):\r\n labels = ['Positive [' + str(positive) + '%]', 'Weakly Positive [' + str(wpositive) + '%]','Strongly Positive [' + str(spositive) + '%]', 'Neutral [' + str(neutral) + '%]',\r\n 'Negative [' + str(negative) + '%]', 'Weakly Negative [' + str(wnegative) + '%]', 'Strongly Negative [' + str(snegative) + '%]']\r\n sizes = [positive, wpositive, spositive, neutral, negative, wnegative, snegative]\r\n colors = ['yellowgreen','lightgreen','darkgreen', 'gold', 'red','lightsalmon','darkred']\r\n fig = matplotlib.figure.Figure(figsize=(7, 7))\r\n ax = fig.add_subplot(111)\r\n patches, texts = ax.pie(sizes, colors=colors, startangle=90)\r\n ax.legend(patches, labels, loc=\"best\")\r\n #fig.title('How people are reacting on ' + searchTerm + ' by analyzing ' + str(noOfSearchTerms) + ' Tweets.')\r\n #ax.axis('equal')\r\n circle = matplotlib.patches.Circle((0, 0), 0.7, color='white')\r\n ax.add_artist(circle)\r\n canvas = FigureCanvasTkAgg(fig, master=frame4)\r\n canvas.get_tk_widget().pack()\r\n canvas.draw()\r\n\r\ndef search_user():\r\n global usertext,username_text,frame5,frame6\r\n username_text = username.get()\r\n print(\"Username : \", username_text)\r\n user_obj = api.get_user(user)\r\n user_data = [user_obj.name,\r\n user_obj.screen_name,\r\n user_obj.followers_count,\r\n user_obj.listed_count,\r\n user_obj.friends_count,\r\n user_obj.favourites_count,\r\n user_obj.verified,\r\n user_obj.default_profile,\r\n user_obj.location,\r\n user_obj.time_zone,\r\n user_obj.statuses_count,\r\n user_obj.description,\r\n user_obj.geo_enabled,\r\n user_obj.contributors_enabled]\r\n #user_data = ui.Import_user_data(username)\r\n frame5 = Frame(win)\r\n frame5.pack(side=RIGHT)\r\n scroll = Scrollbar(frame5, orient=VERTICAL)\r\n usertext = Text(frame5, yscrollcommand=scroll.set, height=6)\r\n scroll.config(command=usertext.yview)\r\n scroll.pack(side=RIGHT, fill=Y)\r\n usertext.pack(side=LEFT, fill=BOTH, expand=1)\r\n\r\n usertext.insert(END, user_data[0])\r\n usertext.insert(END, user_data[10])\r\n\r\n usertext.insert(END, \"USERNAME : \" + user_data[1] + \"\\n\")\r\n usertext.insert(END, \"FOLLOWERS COUNT : \" + user_data[2] + \"\\n\")\r\n\r\n usertext.insert(END, 'FOLLOWING COUNT : ' + user_data[4] + \"\\n\")\r\n usertext.insert(END, 'FAVOURITES : ' + user_data[4] + \"\\n\")\r\n usertext.insert(END, 'LOCATION : ' + user_data[7] + \"\\n\")\r\n usertext.insert(END, 'STATUSES COUNT : ' + user_data[8] + \"\\n\")\r\n usertext.insert(END, 'DESCRIPTION : ' + user_data[9] + \"\\n\")\r\n\r\n frame6 = Frame(win)\r\n frame6.pack(side=RIGHT)\r\n btnplotuser = Button(frame6, text=\" Plot Graph \", command=user_analysis)\r\n btnplotuser.grid(column=6, row=4)\r\n\r\n\r\ndef user_analysis():\r\n global frame6;\r\n\r\n polarity,positive, wpositive, spositive, negative, wnegative, snegative, neutral,count = user_analysis.get_all_tweets(username_text)\r\n 
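# NB: 'def user_analysis()' rebinds the module-level name 'user_analysis' (the UserAnalysis instance created at the top of the file), so the call above resolves to this function and raises AttributeError at runtime\r\n 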
Text_box = Text(frame6, height=2, width=30)\r\n    Text_box.pack()\r\n    Text_box.insert(END, \"Polarity of the keyword : \\n\")\r\n    Text_box.insert(END, str(polarity))\r\n    plotPieChart(positive, wpositive, spositive, negative, wnegative, snegative, neutral, username_text)\r\n\r\ndef refresh():\r\n    keyword.delete(0, END)\r\n    frame3.destroy()\r\n    frame4.destroy()\r\n\r\ndef reset():\r\n    usertext.delete('1.0', END) #Text widgets use line.char indices, not 0\r\n    frame6.destroy()\r\n\r\n\r\n\r\n#MAIN UI\r\n\r\ndef make_window():\r\n    global frame1,frame2,keyword,frame5,username;\r\n\r\n\r\n    frame1 = Frame(win)\r\n    frame1.pack(side=LEFT, fill=Y)\r\n    Label(frame1, text=\"Any Keyword\").grid(row=0, column=0, sticky=W)\r\n    keyword = Entry(frame1, bd=1)\r\n    keyword.grid(row=0, column=1, sticky=W)\r\n\r\n    canvas = Canvas()\r\n    canvas.create_line(400, 0, 400, 800)\r\n\r\n    frame5 = Frame(win)\r\n    Label(frame1, text=\"User Search\").grid(row=0, column=5, sticky=W)\r\n    username = Entry(frame5, bd=1)\r\n    username.grid(row=1, column=5, sticky=W)\r\n    frame5.pack(side=RIGHT, fill=Y)\r\n    btnsearch2 = Button(frame5, text=\" Search User \", command=search_user)\r\n    btnrefresh2 = Button(frame5, text=\"RESET\", command=reset)\r\n    btnsearch2.grid(column=6, row=0)\r\n    btnrefresh2.grid(column=7, row=0)\r\n\r\n    frame2 = Frame(win)\r\n    frame2.pack()\r\n    btnsearch = Button(frame2, text=\" Search by Keyword \", command=search_keyword)\r\n    btnrefresh = Button(frame2, text=\"RESET\", command=refresh)\r\n    btnsearch.grid(column=2, row=0)\r\n    btnrefresh.grid(column=3, row=0)\r\n\r\n\r\n\r\n    return win\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    win = make_window()\r\n    win.geometry('800x800')\r\n    win.mainloop()\r\n\r\n\r\n\r\n\r\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":8571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"328531374","text":"def factorial(x):\n    original = x\n    x += 1\n    ans = 1\n    for i in range(1, x):\n        ans *= i\n    return (\"The factorial of {} is {}\".format(original, ans))\n\nnumber = input(\"Enter the number you want to find the factorial: \")\nnumber = int(number)\nprint(factorial(number))\n","sub_path":"practice/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"295928521","text":"#!/usr/bin/python\n# coding:utf-8\n\nclass CreditCard:\n    \"\"\"\n    A consumer credit card.\n    \"\"\"\n    def __init__(self, consumer, bank, account, limit):\n        \"\"\"\n        Create a new CreditCard instance\n\n        The initial balance is zero.\n\n        consumer  the name of the consumer (e.g. 'John Levis')\n        bank      the name of the bank (e.g. 'Bank of Scotland')\n        account   the account identifier (e.g. '5391 0375 6699 8888')\n        limit     credit limit (measured in GBP)\n        \"\"\"\n\n        self._consumer = consumer\n        self._bank = bank\n        self._account = account\n        self._limit = limit\n        self._balance = 0\n\n    def get_consumer(self):\n        \"\"\"\n        Return the name of the consumer\n        :param self:\n        :return:\n        \"\"\"\n        return self._consumer\n\n    def get_bank(self):\n        \"\"\"\n        Return the name of the bank\n        :param self:\n        :return:\n        \"\"\"\n        return self._bank\n\n    def get_account(self):\n        \"\"\"\n        Return the account identifier\n        :param self:\n        :return:\n        \"\"\"\n        return self._account\n\n    def get_limit(self):\n        \"\"\"\n        Return the current credit limit\n        :param self:\n        :return:\n        \"\"\"\n        return self._limit\n\n    def get_balance(self):\n        \"\"\"\n        Return the current balance\n        :param self:\n        :return:\n        \"\"\"\n        return self._balance\n\n    def charge(self, price):\n        \"\"\"\n        Charge given price to the card, assuming sufficient credit limit\n        Return True if charge was processed; False if charge was denied.\n        :param self:\n        :param price:\n        :return:\n        \"\"\"\n        if price + self._balance > self._limit:\n            return False\n        else:\n            self._balance += price\n            return True\n\n    def make_payment(self, amount):\n        \"\"\"\n        Process consumer payment that reduces balance\n        :param self:\n        :param amount:\n        :return:\n        \"\"\"\n        self._balance -= amount\n\n\nif __name__ == '__main__':\n    wallet = []\n    wallet.append(CreditCard('John Bowman', 'California Savings', '5391 0375 9387 5309', 2500))\n    wallet.append(CreditCard('John Bowman', 'California Federal', '3485 0399 3395 1954', 3500))\n    wallet.append(CreditCard('John Bowman', 'California Finance', '5391 0375 9387 5309', 5000))\n\n    for val in range(1, 10):\n        wallet[0].charge(val)\n        wallet[1].charge(2*val)\n        wallet[2].charge(3*val)\n\n    for c in range(3):\n        print('Consumer is: ', wallet[c].get_consumer())\n        print('Bank is: ', wallet[c].get_bank())\n        print('Account is: ', wallet[c].get_account())\n        print('Limit is: ', wallet[c].get_limit())\n        print('Balance is: ', wallet[c].get_balance())\n\n        while wallet[c].get_balance() > 100:\n            wallet[c].make_payment(100)\n            print('New Balance is: ', wallet[c].get_balance())\n\n        print('\\n')","sub_path":"code/OOP/credit_card.py","file_name":"credit_card.py","file_ext":"py","file_size_in_byte":2939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"312743742","text":"# Create Window and Close Window\n\nimport pygame, time, random\nfrom pygame.locals import *\n\nWIDTH = 600\nHEIGHT = 400\npygame.init()\nfps = pygame.time.Clock()\nwindow = pygame.display.set_mode(( WIDTH, HEIGHT ))\n\n# Declare colours, images, sounds, fonts\nBACKGROUND_COLOR = (184, 211, 239)\nOUR_COLOR = (214, 83, 83)\nOTHER_COLOR = (82, 102, 27)\n\nOUR_SPRITE = pygame.image.load(\"sprite/f15.png\").convert_alpha()\nOTHER_SPRITE = pygame.image.load(\"sprite/su27.png\").convert_alpha()\n\n\nGREEN = (0,255,0)\nYELLO = (255,255,0)\nBLACK = (0,0,0)\nWHITE = (0xFF,0xFF,0xFF)\nPINK\t= (0xFF, 0x65, 0xFD)\nARIAL20 = pygame.font.SysFont(\"Arial\", 20)\n# Variables for keeping track of my game player etc\n# Class\nclass ItemValue(object):\n\tdef __init__(self, x, y, w, h):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.w = w\n\t\tself.h = h\n\tdef moveX2( self, shift ):\n\t\tself.x += shift\n\tdef moveY2( self, shift ):\n\t\tself.y += shift\n\tdef moveX( self, shift, w):\n\t\ttmp = self.x + shift - (self.w / 2)\n\t\tif tmp <= w and (self.x + shift) >= 0 :\n\t\t\tself.x += shift\n\tdef moveY( self, shift, h ):\n\t\ttmp = self.y + shift - (self.h / 2)\n\t\tif tmp <= h and (self.y 
+ shift) >= 0 :\n\t\t\tself.y += shift\n# Default value\nitem1 = ItemValue(200, 200, 50, 50)\nothers = []\notherSpeed = 20\nmoveX = 0\nmoveY = 0\n\nquit = False\n\n# Main game loop\nwhile not quit:\n\n\t# Process events\n\tfor event in pygame.event.get():\n\n\t\tprint(event)\n\n\t\tif event.type == QUIT :\n\t\t\tquit = True\n\t\telif event.type == KEYDOWN:\n\t\t\tif event.key == K_ESCAPE:\n\t\t\t\tquit = True\n\t\t\t# Key Event for move\n\n\t\t\tif event.key == K_DOWN:\n\t\t\t\tmoveY = 10\n\t\t\tif event.key == K_UP:\n\t\t\t\tmoveY = -10\n\t\t\tif event.key == K_LEFT:\n\t\t\t\tmoveX = -10\n\t\t\tif event.key == K_RIGHT:\n\t\t\t\tmoveX = 10\n\t\telif event.type == KEYUP:\n\t\t\tif event.key == K_DOWN or event.key == K_UP:\n\t\t\t\tmoveY = 0\n\t\t\tif event.key == K_LEFT or event.key == K_RIGHT:\n\t\t\t\tmoveX = 0\n\t\telif event.type == MOUSEMOTION:\n\t\t\t(x,y) = event.pos\n\t# Perform calculation\n\tfor other in others:\n\t\tother.moveY2(otherSpeed)\n\t\tif other.y > 500:\n\t\t\tothers = [ x for x in others if not (x.x == other.x and x.y == other.y)]\n\tif random.randint(0, 10) == 0:\n\t\tothers.append( ItemValue(random.randint(0, 500), 0, 50, 50) )\n\n\titem1.moveX(moveX, WIDTH)\n\titem1.moveY(moveY, HEIGHT)\n\n\twindow.fill( BACKGROUND_COLOR )\n\n\t# Draw graphics\n\tfor other in others:\n\t\tprint(other.x)\n\t\tprint(other.y)\n\t\t# pygame.draw.rect( window, OTHER_COLOR, ((other.x - ( other.w / 2)), (other.y - ( other.h / 2 ))), (other.w, other.h))\n\t\tpygame.draw.rect( window, OTHER_COLOR, ((other.x - (other.w / 2)), (other.y - (other.h / 2)), other.w, other.h) )\n\tpygame.draw.rect( window, OUR_COLOR, ( (item1.x - (item1.w / 2) ) , ( item1.y - ( item1.h / 2) ), item1.w, item1.h))\n\tlabel_coordinates = ARIAL20.render(\"Mouse @ \" + str( x ) + \",\" + str( y ) + \"; Item @ \" + str( item1.x + 25 ) + \",\" + str( item1.y + 25 ), 1, WHITE)\n\twindow.blit(label_coordinates, (000,000))\n\tpygame.display.update()\n\tfps.tick(25)\n\n\n\npygame.quit()","sub_path":"Lesson4.py","file_name":"Lesson4.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"625974164","text":"import numpy as np\r\n\r\ndef Check_array(arr):\r\n \"\"\"\r\n Check array values for type and value\r\n :param arr:Array\r\n :return: True if all correct, else False and print Error\r\n \"\"\"\r\n for i in arr:\r\n try:\r\n float(i)\r\n except ValueError:\r\n print(ValueError(\"Values us not correct\"))\r\n return False\r\n except TypeError:\r\n print(TypeError(\"Type is not correct\"))\r\n return False\r\n return True\r\n\r\ndef swap(array, firstnum, secondnum):\r\n \"\"\"\r\n :param array: Array\r\n :param firstnum: Int position of first value \r\n :param secondnum: Int position of second value \r\n :return: nothing\r\n \"\"\"\r\n array[firstnum],array[secondnum]=array[secondnum],array[firstnum]\r\n\r\n\r\n\r\ndef sort(array, lower, upper):\r\n \"\"\"\r\n Sort array with Quicksort\r\n :param array: Array\r\n :return: nothing\r\n \"\"\"\r\n if (lowerlower):\r\n sort(array,lower,i-1)\r\n if (i+1=6', 'requests', 'appdirs', 'PyJWT'],\n setup_requires=['pytest-runner'],\n tests_require=['pytest', 'betamax', 'flexmock']\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"364037845","text":"#CS2302 Data Structures\r\n#Use of Recursion in order to determine and analyze whether a section of 
comments\r\n#fit into positive, negative, or normal category.\r\n#Programmed by Luis Garcia.\r\n#Last modified September 18, 2018.\r\n#Instructor Diego Aguirre.\r\n#RecursiveReddit\r\n\r\nimport nltk\r\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\r\nimport praw\r\n\r\nreddit = praw.Reddit(client_id='vo3B1Fd51rupXg', #To obtain the client id I went into reddit and created an application.\r\n                     client_secret='vrJEX_48mRmiouINsUfJUxAtRH4', # Client Secret was also created through reddit.\r\n                     user_agent='lgarcia27' #This user agent is the one I created on reddit.\r\n                     )\r\n\r\n\r\nnltk.download('vader_lexicon')\r\nsid = SentimentIntensityAnalyzer()\r\n\r\n# I created a group of lists which will be kept in an array.\r\nnormalList = [] # The Normal List will store the normal comments of reddit.\r\nnegativeList = [] # The Negative List will store the negative comments of reddit.\r\npositiveList = [] # The Positive List will store the positive comments of reddit.\r\n\r\n#My Code\r\n# This method classifies whether a comment is negative or positive,\r\n# using the neutral score x of the comment as a baseline.\r\ndef classificationComments(comment, x):\r\n    neg = get_text_negative_proba(comment.body)\r\n    pos = get_text_positive_proba(comment.body)\r\n    if neg-x > pos-x:\r\n        return negativeList.append(comment.body) #Append will add items to the end of the list.\r\n    return positiveList.append(comment.body)\r\n#This method walks over a list of comments and their replies and determines\r\n# whether each comment should be classified as normal.\r\ndef determinationOfComments(comments, x):\r\n    for oriComment in comments.list():\r\n        if len(oriComment.subComments) < 1:\r\n            x = get_text_neutral_proba(oriComment.body)\r\n            if x >= 0.50: # if >= 0.50 a comment will be classified as normal\r\n                normalList.append(oriComment.body)\r\n            else:\r\n                classificationComments(oriComment, x)\r\n        else:\r\n            for reply in oriComment.subComments.list():\r\n                determinationOfComments(reply.subComments, x)\r\n\r\n#Code given\r\ndef get_text_negative_proba(text):\r\n    return sid.polarity_scores(text)['neg']\r\n\r\n\r\ndef get_text_neutral_proba(text):\r\n    return sid.polarity_scores(text)['neu'] # I used normal instead of neutral\r\n\r\n\r\n\r\ndef get_text_positive_proba(text):\r\n    return sid.polarity_scores(text)['pos']\r\n\r\n\r\ndef get_submission_comments(url):\r\n    submission = reddit.submission(url=url)\r\n    submission.comments.replace_more()\r\n\r\n    return submission.comments\r\n\r\n\r\n\r\n\r\ndef main():\r\n    comments = get_submission_comments('https://www.reddit.com/r/politics/comments/9glw1w/avenatti_on_possible_2020_presidential_run_im/')\r\n    #the URL above will access the library of the comments and will help with testing purposes.\r\n    #The reason I chose this URL is because I believe politics is a very controversial topic where you can always find positive, neutral, and negative comments.\r\n    # As far as testing purposes it is relevant.\r\n    print(comments[0].body)\r\n    print(comments[0].subComments[0].body)\r\n    #print statements will help in maintaining the order of the comments rather than having them all over the place.\r\n    #print(positiveList)\r\n    #// print('Positive Comments') # This print line will help with the organization of the code and will print \"Positive \r\n    #Comments\" to make it easier to follow where these specific comments are.\r\n    #print(negativeList)\r\n    #//print('Negative Comments') # This print line will help with the organization of the code and will print \"Negative \r\n    #Comments\" to make it easier to follow where these specific comments are.\r\n    #print(normalList)\r\n    #//print('Normal Comments') # This print line will help with the organization of the code and will print \"Normal \r\n    #Comments\" to make it easier to follow where these specific comments are.\r\n\r\n    neg = get_text_negative_proba(comments[0].subComments[0].body)\r\n\r\n    print(neg)\r\n\r\nmain()\r\n","sub_path":"RecursiveReddit.py","file_name":"RecursiveReddit.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"559336526","text":"import json\nimport sys\nfrom json import JSONDecodeError\nfrom threading import Timer, Thread\nfrom settings.settings import LOG, UPDATE_TIME, TOPOLOGY_CREATION_TIMEOUT, PROGRAM_UPDATE\n\nfrom routing.router_port import RouterPort\n\n\nclass Router:\n    def __init__(self, name, ports):\n        self.name = name\n        self.ports = dict()\n        self._init_ports(ports)\n        self.timer = None\n        self.distance_vectors = {self.name: self._reset_dv()}\n        self.links = {self.name: (0, None)}\n        self.analizer = None\n\n    def _reset_dv(self):\n        \"\"\"\n        Creates an empty distance vector with information about this router\n        :return: new dv\n        \"\"\"\n        return {self.name: (0, None)}\n\n    def init_table(self):\n        \"\"\"\n        method called in the constructor to create the initial\n        routing table for every entrance known in this router\n        default will have max-size hops\n        :return:void\n        \"\"\"\n        for port in self.ports.values():\n            port.name_request()\n\n    def _compare_dvs(self, neighbour, new_dv):\n        \"\"\"\n        Compares received dv from neighbour with the last recorded one\n        :param neighbour: where the dv came from\n        :param new_dv: received dv\n        :return:\n        \"\"\"\n\n        differences = []\n\n        if neighbour in self.distance_vectors:\n            for name, cost in new_dv.items():\n                new_cost = self.distance_vectors[neighbour].get(name, None)\n\n                if new_cost is None or not new_cost[0] == cost[0] or not new_cost[1] == cost[1]:\n                    differences.append(name)\n        else:\n            differences = list(new_dv.keys())\n\n        if differences:\n            self.distance_vectors[neighbour] = new_dv\n            self._compute_table(differences)\n\n    def _compute_table(self, destinations=None):\n\n        \"\"\"\n        modifies current routing table if necessary,\n        whenever a better routing path is found\n        :param destinations: neighbours to recompute\n        :return: void\n        \"\"\"\n        self.analizer.marcar_inicio(self.name)\n        nodes = set()\n\n        for router, dv in self.distance_vectors.items():\n            for name, _ in dv.items():\n                nodes.add(name)\n\n        if destinations is None:\n            destinations = nodes\n\n        for dest in destinations:\n            distance = self.distance_vectors[self.name].get(dest, (sys.maxsize, None))\n\n            for u in nodes:\n                if u in self.distance_vectors:\n                    link = self.links.get(u, (sys.maxsize, None))\n                    dv_info = self.distance_vectors[u].get(dest, (sys.maxsize, None))\n\n                    new_distance = int(link[0]) + int(dv_info[0])\n\n                    if new_distance < distance[0]:\n                        distance = (new_distance, link[1] if link[1] else dv_info[1])\n\n            self.distance_vectors[self.name][dest] = distance\n        self.analizer.set_vd(self.name,self.distance_vectors)\n        self.analizer.marcar_final(self.name)\n        self._broadcast_dv()\n\n    def _success(self, message):\n        \"\"\"\n        Internal method called when a packet is successfully received.\n        :param message:\n        :return:\n        \"\"\"\n        print(\"[{}] {}: {}\".format(self.name, 'Success! 
Data', message))\n\n def _log(self, message):\n \"\"\"\n Internal method to log messages.\n :param message:\n :return: None\n \"\"\"\n if LOG:\n print(\"[{}] {}\".format(self.name, message))\n\n def _init_ports(self, ports):\n \"\"\"\n Internal method to initialize the ports.\n :param ports:\n :return: None\n \"\"\"\n for port in ports:\n input_port = port['input']\n output_port = port['output']\n cost = port.get('cost', 1)\n\n router_port = RouterPort(\n input_port, output_port, cost, lambda p: self._new_packet_received(p)\n )\n\n self.ports[output_port] = router_port\n\n def _new_packet_received(self, packet_tuple):\n \"\"\"\n Internal method called as callback when a packet is received.\n :param packet_tuple packet received and interface from which it was received\n :return: None\n \"\"\"\n packet = packet_tuple[0]\n interface = packet_tuple[1]\n\n message = packet.decode()\n\n try:\n message = json.loads(message)\n except JSONDecodeError:\n self._log(\"Malformed packet\")\n return\n\n if 'destination' in message and 'data' in message:\n dest = message['destination']\n if dest == self.name:\n self._success(message['data'])\n elif message['type'] == \"name_request\":\n interface.name_response(self.name)\n elif message['type'] == \"name_response\":\n neighbour = message['data']\n self.links[neighbour] = (message['cost'], interface)\n elif message['type'] == \"broadcast\":\n table = message.get('data', {})\n self._compare_dvs(message['origin'], table)\n elif dest in self.distance_vectors[self.name]:\n interface = self.distance_vectors[self.name][dest][1]\n self._log(\"Forwarding to port {}\".format(interface.output_port))\n interface.send_packet(packet)\n else:\n self._log(\"Malformed packet\")\n\n def _broadcast_dv(self):\n \"\"\"\n sends current table to neighbours\n :return: void\n \"\"\"\n for conn in self.ports.values():\n self.analizer.sum_paquete(self.name)\n conn.broadcast(self.distance_vectors[self.name], self.name)\n\n def _broadcast(self):\n \"\"\"\n Internal method to broadcast\n :return: None\n \"\"\"\n self._log(\"Broadcasting\")\n\n if LOG:\n printable_table = {k: (v[0], v[1].output_port if isinstance(v[1], RouterPort) else None) for k, v in self.distance_vectors[self.name].items()}\n self._log(printable_table)\n\n self._broadcast_dv()\n self.timer = Timer(UPDATE_TIME, lambda: self._broadcast())\n self.timer.start()\n\n def start(self):\n \"\"\"\n Method to start the routing.\n :return: None\n \"\"\"\n self._log(\"Starting\")\n\n for port in self.ports.values():\n port.start()\n\n Timer(TOPOLOGY_CREATION_TIMEOUT, lambda: self.init_table()).start()\n\n self.timer = Timer(UPDATE_TIME, lambda: self._broadcast())\n self.timer.start()\n\n def stop(self):\n \"\"\"\n Method to stop the routing.\n Is in charge of stop the router ports threads.\n :return: None\n \"\"\"\n self._log(\"Stopping\")\n if self.timer:\n self.timer.cancel()\n\n for port in self.ports.values():\n port.stop_running()\n\n for port in self.ports.values():\n port.join()\n\n self._log(\"Stopped\")\n\n def update_table(self):\n \"\"\"\n Updates table after change\n :return:\n \"\"\"\n self._compute_table()\n self.timer = Timer(UPDATE_TIME, lambda: self._broadcast())\n self.timer.start()\n\n def change_connection_cost(self, neighbor_name, new_cost):\n \"\"\"\n Changes the link cost between myself and a neighbour\n :param neighbor_name: other side of the link\n :param new_cost: new cost of the link\n :return:\n \"\"\"\n try:\n _, interface = self.links[neighbor_name]\n self.links[neighbor_name] = (new_cost, 
interface)\n self.distance_vectors[self.name][neighbor_name] = (sys.maxsize, interface)\n self.timer.cancel()\n\n Timer(PROGRAM_UPDATE, lambda: self.update_table()).start()\n except KeyError:\n self._log(\"Non-existant neighbour {}\".format(neighbor_name))","sub_path":"Tarea3/Tarea3_RIP/routing/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":7885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"104081091","text":"import numpy as np \r\ndef przekatna(n):\r\n przekatna=[]\r\n for i in range(n):\r\n przekatna.append(2) \r\n A = np.diag(przekatna) \r\n for a in range(0,n):\r\n for b in range(0,n):\r\n for c in range(1,n+1):\r\n if(a==b+c or b==a+c): \r\n A[a,b]=2*(c+1) \r\n return A \r\n \r\nprint(przekatna(8))","sub_path":"6_wprowadzenie/zadanie7/zad7.py","file_name":"zad7.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"277656553","text":"# fio metasay\n\n\nfrom random import choice\n\nfrom cowpy import cow\n\n\ndef moothedata(metadata, key=None):\n keys = list(metadata.keys())\n # Blacklist metadata items that cows can't pronounce.\n blacklist = ['transform', 'affine']\n for k in blacklist:\n if k in keys:\n keys.remove(k)\n key = key or choice(keys)\n msg = cow.Moose().milk(\"%s: %s\" % (key.capitalize(), metadata[key]))\n return msg\n","sub_path":"fio_metasay/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"235965324","text":"from base64 import b64encode\nfrom flask import Flask, jsonify, request\nfrom gevent import subprocess, pywsgi, queue, socket, spawn, lock\nfrom hashlib import sha512\nfrom tempfile import mkstemp\nimport json\nimport logging\nimport os\n\napp = Flask(__name__)\nlog = logging.getLogger(__name__)\nrebuild_queue = queue.PriorityQueue()\nstate_lock = lock.Semaphore()\n\n\ndef rebuild_worker():\n while True:\n (_, cmd) = rebuild_queue.get()\n rebuild_queue.queue.clear()\n subprocess.run(cmd)\n\n\ndef rebuild(priority, args):\n rebuild_queue.put((priority, ['sudo', 'nixos-rebuild', 'switch'] + args))\n\n\ndef get_state_path():\n return os.getenv('HPOS_CONFIG_PATH')\n\n\ndef get_state_data():\n with open(get_state_path(), 'r') as f:\n return json.loads(f.read())\n\n\ndef cas_hash(data):\n dump = json.dumps(data, separators=(',', ':'), sort_keys=True)\n return b64encode(sha512(dump.encode()).digest()).decode()\n\n\n@app.route('/v1/config', methods=['GET'])\ndef get_settings():\n return jsonify(get_state_data()['v1']['settings'])\n\n\ndef replace_file_contents(path, data):\n fd, tmp_path = mkstemp(dir=os.path.dirname(path))\n with open(fd, 'w') as f:\n f.write(data)\n os.rename(tmp_path, path)\n\n\n@app.route('/v1/config', methods=['PUT'])\ndef put_settings():\n with state_lock:\n state = get_state_data()\n if request.headers.get('x-hpos-admin-cas') != cas_hash(state['v1']['settings']):\n return '', 409\n state['v1']['settings'] = request.get_json(force=True)\n replace_file_contents(get_state_path(), json.dumps(state, indent=2))\n rebuild(priority=5, args=[])\n return '', 200\n\n\ndef zerotier_info():\n proc = subprocess.run(['sudo', 'zerotier-cli', '-j', 'info'],\n capture_output=True, check=True)\n return json.loads(proc.stdout)\n\n\n@app.route('/v1/status', methods=['GET'])\ndef status():\n return jsonify({\n 'zerotier': zerotier_info()\n 
})\n\n\n@app.route('/v1/upgrade', methods=['POST'])\ndef upgrade():\n rebuild(priority=1, args=['--upgrade'])\n return '', 200\n\n\ndef unix_socket(path):\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n if os.path.exists(path):\n os.remove(path)\n sock.bind(path)\n sock.listen()\n return sock\n\n\nif __name__ == '__main__':\n spawn(rebuild_worker)\n pywsgi.WSGIServer(unix_socket('/run/hpos-admin.sock'), app).serve_forever()\n","sub_path":"overlays/holo-nixpkgs/hpos-admin/hpos-admin.py","file_name":"hpos-admin.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"608941708","text":"from Raspi_MotorHAT import Raspi_MotorHAT\nfrom gpiozero import DistanceSensor\n\nimport atexit\nimport leds_led_shim\nfrom servos import Servos\nfrom encoder_counter import EncoderCounter\n\n\nclass Robot:\n wheel_diameter_mm = 70.0\n ticks_per_revolution = 40.0\n wheel_distance_mm = 132.0\n def __init__(self, motorhat_addr=0x6f):\n # Setup the motorhat with the passed in address\n self._mh = Raspi_MotorHAT(addr=motorhat_addr)\n\n self.left_motor = self._mh.getMotor(1)\n self.right_motor = self._mh.getMotor(2)\n\n # Setup the Leds\n self.leds = leds_led_shim.Leds()\n # Set up servo motors for pan and tilt.\n self.servos = Servos(addr=motorhat_addr)\n \n # Setup The Distance Sensors\n self.left_distance_sensor = DistanceSensor(echo=17, trigger=27, queue_len=2)\n self.right_distance_sensor = DistanceSensor(echo=5, trigger=6, queue_len=2)\n\n # Setup the Encoders\n EncoderCounter.set_constants(self.wheel_diameter_mm, self.ticks_per_revolution)\n self.left_encoder = EncoderCounter(4)\n self.right_encoder = EncoderCounter(26)\n\n # ensure the motors get stopped when the code exits\n atexit.register(self.stop_all)\n\n def convert_speed(self, speed):\n # Choose the running mode\n mode = Raspi_MotorHAT.RELEASE\n if speed > 0:\n mode = Raspi_MotorHAT.FORWARD\n elif speed < 0:\n mode = Raspi_MotorHAT.BACKWARD\n\n # Scale the speed\n output_speed = (abs(speed) * 255) // 100\n return mode, int(output_speed)\n\n def set_left(self, speed):\n mode, output_speed = self.convert_speed(speed)\n self.left_motor.setSpeed(output_speed)\n self.left_motor.run(mode)\n\n def set_right(self, speed):\n mode, output_speed = self.convert_speed(speed)\n self.right_motor.setSpeed(output_speed)\n self.right_motor.run(mode)\n\n def stop_motors(self):\n self.left_motor.run(Raspi_MotorHAT.RELEASE)\n self.right_motor.run(Raspi_MotorHAT.RELEASE)\n\n def stop_all(self):\n self.stop_motors()\n\n # Clear the display\n self.leds.clear()\n self.leds.show()\n\n # Clear any sensor handlers\n self.left_encoder.stop()\n self.right_encoder.stop()\n\n # Reset the servos\n self.servos.stop_all()\n\n def set_pan(self, angle):\n self.servos.set_servo_angle(1, angle)\n \n def set_tilt(self, angle):\n self.servos.set_servo_angle(0, angle)\n\n","sub_path":"chapter11/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"354356409","text":"\"\"\"Pytest fixture tools plugin.\"\"\"\n\nimport py\nimport os\nimport errno\nimport inspect\nimport sys\n#import pprint\nimport functools\n\nfrom _pytest.python import getlocation\nfrom collections import defaultdict, namedtuple\n\nimport pydot\n\ntw = py.io.TerminalWriter()\nverbose = 1\n\n\ndef mkdir_recursive(path):\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST 
and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef pytest_addoption(parser):\n \"\"\"Add commandline options show-fixture-duplicates and fixture.\"\"\"\n group = parser.getgroup(\"general\")\n group.addoption('--show-fixture-duplicates',\n action=\"store_true\", dest=\"show_fixture_duplicates\", default=False,\n help=\"show list of duplicates from available fixtures\")\n group.addoption('--fixture',\n action=\"store\", type=str, dest=\"fixture_name\", default='',\n help=\"Name of specific fixture for which you want to get duplicates\")\n\n group.addoption('--fixture-graph',\n action=\"store_true\", dest=\"fixture_graph\", default=False,\n help=\"create .dot fixture graph for each test\")\n group.addoption('--fixture-graph-output-dir',\n action=\"store_true\", dest=\"fixture_graph_output_dir\", default=\"artifacts\",\n help=\"select the location for the output of fixture graph. defaults to 'artifacts'\")\n group.addoption('--fixture-graph-output-type',\n action=\"store_true\", dest=\"fixture_graph_output_type\", default=\"png\",\n help=\"select the type of the output for the fixture graph. defaults to 'png'\")\n\n\ndef pytest_cmdline_main(config):\n \"\"\"Check show_fixture_duplicates option to show fixture duplicates.\"\"\"\n if config.option.show_fixture_duplicates:\n show_fixture_duplicates(config)\n return 0\n\n\ndef show_fixture_duplicates(config):\n \"\"\"Wrap pytest session to show duplicates.\"\"\"\n from _pytest.main import wrap_session\n return wrap_session(config, _show_fixture_duplicates_main)\n\n\ndef print_duplicates(argname, fixtures, previous_argname):\n \"\"\"Print duplicates with TerminalWriter.\"\"\"\n if len(fixtures) > 1:\n fixtures = sorted(fixtures, key=lambda key: key[2])\n\n for baseid, module, bestrel, fixturedef in fixtures:\n\n if previous_argname != argname:\n tw.line()\n tw.sep(\"-\", argname)\n previous_argname = argname\n\n if verbose <= 0 and argname[0] == \"_\":\n continue\n\n funcargspec = bestrel\n\n tw.line(funcargspec)\n\n\ndef _show_fixture_duplicates_main(config, session):\n \"\"\"Preparing fixture duplicates for output.\"\"\"\n session.perform_collect()\n curdir = py.path.local()\n\n fm = session._fixturemanager\n\n fixture_name = config.option.fixture_name\n available = defaultdict(list)\n arg2fixturedefs = ([fixture_name]\n if fixture_name and fixture_name in fm._arg2fixturedefs\n else fm._arg2fixturedefs)\n for item in session.items:\n for argname in arg2fixturedefs:\n fixturedefs = fm.getfixturedefs(argname, item.nodeid)\n assert fixturedefs is not None\n if not fixturedefs:\n continue\n\n for fixturedef in fixturedefs:\n loc = getlocation(fixturedef.func, curdir)\n\n fixture = (\n len(fixturedef.baseid),\n fixturedef.func.__module__,\n curdir.bestrelpath(loc),\n fixturedef\n )\n if fixture[2] not in [f[2] for f in available[argname]]:\n available[argname].append(fixture)\n\n if fixture_name:\n print_duplicates(fixture_name, available[fixture_name], None)\n else:\n available = sorted([(key, items) for key, items in available.items()], key=lambda key: key[0])\n\n previous_argname = None\n for argname, fixtures in available:\n print_duplicates(argname, fixtures, previous_argname)\n previous_argname = argname\n\n\ndef pytest_collection_modifyitems(session, config, items):\n if config.option.fixture_graph:\n save_fixture_graph(\n config,\n name2fixturedefs={\n name: [\n fixture_def for fixture_def in fixture_defs\n # Exclude fixtures defined in test modules.\n if not fixture_def.func.__module__.split('.')[-1].startswith(\"test_\")\n ]\n for 
name, fixture_defs in session._fixturemanager._arg2fixturedefs.items()\n },\n filename='fixture-graph',\n )\n\n\ndef pytest_runtest_setup(item):\n if item.config.option.fixture_graph and hasattr(item, \"_fixtureinfo\"):\n save_fixture_graph(\n item.config, item._fixtureinfo.name2fixturedefs,\n filename=\"fixture-graph-{}\".format(item._nodeid.replace(\":\", \"_\").replace(\"/\", \"-\")),\n func_args=item._fixtureinfo.argnames,\n )\n\n\ndef _get_fixture_search_order(func_path):\n func_dir = os.path.dirname(func_path)\n if func_dir == func_path:\n return []\n conftest_path = os.path.join(func_dir, 'conftest.py')\n return [conftest_path] + _get_fixture_search_order(func_dir)\n\n\ndef _get_func_path(func, path_cache):\n try:\n return path_cache[func]\n except KeyError:\n path_cache[func] = func_path = inspect.getfile(func)\n return func_path\n\n\ndef _find_fixture_def(source_fixture_name, func_path, fixture_name, name2fixturedefs, get_func_path):\n search_order = _get_fixture_search_order(os.path.dirname(func_path))\n\n if source_fixture_name != fixture_name:\n # Do not include same file in search path when overriding a fixture.\n search_order.insert(0, func_path)\n\n def sort_key(fixture_def):\n try:\n return search_order.index(get_func_path(fixture_def.func))\n except ValueError:\n return sys.maxsize\n try:\n target_fixture_defs = name2fixturedefs[fixture_name]\n except KeyError:\n return None\n try:\n fixture_def = sorted(\n target_fixture_defs,\n key=sort_key,\n )[0]\n except IndexError:\n return None\n return fixture_def\n\n\ndef _get_cluster_name(func_path, cwd):\n return (\n os.path.relpath(func_path, cwd) if func_path.startswith(cwd)\n else func_path.split(\"site-packages/\")[1]\n )\n\n\ndef _get_fixture_node_name(fixture_name, fixture_def, cwd, get_func_path):\n if fixture_def is None:\n return '', ''\n func_path = get_func_path(fixture_def.func)\n return _get_cluster_name(func_path, cwd), fixture_name\n\n\nclass Tree(object):\n\n def __init__(self, parent, name, children, graph=None):\n self.name = name\n self.children = children\n self.graph = graph\n self.parent = parent\n\n def find_parent_graph(self):\n if self.parent is None:\n return None\n if self.parent.graph is not None:\n return self.parent.graph\n return self.parent.find_parent_graph()\n\n def __iter__(self):\n yield self\n for subtree in self.children.values():\n for subtree2 in subtree:\n yield subtree2\n\n def _to_string(self, level=0):\n return \" \" * level + self.name + \"\\n\" + \"\\n\".join(\n [subtree._to_string(level + 1) for subtree in self.children.values()],\n )\n\n def __str__(self):\n return self._to_string()\n\n\ndef save_fixture_graph(config, name2fixturedefs, filename, func_args=None):\n data = defaultdict(dict)\n if func_args:\n data['']['func_args'] = func_args, 'red'\n cwd = os.getcwd() + os.sep\n\n get_func_path = functools.partial(_get_func_path, path_cache={})\n\n for fixture_name, fixture_defs in list(name2fixturedefs.items()):\n if fixture_name == 'request':\n continue\n\n for fixture_def in fixture_defs:\n func_path = get_func_path(fixture_def.func)\n cluster_name = _get_cluster_name(func_path, cwd)\n color = 'green'\n data[cluster_name][fixture_name] = [\n _get_fixture_node_name(\n argname, _find_fixture_def(\n fixture_name, func_path, argname, name2fixturedefs, get_func_path,\n ), cwd, get_func_path,\n )\n for argname in fixture_def.argnames\n ], color\n\n #print(pprint.pformat(dict(data)))\n\n graph = pydot.Dot(graph_type='digraph')\n #graph.set_splines('true')\n 
graph.set_concentrate('true')\n graph.set_rankdir('LR')\n #graph.set_overlap('compress')\n #graph.set_ratio('compress')\n\n func_path2subgraph = {}\n\n for func_path, subgraph_data in data.items():\n\n subgraph = pydot.Cluster(graph_name=func_path)\n #subgraph.set_splines('true')\n #subgraph.set_label(func_path)\n #subgraph.set_concentrate('true')\n #subgraph.set_overlap('compress')\n #subgraph.set_ratio('compress')\n #subgraph.set_size(1)\n #graph.add_subgraph(subgraph)\n func_path2subgraph[func_path] = subgraph\n\n for name, depended_list in list(subgraph_data.items()):\n depended_list, color = depended_list\n\n node = pydot.Node(func_path + \"/\" + name, style=\"filled\", fillcolor=color)\n node.set_label(name)\n subgraph.add_node(node)\n for dest_cluster, dest_name in depended_list:\n if not dest_name:\n continue\n edge = pydot.Edge(node, dest_cluster + '/' + dest_name)\n graph.add_edge(edge)\n #subgraph.set_ltail(dest_cluster)\n\n tree = Tree(parent=None, name='/', children={}, graph=graph)\n\n for func_path, subgraph in func_path2subgraph.items():\n subtree = tree\n current_path = os.sep\n subgraph.set_label(os.path.basename(func_path))\n for segment in func_path.split(os.sep):\n current_path += segment\n try:\n subtree = subtree.children[segment]\n except KeyError:\n subgraph2 = pydot.Cluster(graph_name=current_path)\n subgraph2.set_label(segment)\n subtree.children[segment] = subtree2 = Tree(\n parent=subtree,\n name=segment,\n children={},\n graph=subgraph2,\n )\n subtree = subtree2\n subtree.graph = subgraph\n\n for subtree in tree:\n if subtree.graph is None:\n continue\n parent_graph = subtree.find_parent_graph()\n if parent_graph is None:\n continue\n parent_graph.add_subgraph(subtree.graph)\n\n log_dir = config.option.fixture_graph_output_dir\n output_type = config.option.fixture_graph_output_type\n mkdir_recursive(log_dir)\n filename = os.path.join(log_dir, filename)\n tw.line()\n tw.sep(\"-\", \"fixture-graph\")\n tw.line(\"created {}.dot.\".format(filename))\n graph.write(filename + \".dot\")\n try:\n graph.write(\"{}.{}\".format(filename, output_type), format=output_type)\n tw.line(\"created {}.{}.\".format(filename, output_type))\n except Exception:\n tw.line(\"graphvis wasn't found in PATH\")\n tw.line(\"You can convert it to a PNG using:\\n\\t'dot -Tpng {0}.dot -o {0}.png'\".format(filename))\n","sub_path":"pytest_fixture_tools/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":11508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"232904426","text":"import responder\r\nimport threading\r\n\r\nfrom sqlalchemy import Integer, Column\r\nfrom sqlalchemy.orm.session import sessionmaker\r\nimport sqlalchemy.ext.declarative\r\n\r\nfrom sqlalchemy_rope import SessionJenny\r\n\r\napi = responder.API()\r\nBase = sqlalchemy.ext.declarative.declarative_base()\r\n\r\nurl = \"sqlite:///data.db\"\r\n\r\n\r\nclass Data(Base):\r\n __tablename__ = \"data\"\r\n id = Column(Integer, primary_key=True)\r\n count = Column(Integer, default=0)\r\n\r\n\r\nengine = sqlalchemy.create_engine(url, echo=False)\r\nBase.metadata.create_all(engine)\r\nSessionMaker = sessionmaker(bind=engine)\r\n\r\njenny = SessionJenny(SessionMaker)\r\n\r\nif not jenny.session.query(Data).all():\r\n data = Data()\r\n jenny.session.add(data)\r\n jenny.session.commit()\r\n\r\n\r\n@api.route(\"/\")\r\ndef index(req, resp):\r\n\r\n print(\"-\" * 10, \"enter\", \"-\" * 10)\r\n print(\"thread:\", threading.current_thread().ident)\r\n print(\"session 
id:\", id(jenny.session))\r\n print(\"session id from another scope\", session_id())\r\n data = jenny.session.query(Data).first()\r\n data.count += 1\r\n jenny.session.commit()\r\n print(locals())\r\n print(\"-\" * 10, \"exit\", \"-\" * 10)\r\n resp.content = str(data.count)\r\n\r\n\r\ndef session_id():\r\n return id(jenny.session)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n api.run()\r\n","sub_path":"example/example_2.py","file_name":"example_2.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"449548281","text":"import copy\nimport Mailbox\nimport Message\n\nclass Agent(object):\n def __init__(self, agent_id, variable_value=None, domain=None):\n self.agent_id = agent_id\n self.variable_value = variable_value\n self.domain = domain\n self.constraints = []\n self.agent_view = {} #agent : value\n self.mailbox = Mailbox.Mailbox()\n\n def initialize(self):\n raise NotImplementedError()\n\n def compute(self):\n raise NotImplementedError()\n\n def send_msgs(self):\n raise NotImplementedError()\n\n def reactToMsg(self, msg):\n raise NotImplementedError()\n\n def receive_messages(self):\n for msg in self.mailbox.msgs:\n self.reactToMsg(msg)\n self.mailbox.msgs = []\n\n\nclass Agent_MGM2(Agent):\n def __init__(self, agent_id, randnum, p=0.5):\n Agent.__init__(self, agent_id)\n self.rand = randnum\n self.p = p\n self.current_cost = None\n self.outgoingMail = Mailbox.Mailbox()\n self.offered = False\n self.offered_agent_id = None\n self.R = None\n self.bestValueOption = None\n self.swap = False\n\n def send_msgs(self): #send all messages from outgoing mailbox\n msgs = self.outgoingMail.msgs\n self.outgoingMail.msgs = []\n return msgs\n\n def initialize(self): #this will be a random decision of a value\n index = self.rand.randint(0, len(self.domain)-1)\n self.variable_value = self.domain[index]\n self.send_value_to_neighbors() #creates messages for outbox of current value\n\n def acceptFriendship(self, msg): #calculates best R for improvement of a friendship\n maxR = -1\n minMydomval = None\n minOtherDomVal = None\n myView=copy.copy(self.agent_view)\n myNeigView=copy.copy(msg.agentView)\n for domval in self.domain:\n for domvalneighbor in msg.domain:\n myView[self.offered_agent_id]= domvalneighbor\n myNeigView[self.agent_id]= domval\n c1 = calculateCostIfUsingValue(domval, self.constraints, myView)\n c2 = calculateCostIfUsingValue(domvalneighbor, msg.constraints, myNeigView)\n R = (self.current_cost + msg.currentCost) - (c1 + c2)\n if R > maxR:\n maxR = R\n minMydomval = domval\n minOtherDomVal = domvalneighbor\n\n self.R = maxR\n self.bestValueOption = minMydomval\n\n return minOtherDomVal\n\n def checkAlgorithmStateByMessages(self): #checks the state of the algorithm according to the type of messages the agent\n # receives. 
since the alg is synch', they will all be in the same stage at the same time\n for msg in self.mailbox.msgs:\n if type(msg) == Message.ValueMessage:\n return 1\n elif type(msg) == Message.FriendshipMessage or type(msg) == Message.UnFriendshipMessage:\n return 2\n elif type(msg) == Message.AcceptFriendshipMessage or type(msg) == Message.DeclineFriendshipMessage:\n return 3\n elif type(msg) == Message.RMessage:\n return 4\n elif type(msg) == Message.CanSwapMessage or type(msg) == Message.CannotSwapMessage or type(msg) == Message.SwapPhaseNotPartners:\n return 5\n\n def compute(self): #here we write the MGM2 algorithm for a single agent\n incoming = len(self.mailbox.msgs)\n algorithmState = self.checkAlgorithmStateByMessages()\n\n if algorithmState == 1:\n self.receive_value_messages() #get everyones values\n self.randomize_partner() #choose if to get a partner and if so, send random neighbor a friendship message\n\n elif algorithmState == 2:\n self.finalize_partnerships() #if i asked for friendship, i reject everyone\n #if i didnt ask for a friendship, i approve first request and reject the rest\n\n elif algorithmState == 3:\n self.get_R() #calculate R\n self.send_R_to_neighbors() #send R\n\n elif algorithmState == 4:\n self.check_if_can_swap() #checks if this agent is best out of neighbors\n\n elif algorithmState == 5: #if both can swap/not in a couple and still best, swap\n self.find_out_if_can_swap()\n\n if self.swap is True:\n self.variable_value = self.bestValueOption\n\n self.send_value_to_neighbors() #send value to neighbor (whether agent swapped or not)\n\n self.resetAlgorithmParameters() #reset state of friendships, etc.\n\n\n def choose_best_domval(self): #single agent improvement\n Rmax = -1\n bestVal = None\n for domval in self.domain:\n cost = calculateCostIfUsingValue(domval, self.constraints, self.agent_view)\n R = self.current_cost - cost\n if R > Rmax:\n Rmax = R\n bestVal = domval\n self.R = Rmax\n self.bestValueOption = bestVal\n\n def resetAlgorithmParameters(self): #reset state of friendships, etc.\n self.offered = False\n self.offered_agent_id = None\n self.R = None\n self.bestValueOption = None\n self.swap = False\n\n def receive_value_messages(self): #update agent view and cost\n for msg in self.mailbox.msgs:\n self.agent_view[msg.sender_id] = msg.context\n self.mailbox.msgs = []\n self.current_cost = calculateCostIfUsingValue(self.variable_value, self.constraints, self.agent_view)\n\n def randomize_partner(self): #choose if to have a friend, and which friend if chosen\n chosenNeighbor = None\n if self.rand.random() < self.p: # offer friendship\n chosenNeighbor = self.rand.choice(list(self.agent_view.keys()))\n self.offered_agent_id = chosenNeighbor\n self.offered = True\n self.outgoingMail.msgs.append(\n Message.FriendshipMessage(self.agent_id, chosenNeighbor, self.domain, self.variable_value,\n self.constraints, self.agent_view, self.current_cost))\n unchosen_neighbors = copy.deepcopy(list(self.agent_view.keys()))\n if chosenNeighbor is not None:\n unchosen_neighbors.remove(chosenNeighbor)\n for neighbor in unchosen_neighbors: #send fake messages to whoever we didnt offer\n self.outgoingMail.msgs.append(Message.UnFriendshipMessage(self.agent_id, neighbor))\n\n def finalize_partnerships(self): #accept or decline friendship offers\n for msg in self.mailbox.msgs:\n if type(msg) == Message.FriendshipMessage and self.offered is False:\n otherDomAlternative = self.acceptFriendship(msg)\n self.offered = True\n self.offered_agent_id = msg.sender_id\n 
self.outgoingMail.msgs.append(Message.AcceptFriendshipMessage(self.agent_id, self.offered_agent_id, self.R, self.bestValueOption,\n otherDomAlternative))\n else: # did offer but not to sender or already accepted a partner\n self.outgoingMail.msgs.append(Message.DeclineFriendshipMessage(self.agent_id, msg.sender_id))\n self.mailbox.msgs = []\n\n def get_R(self):\n for msg in self.mailbox.msgs:\n if type(msg) == Message.AcceptFriendshipMessage:\n self.R = msg.R\n self.bestValueOption = msg.receiverAlternativeDomVal\n self.offered = True\n self.offered_agent_id = msg.sender_id\n break\n else: #didnt get a friendship\n self.offered = False\n self.offered_agent_id = None\n self.choose_best_domval() # updates R and bestValueOption\n\n self.mailbox.msgs = []\n\n\n def send_R_to_neighbors(self): #send R messages to all neighbors\n for neighbor in self.agent_view.keys():\n self.outgoingMail.msgs.append(Message.RMessage(self.agent_id, neighbor, self.R))\n\n def check_if_can_swap(self): #checks if can swap- if i am single and better than everyone, swap, if i am not single and me and my partner are each better than everyone, swap.\n\n self.swap=True\n for msg in self.mailbox.msgs:\n if msg.R > self.R or (msg.R == self.R and msg.sender_id!=self.offered_agent_id and msg.sender_id < self.agent_id ): #someone else is better than me\n self.swap = False\n break\n\n\n self.mailbox.msgs = []\n\n if self.swap is True and self.R>0: # self.R is the max - will swap\n if self.offered is True:\n self.outgoingMail.msgs.append(Message.CanSwapMessage(self.agent_id, self.offered_agent_id))\n else:\n self.swap = False\n if self.offered is True:\n self.outgoingMail.msgs.append(Message.CannotSwapMessage(self.agent_id, self.offered_agent_id))\n\n for neighbor in self.agent_view.keys(): #fake message to all that arent my partner\n if neighbor != self.offered_agent_id:\n self.outgoingMail.msgs.append(Message.SwapPhaseNotPartners(self.agent_id, neighbor))\n\n\n def find_out_if_can_swap(self): #partners decide if can swap together\n if self.swap is True:\n for msg in self.mailbox.msgs:\n if type(msg) is Message.CannotSwapMessage:\n self.swap = False\n\n self.mailbox.msgs = []\n\n def send_value_to_neighbors(self):#send a value message to each of my neighbors\n for neighbor in self.agent_view.keys():\n self.outgoingMail.msgs.append(\n Message.ValueMessage(sender_id=self.agent_id, receiver_id=neighbor, context=self.variable_value))\n\n\nclass Agent_DSA(Agent):\n def __init__(self, agent_id, randnum, p=0.7):\n Agent.__init__(self, agent_id)\n self.p = p\n self.rand = randnum\n self.current_cost = None\n\n def initialize(self): #this will be a random decision of a value\n index = self.rand.randint(0, len(self.domain)-1)\n self.variable_value = self.domain[index]\n\n def send_msgs(self): #sends messages to all neighbors with current value\n msgs = []\n for neighbor in self.agent_view.keys():\n msgs.append(Message.ValueMessage(sender_id=self.agent_id, receiver_id=neighbor, context=self.variable_value))\n\n return msgs\n\n def reactToMsg(self, msg): #update agent view\n self.agent_view[msg.sender_id] = msg.context\n\n def compute(self): #here we write the DSA algorithm for a single agent\n self.receive_messages()\n self.current_cost = calculateCostIfUsingValue(self.variable_value, self.constraints, self.agent_view)\n\n minCost = None\n bestVal = None\n for domval in self.domain:\n if domval != self.variable_value: #only alternative values\n cost = calculateCostIfUsingValue(domval, self.constraints, self.agent_view)\n if minCost is 
None:\n                    minCost = cost\n                    bestVal = domval\n                elif cost < minCost:\n                    minCost = cost\n                    bestVal = domval\n        if minCost <= self.current_cost and self.rand.random() < self.p:\n            self.variable_value = bestVal\n\n\ndef calculateCostIfUsingValue(domval, constraints, agent_view): #if this agent will choose to use domval, the cost will be domvalcost\n    domvalcost = 0\n    for constraint in constraints:\n        if (domval == constraint.myValue) and (constraint.otherAgentValue == agent_view[constraint.otherAgent_id]):\n            domvalcost += constraint.cost\n    return domvalcost","sub_path":"Artificial-Intelligence/multiAgents/Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":11671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"272124056","text":"from django.shortcuts import render_to_response,render,redirect,HttpResponse\nfrom read_statistics.utils import get_seven_days_read_data,get_7_days_hot_data\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.cache import cache\nfrom django.contrib import auth\nfrom django.urls import reverse\nfrom django.http import JsonResponse\nfrom django.contrib.auth.models import User\nfrom blog.models import Blog\nfrom .forms import LoginForm,RegisterForm\n\ndef home(request):\n\tblog_content_type = ContentType.objects.get_for_model(Blog)\n\tdates,read_nums = get_seven_days_read_data(blog_content_type)\n\n\t#get the cached data of the last 7 days' hot blogs\n\t'''\n\thot_blogs_for_7_days = cache.get('hot_blogs_for_7_days')\n\tif hot_blogs_for_7_days is None:\n\t\thot_blogs_for_7_days = get_7_days_hot_data\n\t\tcache.set('hot_blogs_for_7_days',hot_blogs_for_7_days,3600)\n\t'''\n\tcontext = {}\n\tcontext['read_nums'] = read_nums\n\tcontext['dates'] = dates\n\n\treturn render(request,'home.html',context)\n\n\ndef login(request):\n\n\tif request.method == 'POST':\n\t\tlogin_form = LoginForm(request.POST)\n\t\tif login_form.is_valid(): #check that the submitted data is valid\n\t\t\tuser = login_form.cleaned_data['user']\n\t\t\tauth.login(request,user)\n\t\t\treturn redirect(request.GET.get('from',reverse('home')))\n\telse:\n\t\tlogin_form = LoginForm()\n\n\tcontext = {}\n\tcontext['login_form'] = login_form\n\treturn render(request,'login.html',context)\n\ndef login_for_medal(request):\n\tdata = {}\n\tlogin_form = LoginForm(request.POST)\n\tif login_form.is_valid(): #check that the submitted data is valid\n\t\tuser = login_form.cleaned_data['user']\n\t\tauth.login(request,user)\n\t\tdata['status'] = 'SUCCESS'\n\telse:\n\t\tdata['status'] = 'ERROR'\n\treturn JsonResponse(data)\n\ndef register(request):\n\n\tif request.method == 'POST':\n\t\tregister_form = RegisterForm(request.POST)\n\t\tif register_form.is_valid():\n\t\t\tusername = register_form.cleaned_data['username']\n\t\t\temail = register_form.cleaned_data['email']\n\t\t\tpassword = register_form.cleaned_data['password']\n\t\t\t# create the user\n\t\t\tuser = User.objects.create_user(username,email,password)\n\t\t\tuser.save()\n\t\t\t# log the user in\n\t\t\tuser = auth.authenticate(username=username,password=password)\n\t\t\tauth.login(request,user)\n\n\t\t\treturn redirect(request.GET.get('from',reverse('home')))\n\telse:\n\t\tregister_form = RegisterForm()\n\n\tcontext = {}\n\tcontext['register_form'] = register_form\n\n\treturn render(request,'register.html',context)\n\n\ndef logout(request):\n\tauth.logout(request)\n\treturn redirect(request.GET.get('from',reverse('home')))\n\ndef user_info(request):\n\tcontext = {}\n\treturn 
render(request,'user_info.html',context);\n\n\n\n\n","sub_path":"mysite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"639936662","text":"name1 = input(\"Write a name: \")\nname2 = input(\"Write a name: \")\nname3 = input(\"Write a name: \")\nname4 = input(\"Write a name: \")\nname5 = input(\"Write a name: \")\nprint(name5, name4, name3, name2, name1)\n# This vill print: \"Write a name\" five times and then print the five names written\n\nname = [\"Goran\", \"Klas\", \"Henrik\"]\nk = 0\nwhile k < 3:\n sum = name[k]\n print(sum)\n k += 1\n# This will print: Goran, Klas, Henrik\n\nanswer = [\"This\", \"is\", \"a\", \"tryout\"]\ny = 1\nanswer[0] = input(\"Give one word: \")\nwhile y < 4:\n answer[y] = input(\"Give one word: \")\n y += 1\n# This will print: \"Give one word:\" four times.\n\nx = [1, 2, 3]\nx.append([4, 5])\nprint(x)\n# This will print: [1, 2, 3, [4, 5]]\n\nx = [1, 2, 3]\nx.extend([4, 5])\nprint(x) \n# This will print: [1, 2, 3, 4, 5]\n\nstinasList = [\"this\", \"might\", \"work\"]\nprint(stinasList)\n# This will print: [\"this, might, work\"]\n\nprint(stinasList[0])\nprint(stinasList[1])\nprint(stinasList[2])\nprint(stinasList[-1])\n# This will print: \"this, might work, work\" but on different rows\n\nname = [\"Tina\", \"Greta\", \"Philip\", \"Madde\"]\nname.append([\"Stina\"])\nprint(name)\n# This will print: ['Tina', 'Greta', 'Philip', 'Madde', ['Stina']]\n\nname = []\nname.append(\"This\")\nname.append(\"will\")\nname.append(\"work\")\nname.append(\"too!\")\nprint(name)\n# This will print: ['This', 'will', 'work', 'too!']\n\nname = [\"Write a name\"]\nt = 0\nwhile t < 5:\n print(\"write a name\")\n t += 1\n# This will print: \"write a name\" five times on different rows","sub_path":"array.py","file_name":"array.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"134349807","text":"from flask_script import Manager\nfrom app.models import db, Categories, Items\nfrom run import app\nimport uuid\n\nmanager = Manager(app)\n\n\n@manager.command\ndef init_db_load():\n category1 = Categories(name=\"Soccer\")\n category2 = Categories(name=\"Basketball\")\n category3 = Categories(name=\"Baseball\")\n category4 = Categories(name=\"Frisbee\")\n category5 = Categories(name=\"Snowboarding\")\n category6 = Categories(name=\"Rock Climbing\")\n category7 = Categories(name=\"Foosball\")\n category8 = Categories(name=\"Skating\")\n category9 = Categories(name=\"Hockey\")\n db.session.add(category1)\n db.session.commit()\n db.session.add(category2)\n db.session.commit()\n db.session.add(category3)\n db.session.commit()\n db.session.add(category4)\n db.session.commit()\n db.session.add(category5)\n db.session.commit()\n db.session.add(category6)\n db.session.commit()\n db.session.add(category7)\n db.session.commit()\n db.session.add(category8)\n db.session.commit()\n db.session.add(category9)\n db.session.commit()\n\n\n@manager.command\ndef delete_all_categories():\n db.session.query(Categories).delete()\n db.session.commit()\n\n# Test Scripts\n# @manager.command\n# def update_category():\n# category = Categories.query.filter_by(name='Soccer').first()\n# category.name = \"Soccer123\"\n# db.session.add(category)\n# db.session.commit()\n#\n#\n# @manager.command\n# def get_items():\n# item = Items.query.filter_by(name='Football').first()\n# print dir(item)\n#\n#\n# 
@manager.command\n# def get_categories():\n# result = Categories.query.all()\n# for r in result:\n# items = r.items\n# for i in items:\n# print i.name\n\n\nif __name__ == \"__main__\":\n manager.run()\n","sub_path":"populate_db.py","file_name":"populate_db.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"561033822","text":"\"\"\"Module allows to access images from GTA Crimes dataset\"\"\"\n\nimport tarfile\n\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torchvision.datasets import ImageFolder\nfrom torchvision.transforms import Compose, Resize, ToTensor\n\n\nclass GTADataset(Dataset):\n \"\"\"Provides access to GTA Crime images dataset\"\"\"\n\n CLASSES = {\n 'Normal': 0,\n 'Arrest': 1,\n 'Arson': 2,\n 'Assault': 3,\n 'Explosion': 4,\n 'Fight': 5,\n 'Robbery': 6,\n 'Shooting': 7,\n 'Vandalism': 8,\n }\n\n def __init__(self, archive_file: str, transform=None):\n self.archive = tarfile.open(archive_file, 'r:gz')\n self.transform = transform\n self.closed = False\n\n def __len__(self):\n if self.closed:\n raise Exception('Dataset has been closed')\n return len(self.archive.getnames())\n\n def __getitem__(self, idx):\n if self.closed:\n raise Exception('Dataset has been closed')\n\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n name = self.archive.getnames()[idx]\n image_class = name.split('/')[0]\n image_file = self.archive.extractfile(name)\n image_data = Image.open(image_file)\n\n if self.transform:\n image_data = self.transform(image_data)\n\n return image_data, self.CLASSES[image_class]\n\n def close(self):\n \"\"\"Close the underlying archive file\"\"\"\n if not self.closed:\n self.archive.close()\n self.closed = True\n\n\ndef create_gta_dataloaders(dataset_path, transform=ToTensor(), batch_size=16):\n dataset = ImageFolder(dataset_path, transform=transform)\n shuffled_indices = np.random.permutation(len(dataset))\n train_idx = shuffled_indices[:int(0.8 * len(dataset))]\n val_idx = shuffled_indices[int(0.8 * len(dataset)):]\n\n train_loader = DataLoader(dataset,\n batch_size=batch_size,\n drop_last=True,\n sampler=SubsetRandomSampler(train_idx),\n num_workers=1,\n pin_memory=True)\n val_loader = DataLoader(dataset,\n batch_size=batch_size,\n drop_last=False,\n sampler=SubsetRandomSampler(val_idx),\n num_workers=1,\n pin_memory=True)\n return train_loader, val_loader\n","sub_path":"gta.py","file_name":"gta.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"241017008","text":"\n#! 
/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom socket import *\nfrom select import *\nimport sys\nfrom time import ctime\n\n\ndef socketOpen():\n    HOST = '172.16.110.171'\n#    HOST = '127.0.0.1'\n    PORT = 10000\n    BUFSIZE = 1024\n    ADDR = (HOST, PORT)\n\n    # create the socket\n    serverSocket = socket(AF_INET, SOCK_STREAM)\n\n    # bind the address to the socket\n    serverSocket.bind(ADDR)\n    print('bind')\n\n    # wait for incoming connections\n    serverSocket.listen(100)\n    print('listen')\n\n    # accept a connection\n    clientSocket, addr_info = serverSocket.accept()\n    print('accept')\n    print('--client information--')\n    print(clientSocket)\n\n    # receive messages from the client\n    while True:\n        data = clientSocket.recv(65535)\n        print('receive data : ', data.decode())\n        msg = data.decode()\n        if msg == 'exit':\n            break\n\n    # close the sockets\n    clientSocket.close()\n    serverSocket.close()\n    print('close')\n\ndef socketClient():\n#    HOST = '127.0.0.1'\n    HOST = '172.16.110.171'\n\n    PORT = 10000\n    BUFSIZE = 1024\n    ADDR = (HOST, PORT)\n\n    clientSocket = socket(AF_INET, SOCK_STREAM)  # create a socket for connecting to the server\n\n    try:\n        clientSocket.connect(ADDR)  # try to connect to the server\n\n    except Exception as e:\n        print('%s:%s' % ADDR)\n        sys.exit()\n\n    print('connection succeeded')\n\n    while True:\n        sendData = input(\"input data : \") + \"\\n\"\n        clientSocket.send(sendData.encode())\n        print(\"send success\")\n\ndef connect(clientSocket, ADDR):  # connect the socket to the server\n    try:\n        clientSocket.connect(ADDR)  # try to connect to the server\n\n    except Exception as e:\n        print('%s:%s' % ADDR)\n        sys.exit()\n\n    print('connection succeeded')\n\ndef sendData(clientSocket, data):\n    data = data+'\\n'\n    print(data)\n    clientSocket.send(data.encode())\n    print(data.encode())\n    return \"send success\"","sub_path":"kwon/Django/mioo/clothes/socket.py","file_name":"socket.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"311457347","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nimport paypalrestsdk\nfrom paypal import PayPalInterface\nimport logging\nfrom openerp import tools\nimport openerp.addons.decimal_precision as dp\nfrom openerp.osv import fields,osv\nimport datetime\n\nclass wf_refund_payment(osv.osv):\n    _name = \"wf_refund.payment\"\n\n    _columns = {\n        'invoice_id': fields.many2one('account.invoice', 'Invoice', required=True),\n        'partner_id': fields.many2one('res.partner', 'Partner'),\n        'payment_type': fields.many2one('payment.type', 'Payment Type'),\n        'currency_id': fields.many2one('res.currency', 'Currency'),\n        'company_id': fields.many2one('res.company', 'company'),\n        'payment_release': fields.selection([(1,'not know'), (2,'pay'), (3,'not pay'), (4,'payed')], 'Payment Release'),\n        'date_done': fields.date('Date done'),\n        'amount': fields.float('Amount'),\n        'credit_debit': fields.char('Debit / Credit', size=10),\n        'invoice_number': fields.related('invoice_id', 'number', type='char', string='Number', size=64),\n        'order_id': fields.many2one('sale.order', 'Sale Order'),\n    }\n    \n    _defaults = {\n        'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'account.invoice', context=c),\n    }\n\n    def action_payment_release(self, cr, uid, ids, context=None):\n        record = self.browse(cr,uid,ids[0],context)\n        \n        if not context:\n            context = {}\n        if 'wf_release' in context:\n            self.write(cr,uid,ids[0],{'payment_release': context['wf_release']}) \n        if 'wf_release' in context and context['wf_release'] == 2 and 'PayPal' in record.payment_type.name and record.order_id:\n            # PayPal configuration\n            done_payment_obj = self.pool.get('wf_done.payments')\n            if record.invoice_id.company_id.wf_api_type == 'classic' and record.invoice_id.company_id.wf_api_username:\n                refund = False\n                paypal_api = PayPalInterface(API_USERNAME= record.invoice_id.company_id.wf_api_username,\n                    API_PASSWORD= record.invoice_id.company_id.wf_api_password,\n                    API_SIGNATURE= record.invoice_id.company_id.wf_api_signature,\n                    DEBUG_LEVEL=0,\n                    API_ENVIRONMENT= record.invoice_id.company_id.wf_api_enviroment,\n                    HTTP_TIMEOUT=30)\n\n                transactions = paypal_api._call('TransactionSearch',\n                    STARTDATE= record.order_id.create_date,\n                    TRANSACTIONID= record.order_id.wf_transactions_id,\n                    STATUS='Success')\n                if transactions:\n                    refund = paypal_api._call('RefundTransaction',\n                        TRANSACTIONID= record.order_id.wf_transactions_id,\n                        INVOICEID=record.invoice_id.id,\n                        REFUNDTYPE='Partial',\n                        AMT=record.amount,\n                        CURRENCYCODE=record.currency_id.name,\n                        NOTE='Erstattung entsprechend der Gutschrift Nr. %s vom %s Mit frdl. 
Gruss Ihre AWN Crew' %(record.invoice_number, record.invoice_id.date_invoice))\n if refund:\n done_payment_obj.create(cr,uid,{\n 'credit_debit': 'H',\n 'partner_ref': record.invoice_id.partner_id.ref,\n 'date_payed': datetime.date.today(),\n 'payment_type': record.payment_type.id,\n 'transid': refund['REFUNDTRANSACTIONID'],\n 'company_id': record.invoice_id.company_id.id,\n 'currency_id': record.currency_id.id,\n 'invoice_name': record.invoice_number,\n 'invoice_id': record.invoice_id.id,\n 'order_id': record.order_id.id,\n 'partner_id': record.invoice_id.partner_id.id,\n 'amount': record.amount\n })\n self.write(cr,uid,ids[0],{'date_done': datetime.date.today()})\n \n\n if record.invoice_id.company_id.wf_api_type == 'rest' and record.invoice_id.company_id.wf_paypal_client:\n logging.basicConfig(level=logging.INFO)\n paypalrestsdk.configure({\n \"mode\": record.invoice_id.company_id.wf_paypal_mode, # sandbox or live\n \"client_id\": record.invoice_id.company_id.wf_paypal_client,\n \"client_secret\": record.invoice_id.company_id.wf_paypal_secret})\n sale = paypalrestsdk.Sale.find(record.order_id.wf_transactions_id)\n \n # Make Refund API call\n # Set amount only if the refund is partial\n refund = sale.refund({\n \"amount\": {\n \"total\": record.amount,\n \"currency\": record.invoice_id.currency_id.name } })\n\n # Check refund status\n if refund.success():\n print(\"Refund[%s] Success\"%(refund.id))\n self.write(cr,uid,ids[0],{'date_done': datetime.date.today()})\n else:\n print(\"Unable to Refund\")\n print(refund.error)\n\n\n return True\n\n def action_payment_release_2(self, cr, uid, ids, context=None):\n context.update({'wf_release':2})\n return self.action_payment_release(cr,uid,ids, context)\n\n def action_payment_release_3(self, cr, uid, ids, context=None):\n context.update({'wf_release':3})\n return self.action_payment_release(cr,uid,ids, context)\n\nwf_refund_payment()\n\n","sub_path":"wf_refund_payment/wf_refund_payment.py","file_name":"wf_refund_payment.py","file_ext":"py","file_size_in_byte":6772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"73506038","text":"import tensorflow as tf\nimport numpy as np\nimport sys, os\nimport cv2\nfrom sklearn.utils import shuffle\nfrom scipy.misc import imread\nfrom scipy.misc import imresize\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom sklearn.preprocessing import OneHotEncoder\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom skimage.transform import resize\n\ndef tf_relu(x): return tf.nn.relu(x)\ndef d_tf_relu(x): return tf.cast(tf.greater(x,0),tf.float32)\ndef tf_elu(x): return tf.nn.elu(x)\ndef d_tf_elu(x): return tf.cast(tf.greater(x,0),tf.float32) + tf_elu(tf.cast(tf.less_equal(x,0),tf.float32)*x)\n\nnp.random.seed(676)\ntf.set_random_seed(6787)\n\n# data\ndata_location = \"../../Dataset/Semanticdataset100/image/\"\ntrain_data = [] # create an empty list\nfor dirName, subdirList, fileList in sorted(os.walk(data_location)):\n for filename in fileList:\n if \".jpg\" in filename.lower():\n train_data.append(os.path.join(dirName,filename))\n\ndata_location = \"../../Dataset/Semanticdataset100/ground-truth/\"\ntrain_data_gt = [] # create an empty list\nfor dirName, subdirList, fileList in sorted(os.walk(data_location)):\n for filename in fileList:\n if \".png\" in filename.lower() :\n train_data_gt.append(os.path.join(dirName,filename))\n\ntrain_images = np.zeros(shape=(850,256,256,3))\ntrain_labels = np.zeros(shape=(850,256,256,1))\n\nfor 
file_index in range(len(train_data)):\n train_images[file_index,:,:] = imresize(imread(train_data[file_index]),(256,256))\n train_labels[file_index,:,:] = np.expand_dims(imresize(imread(train_data_gt[file_index],mode='F',flatten=True),(256,256)),axis=2)\n\ntrain_images[:,:,:,0] = (train_images[:,:,:,0] - train_images[:,:,:,0].min(axis=0)) / (train_images[:,:,:,0].max(axis=0) - train_images[:,:,:,0].min(axis=0))\ntrain_images[:,:,:,1] = (train_images[:,:,:,1] - train_images[:,:,:,1].min(axis=0)) / (train_images[:,:,:,1].max(axis=0) - train_images[:,:,:,1].min(axis=0))\ntrain_images[:,:,:,2] = (train_images[:,:,:,2] - train_images[:,:,:,2].min(axis=0)) / (train_images[:,:,:,2].max(axis=0) - train_images[:,:,:,2].min(axis=0))\ntrain_labels = (train_labels - train_labels.min(axis=0)) / (train_labels.max(axis=0) - train_labels.min(axis=0))\n\n# class\nclass cnn():\n \n def __init__(self,k,inc,out):\n self.w = tf.Variable(tf.random_normal([k,k,inc,out],stddev=0.005))\n self.m,self.v = tf.Variable(tf.zeros_like(self.w)),tf.Variable(tf.zeros_like(self.w))\n\n def feedforward(self,input,maxpool=True):\n self.input = input\n self.layer = tf.nn.conv2d(input,self.w,strides=[1,1,1,1],padding='SAME')\n self.layer = tf_relu(self.layer) \n if maxpool: return self.layer\n else: return tf.nn.max_pool(self.layer,ksize=[1,2,2,1],strides=[1,2,2,1],padding='VALID')\n\n def backprop(self,gradient):\n grad_part_1 = gradient \n grad_part_2 = d_tf_relu(self.layer) \n grad_part_3 = self.input\n\n grad_middle = grad_part_1 * grad_part_2\n\n grad = tf.nn.conv2d_backprop_filter(\n input = grad_part_3,\n filter_sizes = self.w.shape,out_backprop = grad_middle,\n strides=[1,1,1,1],padding='SAME'\n )\n\n grad_pass = tf.nn.conv2d_backprop_input(\n input_sizes = [batch_size] + list(grad_part_3.shape[1:]),\n filter= self.w,out_backprop = grad_middle,\n strides=[1,1,1,1],padding='SAME'\n )\n\n grad_update = []\n grad_update.append(tf.assign(self.m,tf.add(beta1*self.m, (1-beta1)*grad)))\n grad_update.append(tf.assign(self.v,tf.add(beta2*self.v, (1-beta2)*grad**2)))\n \n m_hat = self.m / (1-beta1)\n v_hat = self.v / (1-beta2)\n\n adam_middel = learning_rate/(tf.sqrt(v_hat) + adam_e)\n grad_update.append(tf.assign(self.w,tf.subtract(self.w,tf.multiply(adam_middel,m_hat))))\n\n return grad_pass,grad_update \n\nclass DeCNN():\n \n def __init__(self,k,inc,out):\n self.w = tf.Variable(tf.random_normal([k,k,inc,out],stddev=0.005))\n self.m,self.v = tf.Variable(tf.zeros_like(self.w)),tf.Variable(tf.zeros_like(self.w))\n\n def feedforward(self,input,de_stride = 2,shape=2):\n self.input = input\n output_shape = self.input.shape[2].value * shape\n self.layer = tf.nn.conv2d_transpose(input,self.w,output_shape=[batch_size,output_shape,output_shape,60],strides=[1,de_stride,de_stride,1],padding='SAME')\n return self.layer\n\n def backprop(self,gradient):\n half_shape = gradient.shape[1].value//2\n grad_part_1 = gradient \n grad_part_3 = self.input\n\n grad = tf.nn.conv2d_backprop_filter(\n input = grad_part_3,\n filter_sizes = self.w.shape,out_backprop = grad_part_1,\n strides=[1,1,1,1],padding='SAME'\n )\n\n grad_pass = tf.nn.conv2d_backprop_input(\n input_sizes = [batch_size] + list(grad_part_3.shape[1:]),\n filter= self.w,out_backprop = grad_part_1,\n strides=[1,1,1,1],padding='SAME'\n )\n\n grad_update = []\n grad_update.append(tf.assign(self.m,tf.add(beta1*self.m, (1-beta1)*grad)))\n grad_update.append(tf.assign(self.v,tf.add(beta2*self.v, (1-beta2)*grad**2)))\n \n m_hat = self.m / (1-beta1)\n v_hat = self.v / (1-beta2)\n\n adam_middel = 
learning_rate/(tf.sqrt(v_hat) + adam_e)\n grad_update.append(tf.assign(self.w,tf.subtract(self.w,tf.multiply(adam_middel,m_hat))))\n\n return grad_pass,grad_update \n\n# hyper\nnum_epoch = 100\nbatch_size = 50\nlearning_rate = 0.000001\n\n# define class\nl1a = cnn(3,3,64)\nl1b = cnn(3,64,128)\n\nl2a = cnn(3,128,128)\nl2b = cnn(3,128,256)\n\nl3a = cnn(3,256,256)\nl3b = cnn(3,256,256)\nl3c = cnn(3,256,512)\n\nl4a = cnn(3,512,512)\nl4b = cnn(3,512,512)\nl4c = cnn(3,512,512)\n\nl5a = cnn(3,512,512)\nl5b = cnn(3,512,512)\nl5c = cnn(3,512,512)\n\nl6 = cnn(7,512,4096)\nl7 = cnn(1,4096,4096)\n\nl8a = DeCNN(4,60,4096)\nl8b = cnn(1,60,60)\n\nl9a = DeCNN(4,60,60)\nl9b = cnn(3,60,60)\n\nl10 = DeCNN(16,60,60)\n\nl11a = cnn(1,60,12)\nl11b = cnn(1,12,1)\n\n# graph\nx = tf.placeholder(shape=[None,256,256,3],dtype=tf.float32)\ny = tf.placeholder(shape=[None,256,256,1],dtype=tf.float32)\n\nlayer1a = l1a.feedforward(x)\nlayer1b = l1b.feedforward(layer1a,False) \n\nlayer2a = l2a.feedforward(layer1b) \nlayer2b = l2b.feedforward(layer2a,False) \n\nlayer3a = l3a.feedforward(layer2b) \nlayer3b = l3b.feedforward(layer3a) \nlayer3c = l3c.feedforward(layer3b,False) \n\nlayer4a = l4a.feedforward(layer3c) \nlayer4b = l4b.feedforward(layer4a) \nlayer4c = l4c.feedforward(layer4b,False) \n\nlayer5a = l5a.feedforward(layer4c) \nlayer5b = l5b.feedforward(layer5a) \nlayer5c = l5c.feedforward(layer5b,False) \n\nlayer6 = tf.nn.dropout(l6.feedforward(layer5c),0.5)\nlayer7 = tf.nn.dropout(l7.feedforward(layer6),0.5)\n\nlayer8a = l8a.feedforward(layer7)\nlayer8b = l8b.feedforward(layer8a)\n\nlayer9a = l9a.feedforward(layer8b)\nlayer9b = l9b.feedforward(layer9a)\n\nlayer10 = l10.feedforward(layer9b,de_stride=8,shape=8)\n\nlayer11a = l11a.feedforward(layer10)\nlayer11b = l11b.feedforward(layer11a)\n\ncost = tf.reduce_mean(tf.square(layer11b-y) * 0.5)\n\n# -- auto train --\nauto_train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# session\nwith tf.Session() as sess: \n\n sess.run(tf.global_variables_initializer())\n\n for iter in range(num_epoch):\n for current_batch_index in range(0,len(train_images),batch_size):\n current_batch = train_images[current_batch_index:current_batch_index+batch_size,:,:,:]\n current_label = train_labels[current_batch_index:current_batch_index+batch_size,:,:,:]\n sess_results = sess.run([cost,auto_train],feed_dict={x:current_batch,y:current_label})\n print(' Iter: ', iter, \" Cost: %.32f\"% sess_results[0],end='\\r')\n print('\\n-----------------------')\n train_images,train_labels = shuffle(train_images,train_labels)\n\n if iter % 2 == 0:\n test_example = train_images[:batch_size,:,:,:]\n test_example_gt = train_labels[:batch_size,:,:,:]\n sess_results = sess.run([layer11b],feed_dict={x:test_example})\n\n sess_results = sess_results[0][0,:,:,:]\n test_example = test_example[0,:,:,:]\n test_example_gt = test_example_gt[0,:,:,:]\n\n plt.figure()\n plt.imshow(np.squeeze(test_example),cmap='gray')\n plt.axis('off')\n plt.title('epoch_'+str(iter)+'Original Image')\n plt.savefig('train_change/epoch_'+str(iter)+\"a_Original_Image.png\")\n\n plt.figure()\n plt.imshow(np.squeeze(test_example_gt),cmap='gray')\n plt.axis('off')\n plt.title('epoch_'+str(iter)+'Ground Truth Mask')\n plt.savefig('train_change/epoch_'+str(iter)+\"b_Original_Mask.png\")\n\n plt.figure()\n plt.imshow(np.squeeze(sess_results),cmap='gray')\n plt.axis('off')\n plt.title('epoch_'+str(iter)+'Generated Mask')\n plt.savefig('train_change/epoch_'+str(iter)+\"c_Generated_Mask.png\")\n\n plt.figure()\n 
plt.imshow(np.multiply(np.squeeze(test_example),np.squeeze(test_example_gt)),cmap='gray')\n            plt.axis('off')\n            plt.title('epoch_'+str(iter)+\"Ground Truth Overlay\")\n            plt.savefig('train_change/epoch_'+str(iter)+\"d_Original_Image_Overlay.png\")\n\n            plt.figure()\n            plt.axis('off')\n            plt.imshow(np.multiply(np.squeeze(test_example),np.squeeze(sess_results)),cmap='gray')\n            plt.title('epoch_'+str(iter)+\"Generated Overlay\")\n            plt.savefig('train_change/epoch_'+str(iter)+\"e_Generated_Image_Overlay.png\")\n\n            plt.close('all')\n\n        # save images on the last epoch\n        if iter == num_epoch - 1:\n            train_images,train_labels = shuffle(train_images,train_labels)\n            \n            for current_batch_index in range(0,len(train_images),batch_size):\n                current_batch = train_images[current_batch_index:current_batch_index+batch_size,:,:,:]\n                current_label = train_labels[current_batch_index:current_batch_index+batch_size,:,:,:]\n                sess_results = sess.run([layer11b],feed_dict={x:current_batch,y:current_label})\n\n                plt.figure()\n                plt.imshow(np.squeeze(current_batch[0,:,:,:]),cmap='gray')\n                plt.axis('off')\n                plt.title(str(current_batch_index)+\"a_Original Image\")\n                plt.savefig('gif/'+str(current_batch_index)+\"a_Original_Image.png\")\n\n                plt.figure()\n                plt.imshow(np.squeeze(current_label[0,:,:,:]),cmap='gray')\n                plt.axis('off')\n                plt.title(str(current_batch_index)+\"b_Original Mask\")\n                plt.savefig('gif/'+str(current_batch_index)+\"b_Original_Mask.png\")\n                \n                plt.figure()\n                plt.imshow(np.squeeze(sess_results[0][0,:,:,:]),cmap='gray')\n                plt.axis('off')\n                plt.title(str(current_batch_index)+\"c_Generated Mask\")\n                plt.savefig('gif/'+str(current_batch_index)+\"c_Generated_Mask.png\")\n\n                plt.figure()\n                plt.imshow(np.multiply(np.squeeze(current_batch[0,:,:,:]),np.squeeze(current_label[0,:,:,:])),cmap='gray')\n                plt.axis('off')\n                plt.title(str(current_batch_index)+\"d_Original Image Overlay\")\n                plt.savefig('gif/'+str(current_batch_index)+\"d_Original_Image_Overlay.png\")\n                \n                plt.figure()\n                plt.imshow(np.multiply(np.squeeze(current_batch[0,:,:,:]),np.squeeze(sess_results[0][0,:,:,:])),cmap='gray')\n                plt.axis('off')\n                plt.title(str(current_batch_index)+\"e_Generated Image Overlay\")\n                plt.savefig('gif/'+str(current_batch_index)+\"e_Generated_Image_Overlay.png\")\n\n                plt.close('all')\n# -- end code --","sub_path":"NeuralNetwork/Saliency/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":11907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"125429497","text":"from django.conf import settings\nfrom django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom youngman import views\n\nurlpatterns = patterns('',\n    # Examples:\n    # url(r'^$', 'youngrak.views.home', name='home'),\n    # url(r'^blog/', include('blog.urls')),\n\n    url(r'^admin/', include(admin.site.urls)),\n    url(r'^login/', views.login, name='login'),\n    url(r'^logout/', views.logout, name='logout'),\n    url(r'^$', views.home)\n\n)\n\n\nif settings.DEBUG:\n    # static files (images, css, javascript, etc.)\n    urlpatterns += patterns('',\n        (r'^media/(?P<path>.*)$', 'django.views.static.serve', {\n        'document_root': settings.MEDIA_ROOT}))","sub_path":"youngrak/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"568305703","text":"\"\"\"\n\nExplanation\nGiven the number of bags,\nreturn the minimum capacity of each bag,\nso that we can put items one by one into all
bags.\n\nWe binary search the final result.\nThe left bound is max(A),\nThe right bound is sum(A).\nFor example, with A = [1, 2, 3, 4, 5] and D = 2, we search capacities in [5, 15].\n\n\nMore Good Binary Search Problems\nHere are some similar binary search problems.\nAlso find more explanations.\nGood luck and have fun.\n\nFind the Smallest Divisor Given a Threshold\nDivide Chocolate\nCapacity To Ship Packages In N Days\nKoko Eating Bananas\nMinimize Max Distance to Gas Station\nSplit Array Largest Sum\n\"\"\"\n\n\nclass Solution:\n    def shipWithinDays(self, weights, D: int) -> int:\n        left = max(weights)\n        right = sum(weights)  # upper bound: ship everything in one day\n        while left < right:\n            mid = (left + right) // 2\n            count = 1\n            summ = 0\n            for w in weights:\n                if summ + w > mid:\n                    count += 1\n                    summ = 0\n                summ += w\n\n            if count > D:\n                left = mid + 1\n            else:\n                right = mid\n\n        return left\n\n","sub_path":"LeetcodeNew/python2/LC_1011.py","file_name":"LC_1011.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"500623832","text":"#! pytest\nimport attr\nimport pytest\nfrom eth_utils import to_checksum_address\nfrom tldeploy.identity import (\n    Identity,\n    MetaTransaction,\n    UnexpectedIdentityContractException,\n    deploy_identity_implementation,\n    deploy_identity_proxy_factory,\n    deploy_proxied_identity,\n)\nfrom web3 import Web3\n\nfrom relay.blockchain.delegate import (\n    Delegate,\n    DelegationFees,\n    GasPriceMethod,\n    InvalidDelegationFeesException,\n    InvalidMetaTransactionException,\n)\n\n\n@pytest.fixture(scope=\"session\")\ndef delegate_address(web3):\n    return web3.eth.coinbase\n\n\n@pytest.fixture(scope=\"session\")\ndef delegate_config():\n    return {\n        \"gas_price_method\": GasPriceMethod.FIXED,\n        \"gas_price\": 2_000_000_000,\n        \"max_gas_limit\": 1_000_000,\n    }\n\n\n@pytest.fixture(scope=\"session\")\ndef delegate(\n    web3, delegate_address, contracts, proxy_factory, currency_network, delegate_config\n):\n    identity_contract_abi = contracts[\"Identity\"][\"abi\"]\n    base_fee = 0\n    return Delegate(\n        web3,\n        delegate_address,\n        identity_contract_abi,\n        [proxy_factory.address],\n        delegation_fees=[\n            DelegationFees(\n                base_fee=base_fee, currency_network_of_fees=currency_network.address\n            )\n        ],\n        config=delegate_config,\n    )\n\n\n@pytest.fixture(scope=\"session\")\ndef delegate_with_one_fees(\n    web3, delegate_address, contracts, proxy_factory, currency_network, delegate_config\n):\n    identity_contract_abi = contracts[\"Identity\"][\"abi\"]\n    base_fee = 1\n    return Delegate(\n        web3,\n        delegate_address,\n        identity_contract_abi,\n        [proxy_factory.address],\n        delegation_fees=[\n            DelegationFees(\n                base_fee=base_fee, currency_network_of_fees=currency_network.address\n            )\n        ],\n        config=delegate_config,\n    )\n\n\n@pytest.fixture(scope=\"session\")\ndef owner(accounts):\n    return accounts[0]\n\n\n@pytest.fixture(scope=\"session\")\ndef owner_key(account_keys):\n    return account_keys[0]\n\n\n@pytest.fixture(scope=\"session\")\ndef proxy_factory(web3):\n\n    return deploy_identity_proxy_factory(web3=web3)\n\n\n@pytest.fixture(scope=\"session\")\ndef identity_implementation(web3):\n\n    return deploy_identity_implementation(web3=web3)\n\n\n@pytest.fixture(scope=\"session\")\ndef signature_of_owner_on_implementation(\n    owner_key, identity_implementation, proxy_factory\n):\n    abi_types = [\"bytes1\", \"bytes1\", \"address\", \"address\"]\n    to_hash = [\"0x19\", \"0x00\", proxy_factory.address, identity_implementation.address]\n    to_sign = Web3.solidityKeccak(abi_types, to_hash)\n    return 
owner_key.sign_msg_hash(to_sign).to_bytes()\n\n\n@pytest.fixture()\ndef identity_contract(\n web3,\n proxy_factory,\n identity_implementation,\n signature_of_owner_on_implementation,\n owner,\n):\n identity_contract = deploy_proxied_identity(\n web3,\n proxy_factory.address,\n identity_implementation.address,\n signature_of_owner_on_implementation,\n )\n web3.eth.sendTransaction(\n {\"to\": identity_contract.address, \"from\": owner, \"value\": 1_000_000}\n )\n\n return identity_contract\n\n\n@pytest.fixture()\ndef identity(identity_contract, owner_key):\n return Identity(contract=identity_contract, owner_private_key=owner_key)\n\n\n@pytest.fixture()\ndef chain_id(web3):\n return int(web3.eth.chainId)\n\n\n@pytest.fixture()\ndef build_meta_transaction(chain_id):\n \"\"\"Adds chain_id and build meta-tx from given args\"\"\"\n\n def f(*args, **kwargs):\n return MetaTransaction(*args, **kwargs, chain_id=chain_id)\n\n return f\n\n\n@pytest.fixture()\ndef signed_meta_transaction(identity, owner_key, accounts, build_meta_transaction):\n meta_transaction = build_meta_transaction(\n from_=identity.address,\n to=accounts[2],\n value=123,\n data=(1234).to_bytes(10, byteorder=\"big\"),\n nonce=1,\n )\n\n return meta_transaction.signed(owner_key)\n\n\ndef meta_transaction_for_currency_network_transfer(\n currency_network, identity, source, destination\n):\n\n trustlines = [(source, destination, 100, 100)]\n currency_network.setup_trustlines(trustlines)\n meta_transaction = currency_network.transfer_meta_transaction(\n 100, 0, [source, destination]\n )\n meta_transaction = identity.filled_and_signed_meta_transaction(meta_transaction)\n\n return meta_transaction\n\n\ndef test_delegate_meta_transaction(delegate, identity, web3, signed_meta_transaction):\n \"\"\"\"\n Tests that a transaction is sent by the delegate upon receiving a meta-transaction.\n \"\"\"\n\n tx_hash = delegate.send_signed_meta_transaction(signed_meta_transaction)\n tx = web3.eth.getTransaction(tx_hash)\n\n assert tx[\"from\"] == web3.eth.coinbase\n assert to_checksum_address(tx[\"to\"]) == identity.address\n\n\ndef test_delegated_transaction_trustlines_flow(\n currency_network, identity, delegate, accounts\n):\n \"\"\"\"\n Tests that the relaying of the metatransaction by the relay server works on a currency network contract\n \"\"\"\n\n source = identity.address\n destination = accounts[3]\n\n meta_transaction = meta_transaction_for_currency_network_transfer(\n currency_network, identity, source, destination\n )\n\n delegate.send_signed_meta_transaction(meta_transaction)\n\n assert currency_network.get_balance(source, destination) == -100\n\n\ndef test_deploy_identity(\n currency_network,\n delegate,\n accounts,\n proxy_factory,\n owner_key,\n identity_implementation,\n signature_of_owner_on_implementation,\n):\n \"\"\"\n Tests that the deployment of an identity contract by the relay server delegate works\n by using it to execute a meta-transaction\n \"\"\"\n\n identity_contract_address = delegate.deploy_identity(\n proxy_factory.address,\n identity_implementation.address,\n signature_of_owner_on_implementation,\n )\n\n destination = accounts[3]\n\n meta_transaction = currency_network.transfer_meta_transaction(\n 100, 0, [identity_contract_address, destination]\n )\n signed_meta_transaction = attr.evolve(\n meta_transaction, from_=identity_contract_address, nonce=0\n ).signed(owner_key)\n\n currency_network.setup_trustlines(\n [(identity_contract_address, destination, 100, 100)]\n )\n 
delegate.send_signed_meta_transaction(signed_meta_transaction)\n assert currency_network.get_balance(identity_contract_address, destination) == -100\n\n\ndef test_next_nonce(\n delegate, identity_contract, accounts, owner_key, build_meta_transaction\n):\n\n source = identity_contract.address\n destination = accounts[3]\n\n meta_transaction = build_meta_transaction(\n from_=source, to=destination, value=123, nonce=delegate.calc_next_nonce(source)\n )\n signed_meta_transaction = meta_transaction.signed(owner_key)\n\n assert delegate.calc_next_nonce(source) == 1\n delegate.send_signed_meta_transaction(signed_meta_transaction)\n assert delegate.calc_next_nonce(source) == 2\n\n meta_transaction = build_meta_transaction(\n from_=source, to=destination, value=123, nonce=delegate.calc_next_nonce(source)\n )\n signed_meta_transaction = meta_transaction.signed(owner_key)\n\n assert delegate.calc_next_nonce(source) == 2\n delegate.send_signed_meta_transaction(signed_meta_transaction)\n assert delegate.calc_next_nonce(source) == 3\n\n\ndef test_delegated_transaction_invalid_signature(\n identity, delegate, accounts, account_keys, build_meta_transaction\n):\n to = accounts[2]\n value = 1000\n\n meta_transaction = build_meta_transaction(\n from_=identity.address, to=to, value=value, nonce=0\n ).signed(account_keys[3])\n\n with pytest.raises(InvalidMetaTransactionException):\n delegate.send_signed_meta_transaction(meta_transaction)\n\n\ndef test_delegated_transaction_invalid_nonce(identity, delegate, accounts):\n to = accounts[2]\n value = 1000\n\n meta_transaction1 = identity.filled_and_signed_meta_transaction(\n MetaTransaction(to=to, value=value, nonce=1)\n )\n meta_transaction2 = identity.filled_and_signed_meta_transaction(\n MetaTransaction(to=to, value=value, nonce=1)\n )\n\n delegate.send_signed_meta_transaction(meta_transaction1)\n\n with pytest.raises(InvalidMetaTransactionException):\n delegate.send_signed_meta_transaction(meta_transaction2)\n\n\ndef test_delegated_transaction_invalid_identity_contract(\n delegate, accounts, account_keys, build_meta_transaction\n):\n from_ = accounts[1]\n to = accounts[2]\n value = 1000\n\n meta_transaction = build_meta_transaction(\n from_=from_, to=to, value=value, nonce=0\n ).signed(account_keys[3])\n\n with pytest.raises(UnexpectedIdentityContractException):\n delegate.send_signed_meta_transaction(meta_transaction)\n\n\ndef test_meta_transaction_fees_valid(\n delegate_with_one_fees, signed_meta_transaction, owner_key\n):\n \"\"\"\n Check that no exception is raised when validating a valid meta_transaction\n \"\"\"\n\n delegation_fees = delegate_with_one_fees.calculate_fees_for_meta_transaction(\n signed_meta_transaction\n )[0]\n meta_transaction_with_fees = attr.evolve(\n signed_meta_transaction,\n base_fee=delegation_fees.base_fee,\n currency_network_of_fees=delegation_fees.currency_network_of_fees,\n )\n signed_meta_transaction_with_fees = meta_transaction_with_fees.signed(owner_key)\n delegate_with_one_fees.validate_meta_transaction_fees(\n signed_meta_transaction_with_fees\n )\n\n\ndef test_meta_transaction_fees_invalid_value(\n delegate_with_one_fees, signed_meta_transaction, owner_key\n):\n \"\"\"\n Check that an exception is raised when validating an invalid meta_transaction\n \"\"\"\n\n delegation_fees = delegate_with_one_fees.calculate_fees_for_meta_transaction(\n signed_meta_transaction\n )[0]\n\n wrong_fees_value = 0\n assert delegation_fees.base_fee >= wrong_fees_value\n\n meta_transaction_with_fees = attr.evolve(\n signed_meta_transaction,\n 
base_fee=wrong_fees_value,\n currency_network_of_fees=delegation_fees.currency_network_of_fees,\n )\n signed_meta_transaction_with_fees = meta_transaction_with_fees.signed(owner_key)\n\n with pytest.raises(InvalidDelegationFeesException):\n delegate_with_one_fees.validate_meta_transaction_fees(\n signed_meta_transaction_with_fees\n )\n\n\ndef test_meta_transaction_fees_invalid_network(\n delegate_with_one_fees, signed_meta_transaction, owner_key\n):\n \"\"\"\n Check that an exception is raised when validating an invalid meta_transaction\n \"\"\"\n\n delegation_fees = delegate_with_one_fees.calculate_fees_for_meta_transaction(\n signed_meta_transaction\n )[0]\n\n wrong_network = signed_meta_transaction.from_\n assert delegation_fees.currency_network_of_fees != wrong_network\n\n meta_transaction_with_fees = attr.evolve(\n signed_meta_transaction,\n base_fee=delegation_fees.base_fee,\n currency_network_of_fees=wrong_network,\n )\n signed_meta_transaction_with_fees = meta_transaction_with_fees.signed(owner_key)\n\n with pytest.raises(InvalidDelegationFeesException):\n delegate_with_one_fees.validate_meta_transaction_fees(\n signed_meta_transaction_with_fees\n )\n\n\ndef test_meta_transaction_fee_recipient_invalid(\n delegate_with_one_fees, signed_meta_transaction, owner_key\n):\n \"\"\"\n Check that an exception is raised when validating an meta_transaction with invalid fee recipient\n \"\"\"\n\n delegation_fees = delegate_with_one_fees.calculate_fees_for_meta_transaction(\n signed_meta_transaction\n )[0]\n\n wrong_recipient = signed_meta_transaction.from_\n assert delegation_fees.fee_recipient != wrong_recipient\n\n meta_transaction_with_fees = attr.evolve(\n signed_meta_transaction,\n base_fee=delegation_fees.base_fee,\n currency_network_of_fees=delegation_fees.currency_network_of_fees,\n fee_recipient=wrong_recipient,\n )\n signed_meta_transaction_with_fees = meta_transaction_with_fees.signed(owner_key)\n with pytest.raises(InvalidDelegationFeesException):\n delegate_with_one_fees.validate_meta_transaction_fees(\n signed_meta_transaction_with_fees\n )\n\n\n@pytest.mark.parametrize(\n \"gas_price_config, gas_price\",\n [\n (\n {\"gas_price_method\": GasPriceMethod.FIXED, \"gas_price\": 2_000_000_000},\n 2_000_000_000,\n ),\n # Assumes the default gas price of the chain is 1\n ({\"gas_price_method\": GasPriceMethod.RPC}, 1),\n (\n {\n \"gas_price_method\": GasPriceMethod.BOUND,\n \"min_gas_price\": 1_000_000_000,\n \"max_gas_price\": 5_000_000_000,\n },\n 1_000_000_000,\n ),\n (\n {\n \"gas_price_method\": GasPriceMethod.BOUND,\n \"min_gas_price\": 0,\n \"max_gas_price\": 0,\n },\n 0,\n ),\n ],\n)\ndef test_gas_pricing(delegate, delegate_config, gas_price_config, gas_price):\n config = dict(**delegate_config)\n config.update(gas_price_config)\n\n delegate.config = config\n\n assert delegate._calculate_gas_price(MetaTransaction()) == gas_price\n","sub_path":"tests/chain_integration/test_delegate.py","file_name":"test_delegate.py","file_ext":"py","file_size_in_byte":13234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"327152036","text":"from django.db import models\n\nclass ChessBoard:\n def __init__(self):\n # the ChessBoard model won't persist in the database\n managed = False\n self.fen = ''\n self.moved = ''\n \n # return a text representation of the chess board\n def text(self):\n board_text = \"\"\n # convert fen to rows and columns\n rows = self.__fen_to_rows();\n row_divider = \" ---------------------------------\"\n for 
row_index, row in rows.items():\n            # print row divider\n            board_text += row_divider + \"\\n\"\n            # print row label\n            board_text += str(row_index) + \" \"\n            # print columns in this row\n            for col_index, col in row.items():\n                board_text += \"| \" + col + \" \"\n            board_text += \"|\\n\"\n        # print last row divider\n        board_text += row_divider + \"\\n\"\n        # print column labels\n        board_text += \"    a   b   c   d   e   f   g   h  \\n\"\n        return board_text\n\n    # move a chess piece\n    def move(self, start_end):\n        # get column and row indexes for start and end positions\n        start_col = start_end[0:1]\n        start_row = start_end[1:2]\n        end_col = start_end[2:3]\n        end_row = start_end[3:4]\n        # convert fen to rows and columns\n        rows = self.__fen_to_rows()\n        # get the chess piece at start position\n        piece = rows[int(start_row)][start_col]\n        # replace it with a space\n        rows[int(start_row)][start_col] = \" \"\n        # put the piece in the end position, replacing whatever is there\n        rows[int(end_row)][end_col] = piece\n        # update 'moved'\n        self.moved = start_end\n        # update this ChessBoard's fen\n        self.__rows_to_fen(rows)\n\n    # convert FEN notation into rows and columns (dictionary)\n    # rows are indexed from 8 to 1, columns within rows are indexed a-h\n    def __fen_to_rows(self):\n        # ignore everything after the first space\n        rows = self.fen.split(\" \", 1)[0]\n        # split into rows divided by /\n        rows = rows.split(\"/\")\n        # for each row create column indexes a-h\n        rows = [self.__create_col_indexes(row) for row in rows]\n        # create row indexes, counting down from 8 to 1\n        rows = {8-k: v for k, v in enumerate(rows)}\n        return rows\n    \n    # convert a given row (list) into a dictionary with a-h indexes\n    # private function used by fen_to_rows()\n    def __create_col_indexes(self, row):\n        new_row = \"\"\n        # add n spaces when there is a number\n        for char in row:\n            if(char.isdigit()):\n                new_row += \" \" * int(char)\n            else:\n                new_row += char\n        # create a-h indexes using chr(n+97)\n        new_row = {chr(n+97): v for n, v in enumerate(new_row)}\n        return new_row\n    \n    # convert rows and columns (dictionary) into fen notation\n    def __rows_to_fen(self, rows):\n        fen = \"\"\n        row_num = 0\n        for row_index, row in rows.items():\n            row_num += 1\n            if(row_num > 1):\n                fen += \"/\"\n            spaces = 0\n            for col_index, col in row.items():\n                # convert spaces to an integer (# of consecutive spaces)\n                if(col == \" \"):\n                    spaces += 1\n                if(col != \" \" and spaces > 0):\n                    fen += str(spaces)\n                    fen += col\n                    spaces = 0\n                elif(col != \" \"):\n                    fen += col\n                elif(col_index == \"h\"):\n                    fen += str(spaces)\n                    spaces = 0\n        # add end of FEN string back to the end\n        end_of_fen = self.fen.split(\" \", 1)[1]\n        self.fen = fen + ' ' + end_of_fen\n\n","sub_path":"byucodechallenge/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"573287951","text":"# coding = utf-8\nfrom PyQt5.QtWidgets import *\nfrom uis.ui_webchatmain import Ui_Form\nfrom PyQt5.QtGui import *\n\n\nclass WidChatMain(QWidget):\n    def __init__(self, chat_):\n        super().__init__()\n        self.chat = chat_\n        self.ui_main = Ui_Form()\n        self.ui_main.setupUi(self)\n\n\n        #self.chat.sign_coming_msg.connect(self.show_msg)\n\n        # set up the list model\n        self.model = QStandardItemModel()\n        self.ui_main.listView.setModel(self.model)\n\n        # wire up signal handlers\n        self.ui_main.listView.clicked.connect(self.select_user)\n        # send the message on button click\n        self.ui_main.pushButton.clicked.connect(self.send_msg)\n\n    def select_user(self, index):\n        # print(index, type(index))\n        # # get the current user\n        # 
self.current_user = []\n        # row = index.row()\n        # data1 = index.data()\n        # print(data1)\n        # data2 = self.model.item(row).data()\n        # print(data2)\n        # self.current_user.append(data1)\n        row = index.row()\n        self.current_user = self.model.item(row).data()\n\n    def show_msg(self):\n        pass\n\n    def send_msg(self):\n        # get the text of the message\n        msg = self.ui_main.lineEdit.text()\n        # send it via the helper class\n        self.chat.send_msg(self.current_user, msg)\n\n\n\n    def show_user_list(self):\n        # fetch the friend list via the helper class\n        lst_users = self.chat.get_friends()\n        print(lst_users)\n        # show the list in the list view\n        for user_ in lst_users:\n            user_name = user_['UserName']\n            nick_name = user_['NickName']\n            icon_head = QIcon('imgs/user.jpg')\n            item_ = QStandardItem(icon_head, nick_name)\n            item_.setData(user_name)\n            self.model.appendRow(item_)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"uis/widchatmain.py","file_name":"widchatmain.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"179850170","text":"from django.urls import path\nfrom . import views\n\napp_name='shop'\n\nurlpatterns=[\n    path('',views.home,name='home'),\n    path('/',views.detail_product,name='detail'),\n    path('category/',views.home, name='category')\n]","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"367239707","text":"import json\r\nimport django\r\ndjango.setup()\r\nfrom collections import OrderedDict\r\nfrom sefaria.utils.talmud import section_to_daf\r\nfrom parsing_utilities.util import getGematria, he_char_ord, he_ord, inv_gematria\r\nfrom rif_utils import path, tags_map\r\n\r\ninv_he_char = {}\r\nfor key in he_char_ord:\r\n    inv_he_char[he_char_ord[key]] = key\r\n\r\ndef num_to_gem(num):\r\n    return getGematria(inv_he_char[int(num)])\r\n\r\ndef gem_to_num(gem):\r\n    if gem == 0: return 0\r\n    else: return he_ord(inv_gematria[gem])\r\n\r\ndef next_gem(gem, cycle=True):\r\n    if gem == 0: return 1\r\n    num = gem_to_num(gem) + 1\r\n    if num > 22:\r\n        if cycle:\r\n            next = num % 22\r\n        else:\r\n            raise ValueError('No letter after 22')\r\n    else:\r\n        next = num_to_gem(num)\r\n    return next\r\n\r\ndef open_tags(masechet):\r\n    with open(path+'/tags/tags_{}.json'.format(masechet)) as fp:\r\n        data = json.load(fp)\r\n    return data\r\n\r\ndef save_tags(tags, masechet):\r\n    old = open_tags(masechet)\r\n    old.update(tags)\r\n    with open(path+'/tags/tags_{}.json'.format(masechet), 'w') as fp:\r\n        json.dump(old, fp)\r\n\r\ndef mefaresh_tags(tags_dict, mefaresh):\r\n    return {tag: tags_dict[tag] for tag in tags_dict if tags_dict[tag]['referred text'] == mefaresh}\r\n\r\ndef page_tags(tags_dict, page: int):\r\n    return {tag: tags_dict[tag] for tag in tags_dict if int(tag[1:4]) == page}\r\n\r\ndef tags_by_criteria(tags_or_masechet, key=lambda x: True, value=lambda x: True):\r\n    if type(tags_or_masechet) == str:\r\n        tags = open_tags(tags_or_masechet)\r\n    else:\r\n        tags = tags_or_masechet\r\n    return {tag: tags[tag] for tag in tags if key(tag) and value(tags[tag])}\r\n\r\ndef pages_range(tags_dict):\r\n    pages = [int(tag[1:4]) for tag in tags_dict]\r\n    return range(min(pages), max(pages)+1)\r\n\r\ndef generate_mefaresh_and_page(tags_dict, mefarshim=range(0,10)):\r\n    for page in pages_range(tags_dict):\r\n        page_dict = page_tags(tags_dict, page)\r\n        for mefaresh in mefarshim:\r\n            yield mefaresh_tags(page_dict, mefaresh)\r\n\r\ndef out_of_orders(lis: list):\r\n    if len(lis) < 2: return []\r\n    out_indexes = []\r\n    
gap = 2\r\n exp_lis = [0] + lis + [next_gem(lis[-2])] #last element for checking the original last element\r\n for n, num in enumerate(lis):\r\n if gem_to_num(exp_lis[n+gap]) - gem_to_num(exp_lis[n+gap-2]) == 1 and num != exp_lis[n+gap] and num != exp_lis[n+gap-2]:\r\n if n == 0 and num == 1: continue #in this case it can be just omission of the first\r\n if n == len(lis)-1 and num >= lis[-2]: continue # when > it can be omission before the last (especiall when Ran has one tag with gap of tags in Rashi). When = we can't know what tag is the redundant\r\n out_indexes.append(n)\r\n exp_lis.pop(n+gap-1)\r\n gap -= 1\r\n if out_indexes == []: return []\r\n elif out_indexes[-1] == len(lis) - 1 and gem_to_num(lis[-1]) - gem_to_num(lis[-2]) == 2: #in that case it can be a missing tag\r\n return out_indexes[:-1]\r\n return out_indexes\r\n\r\ndef exclude_redundant(tags_dict):\r\n #if tag is out of ordered, changes the 'refered text' to unknown and returns the new dict\r\n for subdict in generate_mefaresh_and_page(tags_dict, mefarshim=range(1,10)):\r\n tags_list = sorted(subdict.items(), key = lambda x: int(x[0]))\r\n for redundant in out_of_orders([tag[1]['gimatric number'] for tag in tags_list]):\r\n tags_dict[tags_list[redundant][0]]['referred text'] = 0\r\n return tags_dict\r\n\r\ndef add_from_unknowns(tags_dict):\r\n for page in pages_range(tags_dict):\r\n page_dict = page_tags(tags_dict, page)\r\n for mefaresh in range(1,10):\r\n m_dict = mefaresh_tags(page_dict, mefaresh)\r\n m_dict = OrderedDict(sorted(m_dict.items(), key=lambda x: int(x[0])))\r\n prev, prev_key = 0, 0\r\n for tag in m_dict:\r\n if tags_dict[tag]['gimatric number'] == num_to_gem((prev + 1) % 22 + 1):\r\n optionals = []\r\n unk_dict = mefaresh_tags(page_dict, 0)\r\n for key, value in unk_dict.items():\r\n if value['style'] == tags_dict[tag]['style'] and value['gimatric number'] == num_to_gem((prev) % 22 + 1) and prev_key < int(key) < int(tag):\r\n optionals.append(key)\r\n if len(optionals) == 1:\r\n tags_dict[optionals[0]]['referred text'] = mefaresh\r\n if tags_dict[tag]['gimatric number'] != 0:\r\n prev = gem_to_num(tags_dict[tag]['gimatric number'])\r\n prev_key = int(tag)\r\n return tags_dict\r\n\r\ndef check_sequence(tags_dict, masechet):\r\n with open(f'{path}/tags_exceptions.txt', encoding='utf-8') as fp:\r\n exceptions = fp.read()\r\n exceptions = exceptions.split('&')\r\n exceptions = [e for e in exceptions if masechet in e]\r\n if exceptions: exceptions = exceptions[0]\r\n tags_dict = OrderedDict(sorted(tags_dict.items(), key=lambda x: int(x[0])))\r\n prev = 0\r\n check = True\r\n for tag in tags_dict:\r\n if tag[0] == '2' and tags_dict[tag]['num_in_page'] == 1: #jump between if and mefaresh is allowed (for Rashi)\r\n if tags_dict[tag]['gimatric number'] <= prev:\r\n p = 'page {} last tag in Rif is {} and first in mefaresh is {} (mefaresh={})'.format(\r\n section_to_daf(int(tag[1:4])+1), prev, tags_dict[tag]['gimatric number'], tags_dict[tag]['referred text'])\r\n if p not in exceptions:\r\n print(p)\r\n elif tags_dict[tag]['gimatric number'] != next_gem(prev):\r\n if prev == 0:\r\n p = 'page {} first tag is {} tag {} (mefaresh={}) {}'.format(\r\n section_to_daf(int(tag[1:4])+1), tags_dict[tag]['gimatric number'], tags_dict[tag]['original'], tags_dict[tag]['referred text'], tag)\r\n if p not in exceptions:\r\n print(p)\r\n else:\r\n p = 'page {}: {} comes after {} in {} tag {} (mefaresh={}) {}'.format(\r\n section_to_daf(int(tag[1:4])+1), tags_dict[tag]['gimatric number'], prev, 'Rif' if tag[0]=='1' else 'SG' if 
tag[0]=='3' else 'mefaresh', tags_dict[tag]['original'], tags_dict[tag]['referred text'], tag)\r\n            if p not in exceptions:\r\n                print(p)\r\n        prev = tags_dict[tag]['gimatric number']\r\n\r\ndef ad_hocs(tags, masechet):\r\n    for tag in tags_by_criteria(tags, value=lambda x: x['referred text']==0 and x['style']==2):\r\n        tags[tag]['referred text'] = 5\r\n    if masechet == 'Pesachim':\r\n        tags['20341000']['referred text'] = 2\r\n    elif masechet == 'Sukkah':\r\n        tags['30040100']['referred text'] = 3\r\n    elif masechet == 'Beitzah':\r\n        tags['30049900']['referred text'] = 3\r\n        tags['30059900']['referred text'] = 3\r\n    elif masechet == 'Yevamot':\r\n        tags['40480000']['referred text'] = 3\r\n    elif masechet == 'Kiddushin':\r\n        tags['30089900']['referred text'] = 3\r\n    elif masechet == 'Bava Kamma':\r\n        tags['20431000']['referred text'] = 2\r\n    return tags\r\n\r\ndef execute():\r\n    for masechet in tags_map:\r\n        print(masechet)\r\n        data = open_tags(masechet)\r\n        data = exclude_redundant(data)\r\n        data = add_from_unknowns(data)\r\n        for subdict in generate_mefaresh_and_page(data, range(1,11)):\r\n            check_sequence(subdict, masechet)\r\n        data = ad_hocs(data, masechet)\r\n        save_tags(data, masechet)\r\n\r\nif __name__ == '__main__':\r\n    execute()\r\n","sub_path":"sources/newRif/tags_fix_and_check.py","file_name":"tags_fix_and_check.py","file_ext":"py","file_size_in_byte":7557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"208520540","text":"import urllib2\nfrom django.contrib import admin\nfrom craigslist.models import Poll, Choice\n\n\nfrom urllib import urlencode\nfrom xml.dom.minidom import parseString\nimport xml.etree.ElementTree as ET\n\nclass CraigsListReader():\n\n    RSS = 'http://%s.craigslist.org/search/cta?autoMakeModel=%s&catAbb=cta&minAsk=%s&maxAsk=%s&s=0&format=rss'\n\n    def fix_spaces(self, string):\n        return string.replace(' ', '%20').lower()\n\n    def hit_site(self, min_price=5000, max_price=15000, **kwargs):\n        opener = urllib2.build_opener()\n        city_results = []\n        make_models = kwargs.get('make_models')\n        for city in kwargs.get('cities'):\n            list = []\n            for make_model in make_models:\n                kwargs = {'city': city, 'make_model': make_model}\n                url = self.build_craigslist_url(min_price, max_price, **kwargs)\n                f = opener.open(url)\n                resp = f.read()\n                list.append({'make_model': make_model, 'listings':self.parse_xml(resp)})\n\n            city_results.append({'city':city, 'listings': list})\n\n        return city_results\n\n    def build_craigslist_url(self, min_price=0, max_price=5000, **kwargs):\n        city = (kwargs.get('city') if kwargs.get('city') else 'ames').lower()\n        make_model = self.fix_spaces(kwargs.get('make_model') if kwargs.get('make_model') else 'honda accord')\n        return self.RSS % (city, make_model, min_price, max_price)\n\n    def parse_xml(self, resp):\n        \"\"\"\n        return a list of {link:'', description:'', title''}\n        \"\"\"\n        dom = parseString(resp)\n        xml_tags = dom.getElementsByTagName('item')\n        entries = []\n        for tag in xml_tags:\n            node = {}\n            for kids in [tags for tags in tag.childNodes if not tags.nodeType == 3]:\n                # Not sure what dc is there for but I don't want it\n                if 'dc' in kids.nodeName:\n                    continue\n                for n in kids.childNodes:\n                    n.nodeValue = n.nodeValue.replace('$', '$')\n                    node[kids.nodeName] = n.nodeValue\n            entries.append(node)\n\n        return entries","sub_path":"base/rss_reader.py","file_name":"rss_reader.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"111866569","text":"#!/usr/bin/python3\n\"\"\"\nTask 100 Fabric script (based on the file 3-deploy_web_static.py)\nthat deletes out-of-date archives, using the function do_clean\ndeploy\n\"\"\"\n\nfrom fabric.api import *\nfrom datetime import datetime\nfrom os.path import isfile\n\nenv.user = 'ubuntu'\nenv.hosts = ['35.231.61.116', '54.197.26.225']\n\n\ndef do_pack():\n \"\"\" Generate a .tgz archive from the contents of the web_static folder \"\"\"\n time = datetime.now()\n name = 'web_static_' + str(time.year) + str(time.month) + str(time.day)\n name = name + str(time.hour) + str(time.minute) + str(time.second) + '.tgz'\n local('mkdir -p versions')\n archive = local('tar -cvzf versions/{} web_static'.format(name))\n if archive.failed:\n return None\n return 'versions/{}'.format(name)\n\n\ndef do_deploy(archive_path):\n \"\"\" Distribute an archive to the web servers \"\"\"\n if not isfile(archive_path):\n return False\n put(archive_path, '/tmp/')\n archive = archive_path.replace('.tgz', '')\n archive = archive.replace('versions/', '')\n run('mkdir -p /data/web_static/releases/{}/'.format(archive))\n run('tar -xzf /tmp/{}.tgz -C /data/web_static/releases/{}/'\n .format(archive, archive))\n run('rm /tmp/{}.tgz'.format(archive))\n run('mv /data/web_static/releases/{}/web_static/* '.format(archive) +\n '/data/web_static/releases/{}/'.format(archive))\n run('rm -rf /data/web_static/releases/{}/web_static'.format(archive))\n run('rm -rf /data/web_static/current')\n run('ln -s /data/web_static/releases/{}/ /data/web_static/current'\n .format(archive))\n print('New version deployed!')\n return True\n\n\ndef deploy():\n \"\"\" Create and distribute an archive to the web servers \"\"\"\n archive_path = do_pack()\n if archive_path is None:\n return False\n return do_deploy(archive_path)\n\n\ndef do_clean(number=0):\n \"\"\" Deletes out-of-date archives \"\"\"\n\n try:\n number = int(number)\n except:\n return None\n\n if number < 0:\n return None\n\n number = 2 if (number == 0 or number == 1) else (number + 1)\n\n with lcd(\"./versions\"):\n local('ls -t | tail -n +{:d} | xargs rm -rf --'.\n format(number))\n\n with cd(\"/data/web_static/releases\"):\n run('ls -t | tail -n +{:d} | xargs rm -rf --'.\n format(number))\n","sub_path":"100-clean_web_static.py","file_name":"100-clean_web_static.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"382025328","text":"from tf_agents.policies import tf_policy\nfrom tf_agents.trajectories import policy_step\n\nclass PlayerPolicy(tf_policy.Base):\n def __init__(self, time_step_spec, action_spec, name=None):\n super(PlayerPolicy, self).__init__(time_step_spec, action_spec)\n\n self._last_action = None\n self._action_max_repeat = 10\n self._action_repeat_counter = 0\n\n self._policy_info = ()\n \n def _action(self, time_step, policy_state, seed):\n if self._last_action is None:\n self._action_repeat_counter = 0\n try:\n # print('Take action from {}'.format(self.action_spec))\n action = int(input())\n # print('Your action is {}'.format(action))\n except:\n action = 0\n pass\n self._last_action = action\n else:\n action = self._last_action\n self._action_repeat_counter += 1\n if self._action_repeat_counter == self._action_max_repeat:\n self._last_action = None\n return policy_step.PolicyStep(action, policy_state, 
self._policy_info)\n","sub_path":"examples/rab_example/player_policy/player_policy.py","file_name":"player_policy.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"227175442","text":"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport tensorflow.keras.layers as layers\n\nclass Linear(layers.Layer):\n def __init__(self, idx, alpha, beta, input_dim=32):\n super(Linear, self).__init__()\n initializer = tf.random_normal_initializer(0., 0.02)\n initializer0 = tf.zeros_initializer()\n self.v = tf.Variable(initial_value=initializer(shape=(1, input_dim), dtype='float32'),\n trainable=True, name='tru/v/'+idx)\n self.mu = tf.Variable(initial_value=initializer0(shape=(1, input_dim), dtype='float32'),\n trainable=True, name='tru/mu/'+idx)\n # training hyper-parameters\n self.alpha = alpha\n self.beta = beta\n # mean, eigenvalue and trace for each mini-batch\n self.mu_of_visit = 0\n self.eigenvalue = 0.\n self.trace = 0.\n\n def call(self, x, mask, training):\n norm_v = self.v / (tf.norm(self.v) + 1e-8)\n norm_v_t = tf.transpose(norm_v, [1, 0])\n num_of_visit = tf.reduce_sum(mask)\n\n if training and num_of_visit > 1:\n # use only the visiting samples\n index = tf.where(tf.greater(mask[:, 0], tf.constant(0.)))\n index_not = tf.where(tf.equal(mask[:, 0], tf.constant(0.)))\n x_sub = tf.gather_nd(x, index) - tf.stop_gradient(self.mu)\n x_not = tf.gather_nd(x, index_not)\n x_sub_t = tf.transpose(x_sub, [1, 0])\n\n # compute the covariance matrix, eigenvalue, and the trace\n covar = tf.matmul(x_sub_t, x_sub) / num_of_visit\n eigenvalue = tf.reshape(tf.matmul(tf.matmul(norm_v, covar), norm_v_t), [])\n trace = tf.linalg.trace(covar)\n # compute the route loss\n # print(tf.exp(-self.alpha * eigenvalue), self.beta * trace)\n route_loss = tf.exp(-self.alpha * eigenvalue) + self.beta * trace\n uniq_loss = -tf.reduce_mean(tf.square(tf.matmul(x_sub, norm_v_t))) + \\\n tf.reduce_mean(tf.square(tf.matmul(x_not, norm_v_t)))\n # compute mean and response for this batch\n self.mu_of_visit = tf.reduce_mean(x_sub, axis=0, keepdims=True)\n self.eigenvalue = eigenvalue\n self.trace = trace\n x -= tf.stop_gradient(self.mu_of_visit)\n route_value = tf.matmul(x, norm_v_t)\n else:\n self.mu_of_visit = self.mu\n self.eigenvalue = 0.\n self.trace = 0.\n x -= self.mu\n route_value = tf.matmul(x, norm_v_t)\n route_loss = 0.\n uniq_loss = 0.\n\n return route_value, route_loss, uniq_loss\n\nclass Downsample(tf.keras.Model):\n def __init__(self, filters, size, padding='SAME', apply_batchnorm=True):\n super(Downsample, self).__init__()\n self.apply_batchnorm = apply_batchnorm\n initializer = tf.random_normal_initializer(0., 0.02)\n filters = int(filters)\n self.conv1 = layers.Conv2D(filters,\n (size, size),\n strides=2,\n padding=padding,\n kernel_initializer=initializer,\n use_bias=False)\n if self.apply_batchnorm:\n self.batchnorm = tf.keras.layers.BatchNormalization()\n\n def call(self, x, training):\n x = self.conv1(x)\n if self.apply_batchnorm:\n x = self.batchnorm(x, training=training)\n x = tf.nn.leaky_relu(x)\n return x\n\nclass Upsample(tf.keras.Model):\n def __init__(self, filters, size, apply_dropout=False):\n super(Upsample, self).__init__()\n self.apply_dropout = apply_dropout\n initializer = tf.random_normal_initializer(0., 0.02)\n filters = int(filters)\n self.up_conv = tf.keras.layers.Conv2DTranspose(filters,\n (size, size),\n 
strides=2,\n padding='same',\n kernel_initializer=initializer,\n use_bias=False)\n self.batchnorm = tf.keras.layers.BatchNormalization()\n if self.apply_dropout:\n self.dropout = tf.keras.layers.Dropout(0.5)\n\n def call(self, x, training):\n x = self.up_conv(x)\n x = self.batchnorm(x, training=training)\n if self.apply_dropout:\n x = self.dropout(x, training=training)\n x = tf.nn.leaky_relu(x)\n return x\n\nclass Conv(tf.keras.Model):\n def __init__(self, filters, size, stride=1, activation=True, padding='SAME', apply_batchnorm=True):\n super(Conv, self).__init__()\n self.apply_batchnorm = apply_batchnorm\n self.activation = activation\n initializer = tf.random_normal_initializer(0., 0.02)\n filters = int(filters)\n self.conv1 = layers.Conv2D(filters,\n (size, size),\n strides=stride,\n padding=padding,\n kernel_initializer=initializer,\n use_bias=False)\n if self.apply_batchnorm:\n self.batchnorm = layers.BatchNormalization()\n\n def call(self, x, training):\n x = self.conv1(x)\n if self.apply_batchnorm:\n x = self.batchnorm(x, training=training)\n if self.activation:\n x = tf.nn.leaky_relu(x)\n return x\n\nclass Dense(tf.keras.Model):\n def __init__(self, filters, activation=True, apply_batchnorm=True, apply_dropout=False):\n super(Dense, self).__init__()\n self.apply_batchnorm = apply_batchnorm\n self.activation = activation\n self.apply_dropout = apply_dropout\n initializer = tf.random_normal_initializer(0., 0.02)\n filters = int(filters)\n self.dense = layers.Dense(filters,\n kernel_initializer=initializer,\n use_bias=False)\n if self.apply_batchnorm:\n self.batchnorm = layers.BatchNormalization()\n if self.apply_dropout:\n self.dropout = tf.keras.layers.Dropout(0.3)\n\n def call(self, x, training):\n x = self.dense(x)\n if self.apply_batchnorm:\n x = self.batchnorm(x, training=training)\n if self.activation:\n x = tf.nn.leaky_relu(x)\n if self.apply_dropout:\n x = self.dropout(x, training=training)\n return x\n\nclass CRU(tf.keras.Model):\n\n def __init__(self, filters, size=3, stride=2, apply_batchnorm=True):\n super(CRU, self).__init__()\n self.apply_batchnorm = apply_batchnorm\n self.stride = stride\n initializer = tf.random_normal_initializer(0., 0.02)\n filters = int(filters)\n\n self.conv1 = layers.Conv2D(filters,\n (size, size),\n strides=1,\n padding='SAME',\n kernel_initializer=initializer,\n use_bias=False)\n self.conv2 = layers.Conv2D(filters,\n (size, size),\n strides=1,\n padding='SAME',\n kernel_initializer=initializer,\n use_bias=False)\n self.conv3 = layers.Conv2D(filters,\n (size, size),\n strides=1,\n padding='SAME',\n kernel_initializer=initializer,\n use_bias=False)\n self.conv4 = layers.Conv2D(filters,\n (size, size),\n strides=1,\n padding='SAME',\n kernel_initializer=initializer,\n use_bias=False)\n\n self.batchnorm1 = tf.keras.layers.BatchNormalization()\n self.batchnorm2 = tf.keras.layers.BatchNormalization()\n self.batchnorm3 = tf.keras.layers.BatchNormalization()\n self.batchnorm4 = tf.keras.layers.BatchNormalization()\n\n def call(self, x, training):\n # first residual block\n _x = self.conv1(x)\n _x = self.batchnorm1(_x, training=training)\n _x = tf.nn.leaky_relu(_x)\n _x = self.conv2(_x)\n _x = self.batchnorm2(_x, training=training)\n _x = x + _x\n x = tf.nn.leaky_relu(_x)\n\n # second residual block\n _x = self.conv3(x)\n _x = self.batchnorm3(_x, training=training)\n _x = tf.nn.leaky_relu(_x)\n _x = self.conv4(_x)\n _x = self.batchnorm4(_x, training=training)\n _x = x + _x\n x = tf.nn.leaky_relu(_x)\n\n if self.stride > 1:\n x = tf.nn.max_pool(x, 3, 2, 
padding='SAME')\n return x\n\nclass TRU(tf.keras.Model):\n\n def __init__(self, filters, idx, alpha=1e-3, beta=1e-4, size=3, apply_batchnorm=True):\n super(TRU, self).__init__()\n self.apply_batchnorm = apply_batchnorm\n # variables\n self.conv1 = Downsample(filters, size)\n self.conv2 = Downsample(filters, size)\n self.conv3 = Downsample(filters, size)\n self.flatten = layers.Flatten()\n self.project = Linear(idx, alpha, beta, input_dim=2048)\n\n\n def call(self, x, mask, training):\n # Downsampling\n x_small = self.conv1(x, training=training)\n depth = 0\n if x_small.shape[1] > 16:\n x_small = self.conv2(x_small, training=training)\n depth += 1\n if x_small.shape[1] > 16:\n x_small = self.conv3(x_small, training=training)\n depth += 1\n x_small_shape = x_small.shape\n x_flatten = self.flatten(tf.nn.avg_pool(x_small, ksize=3, strides=2, padding='SAME'))\n\n # PCA Projection\n route_value, route_loss, uniq_loss = self.project(x_flatten, mask, training=training)\n\n # Generate the splitting mask\n mask_l = mask * tf.cast(tf.greater_equal(route_value, tf.constant(0.)), tf.float32)\n mask_r = mask * tf.cast(tf.less(route_value, tf.constant(0.)), tf.float32)\n\n return [mask_l, mask_r], route_value, [route_loss, uniq_loss]\n\nclass SFL(tf.keras.Model):\n\n def __init__(self, filters, size=3, apply_batchnorm=True):\n super(SFL, self).__init__()\n self.apply_batchnorm = apply_batchnorm\n # depth map\n self.cru1 = CRU(filters, size, stride=1)\n self.conv1 = Conv(2, size, activation=False, apply_batchnorm=False)\n\n # class\n self.conv2 = Downsample(filters*1, size)\n self.conv3 = Downsample(filters*1, size)\n self.conv4 = Downsample(filters*2, size)\n self.conv5 = Downsample(filters*4, 4, padding='VALID')\n self.flatten = layers.Flatten()\n self.fc1 = Dense(256)\n self.fc2 = Dense(1, activation=False, apply_batchnorm=False)\n\n self.dropout = tf.keras.layers.Dropout(0.3)\n\n def call(self, x, training):\n # depth map branch\n xd = self.cru1(x)\n xd = self.conv1(xd)\n dmap = tf.nn.sigmoid(xd)\n # class branch\n x = self.conv2(x) # 16*16*32\n x = self.conv3(x) # 8*8*64\n x = self.conv4(x) # 4*4*128\n x = self.conv5(x) # 1*1*256\n x = self.flatten(x)\n x = self.dropout(x, training=training)\n x = self.fc1(x)\n cls = self.fc2(x)\n return dmap, cls\n\n############################################################\n# Deep Tree Network (DTN)\n############################################################\n\nclass DTN(tf.keras.models.Model):\n def __init__(self, filters):\n super(DTN, self).__init__()\n \n TRU_PARAMETERS = {\n \"alpha\": 1e-3,\n \"beta\": 1e-2,\n \"mu_update_rate\": 1e-3,\n }\n \n layer = [1, 2, 4, 8, 16]\n self.conv1 = Conv(filters, 5, apply_batchnorm=False)\n # CRU\n self.cru0 = CRU(filters)\n self.cru1 = CRU(filters)\n self.cru2 = CRU(filters)\n self.cru3 = CRU(filters)\n self.cru4 = CRU(filters)\n self.cru5 = CRU(filters)\n self.cru6 = CRU(filters)\n # TRU\n alpha = TRU_PARAMETERS['alpha']\n beta = TRU_PARAMETERS['beta']\n self.tru0 = TRU(filters, '1', alpha, beta)\n self.tru1 = TRU(filters, '2', alpha, beta)\n self.tru2 = TRU(filters, '3', alpha, beta)\n self.tru3 = TRU(filters, '4', alpha, beta)\n self.tru4 = TRU(filters, '5', alpha, beta)\n self.tru5 = TRU(filters, '6', alpha, beta)\n self.tru6 = TRU(filters, '7', alpha, beta)\n # SFL\n self.sfl0 = SFL(filters)\n self.sfl1 = SFL(filters)\n self.sfl2 = SFL(filters)\n self.sfl3 = SFL(filters)\n self.sfl4 = SFL(filters)\n self.sfl5 = SFL(filters)\n self.sfl6 = SFL(filters)\n self.sfl7 = SFL(filters)\n\n @tf.function\n def call(self, x, 
label, training):\n        if training:\n            mask_spoof = label\n            mask_live = 1 - label\n        else:\n            mask_spoof = tf.ones_like(label)\n            mask_live = tf.zeros_like(label)\n        ''' Tree Level 1 '''\n        x = self.conv1(x, training)\n        x_cru0 = self.cru0(x)\n        x_tru0, route_value0, tru0_loss = self.tru0(x_cru0, mask_spoof, training)\n\n        ''' Tree Level 2 '''\n        x_cru00 = self.cru1(x_cru0, training)\n        x_cru01 = self.cru2(x_cru0, training)\n        x_tru00, route_value00, tru00_loss = self.tru1(x_cru00, x_tru0[0], training)\n        x_tru01, route_value01, tru01_loss = self.tru2(x_cru01, x_tru0[1], training)\n\n        ''' Tree Level 3 '''\n        x_cru000 = self.cru3(x_cru00, training)\n        x_cru001 = self.cru4(x_cru00, training)\n        x_cru010 = self.cru5(x_cru01, training)\n        x_cru011 = self.cru6(x_cru01, training)\n        x_tru000, route_value000, tru000_loss = self.tru3(x_cru000, x_tru00[0], training)\n        x_tru001, route_value001, tru001_loss = self.tru4(x_cru001, x_tru00[1], training)\n        x_tru010, route_value010, tru010_loss = self.tru5(x_cru010, x_tru01[0], training)\n        x_tru011, route_value011, tru011_loss = self.tru6(x_cru011, x_tru01[1], training)\n\n        ''' Tree Level 4 '''\n        map0, cls0 = self.sfl0(x_cru000, training)\n        map1, cls1 = self.sfl1(x_cru000, training)\n        map2, cls2 = self.sfl2(x_cru001, training)\n        map3, cls3 = self.sfl3(x_cru001, training)\n        map4, cls4 = self.sfl4(x_cru010, training)\n        map5, cls5 = self.sfl5(x_cru010, training)\n        map6, cls6 = self.sfl6(x_cru011, training)\n        map7, cls7 = self.sfl7(x_cru011, training)\n        ''' Output '''\n        maps = [map0, map1, map2, map3, map4, map5, map6, map7]\n        clss = [cls0, cls1, cls2, cls3, cls4, cls5, cls6, cls7]\n        route_value = [route_value0, route_value00, route_value01,\n                       route_value000, route_value001, route_value010, route_value011]\n        x_tru0000 = tf.concat([x_tru000[0], mask_live], axis=1)\n        x_tru0001 = tf.concat([x_tru000[1], mask_live], axis=1)\n        x_tru0010 = tf.concat([x_tru001[0], mask_live], axis=1)\n        x_tru0011 = tf.concat([x_tru001[1], mask_live], axis=1)\n        x_tru0100 = tf.concat([x_tru010[0], mask_live], axis=1)\n        x_tru0101 = tf.concat([x_tru010[1], mask_live], axis=1)\n        x_tru0110 = tf.concat([x_tru011[0], mask_live], axis=1)\n        x_tru0111 = tf.concat([x_tru011[1], mask_live], axis=1)\n        leaf_node_mask = [x_tru0000, x_tru0001, x_tru0010, x_tru0011, x_tru0100, x_tru0101, x_tru0110, x_tru0111]\n\n        return maps, clss, route_value, leaf_node_mask\n","sub_path":"xDeepFake-Test/model/dtn.py","file_name":"dtn.py","file_ext":"py","file_size_in_byte":15989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"527234102","text":"#!/usr/bin/python3\n# Description: game where you guess a number between 1 and 100\n# Author: Paul Hivert\n# Date: 23/10/2018\n\nimport random\nimport time\nimport sys\n\nsecret = random.randint(1, 100)\ninputCheck = False\nsuccess = False\nbig = \"too big\\n\"\nsmall = \"too small\\n\"\nwin = \"you win\\n\"\n\nwhile success is not True:\n    with open(\"2a-write.txt\", \"r\") as f:\n        lst = f.readlines()\n        lastLine = lst[len(lst) - 1]\n\n    with open(\"2a-write.txt\", \"w\") as f:\n        f.write(\"bienvenue au jeu du + ou -\\n\")\n\n    while inputCheck is not True:\n        try:\n            lastLine = int(lastLine)\n            inputCheck = True\n\n        except Exception as e:\n            sys.stdout.write(str(e))\n            sys.stdout.write(\" écrivez votre numéro à la fin du fichier 2a-mol.py, se rafraichi toutes les 5 secondes\\n\")\n            time.sleep(5)\n\n            with open(\"2a-write.txt\", \"r\") as f:\n                lst = f.readlines()\n                lastLine = lst[len(lst) - 1]\n\n    if inputCheck is True:\n        inputCheck = False\n\n        if 
lastLine == secret:\n            with open(\"2a-write.txt\", \"w\") as f:\n                f.write(win)\n            success = True\n\n        elif lastLine < secret:\n            with open(\"2a-write.txt\", \"w\") as f:\n                f.write(small)\n\n        elif lastLine > secret:\n            with open(\"2a-write.txt\", \"w\") as f:\n                f.write(big)\n","sub_path":"scripts/2a-mol.py","file_name":"2a-mol.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"61851862","text":"import sys\n\nfrom loguru import logger\nfrom webforces.server.auth import Auth\nfrom webforces.server.interface.dbworker import DBWorker\nfrom webforces.server.mongodbworker import MongoDBWorker\n\n\nclass Core:\n    _instance = None\n    auth: Auth = None\n    _is_done: bool = False\n    db: DBWorker = None\n\n    def __init__(self, validation=False) -> None:\n        \"\"\"Initialization of webforces core\n\n        Parameters:\n            validation (bool): Set to true if validation is running\n        \"\"\"\n        if self._is_done:\n            return\n        self._setup_logging()\n        logger.debug(\"Core init\")\n        self.auth = Auth()\n        self.db = MongoDBWorker(validation)\n        self._is_done = True\n\n    def __new__(cls, *args, **kwargs):\n        if cls._instance is None:\n            cls._instance = super(Core, cls).__new__(cls)\n        return cls._instance\n\n    def _setup_logging(self, level=\"DEBUG\"):\n        logger.remove()\n        fmt = (\n            \"{time:YYYY-MM-DD HH:mm:ss.SSS} | \"\n            \"{level: ^7} | {name}.{function}:{line} - \"\n            \"{message}\")\n        logger.add(sys.stderr, level=level, format=fmt)\n","sub_path":"webforces/server/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"434099929","text":"\n\n# Given an unsorted array nums, reorder it such that nums[0] < nums[1] > nums[2] < nums[3]....\n# You may assume all input has valid answer.\n\n# Follow Up:\n# Can you do it in O(n) time and/or in-place with O(1) extra space?\n\n# Adjacent elements must not be equal\n\n\n\n#class Solution(object):\n#    def wiggleSort(self, nums):\n#        \"\"\"\n#        :type nums: List[int]\n#        :rtype: void Do not return anything, modify nums in-place instead.\n#        \"\"\"\n#        # time O(n log n), space O(n)\n#        temp = sorted(nums)\n#        n = len(nums)\n#        even, odd = (n + 1) >> 1, n\n#        for i in range(n):\n#            if i & 1 == 0:\n#                even -= 1\n#                nums[i] = temp[even]\n#            else:\n#                odd -= 1\n#                nums[i] = temp[odd]\n        \n#class Solution(object):\n#    def wiggleSort(self, nums):\n#        \"\"\"\n#        :type nums: List[int]\n#        :rtype: void Do not return anything, modify nums in-place instead.\n#        \"\"\"   \n#        nums.sort()\n#        mid=(len(nums)-1)>>1\n#        nums[::2],nums[1::2]=nums[mid::-1],nums[:mid:-1] # assignment must be reverse, even=reversed first half, odd=reversed second half\n#        return nums\n\nclass Solution(object):\n    def wiggleSort(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: void Do not return anything, modify nums in-place instead.\n        \"\"\"\n        n = len(nums)\n        snums = sorted(nums)\n        for x in [i for i in range(1,n,2)]+[i for i in range(0,n,2)]:\n            nums[x] = snums.pop()\n        return nums\n    \nif __name__==\"__main__\":\n    print(Solution().wiggleSort([1,2,3,4,5,6]))\n    print(Solution().wiggleSort([3,4,5,6,7]))\n    print(Solution().wiggleSort([4,5,5,6]))\n","sub_path":"324. Wiggle Sort II.py","file_name":"324. 
Wiggle Sort II.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"454820707","text":"import unittest\nfrom zoo import Zoo\nfrom Animal import Animal\n\n\nclass Test_Zoo(unittest.TestCase):\n\n def setUp(self):\n self.zoo = Zoo([], 10, 30)\n self.animal = Animal(\"name\", 18, \"male\", 87, \"species\", 10, \"meat\", 9, 20, 100, 200, 20)\n self.animal1 = Animal(\"name1\", 18, \"female\", 87, \"species\", 10, \"meat\", 9, 20, 100, 200, 20)\n self.zoo.accommodate_animal(self.animal)\n self.zoo.accommodate_animal(self.animal1)\n\n def test_init(self):\n self.assertEqual(self.zoo.animals, [self.animal, self.animal1])\n self.assertEqual(self.zoo.capacity, 10)\n self.assertEqual(self.zoo.budget, 30)\n\n def test_accommodate_animal(self):\n output = [self.animal, self.animal1]\n result = self.zoo.animals\n self.assertEqual(output, result)\n\n def test_daily_incomes(self):\n self.animal2 = Animal(\"name\", 18, \"male\", 87, \"species\", 10, \"meat\", 9, 20, 100, 200, 20)\n output = 120\n result = self.zoo.get_daily_incomes()\n self.assertEqual(output, result)\n\n def test_daily_outcomes(self):\n output = 8\n result = self.zoo.get_daily_outcomes()\n self.assertEqual(output, result)\n\n def test_reproduction(self):\n self.zoo.animal_reproduce()\n self.assertEqual(len(self.zoo.babies), 1)\n\n def test_list_of_pregnants_animal_when_reproduced(self):\n output = [self.animal1]\n self.zoo.animal_reproduce()\n self.assertEqual(output, self.zoo.pregnants)\n\n def test_if_mother_can_reproduce_before_reproduce_ban_expired(self):\n self.zoo.animal_reproduce()\n self.animal1.pregnancy_ban = 6\n self.assertFalse(self.zoo.animal_reproduce())\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"zoo_tests.py","file_name":"zoo_tests.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"646617416","text":"import csv\nimport api\nimport common_functions as cf\n\nelection_id = 'europarl.europa.eu-cz-2009'\n\nn2id = {}\nar = cf.get_all_items(\"options\",where={\"other_identifiers.election_id\":election_id})\nparties = []\nfor p in ar:\n for oi in p['other_identifiers']:\n if oi['election_id'] == election_id:\n n2id[oi['identifier']] = p['identifier']\n parties.append(oi['identifier'])\n\nsummary_items = ['eligibles','attendees','received_ballots','valid_votes']\n\ni = 0\ndata = []\nnames = []\nwith open(\"../data/ep_2009_okrsky.csv\") as infile:\n csv_reader = csv.reader(infile)\n for row in csv_reader:\n item = {}\n j = 0\n for it in row:\n if i == 0:\n names.append(it)\n else:\n item[names[j]] = it\n j = j + 1\n if i > 0:\n data.append(item)\n i = i + 1\n\nresults = []\ncheck = 0\nfor row in data:\n summary = []\n for item in summary_items:\n summary.append({'name':item,'value':int(row[item])})\n counts = []\n for party in parties:\n counts.append({'votes': int(row[party]), 'option_identifier': n2id[party]})\n result = {\n \"election_id\": election_id,\n \"area_id\": row['municipality']+'-'+row['district'],\n 'area_classification': 'district',\n 'summary': summary,\n 'counts': counts\n }\n check = check + int(row['1'])\n #r = api.post(\"results\",result)\n results.append(result)\n #if r['_status'] == 'ERR':\n # print(result['area_id'],r['_issues'])\n #raise(Exception)\n 
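# An illustrative alternative sketch (not part of the original script): the manual header list\n# and per-row index bookkeeping built in the CSV loop above is what csv.DictReader provides\n# directly, assuming the same CSV layout:\n#\n#     with open(\"../data/ep_2009_okrsky.csv\") as infile:\n#         data = list(csv.DictReader(infile))\n#\n# each row then behaves like the dicts assembled in that loop.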
\napi.post(\"results\",results)\n","sub_path":"scripts/result_extractor_ep2009_csv.py","file_name":"result_extractor_ep2009_csv.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"196287946","text":"# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration\n\n\n# ********************* All Tools/Functions for the TriggerEDM **********************\n# Keeping all functions from the original TriggerEDM.py (Run 2 EDM) in this file\n# with this name to not break backwards compatibility\n# Where possible, functions will be adapted to also work with Run 3, they will then be moved\n# to the Run 3 section\n# ***********************************************************************************\n\nfrom TrigEDMConfig.TriggerEDMRun1 import TriggerL2List,TriggerEFList,TriggerResultsRun1List\nfrom TrigEDMConfig.TriggerEDMRun2 import TriggerResultsList,TriggerLvl1List,TriggerIDTruth,TriggerHLTList,EDMDetails,EDMLibraries,TriggerL2EvolutionList,TriggerEFEvolutionList\nfrom TrigEDMConfig.TriggerEDMRun3 import TriggerHLTListRun3, AllowedOutputFormats\n\nfrom AthenaCommon.Logging import logging\nlog = logging.getLogger('TriggerEDM')\n\nimport six\n\n#************************************************************\n#\n# For Run 3\n#\n#************************************************************\n\ndef getTriggerEDMList(key, runVersion):\n    \"\"\"\n    List (Literally Python dict) of trigger objects to be placed with flags:\n    key can be: 'ESD', 'AODSLIM', 'AODFULL', 'DS'\n    additionally for Run 3 the key can be 'AODSMALL' and 'AODLARGE'\n    run can be: '1 (Run1)', '2 (Run2)', '3' (Run 3)\n    \"\"\"\n    if runVersion == 2:\n        if 'SLIM' in key:\n            return getTriggerEDMSlimList(key)\n        else:\n            return getTriggerObjList(key,[TriggerHLTList, TriggerResultsList])\n\n    elif runVersion ==3:\n        if key in AllowedOutputFormats: # AllowedOutputFormats is the entire list of output formats including ESD\n            #if 'SLIM' in key or 'SMALL' in key or 'LARGE' in key : #keeping for reference/potential revert\n            # this keeps only the dynamic variables that have been specified in TriggerEDMRun3\n            return getRun3TrigEDMSlimList(key)\n\n        else:\n            log.warning('Output format: %s is not in list of allowed formats, please check!', key)\n            return getRun3TrigObjList(key, [TriggerHLTListRun3])\n\n    else:\n        return getTriggerObjList(key,[TriggerL2List,TriggerEFList, TriggerResultsRun1List])\n\n\n\ndef getRun3TrigObjProducedInView(theKey, trigEDMList):\n    \"\"\"\n    Run 3 only\n    Finds a given key from within the trigEDMList.\n    Returns true if this collection is produced inside EventViews\n    (Hence, has the special viewIndex Aux decoration applied by steering)\n    \"\"\"\n    import itertools\n    for item in itertools.chain(*trigEDMList):\n        if len(item) < 4:\n            continue\n        if theKey not in item[0]:\n            continue\n        return (\"inViews\" in item[3])\n    return False\n\n\ndef handleRun3ViewContainers( el ):\n    if 'Aux.' in el:\n        # Get equivalent non-aux string (fragile!!!)\n        keyNoAux = el.split('.')[0].replace('Aux','')\n        # Check if this interface container is produced inside a View\n        inView = getRun3TrigObjProducedInView(keyNoAux, [TriggerHLTListRun3])\n        if el.split('.')[1] == '':\n            # Aux lists zero dynamic vars to save ...\n            if inView:\n                # ... but it was produced in a View, so we need to add the viewIndex dynamic aux\n                return el.split('.')[0]+'.viewIndex'\n            else:\n                # ... 
and was not in a View, strip all dynamic\n                return el.split('.')[0]+'.-'\n        else:\n            # Aux lists one or more dynamic vars to save ...\n            if inView:\n                # ... and was produced in a View, so add the viewIndex dynamic as well\n                return el+'.viewIndex'\n            else:\n                # ... and was not produced in a View, keep user-supplied list\n                return el\n    else: # no Aux\n        return el\n\n\ndef getRun3BSList(keys):\n    \"\"\"\n    The keys should contain BS and all the identifiers used for scouting\n    \"\"\"\n\n    from TrigEDMConfig.TriggerEDMRun3 import persistent\n    keys = set(keys[:])\n    collections = []\n    for definition in TriggerHLTListRun3:\n\n        typename,collkey = definition[0].split(\"#\")\n        # normalise collection name and the key (decorations)\n        typename = persistent(typename)\n        collkey = handleRun3ViewContainers( collkey )\n        destination = keys & set(definition[1].split())\n        if len(destination) > 0:\n            collections.append( (typename+\"#\"+collkey, list(destination ) ) )\n    return collections\n\n\ndef getRun3TrigObjList(destination, trigEDMList):\n    \"\"\"\n    Run 3 version\n    Gives back the Python dictionary with the content of ESD/AOD (dst) which can be inserted in OKS.\n    \"\"\"\n    dset = set(destination.split())\n    from collections import OrderedDict\n    toadd = OrderedDict()\n    import itertools\n\n    for item in itertools.chain(*trigEDMList):\n        if item[1] == '': # no output has been defined\n            continue\n\n        confset = set(item[1].split())\n\n        if dset & confset: # intersection of the sets\n            t,k = getTypeAndKey(item[0])\n            colltype = t\n\n            if colltype in toadd:\n                if k not in toadd[colltype]:\n                    toadd[colltype] += [k]\n            else:\n                toadd[colltype] = [k]\n\n    return toadd\n\n\ndef getRun3TrigEDMSlimList(key):\n    \"\"\"\n    Run 3 version\n    Modified EDM list to remove all dynamic variables\n    Requires changing the list to have 'Aux.-'\n    \"\"\"\n    _edmList = getRun3TrigObjList(key,[TriggerHLTListRun3])\n    from collections import OrderedDict\n    output = OrderedDict()\n    for k,v in _edmList.items():\n        newnames = []\n        for el in v:\n            newnames.append( handleRun3ViewContainers( el ) )\n        output[k] = newnames\n    return output\n\n#************************************************************\n#\n# For Run 1 and Run 2 (not modified (so far))\n#\n#************************************************************\ndef getTriggerEDMSlimList(key):\n    \"\"\"\n    Run 2 version\n    Modified EDM list to remove all dynamic variables\n    Requires changing the list to have 'Aux.-'\n    \"\"\"\n    _edmList = getTriggerObjList(key,[TriggerHLTList, TriggerResultsList])\n    output = {}\n    for k,v in _edmList.items():\n        newnames = []\n        for el in v:\n            if 'Aux' in el:\n                newnames+=[el.split('.')[0]+'.-']\n            else:\n                newnames+=[el]\n        output[k] = newnames\n    return output\n\ndef getCategory(s):\n    \"\"\" From name of object in AOD/ESD found by checkFileTrigSize.py, return category \"\"\"\n\n    \"\"\" Clean up object name \"\"\"\n    s = s.strip()\n\n    # To-do\n    # separate the first part of the string at the first '_'\n    # search in EDMDetails for the key corresponding to the persistent value\n    # if a key is found, use this as the first part of the original string\n    # put the string back together\n\n    if s.count('.') : s = s[:s.index('.')]\n    if s.count('::'): s = s[s.index(':')+2:]\n    if s.count('<'): s = s[s.index('<')+1:]\n    if s.count('>'): s = s[:s.index('>')]\n    if s.count('.') : s = s[:s.index('.')]\n    if s.count('Dyn') : s = s[:s.index('Dyn')]\n\n    # containers from Run 1-2 and 3 require different preprocessing\n    # s12 is for Run 1-2, s is for Run 3\n    s12 = s\n\n    if s12.startswith('HLT_xAOD__') or 
s12.startswith('HLT_Rec__') or s12.startswith('HLT_Analysis__') :\n s12 = s12[s12.index('__')+2:]\n s12 = s12[s12.index('_')+1:]\n #if s12.count('.') : s12 = s12[:s12.index('.')]\n s12 = \"HLT_\"+s12\n elif s12.startswith('HLT_'):\n #if s.count('Dyn') : s = s[:s.index('Dyn')]\n if s12.count('_'): s12 = s12[s12.index('_')+1:]\n if s12.count('_'): s12 = s12[s12.index('_')+1:]\n s12 = \"HLT_\"+s12\n\n TriggerListRun1 = TriggerL2List + TriggerEFList + TriggerResultsRun1List\n TriggerListRun2 = TriggerResultsList + TriggerLvl1List + TriggerIDTruth + TriggerHLTList\n TriggerListRun3 = TriggerHLTListRun3\n\n category = ''\n bestMatch = ''\n\n \"\"\" Loop over all objects already defined in lists (and hopefully categorized!!) \"\"\"\n for item in TriggerListRun1+TriggerListRun2:\n t,k = getTypeAndKey(item[0])\n\n \"\"\" Clean up type name \"\"\"\n if t.count('::'): t = t[t.index(':')+2:]\n if t.count('<'): t = t[t.index('<')+1:]\n if t.count('>'): t = t[:t.index('>')]\n if (s12.startswith(t) and s12.endswith(k)) and (len(t) > len(bestMatch)):\n bestMatch = t\n category = item[2]\n\n if k.count('.'): k = k[:k.index('.')]\n if (s12 == k):\n bestMatch = k\n category = item[2]\n\n for item in TriggerListRun3:\n t,k = getTypeAndKey(item[0])\n\n \"\"\" Clean up type name \"\"\"\n if t.count('::'): t = t[t.index(':')+2:]\n if t.count('<'): t = t[t.index('<')+1:]\n if t.count('>'): t = t[:t.index('>')]\n\n if (s.startswith(t) and s.endswith(k)) and (len(t) > len(bestMatch)):\n bestMatch = t\n category = item[2]\n\n if k.count('.'): k = k[:k.index('.')]\n if (s == k):\n bestMatch = k\n category = item[2]\n if category == '': return 'NOTFOUND'\n return category\n\n\n\ndef getTypeAndKey(s):\n \"\"\" From the strings containing type and key of trigger EDM extract type and key\n \"\"\"\n return s[:s.index('#')], s[s.index('#')+1:]\n\ndef keyToLabel(key):\n \"\"\" The key is usually HLT_*, this function returns second part of it or empty string\n \"\"\"\n if '_' not in key:\n return ''\n else:\n return key[key.index('_'):].lstrip('_')\n\ndef getTriggerObjList(destination, lst):\n \"\"\"\n Gives back the Python dictionary with the content of ESD/AOD (dst) which can be inserted in OKS.\n \"\"\"\n dset = set(destination.split())\n\n toadd = {}\n import itertools\n\n for item in itertools.chain(*lst):\n if item[1] == '':\n continue\n confset = set(item[1].split())\n if dset & confset: # intersection of the sets\n t,k = getTypeAndKey(item[0])\n colltype = t\n if 'collection' in EDMDetails[t]:\n colltype = EDMDetails[t]['collection']\n if colltype in toadd:\n if k not in toadd[colltype]:\n toadd[colltype] += [k]\n else:\n toadd[colltype] = [k]\n return InsertContainerNameForHLT(toadd)\n\n\ndef getTrigIDTruthList(dst):\n \"\"\"\n Gives back the Python dictionary with the truth trigger content of ESD/AOD (dst) which can be inserted in OKS.\n \"\"\"\n return getTriggerObjList(dst,[TriggerIDTruth])\n\ndef getLvl1ESDList():\n \"\"\"\n Gives back the Python dictionary with the lvl1 trigger result content of ESD which can be inserted in OKS.\n \"\"\"\n return getTriggerObjList('ESD',[TriggerLvl1List])\n\ndef getLvl1AODList():\n \"\"\"\n Gives back the Python dictionary with the lvl1 trigger result content of AOD which can be inserted in OKS.\n \"\"\"\n return getTriggerObjList('AODFULL',[TriggerLvl1List])\n\n\n\ndef getL2PreregistrationList():\n \"\"\"\n List (Literally Python list) of trigger objects to be preregistered i.e. 
these objects we want in every event for L2\n    \"\"\"\n    l = []\n    for item in TriggerL2List:\n        if len (item[1]) == 0: continue\n        t,k = getTypeAndKey(item[0])\n        if('Aux' in t):\n            continue #we don't want to preregister Aux containers\n        l += [t+\"#\"+keyToLabel(k)]\n    return l\n\ndef getEFPreregistrationList():\n    \"\"\"\n    List (Literally Python list) of trigger objects to be preregistered i.e. these objects we want in every event for EF\n    \"\"\"\n    l = []\n    for item in TriggerEFList:\n        if len (item[1]) == 0: continue\n        t,k = getTypeAndKey(item[0])\n        if('Aux' in t):\n            continue #we don't want to preregister Aux containers\n        l += [t+\"#\"+keyToLabel(k)]\n    return l\n\ndef getHLTPreregistrationList():\n    \"\"\"\n    List (Literally Python list) of trigger objects to be preregistered i.e. these objects we want in every event for merged L2/EF in addition to default L2 and EF\n    \"\"\"\n    l = []\n    for item in TriggerHLTList:\n        if len (item[1]) == 0: continue\n        t,k = getTypeAndKey(item[0])\n        if('Aux' in t):\n            continue #we don't want to preregister Aux containers\n        l += [t+\"#\"+keyToLabel(k)]\n    return l\n\n\ndef getPreregistrationList(version=2):\n    \"\"\"\n    List (Literally Python list) of trigger objects to be preregistered i.e. these objects we want for all levels\n    version can be: '1 (Run1)', '2 (Run2)'\n    \"\"\"\n\n    l=[]\n    if version==2:\n        l = getHLTPreregistrationList()\n    else:\n        l=list(set(getL2PreregistrationList()+getEFPreregistrationList()+getHLTPreregistrationList()))\n    return l\n\n\n\ndef getEFDSList():\n    \"\"\"\n    List (Literally Python list) of trigger objects to be placed in RAW data. i.e. BS after EF\n    \"\"\"\n    l = []\n    for item in TriggerEFList:\n        if 'DS' in item[1].split():\n            t,k = getTypeAndKey(item[0])\n            l += [t+\"#\"+keyToLabel(k)]\n    return l\n\ndef getHLTDSList():\n    \"\"\"\n    List (Literally Python list) of trigger objects to be placed in RAW data. i.e. 
BS after merged L2EF\n    \"\"\"\n    l = []\n    for item in TriggerHLTList:\n        if 'DS' in item[1].split():\n            t,k = getTypeAndKey(item[0])\n            l += [t+\"#\"+keyToLabel(k)]\n    return l\n\ndef getL2BSList():\n    \"\"\"\n    List (Literally Python list) of L2 trigger objects to be placed in output BS\n    \"\"\"\n    l = []\n    for item in TriggerL2List:\n        if 'BS' in item[1]:\n            t,k = getTypeAndKey(item[0])\n            l += [t+\"#\"+keyToLabel(k)]\n    return l\n\ndef getEFBSList():\n    \"\"\"\n    List (Literally Python list) of EF trigger objects to be placed in output BS\n    \"\"\"\n    l = []\n    for item in TriggerEFList:\n        if 'BS' in item[1]:\n            t,k = getTypeAndKey(item[0])\n            l += [t+\"#\"+keyToLabel(k)]\n    return l\n\ndef getHLTBSList():\n    \"\"\"\n    List (Literally Python list) of merged HLT trigger objects to be placed in output BS\n    \"\"\"\n    l = []\n    for item in TriggerHLTList:\n        if 'BS' in item[1]:\n            t,k = getTypeAndKey(item[0])\n            l += [t+\"#\"+keyToLabel(k)]\n    return l\n\ndef getL2BSTypeList():\n    \"\"\" List of L2 types to be read from BS, used by the TP\n    \"\"\"\n    l = []\n    for item in TriggerL2List:\n        t,k = getTypeAndKey(item[0])\n        ctype = t\n        if 'collection' in EDMDetails[t]:\n            ctype = EDMDetails[t]['collection']\n        l += [ctype]\n    return l\n\ndef getEFBSTypeList():\n    \"\"\" List of EF types to be read from BS, used by the TP\n    \"\"\"\n    l = []\n    for item in TriggerEFList:\n        t,k = getTypeAndKey(item[0])\n        ctype = t\n        if 'collection' in EDMDetails[t]:\n            ctype = EDMDetails[t]['collection']\n        l += [ctype]\n    return l\n\ndef getHLTBSTypeList():\n    \"\"\" List of HLT types to be read from BS, used by the TP\n    \"\"\"\n    l = []\n    for item in TriggerHLTList:\n        t,k = getTypeAndKey(item[0])\n        ctype = t\n        if 'collection' in EDMDetails[t]:\n            ctype = EDMDetails[t]['collection']\n        l += [ctype]\n    return l\n\ndef getEFDSTypeList():\n    \"\"\" List of types to be placed in BS after EF\n    \"\"\"\n    l = []\n    for item in TriggerEFList:\n        if 'DS' in item[1].split():\n            t,k = getTypeAndKey(item[0])\n            ctype = t\n            if 'collection' in EDMDetails[t]:\n                ctype = EDMDetails[t]['collection']\n            l += [ctype]\n    return l\n\ndef getHLTDSTypeList():\n    \"\"\" List of types to be placed in BS after L2EF\n    \"\"\"\n    l = []\n    for item in TriggerHLTList:\n        if 'DS' in item[1].split():\n            t,k = getTypeAndKey(item[0])\n            ctype = t\n            if 'collection' in EDMDetails[t]:\n                ctype = EDMDetails[t]['collection']\n            l += [ctype]\n    return l\n\n\ndef getTPList(version=2):\n    \"\"\"\n    Mapping of Transient objects to Persistent during serialization (BS creation)\n    version can be: '1 (Run1)', '2 (Run2)'\n    \"\"\"\n    l = {}\n    if version==2:\n        bslist = getHLTBSTypeList()\n    else:\n        bslist = list(set(getL2BSTypeList() + getEFBSTypeList()))\n    \n    for t,d in six.iteritems (EDMDetails):\n        colltype = t\n        if 'collection' in d:\n            colltype = EDMDetails[t]['collection']\n        if colltype in bslist:\n            l[colltype] = d['persistent']\n    return l\n\n\n#FPP: how to change this for the merged HLT in view of splitting?\ndef getARATypesRenaming():\n    \"\"\"\n    Defines how to rename collection keys in ARA when two types have the same key.\n    i.e. 
TrigRoiDescriptorCollection#HLT\n    and TrigTau#HLT\n    After the remapping they will be named HLT_tau and HLT_roi so are distinct.\n    \"\"\"\n    edm = set(TriggerL2List + TriggerEFList + TriggerHLTList)\n    keys = [ getTypeAndKey(i[0])[1] for i in edm]\n    # find repeating keys\n    tmp = [ i for i in keys if keys.count(i) > 1 ]\n    nonunique = {}\n    for i in tmp:\n        nonunique[i] = 1\n    # nonunique = nonunique.keys()\n\n    # define renames for all objects whose key appeared in nonunique\n    renames = {}\n    for entry in edm:\n        t, key = getTypeAndKey(entry[0])\n        if key in nonunique: # potential problem we have to do something\n\n            if 'typealias' not in EDMDetails[t] or EDMDetails[t]['typealias'] == '':\n                if nonunique[key] == 1:\n                    # First time's ok.\n                    nonunique[key] = t\n                elif nonunique[key] == t:\n                    # Duplicate entry; ok.\n                    continue\n                else:\n                    log.error(\"types/keys will catch %s %s\", t, key)\n                continue\n            else:\n                obj = t\n                if 'collection' in EDMDetails[t]:\n                    obj = EDMDetails[t]['collection']\n\n                # form the branch name\n                bname = key+'_'+EDMDetails[t]['typealias']\n\n                renames[(key, obj)] = bname\n\n    return renames\n\ndef getEDMLibraries():\n    return EDMLibraries\n\ndef InsertContainerNameForHLT(typedict):\n    import re\n    output = {}\n    for k,v in six.iteritems (typedict):\n        newnames = []\n        for el in v:\n            if el.startswith('HLT_') or el == 'HLT':\n                prefixAndLabel = el.split('_',1) #only split on first underscore\n                containername = k if 'Aux' not in k else EDMDetails[k]['parent'] #we want the type in the Aux SG key to be the parent type #104811\n                #maybe this is not needed anymore since we are now versionless with the CLIDs but it's not hurting either\n                containername = re.sub('::','__',re.sub('_v[0-9]+$','',containername))\n                newnames+=['_'.join([prefixAndLabel[0],containername]+([prefixAndLabel[1]] if len(prefixAndLabel) > 1 else []))]\n            else:\n                newnames+=[el]\n        output[k] = newnames\n    return output\n\ndef getEFRun1BSList():\n    \"\"\"\n    List of EF trigger objects that were written to ByteStream in Run 1\n    \"\"\"\n    l = []\n    for item in TriggerEFEvolutionList:\n        if len (item[1]) == 0: continue\n        t,k = getTypeAndKey(item[0])\n        l += [t+\"#\"+keyToLabel(k)]\n    return l\n\ndef getEFRun2EquivalentList():\n    \"\"\"\n    List of Run-2 containers equivalent to Run-1 EF containers\n    \"\"\"\n    l = []\n    for item in TriggerEFEvolutionList:\n        if len (item[1]) == 0: continue\n        t,k = getTypeAndKey(item[1])\n        l += [t+\"#\"+keyToLabel(k)]\n    return l\n\ndef getL2Run1BSList():\n    \"\"\"\n    List of L2 trigger objects that were written to ByteStream in Run 1\n    \"\"\"\n    l = []\n    for item in TriggerL2EvolutionList:\n        if len (item[1]) == 0: continue\n        t,k = getTypeAndKey(item[0])\n        l += [t+\"#\"+keyToLabel(k)]\n    return l\n\ndef getL2Run2EquivalentList():\n    \"\"\"\n    List of Run-2 containers equivalent to Run-1 L2 containers\n    \"\"\"\n    l = []\n    for item in TriggerL2EvolutionList:\n        if len (item[1]) == 0: continue\n        t,k = getTypeAndKey(item[1])\n        l += [t+\"#\"+keyToLabel(k)]\n    return l\n","sub_path":"Trigger/TriggerCommon/TrigEDMConfig/python/TriggerEDM.py","file_name":"TriggerEDM.py","file_ext":"py","file_size_in_byte":20096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"598283873","text":"import datetime, os, logging\nimport StringIO\nimport EXIF  # assumed to be the legacy exif-py module; StringIO and EXIF are both used by TicketInfoHandler below\n\nfrom google.appengine.api import urlfetch\nfrom google.appengine.api import taskqueue\n\nfrom google.appengine.ext import db\nfrom google.appengine.ext import webapp \nfrom google.appengine.ext.webapp import template\n\nfrom django.utils import simplejson \n\nfrom app.model.accounts import 
Accounts\nfrom app.model.place import Place\nfrom app.model.places import Places\nfrom app.model.ticket import Ticket\nfrom app.model.tickets import Tickets\n\nfrom app.system import oauth \nfrom app.system.foursquare import Foursquare \nfrom app.webapp.context import Context\n \n# Handlers \n# --------\n\n# Ticket List\nclass TicketsPage(webapp.RequestHandler):\n def get(self):\n context = Context(self.request)\n if not context.isAuthenticated:\n return context.authenticate(self.response)\n \n tickets = Tickets.loadForAccount(context.account)\n template_values = {\n 'context': context,\n 'tickets': tickets,\n }\n self.response.out.write(template.render(context.template('tickets.html'), template_values))\n \n# Ticket List\nclass GetList(webapp.RequestHandler):\n def get(self):\n context = Context(self.request)\n if not context.isAuthenticated:\n return context.authenticate(self.response)\n \n date = self.request.get(\"date\")\n \n dt = None\n if date != \"\":\n dt = datetime.datetime.strptime(date, \"%d-%m-%Y_%H:%M:%S\")\n\n tickets = Tickets.loadForAccount(context.account, dt)\n template_values = {\n 'context': context,\n 'tickets': tickets,\n }\n self.response.out.write(template.render(context.template('tickets-more.html'), template_values))\n \n \n# Ticket Details\n# --------------\n\n# Create a Ticket\nclass TicketHandler(webapp.RequestHandler):\n def get(self, key=\"\"):\n context = Context(self.request)\n if not context.isAuthenticated:\n return context.authenticate(self.response)\n \n def post(self, key=\"\"):\n context = Context(self.request)\n if not context.isAuthenticated:\n return context.authenticate(self.response)\n\n\n if key == 'new':\n ticket = Ticket()\n ticket.account = context.account \n ticket.source = 'web'\n else:\n # Get place and verify if permissions\n ticket = Tickets.loadOne(key)\n if ticket.account.key() != context.account.key():\n self.response.set_status(401)\n self.response.out.write('Permission Denied') \n return\n\n # Update the ticket from the parameters\n params = {}\n for k in self.request.arguments():\n params[k] = self.request.get(k)\n\n # Make sure to fix the Foursquare stuff\n if 'foursquare' in params: \n Foursquare.verifyPlace(context.account, params['foursquare']) \n\n ticket.update(params)\n #ticket.store()\n\n if key == 'new':\n # ticket.queueProcess()\n ticket.process()\n self.redirect('/tickets')\n\n #self.response.out.write('New: ' + str(ticket.key())) \n else:\n self.response.out.write('Update') \n\n def delete(self, key=\"\"):\n context = Context(self.request)\n if not context.isAuthenticated:\n return context.authenticate(self.response)\n\n ticket = Tickets.loadOne(key)\n if ticket.account.key() != context.account.key():\n self.response.set_status(401)\n self.response.out.write('Permission Denied') \n else:\n ticket.delete()\n self.response.out.write('Success') \n \nclass TicketDelete(webapp.RequestHandler):\n def get(self, key=''):\n context = Context(self.request)\n if not context.isAuthenticated:\n return context.authenticate(self.response)\n\n t = Tickets.loadOne(key)\n \n # Need to check the permissions here...\n if t:\n if context.isAdmin:\n t.delete()\n else:\n if t.account:\n if t.account.key() == context.account.key() :\n t.delete() \n\n self.redirect(self.request.referer)\n \n \n# Detail handlers\nclass TicketDetailHandler(webapp.RequestHandler):\n def get(self, key=\"\", topic=\"\"):\n context = Context(self.request)\n if not context.isAuthenticated:\n return context.authenticate(self.response)\n\n ticket = 
Tickets.loadOne(key)\n if ticket.account.key() != context.account.key():\n self.response.set_status(401)\n self.response.out.write('Permission Denied') \n return\n \n # Deal with the topics\n if topic == \"image\":\n tv = {'context': context, 'ticket': ticket }\n self.response.out.write(template.render(context.template('detail/ticket-image.html'), tv))\n \n elif topic == \"nearby\" and ticket.location:\n v = []\n # Get favorites\n result = Place.proximity_fetch(Place.all().filter('account =', context.account), ticket.location, max_results=8, max_distance=750)\n for r in result:\n v.append({'key': r.key(), 'name': r.name, 'address': r.address, 'city': r.city, 'foursquare': r.foursquare, 'favorite': True})\n \n # get more suggestions if too few results \n count = len(result)\n if count < 8:\n #if context.account.hasConnected('Foursquare'): # Get foursquare results\n# client = oauth.getClient(context.account, 'foursquare', '')\n# url = 'http://api.foursquare.com/v2/venues/search'\n urlparams ={'limit': 8, 'll': str(ticket.location.lat) + ',' + str(ticket.location.lon)} \n\n url = 'http://api.foursquare.com/v2/venues/search?client_id=P0IG5GZ20OEPGUK3Q15VH5BO31D3DR5HHS5PI5GCJX42L5KT&client_secret=QPPL4IOSR5N0JRAG5PWYNWZJVZGYOXXUPEYTJT4UGDRTGJUL&limit=8&ll=' + urlparams['ll']\n\n\n result = urlfetch.fetch(url)\n# result = client.fetch(url, urlparams) \n if result.status_code == 200: \n json = simplejson.loads(result.content)\n for g in json['response']['groups']:\n if g['type'] == 'nearby':\n for r in g['items']:\n # check if not already in the recommendations\n found = False\n for x in v:\n if r['id'] == x['foursquare']:\n found = True\n \n if not found and count < 8:\n address = ''\n if 'address' in r['location']:\n address = r['location']['address']\n\n city = ''\n if 'city' in r['location']:\n city = r['location']['city']\n \n v.append({'key': r['id'], 'name': r['name'], 'address': address, 'city': city, 'foursquare': r['id'], 'favorite': False})\n count = count + 1\n \n # Also do Gowalla??? 
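            # Illustrative sketch only (not in the original file): the hand-built query string\n            # above could instead be assembled with urllib.urlencode (Python 2 stdlib, matching\n            # this GAE-era code), which escapes the parameters consistently:\n            #\n            #     from urllib import urlencode\n            #     url = 'http://api.foursquare.com/v2/venues/search?' + urlencode(urlparams)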
\n \n \n tv = {'context': context, 'ticket': ticket, 'tips': v }\n self.response.out.write(template.render(context.template('detail/ticket-nearby.html'), tv))\n \n elif topic == \"checkin\":\n s = ''\n\n if ticket.date: # Sometimes tickes have no date, this happens when processing fails\n # Get the most recent checkins\n client = oauth.getClient(context.account, 'foursquare', '')\n url = 'http://api.foursquare.com/v2/users/self/checkins'\n urlparams ={\"limit\": 20} \n tdMin = datetime.timedelta(days=1)\n v_id = None\n \n result = client.fetch(url, urlparams) \n if result.status_code == 200: \n json = simplejson.loads(result.content)\n data = json['response']['checkins']\n \n for c in data['items']:\n # find the nearest (in time) checkin\n dt = datetime.datetime.fromtimestamp(c['createdAt'])\n td = abs(ticket.date - dt)\n \n if td < tdMin and 'venue' in c:\n # keep the venue, until one nearer comes\n v = c['venue']\n \n if 'id' in v:\n v_id = v['id']\n v_name = v['name']\n v_date = context.account.localizeDate(dt).strftime(\"%H:%M\")\n \n tdMin = td\n \n # found nothing or the nearest checkin \n if v_id:\n s = '' + v_name + ' at ' + v_date + ' - use as location'\n \n self.response.out.write(s) \n \n\n \n# Ticket Image\nclass TicketImageHandler(webapp.RequestHandler):\n def get(self, key=\"\"):\n ticket = Tickets.loadOne(key)\n if ticket.file:\n self.response.headers['Content-Type'] = \"image/jpg\"\n self.response.out.write(ticket.file)\n else:\n self.response.out.write(\"No image\")\n\n \n# Ticket system functions\n# -----------------------\n\n# Process a ticket \nclass TicketProcessHandler(webapp.RequestHandler):\n def get(self, key=\"\"):\n ticket = Tickets.loadOne(key) \n ticket.process()\n # Redirect back to the right place\n# s = self.request.referer\n self.redirect(self.request.referer)\n\n def post(self, key=\"\"):\n k = self.request.get('key')\n ticket = Tickets.loadOne(k) \n ticket.process()\n\n# Display raw info of the Ticket\nclass TicketInfoHandler(webapp.RequestHandler):\n def get(self, key=\"\"):\n ticket = Tickets.loadOne(key)\n\n ms = StringIO.StringIO(ticket.file)\n tags = EXIF.process_file(ms)\n\n self.response.out.write(\"\")\n self.response.out.write('')\n self.response.out.write(\"\")\n for tag in sorted(tags.keys()):\n if tag not in ('JPEGThumbnail', 'TIFFThumbnail', 'EXIF MakerNote', 'EXIF UserComment'):\n# if tag in ('EXIF DateTimeOriginal', 'Image DateTime', 'Image Orientation', 'GPS GPSLatitude', 'GPS GPSLongitude', 'EXIF ExifImageLength', 'EXIF ExifImageWidth'):\n v = tags[tag]\n self.response.out.write(\"\")\n\n self.response.out.write(\"
\" + tag + \"\" + v.printable + \"
\")\n\n # Check for Geo and create a map\n if ticket.location:\n lat = str(ticket.location.lat)\n lon = str(ticket.location.lon)\n \n self.response.out.write('')\n self.response.out.write('\"Map\"')\n self.response.out.write('')\n\n self.response.out.write(\"\")\n\n\n# Bulk Process\n# ------------\nclass TicketsProcessHandler(webapp.RequestHandler):\n def _process(self):\n action = self.request.get('action')\n if action == \"not-allocated\":\n tickets = Tickets.processNotAllocated() # process all not allocated tickets\n elif action == 'no-arrival-location': \n tickets = Tickets.processNoArrivalLocation()\n\n elif action == 'new-place':\n account = self.request.get('account')\n if account != '':\n tickets = Tickets.processNoPlace(account)\n \n \n def get(self):\n context = Context(self.request)\n if not context.isAuthenticated:\n return context.authenticate(self.response)\n\n self._process()\n self.redirect(self.request.referer)\n\n def post(self):\n self._process()\n\nclass TicketsReportHandler(webapp.RequestHandler):\n def _handler(self):\n context = Context(self.request)\n if not context.isAuthenticated:\n return context.authenticate(self.response)\n\n y = datetime.datetime.now().year\n m = datetime.datetime.now().month\n\n x = self.request.get(\"year\")\n if x != '':\n y = int(x)\n\n x = self.request.get(\"month\")\n if x != '':\n m = int(x)\n\n\n template_values = {\n 'context': context,\n 'year': y,\n 'month': m,\n 'rows': Tickets.asMatrix(context.account, y, m, True)\n }\n self.response.out.write(template.render(context.template('tickets-report.html'), template_values))\n \n def get(self):\n self._handler()\n\n def post(self):\n self._handler()\n\nclass TicketsReportDetailHandler(webapp.RequestHandler):\n def get(self, view=\"\"):\n context = Context(self.request)\n if not context.isAuthenticated:\n return context.authenticate(self.response)\n \n \n if view == 'list':\n t = 'detail/account-report-list.html'\n tv = {'context': context, 'tickets': Tickets.loadForAccount(context.account)}\n \n elif view == 'matrix':\n y = int(self.request.get(\"year\"))\n m = int(self.request.get(\"month\"))\n \n t = 'detail/account-report-matrix.html'\n tv = {'context': context, 'rows': Tickets.asMatrix(context.account, y, m, True)}\n\n self.response.out.write(template.render(context.template(t), tv))\n\n\n# Dashboard\n# ---------\nclass TicketsDashboardHandler(webapp.RequestHandler):\n def get(self, key=\"\"):\n context = Context(self.request)\n if not context.isAuthenticated:\n return context.authenticate(self.response)\n\n if key == 'export':\n # Is user connected to Google?\n hasGoogle = context.account.hasConnected('Google')\n template_values = {\n 'context': context,\n 'hasGoogle': hasGoogle,\n }\n self.response.out.write(template.render(context.template('dashboard/tickets-export.html'), template_values))\n\n","sub_path":"app/webapp/tickets.py","file_name":"tickets.py","file_ext":"py","file_size_in_byte":15042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"582665474","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Dec 11 11:45:07 2020\r\n\r\n@author: voyno\r\n\"\"\"\r\n\r\nimport pandas as pd\r\n\r\n# import preprocessing, fill_nan scripts\r\nfrom preprocessing import preprocess\r\nfrom fill_nans import fill_nans\r\n\r\n\r\ndef run_pipeline(file):\r\n if type(file) is str: # One file\r\n # Read raw data from yfinance api\r\n df = pd.read_csv(file)\r\n \r\n # preprocessing on raw data\r\n df = preprocess(df)\r\n \r\n # removing 
nan values\r\n        df = fill_nans(df)\r\n    else: # Multiple files\r\n        # Read raw data from yfinance api for first file in list\r\n        df = pd.read_csv(file[0])\r\n        \r\n        # preprocessing on raw data\r\n        df = preprocess(df)\r\n        \r\n        # removing nan values\r\n        df = fill_nans(df)\r\n        \r\n        # looping over the rest of the files in the list\r\n        for i in range(1, len(file)):\r\n            # Read raw data\r\n            next_df = pd.read_csv(file[i])\r\n            # preprocess\r\n            next_df = preprocess(next_df)\r\n            # fill nans\r\n            next_df = fill_nans(next_df)\r\n            # concatenate to df\r\n            # inner join: only shared columns are kept\r\n            df = pd.concat([df, next_df], join=\"inner\") \r\n\r\n    # the data is now ready for modeling\r\n    return df\r\n","sub_path":"data_pipeline.py","file_name":"data_pipeline.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"116300553","text":"#create manhattan plots with both FDR & bonferroni corrections displayed\r\n#output to pdfResWriter.py\r\nimport pandas as pd, numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\n\r\n#set params\r\nDATA = 'MLM_tassle_BLB_all.txt' # a Tassel results output file\r\noutdir = '.' #output dir\r\ndpi = 300 #figure dpi\r\ncolors = ['darkred', 'red'] #plot cols\r\nfigsize=(15, 5) #figure dimensions (inches)\r\nfontsize = 18\r\nab = 1 #Bonferroni & FDR ablines (0 = no abline)\r\n\r\n#read data\r\ndata = pd.read_csv(DATA, header = 0, sep='\\t')\r\ntraits = data.Trait.unique() #unique trait names from the Trait column (phenotypes tested against genotypes)\r\n\r\ndef makePlot(df, trait, fthr):\r\n    df_grouped = df.groupby(('Chr')) #plot grouping\r\n    bthr = math.log10(0.05 / df.shape[0]) * -1 #calc Bonferroni threshold\r\n    fig = plt.figure()\r\n    ax = fig.add_subplot(111)\r\n    x_labels = []\r\n    x_labels_pos = []\r\n    for num, (name, group) in enumerate(df_grouped):\r\n        group.plot(kind='scatter', x='ind', y='p', figsize=figsize, color=colors[num % len(colors)], ax=ax, s=12)\r\n        x_labels.append(name)\r\n        x_labels_pos.append((group['ind'].iloc[-1] - (group['ind'].iloc[-1] - group['ind'].iloc[0])/2))\r\n    plt.rcParams['xtick.labelsize'] = fontsize \r\n    plt.rcParams['ytick.labelsize'] = fontsize \r\n    ax.set_xticks(x_labels_pos)\r\n    ax.set_xticklabels(x_labels)\r\n    ax.set_xlim([0, len(df)])\r\n    ax.set_ylim([0, math.ceil(pd.concat([pd.DataFrame([bthr]), df['p']], axis=0).max() + 1)]) #ylim = val above largest score or threshold + 1\r\n    ax.set_xlabel('Chromosome', fontsize = fontsize)\r\n    ax.set_ylabel('-log10(p)', fontsize = fontsize)\r\n    if ab != 0 and fthr != 0:\r\n        plt.axhline(y=bthr, color='k', linestyle='dashed') \r\n        plt.axhline(y=fthr, color='k', linestyle='dashdot')\r\n    fig.savefig(f\"{outdir}/{trait}.mnhtn_{dpi}dpi.png\", dpi = dpi)\r\n    plt.show()\r\n\r\n#evaluate False Discovery Rate\r\ndef fdr(p):\r\n    p_values = np.sort(p)\r\n    N = len(p_values)\r\n    i = np.arange(1, N+1) # the 1-based i index of the p values, as in p(i)\r\n    q = 0.05\r\n\r\n    below = p_values < (q * i / N) # True where p(i) 0.00001\n\n    for x, y in zip(initial_robot_state.joint_vel, new_robot_state.joint_vel):\n        assert ( np.abs(x - y) > 0.00001 or np.abs(x - y) == 0 )\n\ndef test_msj_ros_bridge_proxy_read_state():\n    \"\"\"calling the read state function doesn't change the robot state\"\"\" \n\n    initial_robot_state = proxy.read_state()\n    new_robot_state = proxy.read_state()\n\n    assert np.allclose(initial_robot_state.joint_angle, new_robot_state.joint_angle)\n    assert np.allclose(initial_robot_state.joint_vel, 
new_robot_state.joint_vel)","sub_path":"gym-roboy/envs/tests/test_msj_ros_bridge_proxy.py","file_name":"test_msj_ros_bridge_proxy.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"225723764","text":"__author__ = 'Micke'\n\nclass Car(object):\n\n    wheels = 4\n\n    def __init__(self, make, model):\n        self.make = make\n        self.model = model\n\n    def DriveForward(self):\n        print(\"Car drives forward\")\n\n\n\nmustang = Car(\"Ford\", \"Mustang\")\n\nprint (\"This is a \" + mustang.make + \" \" + mustang.model + \" with \" + str(mustang.wheels) + \" wheels.\")\n\nmustang.DriveForward()","sub_path":"PythonPrograms/Car.py","file_name":"Car.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"610140526","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\ndefinitions.py\n\nDefinitions file to hold and handle all possible compression,\ndecompression and contents definitions dictionaries for native\nlinux utilities. They are used by the compress.py and contents.py classes.\n\nIf you have other contents definitions,\nplease send them along for inclusion in the main repo.\n\nMaintained in full by:\n    Brian Dolbec \n\n\"\"\"\n\nfrom collections import OrderedDict\n\nDEFINITION_FIELDS = OrderedDict([\n    (\"func\", str),\n    (\"cmd\", str),\n    (\"args\", list),\n    (\"id\", str),\n    (\"extensions\", list)\n    ]\n)\n\nDEFINITION_HELP = \"\"\"\nThe definition entries are to follow the definition_types\nwith the exception of the first entry \"Type\" which is a mode identifier\nfor use in the class as a type ID and printable output string.\n\nDefinition entries are composed of the following:\n    access key: list of definition fields values.\n    eg:\n    \"tar\": [ <== access key: list of DEFINITION_FIELDS\n        \"_common\", <== the class function that runs the external utility\n        \"tar\", <== the external utility command\n        [\"-cpf\", \"%(filename)s\", \"-C\", \"%(basedir)s\", \"%(source)s\"],\n            ^^ a list of the arguments to pass to the utility\n        \"TAR\", <== ID string that identifies the utility\n        [\"tar\"] <== file extensions list\n    ],\n\n\nAvailable named string variables that will be substituted with the passed in\nvalues during run time:\n\"%(filename)s\"    filename parameter to pass to the utility\n\"%(basedir)s\"     the base source directory where source originates from\n\"%(source)s\"      the file or directory being acted upon\n\"%(destination)s\" the destination file or directory\n\"%(arch)s\"       the arch filter to pass in i.e. 
Available filters: x86,\n arm, armthumb, powerpc, sparc, ia64\n\"\"\"\n\n\nCOMPRESS_DEFINITIONS = {\n \"Type\": [\"Compression\", \"Compression definitions loaded\"],\n \"rsync\": [\n \"rsync\", \"rsync\",\n [\"-a\", \"--delete\", \"%(source)s\", \"%(destination)s\"],\n \"RSYNC\", None\n ],\n \"lbzip2\": [\n \"_common\", \"tar\",\n [\n \"-I\", \"lbzip2\", \"-cf\", \"%(filename)s\", \"-C\",\n \"%(basedir)s\", \"%(source)s\"\n ],\n \"LBZIP2\", [\"tar.bz2\"]\n ],\n \"lbzip2_x\": [\n \"_common\", \"tar\",\n [\n \"--xattrs\", \"--xattrs-include=security.capability\",\n \"--xattrs-include=user.pax.flags\", \"-I\", \"lbzip2\",\n \"-cf\", \"%(filename)s\", \"-C\", \"%(basedir)s\", \"%(source)s\"\n ],\n \"LBZIP2\", [\"tar.bz2\"]\n ],\n \"bzip2\": [\n \"_common\", \"tar\",\n [\"-cpjf\", \"%(filename)s\", \"-C\", \"%(basedir)s\", \"%(source)s\"],\n \"BZIP2\", [\"tar.bz2\"]\n ],\n \"bzip2_x\": [\n \"_common\", \"tar\",\n [\n \"--xattrs\", \"--xattrs-include=security.capability\",\n \"--xattrs-include=user.pax.flags\", \"-cpjf\",\n \"%(filename)s\", \"-C\", \"%(basedir)s\", \"%(source)s\",\n ],\n \"BZIP2\", [\"tar.bz2\"]\n ],\n \"tar\": [\n \"_common\", \"tar\",\n [\"-cpf\", \"%(filename)s\", \"-C\", \"%(basedir)s\", \"%(source)s\"],\n \"TAR\", [\"tar\"]\n ],\n \"tar_x\": [\n \"_common\", \"tar\",\n [\n \"--xattrs\", \"--xattrs-include=security.capability\",\n \"--xattrs-include=user.pax.flags\", \"-cpf\",\n \"%(filename)s\", \"-C\", \"%(basedir)s\", \"%(source)s\"\n ],\n \"TAR\", [\"tar\"]\n ],\n \"xz\": [\n \"_common\", \"tar\",\n [\"-cpJf\", \"%(filename)s\", \"-C\", \"%(basedir)s\", \"%(source)s\"],\n \"XZ\", [\"tar.xz\"]\n ],\n \"xz_x\": [\n \"_common\", \"tar\",\n [\n \"--xattrs\", \"--xattrs-include=security.capability\",\n \"--xattrs-include=user.pax.flags\", \"-cpJf\",\n \"%(filename)s\", \"-C\", \"%(basedir)s\", \"%(source)s\"\n ],\n \"XZ\", [\"tar.xz\"]\n ],\n \"pixz\": [\n \"_common\", \"tar\",\n [\n \"-I\", \"pixz\", \"-cpf\", \"%(filename)s\", \"-C\", \"%(basedir)s\",\n \"%(source)s\"\n ],\n \"PIXZ\", [\"tar.xz\"]\n ],\n \"pixz_x\": [\n \"_common\", \"tar\",\n [\n \"--xattrs\", \"--xattrs-include=security.capability\",\n \"--xattrs-include=user.pax.flags\", \"-I\", \"pixz\", \"-cpf\",\n \"%(filename)s\", \"-C\", \"%(basedir)s\", \"%(source)s\"\n ],\n \"PIXZ\", [\"tar.xz\"]\n ],\n \"gzip\": [\n \"_common\", \"tar\",\n [\"-cpzf\", \"%(filename)s\", \"-C\", \"%(basedir)s\", \"%(source)s\"],\n \"GZIP\", [\"tar.gz\"]\n ],\n \"gzip_x\": [\n \"_common\", \"tar\",\n [\n \"--xattrs\", \"--xattrs-include=security.capability\",\n \"--xattrs-include=user.pax.flags\", \"-cpzf\",\n \"%(filename)s\", \"-C\", \"%(basedir)s\", \"%(source)s\"\n ],\n \"GZIP\", [\"tar.gz\"]\n ],\n \"squashfs\": [\n \"_sqfs\", \"mksquashfs\",\n [\n \"%(basedir)s/%(source)s\", \"%(filename)s\", \"-comp\", \"xz\",\n \"-Xbcj\", \"%(arch)s\", \"-b\", \"1M\"\n ],\n \"SQUASHFS\", [\"squashfs\", \"sfs\"]\n ],\n }\n\n\nDECOMPRESS_DEFINITIONS = {\n \"Type\": [\"Decompression\", \"Decompression definitions loaded\"],\n \"rsync\": [\n \"rsync\", \"rsync\",\n [\"-a\", \"--delete\", \"%(source)s\", \"%(destination)s\"],\n \"RSYNC\", None\n ],\n \"lbzip2\": [\n \"_common\", \"tar\",\n [\"-I\", \"lbzip2\", \"-xpf\", \"%(source)s\", \"-C\", \"%(destination)s\"],\n \"LBZIP2\", [\"tar.bz2\", \"bz2\", \"tbz2\"]\n ],\n \"lbzip2_x\": [\n \"_common\", \"tar\",\n [\n \"--xattrs\", \"--xattrs-include=security.capability\",\n \"--xattrs-include=user.pax.flags\", \"-I\", \"lbzip2\",\n \"-xpf\", \"%(source)s\", \"-C\", \"%(destination)s\"\n ],\n 
\"LBZIP2\", [\"tar.bz2\", \"bz2\", \"tbz2\"]\n ],\n \"bzip2\": [\n \"_common\", \"tar\",\n [\"-xpf\", \"%(source)s\", \"-C\", \"%(destination)s\"],\n \"BZIP2\", [\"tar.bz2\", \"bz2\", \"tbz2\"]\n ],\n \"bzip2_x\": [\n \"_common\", \"tar\",\n [\n \"--xattrs\", \"--xattrs-include=security.capability\",\n \"--xattrs-include=user.pax.flags\", \"-xpf\", \"%(source)s\",\n \"-C\", \"%(destination)s\"\n ],\n \"BZIP2\", [\"tar.bz2\", \"bz2\", \"tbz2\"]\n ],\n \"tar\": [\n \"_common\", \"tar\",\n [\"-xpf\", \"%(source)s\", \"-C\", \"%(destination)s\"],\n \"TAR\", [\"tar\"]\n ],\n \"tar_x\": [\n \"_common\", \"tar\",\n [\n \"--xattrs\", \"--xattrs-include=security.capability\",\n \"--xattrs-include=user.pax.flags\", \"-xpf\", \"%(source)s\",\n \"-C\", \"%(destination)s\"\n ],\n \"TAR\", [\"tar\"]\n ],\n \"xz\": [\n \"_common\", \"tar\",\n [\"-xpf\", \"%(source)s\", \"-C\", \"%(destination)s\"],\n \"XZ\", [\"tar.xz\", \"xz\"]\n ],\n \"xz_x\": [\n \"_common\", \"tar\",\n [\n \"--xattrs\", \"--xattrs-include=security.capability\",\n \"--xattrs-include=user.pax.flags\", \"-xpf\", \"%(source)s\",\n \"-C\", \"%(destination)s\"\n ],\n \"XZ\", [\"tar.xz\", \"xz\"]\n ],\n \"pixz\": [\n \"_common\", \"tar\",\n [\"-I\", \"pixz\", \"-xpf\", \"%(source)s\", \"-C\", \"%(destination)s\"],\n \"PIXZ\", [\"tar.xz\", \"xz\"]\n ],\n \"pixz_x\": [\n \"_common\", \"tar\",\n [\n \"--xattrs\", \"--xattrs-include=security.capability\",\n \"--xattrs-include=user.pax.flags\", \"-I\", \"pixz\", \"-xpf\",\n \"%(source)s\", \"-C\", \"%(destination)s\"\n ],\n \"PIXZ\", [\"tar.xz\", \"xz\"]\n ],\n \"gzip\": [\n \"_common\", \"tar\",\n [\"-xpzf\", \"%(source)s\", \"-C\", \"%(destination)s\"],\n \"GZIP\", [\"tar.gz\", \"gz\"]\n ],\n \"gzip_x\": [\n \"_common\", \"tar\",\n [\n \"--xattrs\", \"--xattrs-include=security.capability\",\n \"--xattrs-include=user.pax.flags\", \"-xpzf\", \"%(source)s\",\n \"-C\", \"%(destination)s\"\n ],\n \"GZIP\", [\"tar.gz\", \"gz\"]\n ],\n \"squashfs\": [\n \"_common\", \"unsquashfs\",\n [\"-d\", \"%(destination)s\", \"%(basedir)s/%(source)s\"],\n \"SQUASHFS\", [\"squashfs\", \"sfs\"]\n ],\n }\n\n\nDECOMPRESSOR_SEARCH_ORDER = [\n \"pixz\", \"lbzip2\", \"squashfs\", \"gzip\", \"xz\", \"bzip2\", \"tar\"\n]\n\nDECOMPRESSOR_XATTR_SEARCH_ORDER = [\n \"pixz_x\", \"lbzip2_x\", \"squashfs\", \"gzip_x\", \"xz_x\", \"bzip2_x\", \"tar_x\"\n]\n\n\"\"\"Configure this here in case it is ever changed.\nThis is the only edit point required then.\"\"\"\nEXTENSION_SEPARATOR = '.'\n\n\nCONTENTS_DEFINITIONS = {\n \"tar\": [\n \"_common\", \"tar\",\n [\"--xattrs\", \"-tvf\", \"%(source)s\"],\n \"TAR\", [\".tar\"]\n ],\n \"gzip\": [\n \"_common\", \"tar\",\n [\"--xattrs\", \"-tvzf\", \"%(source)s\"],\n \"GZIP\", [\".tgz\", \".tar.gz\", \"gz\"]\n ],\n \"lbzip2\": [\n \"_common\", \"tar\",\n [\"--xattrs\", \"-I\", \"lbzip2\", \"-tvf\", \"%(source)s\"],\n \"LBZIP2\", [\".tbz2\", \"bz2\", \".tar.bz2\"]\n ],\n \"bzip2\": [\n \"_common\", \"tar\",\n [\"--xattrs\", \"-tvf\", \"%(source)s\"],\n \"BZIP2\", [\".tbz2\", \"bz2\", \".tar.bz2\"]\n ],\n \"xz\": [\n \"_common\", \"tar\",\n [\"--xattrs\", \"-tvf\", \"%(source)s\"],\n \"XZ\", [\"tar.xz\", \"xz\"]\n ],\n \"pixz\": [\n \"_common\", \"tar\",\n [\"--xattrs\", \"-I\", \"pixz\", \"-tvf\", \"%(source)s\"],\n \"PIXZ\", [\"tar.xz\", \"xz\"]\n ],\n \"isoinfo_l\": [\n \"_common\", \"isoinfo\",\n [\"-l\", \"-i\", \"%(source)s\"],\n \"ISOINFO\", ['.iso']\n ],\n \"isoinfo_f\": [\n \"_common\", \"isoinfo\",\n [\"-f\", \"-i\", \"%(source)s\"],\n \"ISOINFO\", ['.iso']\n ],\n \"squashfs\": [\n 
\"_common\", \"unsquashfs\",\n [\n \"-ll\", \"%(source)s\",\n ],\n \"SQUASHFS\", [\"squashfs\", \"sfs\"]\n ],\n}\n\n# isoinfo_f should be a last resort only\nCONTENTS_SEARCH_ORDER = [\n \"pixz\", \"lbzip2\", \"isoinfo_l\", \"squashfs\",\n \"gzip\", \"xz\", \"bzip2\", \"tar\", \"isoinfo_f\"\n]\n","sub_path":"DeComp/definitions.py","file_name":"definitions.py","file_ext":"py","file_size_in_byte":11495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"409972943","text":"# -*- coding: utf-8 -*-\n\"\"\"Episode: 7-2.私と両親/こころ\n\"\"\"\n## path\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '../..'))\nsys.path.append('storybuilder')\n## local libs\nfrom storybuilder.builder.world import World\nfrom storybuilder.builder.writer import Writer\n\n\n## define alias\nW = Writer\n_ = W.getWho()\n\n\n## scenes\ndef sc_beout(w: World):\n sana, noto = W(w.sana), W(w.noto)\n return w.scene(\"先生は不在\",\n sana.come(),\n sana.explain(\"翌朝も$azuとは顔を合わすこともなく、何だか気分が落ち込んだ状態で外回りに出た\",\"&\"),\n sana.do(\"この日は気分転換も兼ねて、久しぶりに社用車を使わせてもらって都内を走る\",\n \"免許は大学時代に取っておいて良かったけれど、今にして思えばよくマニュアル免許なんて選んだなと$nirasakiにも笑われた\"),\n sana.do(\"まだ昼には少し早かったが、流石に先生も起きているだろう\",\n \"そんなつもりで車を近くのパーキングに停め、アパートまで足を伸ばす\"),\n sana.talk(\"先生? おはようございます?\"),\n sana.do(\"インタフォンを押すが返答がない\",\n \"けれどこういったことにもこの半年ばかりで随分と慣れた\",\n \"$Sは合鍵を取り出してドアを開けると「お邪魔しますよ、先生」と一声掛けて中に入る\"),\n sana.think(\"まだ寝てるのかな\"),\n sana.do(\"炬燵で寝ていないことを確認すると、奥の寝室をそっと開けてみる\",\n \"中は既にカーテンが開けてあり、起きた後だと分かったが、\",\n \"となると風呂とか近所へ昼食の買い出しとか、そういった可能性を考えるべきだろう\"),\n sana.think(\"このまま待っている、という選択肢もあるけれど、\", \"&\"),\n sana.do(\"と思って何気なく炬燵の天板を見た時にそれに気づいた\"),\n sana.talk(\"え?\"),\n sana.do(\"一枚だけ千切られた原稿用紙が消しゴムで留め置かれていて、そこには先生の手書きの地図付きで、ある店の名が書かれていた\"),\n camera=w.sana,\n stage=w.on_hisapart,\n day=w.in_visitgrave, time=w.at_afternoon,\n )\n\ndef sc_nearrestaurant(w: World):\n sana, noto = W(w.sana), W(w.noto)\n exterior = W(w.exterior)\n return w.scene(\"待ち合わせの洋食屋に向かう路地\",\n w.symbol(\"    ◆\"),\n sana.come(\"先生のアパートからは徒歩で十五分くらいの、閑静な住宅街だった\"),\n sana.do(\"六階に満たないマンションが立ち並び、コンビニやドラッグストアはあるがあまり他の店は見当たらない\",\n \"十字路にあったコンビニで店の場所を聞いてみると、坂を降りたところだと教わり、お礼を言ってレジ脇に置かれていたホットのレモンティーを購入した\"),\n sana.think(\"明日はバレンタインデーだから一応洋菓子があまり好きではない先生にチョコレート以外の甘いものでも買ってこようと思っていたのに、\",\n \"相変わらずの自由さに初対面でのSランク認定が蘇る\"),\n sana.talk(\"あれかな\"),\n sana.do(\"坂の下、右手側に見えてきたのは赤レンガ調の壁に深緑の庇が出た、落ち着いた雰囲気の喫茶店ぽい店だ\"),\n exterior.look(\"#レンガ調の壁に深い緑の庇\"),\n _.look(\"鉄製のドアの前には『OPEN』と木製のプレートが掛かっているが、あまりの静けさに本当に営業中なのだろうかと疑ってしまう\"),\n sana.talk(\"すみません\"),\n sana.do(\"こんな時にスマートフォンを持っていてくれたら、と思いつつ伺うように慎重な声を出すと、ゆっくりドアを押し開けた\"),\n stage=w.on_street,\n time=w.at_noon,\n )\n\ndef sc_waiting(w: World):\n sana, noto = W(w.sana), W(w.noto)\n chiyo = W(w.chiyoda)\n return w.scene(\"待ち合わせ\",\n sana.come(\"先生の書き置きにあったので、いつもの喫茶店にやってくる\"),\n noto.be(\"ご飯を食べ終えて、まったりと座っている\"),\n chiyo.be(\"$notoに食後のコーヒーを出している\"),\n chiyo.talk(\"あら、$sanaちゃん。いらっしゃい\"),\n noto.talk(\"やあ\"),\n noto.do(\"モーニングを食べ終えてお腹を擦っている\"),\n sana.talk(\"よく$meが来るって分かりましたね\"),\n noto.talk(\"もう二ヶ月以上の付き合いだからね。そろそろ寝癖まで分かるようになる\"),\n sana.talk(\"え!\"),\n noto.talk(\"あはは。冗談だよ\"),\n noto.talk(\"それより、少し車を出してもらいたくてね\"),\n sana.talk(\"はい。いいですよ\"),\n noto.do(\"ゆっくりとコーヒーを飲む\"),\n sana.talk(\"じゃあ、一旦会社に戻って取ってきます\"),\n sana.go(\"出ていく\"),\n stage=w.on_mastercafe,\n ).omit()\n\ndef sc_restaurant(w: World):\n sana, noto = W(w.sana), W(w.noto)\n hana = W(w.hanamaki)\n return w.scene(\"先生と洋食屋\",\n sana.come(\"#入ってくる\"),\n sana.hear(\"ドアベルが鳴り、店主らしき女性の声が答える\"),\n noto.be(\"#既に座っている\"),\n hana.be(\"#カウンターの掃除をしている\"),\n 
hana.talk(\"はぁーい、いらっしゃいませ\"),\n noto.talk(\"おう、$sana君\", \"来たね\"),\n sana.talk(\"先生!\"),\n noto.do(\"一番奥まったテーブル席で優雅に新聞を読んでいた先生は$Sに気づくと、右手を挙げてにこやかに応える\"),\n hana.talk(\"あら、先生のお知り合いですか\"),\n sana.talk(\"はい\", \"昨年秋から担当編集をしてます、$ln_sanaと言います\"),\n hana.look(\"目尻に皺が、前髪に白髪が目立つその女性は、紺のセーターの上にかすみ草みたいな柄のエプロンを付けていた\", \"&\"),\n hana.do(\"挨拶をした$CSににこやかに会釈をし、奥の席へと案内してくれる\"),\n noto.talk(\"特別な日にはね、ここに来ることにしているんだ\"),\n sana.talk(\"もし$meが今日あの書き置きを見なかったらどうするつもりだったんですか?\",\n \"行き違いになったり、待ちぼうけになったりしたら困るでしょう?\"),\n noto.talk(\"困るね\"),\n sana.talk(\"だからこういう時の為にもせめて携帯電話をですね\"),\n sana.do(\"席に腰を下ろした$Sの前に店主が水を運んできたけれど、注文は先生が済ませてしまい「同じものでいいね?」と言うので言葉を仕舞って頷いた\"),\n noto.talk(\"困ってはいけないかな?\"),\n sana.talk(\"また禅問答ですか?\"),\n noto.talk(\"いや、人生というのはね、計画通りにはいかないし予定通りにもいかないし、常にアクシデントが待ち構えているようなものだ\",\n \"何が起こるか分からない\",\n \"その『分からない』ことを書くのが小説だとは思わないかね?\"),\n sana.talk(\"だから困ってもいい、と?\"),\n noto.talk(\"良いか悪いか、という話ではないよ\",\n \"ただ、困らないように困らないようにと生きていくのは、どうにも性に合わなくてね\"),\n noto.do(\"そう言って先生はやや困ったように人差し指で額を掻いた\"),\n sana.think(\"相変わらず掴みどころのない人だ、とは思う\",\n \"けれど先生の言葉の端々にはキラリと光る不思議な玉が隠されていて、それを見つけた時に$Sは、ちょっぴり嬉しくなるのだ\",\n \"ああ、やはり先生は言葉の国の人なのだと感じるから\"),\n noto.talk(\"そうだった\", \"今日は少し頼みたいことがある\"),\n sana.talk(\"なんですか?\", \"脱ぐ以外ならいいですよ?\"),\n noto.talk(\"まだあの件を根に持っているのかい?\",\n \"ちょっとした冗談のつもり、というのは通じない世界に変わってきたねえ\"),\n sana.talk(\"あれが冗談で済まされるならセクハラって言葉は生まれてません\"),\n noto.do(\"あまりに強い目で$CSが睨んだからか、先生は恐縮して頭を下げる\"),\n noto.talk(\"それはそれとして、今日は車を出してもらいたいんだ\", \"遠くではないけれど、歩くには少し距離があってね\"),\n sana.talk(\"ええ、いいですよ?\"),\n sana.explain(\"それから料理が来るまで、最近読んだ小説の話や長編小説を書く時の疑問点、先生の原稿の進捗や$Sのプロットの状況などを話した\",\n \"先生はいつもと変わらない受け答えで、特に小説の書き方について今日は人物描写のことを教えてもらったのだけれど、その流れで$Sは$azuのことを口に出してしまった\"),\n noto.talk(\"友人が一次選考で落ちたことが、とてもショックだったと?\"),\n sana.talk(\"それが、応募した先がうちの新人賞だったんですよ\"),\n hana.talk(\"お待たせしました\",\n \"和風ハンバーグとけんちん汁のセットをお二つです\"),\n sana.explain(\"先生の掌くらい大きなハンバーグに大根おろしと和風のタレが掛かっている\",\n \"レタスにトマトやハムを合わせたサラダの脇に置かれた汁椀には、$Sが思う野菜だらけのけんちん汁ではなく、\",\n \"汁の上にやや固いおぼろ豆腐を煮崩したようなものがたっぷり乗せられていた\"),\n noto.talk(\"洋食のお店なんだけどね、ちょっと田舎の母親を思い出す味付けがされていて、定期的に通いたくなるんだ\"),\n sana.do(\"どんなものだろうと、いただきますを言って、まず未体験のけんちん汁から味わってみる\"),\n sana.talk(\"これ、素敵です!\"),\n noto.talk(\"だろう?\"),\n noto.do(\"先生は声を上げた$CSを見て嬉しそうだ\"),\n sana.think(\"豆腐はしっかり水切りした後に油で炒めてあるみたいで、鶏の脂が滲んだ出汁と絡んで口の中でほろほろと崩れる\",\n \"大きくカットされたゴボウに人参、大根も柔らかくて美味しい\"),\n sana.talk(\"$azuにも教えてあげよう\"),\n sana.think(\"そう言ってしまってから、彼女の件を再び思い出す\"),\n noto.talk(\"先程の友人の件だけれど、\",\n \"小説を書いている時はね、自分自身とその物語だけでいいんだ\",\n \"けどいざその作品を公募に出したり、出版したりということになると、今まで自分の手の内にあったその子を全くの赤の他人が評価をすることになる\",\n \"彼女はおそらく、まだちゃんと知らない誰かに読まれるという経験をしたことがなかったのだろう\",\n \"もっと言えば自分が信じた作品を誰かに否定されるという感覚を味わったことがなかったのだろうね\"),\n sana.talk(\"そうだと、思います\"),\n sana.explain(\"もっと言えばその否定をしたのは、友人である$Sだと思い込んでいるのだろう\"),\n sana.do(\"口の中に入れたハンバーグの肉汁の旨味が胃袋に落ちていくけれど、今日は少し胸焼けしそうな心地だった\"),\n stage=w.on_masterrestaurant,\n )\n\n\ndef sc_inthecar(w: World):\n sana, noto = W(w.sana), W(w.noto)\n return w.scene(\"車の中で\",\n sana.talk(\"もっと頼ってもらっていいんですよ\"),\n noto.talk(\"以前のように担当が一人の作家だけ抱えていればいいという時代ではないからね\"),\n noto.talk(\"時々眠そうにしているのを知っているよ\"),\n sana.talk(\"すみません\"),\n noto.talk(\"長編を書き上げる前にね、どうしても行っておきたかったんだ\"),\n sana.talk(\"どこですか?\"),\n noto.talk(\"大切な人の墓参りだ\"),\n w.eventPoint(\"十三日に不在の謎\", \"墓参りに行っていた\"),\n stage=w.on_car,\n ).omit()\n\n## episode\ndef ep_parents_and_me(w: World):\n return w.episode(\"2.中 先生と私\",\n ## NOTE\n sc_beout(w),\n sc_nearrestaurant(w),\n sc_waiting(w),\n sc_restaurant(w),\n sc_inthecar(w),\n 
)\n","sub_path":"src/kokoro/e2_parentsandme.py","file_name":"e2_parentsandme.py","file_ext":"py","file_size_in_byte":13736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"230399690","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 11 17:20:13 2018\n\n@author: narendra\n\"\"\"\n\n\ndef product(numbers):\n prod = 1\n for num in numbers:\n prod = prod*num\n return prod\n \nprint(product([4,6,2,6,87,34]))","sub_path":"Pyhton tutorial /129Loop_product.py","file_name":"129Loop_product.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"594269475","text":"from threading import Thread\nimport logging\nimport requests\n\nclass RequestThread(Thread):\n def __init__(self, id, url):\n Thread.__init__(self)\n\n self.id = id + 1\n self.name = 'RequestThread'\n self.url = url\n self.logger = logging.getLogger()\n\n self.logger.info(f'Creating thread {self.name} with id {self.id}')\n \n def run(self):\n self.logger.info(f'Starting thread {self.id}')\n\n response = requests.get(self.url)\n\n self.logger.info(f'Finished thread {self.id} with response status {response.status_code}')\n \n @staticmethod\n def create_many(quantity, url):\n return list(map(lambda i: RequestThread(i, url), range(quantity)))\n \n @staticmethod\n def start_many(threads):\n for thread in threads:\n thread.start()","sub_path":"bot/thread/request_thread.py","file_name":"request_thread.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"226845428","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nN = 68\n\n\n\ndemand = [50,\n272,\n1205.5,\n3210.25,\n2684,\n1712.25,\n1046.25,\n587.75]\n\n\nX = [2,4,6,8,10,12,14,16]\n\n\n#plot\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.set_xlabel('weeks')\nax.set_ylabel('#patients')\n\nplt.plot(X, demand, 'bs-', label='demand')\n\n#plt.xlim( 0, 15 )\n\nplt.legend()\n\nplt.savefig('plot_demand')\n\n\n\n","sub_path":"figs/Incremental_results/old_2weeks/Liberia/kmed/plot_demand.py","file_name":"plot_demand.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"414957331","text":"import logging\nimport traceback\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import render_to_string\n\n\nclass Notify():\n\n def __init__(self):\n self.logger = logging.getLogger('django')\n\n def send_email(self, subject, body, to, copy_to_adms=True, html=True):\n \"\"\"\n Send an email message.\n :param subject: The email subject.\n :param body: The email body. It can be a simple text or an HTML.\n :param to: list([]) The email recipient. 
It can be a string, for a single email, or a list, for multiple ones.\n :param copy_to_adms: Boolean: Option to send a copy to administrators.\n \"\"\"\n\n self.logger.debug(\"Sending mail notification.\")\n\n try:\n from_email = settings.EMAIL_NOTIFICATION\n except Exception:\n raise Exception(\n \"The EMAIL_NOTIFICATION variable is not configured in settings.\")\n\n self.logger.debug(\"FROM: %s\" % from_email)\n\n # If the 'to' parameter is not a list, convert it to a list.\n if not isinstance(to, list):\n to = list([to])\n\n if copy_to_adms:\n try:\n copy_to = settings.EMAIL_NOTIFICATION_COPY_TO\n to = to + copy_to\n except Exception:\n raise Exception(\n \"The EMAIL_NOTIFICATION_COPY_TO variable is not configured in settings.\")\n\n self.logger.debug(\"TO: %s\" % to)\n\n # Subject\n subject = (\"LIneA Minicurso - %s\" % (subject))\n\n self.logger.debug(\"SUBJECT: %s\" % subject)\n\n try:\n msg = EmailMessage(\n subject=subject,\n body=body,\n from_email=from_email,\n to=to,\n )\n\n if html is True:\n msg.content_subtype = \"html\"\n\n msg.send(fail_silently=False)\n\n except Exception as e:\n trace = traceback.format_exc()\n self.logger.error(trace)\n self.logger.error(e)\n\n def send_html_email(self, subject, to, template, context, copy_to_adms=True):\n\n context.update({\n \"application_name\": settings.APPLICATION_NAME,\n \"host_url\": settings.HOST_URL\n })\n\n body = render_to_string(template, context)\n\n self.send_email(subject, body, to)\n","sub_path":"backend/common/notify.py","file_name":"notify.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"644560578","text":"# in Selenium programs from Python\nimport time\n\nfrom selenium import webdriver # browser\nfrom selenium.webdriver.firefox.options import Options\n\n# driver config; in this case the browser is Firefox\n\n\nopciones=Options()\nopciones.headless = True # do the work without showing the browser window\n\nnavegador= webdriver.Firefox(executable_path='./drivers/geckodriver', options=opciones)\nnavegador.set_window_position(0,0)\nnavegador.set_window_size(800,500)\n# open the browser at a URL\nnavegador.get('https://google.es')\n## to have time to watch, 5 seconds\n\n# identify elements and act on them\nnavegador.find_element_by_xpath(\"//input[@name='q']\").send_keys('Sevilla')\n\ntime.sleep(2)\n#navegador.find_element_by_xpath(\"//input[@name='btnK']\").click()\ntime.sleep(3)\nestadisticas=navegador.find_element_by_xpath(\"//div[@id='result-stats']\").text\nprint(estadisticas)\n# close the browser\nnavegador.quit()\n","sub_path":"python/selenium1.py","file_name":"selenium1.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"59050177","text":"import sys\r\nfin = sys.stdin\r\nfout = sys.stdout\r\n\r\ndirections = []\r\ncoordinates = []\r\nnorth = []\r\neast = []\r\nwho = []\r\nwhen = []\r\nlog = []\r\n\r\nn = int(fin.readline())\r\n\r\nfor i in range(n):\r\n line = fin.readline().split()\r\n direct = line[0]\r\n directions.append(direct)\r\n x = int(line[1])\r\n y = int(line[2])\r\n coord = [x, y]\r\n coordinates.append(coord)\r\n if direct == 'N':\r\n north.append(coord)\r\n elif direct == 'E':\r\n east.append(coord)\r\n\r\nnorth.sort()\r\neast.sort(key=lambda q: (q[1]))\r\n\r\nfor a in range(n):\r\n grass = 10**10\r\n state = directions[a]\r\n position1 = coordinates[a]\r\n x1 = position1[0]\r\n y1 = 
position1[1]\r\n time = 0\r\n stopper = 0\r\n if state == 'N':\r\n for b in range(len(east)):\r\n position2 = east[b]\r\n x2 = position2[0]\r\n y2 = position2[1]\r\n width = x1-x2\r\n height = y2-y1\r\n if height > 0 and width > 0:\r\n if width < height:\r\n grass = min(grass, height)\r\n if grass == height:\r\n time = height\r\n stopper = coordinates.index(position2)+1\r\n who.append(stopper)\r\n elif state == 'E':\r\n for c in range(len(north)):\r\n position3 = north[c]\r\n x3 = position3[0]\r\n y3 = position3[1]\r\n width = x3-x1\r\n height = y1-y3\r\n if width > 0 and height > 0:\r\n if height < width:\r\n grass = min(grass, width)\r\n if grass == width:\r\n time = width\r\n stopper = coordinates.index(position3)+1\r\n\r\n who.append(stopper)\r\n when.append(time)\r\n log.append(grass)\r\n\r\nfor x in range(n):\r\n member = who[x]\r\n hour1 = when[x]\r\n hour2 = when[member-1]\r\n if (hour1 > hour2) and hour2:\r\n who[x] = 0\r\n when[x] = 0\r\n log[x] = 10**10\r\n\r\n\r\nfor d in range(len(log)):\r\n if log[d] == 10**10:\r\n fout.write('Infinity')\r\n else:\r\n fout.write(str(log[d]))\r\n if d != (len(log)-1):\r\n fout.write('\\n')\r\n\r\nfout.close()\r\n","sub_path":"Bronze/Practice/stuckRut.py","file_name":"stuckRut.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"251544323","text":"import io\n\nfrom app import bcrypt\nfrom flask import render_template, redirect, url_for, request, Response, send_file\nfrom flask_login import login_required, login_user, logout_user\n\nfrom app import app, mongo\nfrom app import login_manager\nfrom app.forms import LoginForm\nfrom app.models import User\nimport csv\nimport pymongo\nfrom werkzeug.security import check_password_hash\nfrom app.utils import upload_data, create_csv, get_filters, build_query\nimport configparser\nfrom bson.objectid import ObjectId\nimport pandas as pd\nimport numpy as np\nfrom app.db import sub_partner_emp , create_csv,show_subs\nimport traceback\npid = ['5d4421722b3dc03043dead1e']\n\n\nconfig = configparser.ConfigParser()\nconfig.read('configmsg.ini')\ndsh = config['DASH']\nMONGO_HOST = dsh['MONGO_HOST']\nMONGO_PORT = int(dsh['MONGO_PORT'])\nmyclient = pymongo.MongoClient(MONGO_HOST,MONGO_PORT,maxPoolSize=10)\nmydb = myclient[(dsh['database_name'])]\n\ndash_usr_tbl=mydb[(dsh['dash_users'])]\n\n\n\n\n\n\n\n \ndff = pd.DataFrame()\napplied_filters=[]\n\ndef get_filtered(app_filters,ddf):\n ddf = (ddf[ddf[list(app_filters.keys())].isin(app_filters).all(1)])\n return(ddf)\n\n\n@app.route('/employee/', methods=[\"GET\"])\n@login_required\ndef employee_view(page):\n global dff\n global applied_filters\n global filters\n print(page)\n \n\n dfss = []\n filters = []\n pids = []\n \n global query_string \n if(page ==1):\n\n \n try:\n dfss.append(sub_partner_emp(pid))\n partners_tbl=mydb['partners'].find({'parent_partner_id': ObjectId('5d4421722b3dc03043dead1e')})\n part = pd.DataFrame(list(partners_tbl))\n pids.append((part['_id'].astype(str)).tolist()) \n dfss.append(sub_partner_emp(pids[0]))\n dfss = (dfss[0]+dfss[1])\n dff = pd.concat(dfss, ignore_index=True)\n except Exception:\n print(traceback.format_exc())\n\n filters = get_filters()\n _, applied_filters = build_query(request.args)\n app_filters = applied_filters\n app_filters = {k: v for k, v in app_filters.items() if v}\n\n print(app_filters)\n if(app_filters):\n dff = get_filtered(app_filters,dff)\n print(app_filters)\n \n \n query_string = 
request.query_string.decode(\"utf-8\")\n if query_string != \"\":\n query_string = \"?\" + query_string\n items_per_page = 15\n skip=items_per_page * (page - 1)\n limit=items_per_page\n employees = dff.iloc[skip:(skip+15)]\n employee_list = list()\n for i, employee in employees.iterrows():\n employee_list.append(dict(employee))\n else:\n _, applied_filters = build_query(request.args)\n app_filters = applied_filters\n app_filters = {k: v for k, v in app_filters.items() if v}\n\n \n if(app_filters):\n dff = get_filtered(app_filters,dff)\n \n query_string = request.query_string.decode(\"utf-8\")\n if query_string != \"\":\n query_string = \"?\" + query_string\n \n items_per_page = 15\n skip=items_per_page * (page - 1)\n limit=items_per_page\n employees = dff.iloc[skip:(skip+15)]\n employee_list = list()\n for i, employee in employees.iterrows():\n employee_list.append(dict(employee))\n\n \n # applied_filters\n\n \n \n \n\n \n # return render_template(\"employees.html\", emps=employee_list, page=page)\n\n return render_template(\"employees.html\", emps=employee_list, page=page, filters=filters,\napp_filters=applied_filters, query_str='')\n\n@app.route('/employee/upload', methods=[\"POST\"])\n@login_required\ndef upload_employee_data():\n file = request.files[\"csv\"]\n print(file)\n stream = io.StringIO(file.read().decode(\"UTF8\"), newline=None)\n csvreader = csv.reader(stream)\n csvreader = list(csvreader)[1:]\n \n for row in csvreader:\n print(row)\n try:\n is_uploaded = upload_data(csvreader)\n if is_uploaded:\n return Response(status=200, )\n else:\n return Response(status=400)\n except:\n return Response(status=400)\n\n\n@app.route('/employee/download', methods=[\"GET\"])\n@login_required\ndef download_csv():\n file_path, file_name = create_csv()\n return send_file(file_path, attachment_filename=file_name, as_attachment=True)\n\n\n\n@app.route('/employee/change_emp_status', methods=[\"PUT\"])\n@login_required\ndef change_emp_status():\n response = request\n result = mongo.db.Employee.find_one_and_update({\"_id\": ObjectId(response.form[\"emp_id\"])},\n {\"$set\": {\"emp\": response.form[\"value\"]}})\n if result:\n return Response(status=200)\n else:\n return Response(status=500)\n\n\n\n\n\n\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"583325450","text":"import json\nimport csv\nimport re \nimport random\n\nleetData = open('leetData.csv', 'r+')\nQcollection = open('Qcollection.csv', 'r+')\nqbank = open(\"questionbank.json\", 'r')\nwriter = csv.writer(leetData)\nQwriter = csv.writer(Qcollection)\ndata = json.load(qbank)\ndic = data.items()\n\nQcol = []\n\ncount = 0\ncount5 = 0\ncount4 = 0\ncount3 = 0\ncount35 = 0\ncount1 = 0\ncount0 = 0\n#remove the examples from the dataset\nfor name, r in dic:\n r[0] = re.sub(\"Example\\s\\d+(.|\\s)*$\", '', str(r[0]))\n r[0] = re.sub(\"Note:(.|\\s)*$\", '', str(r[0]))\n r[0] = re.sub(\"Note that(.|\\s)*$\", '', str(r[0]))\n r[0] = re.sub(\"For example(.|\\s)*$\", '', str(r[0]))\n r[0] = re.sub(\"Example(.|\\s)*$\", '', str(r[0]))\n r[0] = re.sub(\"Follow up:(.|\\s)*$\", '', str(r[0]))\n r[0] = re.sub(\"Suppose(.|\\s)*$\", '', str(r[0]))\n r[0] = re.sub(\"You may assume(.|\\s)*$\", '', str(r[0]))\n r[0] = re.sub(\"Formally, (.|\\s)*$\", '', str(r[0]))\n r[0] = re.sub(\"Answer this question(.|\\s)*$\", '', str(r[0]))\n r[0] = re.sub(\"push(x) --(.|\\s)*$\", '', str(r[0]))\n r[0] = r[0].strip()\n r[0] = 
r[0].replace('\\n', '')\n Qcol.append(r[0])\n\nQwriter.writerow(Qcol)\nrow = []\nfor name, r in dic:\n if r[1]:\n for qName in r[1]:\n if qName in data:\n row = []\n \n row.append(r[0])\n row.append(data[qName][0])\n row.append(5.0)\n ran = random.random()\n if ran<= 0.05:\n row.append('test')\n elif ran <= 0.25:\n row.append('dev')\n writer.writerow(row)\n count +=1 \n count5+=1\nfor name, r in dic:\n if len(r[2]) >= 3:\n for sname, sr in dic:\n if sname != name and all(item in sr[2] for item in r[2]):\n row = []\n row.append(r[0])\n row.append(sr[0])\n row.append(4.0)\n ran = random.random()\n if ran<= 0.05:\n row.append('test')\n elif ran <= 0.25:\n row.append('dev')\n writer.writerow(row)\n count +=1 \n count4 += 1\n\nfor name, r in dic:\n if len(r[2]) == 2:\n for sname, sr in dic:\n if sname != name and all(item in sr[2] for item in r[2]):\n row = []\n row.append(r[0])\n row.append(sr[0])\n row.append(3.0)\n ran = random.random()\n if ran<= 0.05:\n row.append('test')\n elif ran <= 0.25:\n row.append('dev')\n writer.writerow(row)\n count +=1 \n count3 +=1\n\nfor name, r in dic:\n if len(r[2]) >= 3:\n for sname, sr in dic:\n if sname != name and len(set(r[2]) &set(sr[2])) >= 3:\n row = []\n row.append(r[0])\n row.append(sr[0])\n row.append(3.5)\n ran = random.random()\n if ran<= 0.05:\n row.append('test')\n elif ran <= 0.25:\n row.append('dev')\n writer.writerow(row)\n count +=1 \n count35 += 1\n\nfor name, r in dic:\n if len(r[2]) >= 3:\n for sname, sr in dic:\n if len(sr[2]) >= 3 and sname != name and not(any(item in sr[2] for item in r[2])):\n row = []\n row.append(r[0])\n row.append(sr[0])\n row.append(0.0)\n ran = random.random()\n if ran<= 0.05:\n row.append('test')\n elif ran <= 0.25:\n row.append('dev')\n writer.writerow(row)\n count +=1 \n count0 +=1\n\nfor name, r in dic:\n if len(r[2]) >= 3:\n for sname, sr in dic:\n if len(sr[2]) >= 3 and sname != name and len(set(sr[2]).intersection(set(r[2]))) == 1:\n row = []\n row.append(r[0])\n row.append(sr[0])\n row.append(1.0)\n ran = random.random()\n if ran<= 0.05:\n row.append('test')\n elif ran <= 0.25:\n row.append('dev')\n writer.writerow(row)\n count +=1 \n count1 +=1\n\n#make a 20 percent dev set \n\nleetData.close()\nQcollection.close()\nqbank.close()\n\nprint(\"total: \" + str(count))\nprint(\"score 5: \"+ str(count5))\nprint(\"score 4: \"+ str(count4))\nprint(\"score 3: \"+ str(count3))\nprint(\"score 3.5: \"+ str(count35))\nprint(\"score 1: \"+ str(count1))\nprint(\"score 0: \"+ str(count0))","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":4725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"509345315","text":"#!/usr/bin/env python\n\n# =============================================================================\n# GLOBAL IMPORTS\n# =============================================================================\nimport os\nimport glob\nimport io\nimport collections\nimport pickle\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nimport scipy.stats\n\n\n## For QQ-Plots and Error Slope Calc\nimport scipy.stats\nimport scipy.integrate\nimport matplotlib.patches as patches\nfrom pylab import rcParams\nfrom operator import itemgetter, attrgetter\n\n\n# =============================================================================\n# CONSTANTS\n# =============================================================================\n\n# Paths to input data.\nLOGP_SUBMISSIONS_DIR_PATH = 
'./logP_predictions'\nEXPERIMENTAL_DATA_FILE_PATH = '../experimental_data/logP_experimental_values.csv'\nUSER_MAP_FILE_PATH = '../predictions/SAMPL6-user-map-logP.csv'\nMETHOD_MAP_FILE_PATH = '../predictions/SAMPL6-logP-method-map.csv'\n\n# =============================================================================\n# STATS FUNCTIONS\n# =============================================================================\n\ndef r2(data):\n x, y = data.T\n slope, intercept, r_value, p_value, stderr = scipy.stats.linregress(x, y)\n return r_value**2\n\n\ndef slope(data):\n x, y = data.T\n slope, intercept, r_value, p_value, stderr = scipy.stats.linregress(x, y)\n return slope\n\n\ndef me(data):\n x, y = data.T\n error = np.array(x) - np.array(y)\n return error.mean()\n\n\ndef mae(data):\n x, y = data.T\n error = np.abs(np.array(x) - np.array(y))\n return error.mean()\n\n\ndef rmse(data):\n x, y = data.T\n error = np.array(x) - np.array(y)\n rmse = np.sqrt((error**2).mean())\n return rmse\n\ndef kendall_tau(data):\n x, y = data.T\n correlation, p_value = scipy.stats.kendalltau(x, y)\n return correlation\n\n\ndef compute_bootstrap_statistics(samples, stats_funcs, percentile=0.95, n_bootstrap_samples=1000):\n \"\"\"Compute bootstrap confidence interval for the given statistics functions.\"\"\"\n # Handle case where only a single function is passed.\n #print(\"SAMPLES:\\n\", samples)\n\n try:\n len(stats_funcs)\n except TypeError:\n stats_funcs = [stats_funcs]\n\n # Compute mean statistics.\n statistics = [stats_func(samples) for stats_func in stats_funcs]\n\n # Generate bootstrap statistics.\n bootstrap_samples_statistics = np.zeros((len(statistics), n_bootstrap_samples))\n for bootstrap_sample_idx in range(n_bootstrap_samples):\n samples_indices = np.random.randint(low=0, high=len(samples), size=len(samples))\n for stats_func_idx, stats_func in enumerate(stats_funcs):\n bootstrap_samples_statistics[stats_func_idx][bootstrap_sample_idx] = stats_func(samples[samples_indices])\n\n # Compute confidence intervals.\n percentile_index = int(np.floor(n_bootstrap_samples * (1 - percentile) / 2)) - 1\n bootstrap_statistics = []\n for stats_func_idx, samples_statistics in enumerate(bootstrap_samples_statistics):\n samples_statistics.sort()\n stat_lower_percentile = samples_statistics[percentile_index]\n stat_higher_percentile = samples_statistics[-percentile_index+1]\n confidence_interval = (stat_lower_percentile, stat_higher_percentile)\n bootstrap_statistics.append([statistics[stats_func_idx], confidence_interval, samples_statistics])\n\n return bootstrap_statistics\n\n# =============================================================================\n# STATS FUNCTIONS FOR QQ-PLOT AND ERROR SLOPE CALCULATION\n#\n# Methods from uncertain_check.py David L. Mobley wrote for the SAMPL4 analysis\n# ===============================================================================\n\ndef normal(y):\n \"\"\"Return unit normal distribution value at specified location.\"\"\"\n return 1. / np.sqrt(2 * np.pi) * np.exp(-y ** 2 / 2.)\n\n\ndef compute_range_table(stepsize=0.001, maxextent=10):\n \"\"\"Compute integrals of the unit normal distribution and return these tabulated.\n Returns:\n --------\n - range: NumPy array giving integration range (x) where integration range runs -x to +x\n - integral: NumPy arrange giving integrals over specified integration range.\n\n Arguments (optional):\n ---------------------\n - stepsize: Step size to advance integration range by each trial. 
Default: 0.001\n - maxextent: Maximum extent of integration range\n \"\"\"\n # Calculate integration range\n x = np.arange(0, maxextent, stepsize) # Symmetric, so no need to do negative values.\n\n # Calculate distribution at specified x values\n distrib = normal(x)\n\n integral = np.zeros(len(x), float)\n for idx in range(1, len(x)):\n integral[idx] = 2 * scipy.integrate.trapz(distrib[0:idx + 1], x[0:idx + 1]) # Factor of 2 handles symmetry\n\n return x, integral\n\n\ndef get_range(integral, rangetable, integraltable):\n \"\"\"Use rangetable and integral table provided (i.e. from compute_range_table) to find the smallest range of integration for which the integral is greater than the specified value (integral). Return this range as a float.\"\"\"\n\n idx = np.where(integraltable > integral)[0]\n return rangetable[idx[0]]\n\n\n# [DLM]Precompute integral of normal distribution so I can look up integration range which gives desired integral\n# integral_range, integral = compute_range_table()\n\n\ndef fracfound_vs_error(calc, expt, dcalc, dexpt, integral_range, integral):\n \"\"\"\n Takes in calculated and experimental values and their uncertainties, along with the tabulated normal-distribution integrals from compute_range_table, and returns the expected fractions (X) and the observed fractions of predictions (Y) falling within each uncertainty range.\n \"\"\"\n # Fraction of Gaussian distribution we want to compute\n X = np.arange(0, 1.0, 0.01)\n Y = np.zeros(len(X))\n\n for (i, x) in enumerate(X):\n # Determine integration range which gives us this much probability\n rng = get_range(x, integral_range, integral)\n # print x, rng\n\n # Loop over samples and compute fraction of measurements found\n y = 0.\n # for n in range(0, len(DGcalc)):\n # sigma_eff = sqrt( sigma_calc[n]**2 + sigma_expt[n]**2 )\n # absdiff = abs( DGcalc[n] - DGexpt[n])\n # #print absdiff, n, sigma_eff\n # if absdiff < rng * sigma_eff: #If the difference falls within the specified range of sigma values, then this is within the range we're looking at; track it\n # #print \"Incrementing y for n=%s, x = %.2f\" % (n, x)\n # y += 1./len(DGcalc)\n # Rewrite for speed\n sigma_eff = np.sqrt(np.array(dcalc) ** 2 + np.array(dexpt) ** 2)\n absdiff = np.sqrt((np.array(calc) - np.array(expt)) ** 2)\n idx = np.where(absdiff < rng * sigma_eff)[0]\n Y[i] = len(idx) * 1. / len(calc)\n\n # print Y\n # raw_input()\n\n return X, Y\n\n\n# Copied from David L. Mobley's scripts written for SAMPL4 analysis (added calculation uncertainty)\ndef bootstrap_exptnoise(calc1, expt1, exptunc1, returnunc=False):\n \"\"\"Take two datasets (equal length) of calculated and experimental values. Construct new datasets of equal length by picking, with replacement, a set of indices to use from both sets. Return the two new datasets. To take into account experimental uncertainties, random noise is added to the experimental set, distributed according to Gaussians with variance taken from the experimental uncertainties. Approach suggested by J. 
Chodera.\nOptionally, 'returnunc = True', which returns a third value -- experimental uncertainties corresponding to the data points actually used.\"\"\"\n\n # Make everything an array just in case\n calc = np.array(calc1)\n expt = np.array(expt1)\n exptunc = np.array(exptunc1)\n npoints = len(calc)\n\n # Pick random datapoint indices\n idx = np.random.randint(0, npoints,\n npoints) # Create an array consisting of npoints indices, where each index runs from 0 up to npoints.\n\n # Construct initial new datasets\n newcalc = calc[idx]\n newexpt = expt[idx]\n newuncExp = exptunc[idx]\n\n # Add noise to experimental set\n noise = np.random.normal(0.,\n exptunc) # For each data point, draw a random number from a normal distribution centered at 0, with standard deviations given by exptunc\n newexpt += noise\n\n if not returnunc:\n return newcalc, newexpt\n else:\n return newcalc, newexpt, newuncExp\n\n# Modified from David L. Mobley's scripts written for SAMPL4 analysis (added bootstrapped values to the list of returned values)\ndef getQQdata(calc, expt, dcalc, dexpt, boot_its):\n \"\"\"\n Takes calculated and experimental values and their uncertainties\n\n Parameters\n ----------\n calc: predicted logP value\n expt: experimental logP value\n dcalc: predicted model uncertainty\n dexpt: experimental logP SEM\n\n Outputs\n -------\n X: array of x axis values for QQ-plot\n Y: array of y axis values for QQ-plot\n slope: Error Slope (ES) of line fit to QQ-plot\n slopes: Error Slope (ES) of line fit to QQ-plot of bootstrapped datapoints\n \"\"\"\n integral_range, integral = compute_range_table()\n X, Y = fracfound_vs_error(calc, expt, dcalc, dexpt, integral_range, integral)\n xtemp = X[:, np.newaxis]\n coeff, _, _, _ = np.linalg.lstsq(xtemp, Y)\n slope = coeff[0]\n slopes = []\n for it in range(boot_its):\n n_calc, n_expt, n_dexpt = bootstrap_exptnoise(calc, expt, dexpt, returnunc=True)\n nX, nY = fracfound_vs_error(n_calc, n_expt, dcalc, n_dexpt, integral_range, integral)\n a, _, _, _ = np.linalg.lstsq(xtemp, nY)\n slopes.append(a[0])\n return X, Y, slope, np.array(slopes).std(), slopes\n\n# =============================================================================\n# PLOTTING FUNCTIONS\n# =============================================================================\n\ndef plot_correlation(x, y, data, title=None, color=None, kind='joint', ax=None):\n # Extract only logP values.\n data = data[[x, y]]\n\n # Find extreme values to make axes equal.\n min_limit = np.ceil(min(data.min()) - 1)\n max_limit = np.floor(max(data.max()) + 1)\n axes_limits = np.array([min_limit, max_limit])\n\n if kind == 'joint':\n grid = sns.jointplot(x=x, y=y, data=data,\n kind='reg', joint_kws={'ci': None}, stat_func=None,\n xlim=axes_limits, ylim=axes_limits, color=color)\n ax = grid.ax_joint\n grid.fig.subplots_adjust(top=0.95)\n grid.fig.suptitle(title)\n elif kind == 'reg':\n ax = sns.regplot(x=x, y=y, data=data, color=color, ax=ax)\n ax.set_title(title)\n\n # Add diagonal line.\n ax.plot(axes_limits, axes_limits, ls='--', c='black', alpha=0.8, lw=0.7)\n\n # Add shaded area for 0.5-1 logP error.\n palette = sns.color_palette('BuGn_r')\n ax.fill_between(axes_limits, axes_limits - 0.5, axes_limits + 0.5, alpha=0.2, color=palette[2])\n ax.fill_between(axes_limits, axes_limits - 1, axes_limits + 1, alpha=0.2, color=palette[3])\n\n\ndef plot_correlation_with_SEM(x_lab, y_lab, x_err_lab, y_err_lab, data, title=None, color=None, ax=None):\n # Extract only logP values.\n x_error = data.loc[:, x_err_lab]\n y_error = data.loc[:, 
y_err_lab]\n x_values = data.loc[:, x_lab]\n y_values = data.loc[:, y_lab]\n data = data[[x_lab, y_lab]]\n\n # Find extreme values to make axes equal.\n min_limit = np.ceil(min(data.min()) - 1)\n max_limit = np.floor(max(data.max()) + 1)\n axes_limits = np.array([min_limit, max_limit])\n\n # Color\n current_palette = sns.color_palette()\n sns_blue = current_palette[0]\n\n # Plot\n plt.figure(figsize=(6, 6))\n grid = sns.regplot(x=x_values, y=y_values, data=data, color=color, ci=None)\n plt.errorbar(x=x_values, y=y_values, xerr=x_error, yerr=y_error, fmt=\"o\", ecolor=sns_blue, capthick='2',\n label='SEM', alpha=0.75)\n plt.axis(\"equal\")\n\n if len(title) > 70:\n plt.title(title[:70]+\"...\")\n else:\n plt.title(title)\n\n # Add diagonal line.\n grid.plot(axes_limits, axes_limits, ls='--', c='black', alpha=0.8, lw=0.7)\n\n # Add shaded area for 0.5-1 logP error.\n palette = sns.color_palette('BuGn_r')\n grid.fill_between(axes_limits, axes_limits - 0.5, axes_limits + 0.5, alpha=0.2, color=palette[2])\n grid.fill_between(axes_limits, axes_limits - 1, axes_limits + 1, alpha=0.2, color=palette[3])\n\n plt.xlim(axes_limits)\n plt.ylim(axes_limits)\n\n\ndef barplot_with_CI_errorbars(df, x_label, y_label, y_lower_label, y_upper_label, figsize=False):\n \"\"\"Creates bar plot of a given dataframe with asymmetric error bars for y axis.\n\n Args:\n df: Pandas Dataframe that should have columns with columnnames specified in other arguments.\n x_label: str, column name of x axis categories\n y_label: str, column name of y axis values\n y_lower_label: str, column name of lower error values of y axis\n y_upper_label: str, column name of upper error values of y axis\n figsize: tuple, size in inches. Default value is False.\n\n \"\"\"\n # Column names for new columns for delta y_err which is calculated as | y_err - y |\n delta_lower_yerr_label = \"$\\Delta$\" + y_lower_label\n delta_upper_yerr_label = \"$\\Delta$\" + y_upper_label\n data = df # Pandas DataFrame\n data.loc[:,delta_lower_yerr_label] = data.loc[:,y_label] - data.loc[:,y_lower_label]\n data.loc[:,delta_upper_yerr_label] = data.loc[:,y_upper_label] - data.loc[:,y_label]\n\n # Color\n current_palette = sns.color_palette()\n sns_color = current_palette[2]\n\n # Plot style\n plt.close()\n plt.style.use([\"seaborn-talk\", \"seaborn-whitegrid\"])\n plt.rcParams['axes.labelsize'] = 20 # 18\n plt.rcParams['xtick.labelsize'] = 14\n plt.rcParams['ytick.labelsize'] = 18 #16\n plt.rcParams['legend.fontsize'] = 16\n plt.rcParams['legend.handlelength'] = 2\n #plt.tight_layout()\n\n # If figsize is specified\n if figsize != False:\n plt.figure(figsize=figsize)\n\n # Plot\n x = range(len(data[y_label]))\n y = data[y_label]\n plt.bar(x, y)\n plt.xticks(x, data[x_label], rotation=90)\n plt.errorbar(x, y, yerr=(data[delta_lower_yerr_label], data[delta_upper_yerr_label]),\n fmt=\"none\", ecolor=sns_color, capsize=3, capthick=True)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n\n\ndef barplot_with_CI_errorbars_colored_by_label(df, x_label, y_label, y_lower_label, y_upper_label, color_label, figsize=False):\n \"\"\"Creates bar plot of a given dataframe with asymmetric error bars for y axis.\n\n Args:\n df: Pandas Dataframe that should have columns with columnnames specified in other arguments.\n x_label: str, column name of x axis categories\n y_label: str, column name of y axis values\n y_lower_label: str, column name of lower error values of y axis\n y_upper_label: str, column name of upper error values of y axis\n color_label: str, column name of label that 
will determine the color of bars\n figsize: tuple, size in inches. Default value is False.\n\n \"\"\"\n # Column names for new columns for delta y_err which is calculated as | y_err - y |\n delta_lower_yerr_label = \"$\\Delta$\" + y_lower_label\n delta_upper_yerr_label = \"$\\Delta$\" + y_upper_label\n data = df # Pandas DataFrame\n data.loc[:, delta_lower_yerr_label] = data.loc[:, y_label] - data.loc[:, y_lower_label]\n data.loc[:, delta_upper_yerr_label] = data.loc[:, y_upper_label] - data.loc[:, y_label]\n\n # Color\n #current_palette = sns.color_palette()\n #sns_color = current_palette[2] # Error bar color\n\n # Zesty colorblind-friendly color palette\n color0 = \"#0F2080\"\n color1 = \"#F5793A\"\n color2 = \"#A95AA1\"\n color3 = \"#85C0F9\"\n current_palette = [color0, color1, color2, color3]\n error_color = 'gray'\n\n # Bar colors\n if color_label == \"category\":\n category_list = [\"Physical\", \"Empirical\", \"Mixed\", \"Other\"]\n elif color_label == \"reassigned_category\":\n category_list = [\"Physical (MM)\", \"Empirical\", \"Mixed\", \"Physical (QM)\"]\n elif color_label == \"type\":\n category_list = [\"Standard\", \"Reference\"]\n else:\n Exception(\"Error: Unsupported label used for coloring\")\n bar_color_dict = {}\n for i, cat in enumerate(category_list):\n bar_color_dict[cat] = current_palette[i]\n print(\"bar_color_dict:\\n\", bar_color_dict)\n\n\n # Plot style\n plt.close()\n plt.style.use([\"seaborn-talk\", \"seaborn-whitegrid\"])\n plt.rcParams['axes.labelsize'] = 20 # 18\n plt.rcParams['xtick.labelsize'] = 14\n plt.rcParams['ytick.labelsize'] = 18 #16\n plt.rcParams['legend.fontsize'] = 16\n plt.rcParams['legend.handlelength'] = 2\n # plt.tight_layout()\n\n # If figsize is specified\n if figsize != False:\n plt.figure(figsize=figsize)\n\n # Plot\n x = range(len(data[y_label]))\n y = data[y_label]\n #barlist = plt.bar(x, y)\n fig, ax = plt.subplots(figsize=figsize)\n barlist = ax.bar(x, y)\n\n plt.xticks(x, data[x_label], rotation=90)\n plt.errorbar(x, y, yerr=(data[delta_lower_yerr_label], data[delta_upper_yerr_label]),\n fmt=\"none\", ecolor=error_color, capsize=3, elinewidth=2, capthick=True)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n\n # Reset color of bars based on color label\n #print(\"data.columns:\\n\",data.columns)\n #print(\"\\nData:\\n\", data)\n for i, c_label in enumerate(data.loc[:, color_label]):\n barlist[i].set_color(bar_color_dict[c_label])\n\n # create legend\n from matplotlib.lines import Line2D\n if color_label == 'category':\n custom_lines = [Line2D([0], [0], color=bar_color_dict[\"Physical\"], lw=5),\n Line2D([0], [0], color=bar_color_dict[\"Empirical\"], lw=5),\n Line2D([0], [0], color=bar_color_dict[\"Mixed\"], lw=5),\n Line2D([0], [0], color=bar_color_dict[\"Other\"], lw=5)]\n elif color_label == 'reassigned_category':\n custom_lines = [Line2D([0], [0], color=bar_color_dict[\"Physical (MM)\"], lw=5),\n Line2D([0], [0], color=bar_color_dict[\"Empirical\"], lw=5),\n Line2D([0], [0], color=bar_color_dict[\"Mixed\"], lw=5),\n Line2D([0], [0], color=bar_color_dict[\"Physical (QM)\"], lw=5)]\n elif color_label == 'type':\n custom_lines = [Line2D([0], [0], color=bar_color_dict[\"Standard\"], lw=5),\n Line2D([0], [0], color=bar_color_dict[\"Reference\"], lw=5)]\n ax.legend(custom_lines, category_list)\n\n\ndef barplot(df, x_label, y_label, title):\n \"\"\"Creates bar plot of a given dataframe.\n\n Args:\n df: Pandas Dataframe that should have columns with columnnames specified in other arguments.\n x_label: str, column name of x axis 
categories\n y_label: str, column name of y axis values\n title: str, the title of the plot\n\n \"\"\"\n # Plot style\n plt.close()\n plt.style.use([\"seaborn-talk\", \"seaborn-whitegrid\"])\n plt.rcParams['axes.labelsize'] = 18\n plt.rcParams['xtick.labelsize'] = 14\n plt.rcParams['ytick.labelsize'] = 16\n #plt.tight_layout()\n\n # Plot\n data = df\n x = range(len(data[y_label]))\n y = data[y_label]\n plt.bar(x, y)\n plt.xticks(x, data[x_label], rotation=90)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n if len(title) > 70:\n plt.title(title[:70]+\"...\")\n else:\n plt.title(title)\n plt.tight_layout()\n\n# =============================================================================\n# PLOTTING FUNCTIONS FOR QQ-PLOT\n#\n# Methods from uncertain_check.py David L. Mobley wrote for the SAMPL4 analysis\n# =============================================================================\n\ndef JCAMDdict(w = 1, square = False, fontsize = 8):\n \"\"\"\n This method returns a dictionary with the figure settings for JCAMD, including font sizes and markersize defaults. Then you can edit the dictionary once you've called this method.\n w should be 0 to 4 corresponding to [39, 84, 129, 174, 267] mm figure width\n If square is true then the height and width will be equal, otherwise height will be determined as the golden ratio * width\n \"\"\"\n # options for figure width on JCAMD in mm\n widths = [39, 84, 129, 174, 267]\n # Convert width in mm to inches\n wid = widths[w]* 0.0393701 # Convert to inches\n\n # Determine height\n if square:\n height = wid\n else:\n height = wid * (np.sqrt(5.0) - 1.0) / 2.0\n\n parameters = {'backend': 'ps',\n 'axes.labelsize': fontsize,\n 'xtick.labelsize': fontsize,\n 'ytick.labelsize': fontsize,\n 'font.size': fontsize,\n 'figure.figsize': [wid, height],\n 'legend.fontsize': 6,\n 'font.family':'sans-serif',\n 'font.sans-serif':'arial',\n 'lines.markersize': 3,\n 'lines.linewidth': 0.25,\n 'figure.autolayout' : False,\n 'figure.subplot.right': 0.6,\n 'figure.subplot.left': 0.15,\n 'figure.subplot.bottom': 0.2,\n 'figure.subplot.top': 0.85\n }\n\n return parameters\n\ndef makeQQplot(X, Y, slope, title, xLabel =\"Expected fraction within range\" , yLabel =\"Fraction of predictions within range\", fileName = \"QQplot.pdf\", uncLabel = 'Model Unc.', leg = [1.02, 0.98, 2, 1], ax1 = None):\n \"\"\"\n Provided with experimental and calculated values (and their associated uncertainties) in the form of list-like objects.\n Provides the analysis to make a QQ-plot using the Gaussian integral methods David wrote for SAMPL4 that are included above.\n Makes a file of the plot and returns the \"error slope\" as a float and the figure of the created plot\n \"\"\"\n if ax1 == None:\n axReturn = False\n # Get plot parameters for JCAMD\n # plt.rcParams.update(JCAMDdict())\n plt.close()\n plt.style.use([\"seaborn-talk\", \"seaborn-whitegrid\"])\n plt.rcParams['axes.labelsize'] = 18\n plt.rcParams['xtick.labelsize'] = 14\n plt.rcParams['ytick.labelsize'] = 16\n plt.rcParams['figure.figsize'] = 6, 6\n\n # Set up plot\n #fig1 = plt.figure(1, figsize=(6,6))\n #plt.ylim = (0,1)\n #plt.xlim = (0,1)\n #plt.xlabel(xLabel)\n #plt.ylabel(yLabel)\n #plt.title(title, fontsize=20)\n #ax1 = fig1.add_subplot(111)\n\n # New way to plot with subplots\n fig1, ax1 = plt.subplots(1,1)\n ax1.set_xlim(0,1)\n ax1.set_ylim(0,1)\n ax1.set_xlabel(xLabel)\n ax1.set_ylabel(yLabel)\n ax1.set_title(title, fontsize=20)\n\n else:\n axReturn = True\n # Add data to plot\n 
p1 = ax1.plot(X,Y,'bo', label = uncLabel)\n\n # Add x=y line\n p2 = ax1.plot(X,X,'k-', label = r'$X=Y$')\n\n # X data needs to be a column vector to use linalg.lstsq\n p3 = ax1.plot(X, slope*X, 'r-', label = 'Slope %.2f' % slope)\n\n # Build Legend\n handles = [p1,p2,p3]\n if leg != None:\n ax1.legend(bbox_to_anchor = (leg[0], leg[1]), loc = leg[2], ncol = leg[3], borderaxespad = 0.)\n\n if axReturn:\n return ax1\n else:\n # Adjust spacing then save and close figure\n plt.savefig(fileName)\n plt.close(fig1)\n\n\n# =============================================================================\n# UTILITY CLASSES\n# =============================================================================\n\nclass IgnoredSubmissionError(Exception):\n \"\"\"Exception used to signal a submission that must be ignored.\"\"\"\n pass\n\n\nclass BadFormatError(Exception):\n \"\"\"Exception used to signal a submission with unexpected formatting.\"\"\"\n pass\n\n\nclass SamplSubmission:\n \"\"\"A generic SAMPL submission.\n Parameters\n ----------\n file_path : str\n The path to the submission file.\n Raises\n ------\n IgnoredSubmission\n If the submission ID is among the ignored submissions.\n \"\"\"\n # The D3R challenge IDs that are handled by this class.\n CHALLENGE_IDS = {1559}\n\n # The IDs of the submissions used for testing the validation.\n TEST_SUBMISSIONS = {}\n\n # The IDs of submissions used for reference calculations\n REF_SUBMISSIONS = ['REF01', 'REF02', 'REF03', 'REF04', 'REF05', 'REF06', 'REF07', 'REF08',\n 'REF09', 'REF10', 'REF11', 'REF12', 'REF13', 'NULL0']\n\n\n # Section of the submission file.\n SECTIONS = {}\n\n # Sections in CSV format with columns names.\n CSV_SECTIONS = {}\n\n def __init__(self, file_path, user_map, method_map):\n file_name = os.path.splitext(os.path.basename(file_path))[0]\n print(file_name)\n file_data = file_name.split('-')\n\n # Check if this is a deleted submission.\n if file_data[0] == 'DELETED':\n raise IgnoredSubmissionError('This submission was deleted.')\n\n # Check if this is a test submission.\n self.receipt_id = file_data[0]\n if self.receipt_id in self.TEST_SUBMISSIONS:\n raise IgnoredSubmissionError('This submission has been used for tests.')\n\n # Check if this is a reference submission\n self.reference_submission = False\n if self.receipt_id in self.REF_SUBMISSIONS:\n self.reference_submission = True\n\n # Check this is the correct challenge.\n self.challenge_id = int(file_data[1])\n assert self.challenge_id in self.CHALLENGE_IDS\n\n # Store user map information.\n user_map_record = user_map[user_map.receipt_id == self.receipt_id]\n assert len(user_map_record) == 1\n user_map_record = user_map_record.iloc[0]\n\n self.id = user_map_record.id\n self.participant = user_map_record.firstname + ' ' + user_map_record.lastname\n self.participant_id = user_map_record.uid\n #self.participant_email = user_map_record.email\n #assert self.challenge_id == user_map_record.component\n\n # Store method map information\n method_map_record = method_map[method_map.receipt_id == self.receipt_id]\n method_map_record = method_map_record.iloc[0]\n self.reassigned_category = method_map_record.reassigned_category\n\n @classmethod\n def _read_lines(cls, file_path):\n \"\"\"Generator to read the file and discard blank lines and comments.\"\"\"\n with open(file_path, 'r', encoding='utf-8-sig') as f:\n for line in f:\n # Strip whitespaces.\n line = line.strip()\n # Don't return blank lines and comments.\n if line != '' and line[0] != '#':\n yield line\n\n @classmethod\n def 
_load_sections(cls, file_path):\n \"\"\"Load the data in the file and separate it by sections.\"\"\"\n sections = {}\n current_section = None\n for line in cls._read_lines(file_path):\n # Check if this is a new section.\n if line[:-1] in cls.SECTIONS:\n current_section = line[:-1]\n else:\n if current_section is None:\n import pdb\n pdb.set_trace()\n try:\n sections[current_section].append(line)\n except KeyError:\n sections[current_section] = [line]\n\n # Check that all the sections have been loaded.\n found_sections = set(sections.keys())\n if found_sections != cls.SECTIONS:\n raise BadFormatError('Missing sections: {}.'.format(found_sections - cls.SECTIONS))\n\n # Create a Pandas dataframe from the CSV format.\n for section_name in cls.CSV_SECTIONS:\n csv_str = io.StringIO('\\n'.join(sections[section_name]))\n columns = cls.CSV_SECTIONS[section_name]\n id_column = columns[0]\n section = pd.read_csv(csv_str, index_col=id_column, names=columns, skipinitialspace=True)\n #section = pd.read_csv(csv_str, names=columns, skipinitialspace=True)\n sections[section_name] = section\n return sections\n\n @classmethod\n def _create_comparison_dataframe(cls, column_name, submission_data, experimental_data):\n \"\"\"Create a single dataframe with submission and experimental data.\"\"\"\n # Filter only the systems IDs in this submissions.\n\n\n experimental_data = experimental_data[experimental_data.index.isin(submission_data.index)] # match by column index\n # Fix the names of the columns for labelling.\n submission_series = submission_data[column_name]\n submission_series.name += ' (calc)'\n experimental_series = experimental_data[column_name]\n experimental_series.name += ' (expt)'\n\n # Concatenate the two columns into a single dataframe.\n return pd.concat([experimental_series, submission_series], axis=1)\n\n# =============================================================================\n# LOGP PREDICTION CHALLENGE\n# =============================================================================\n\nclass logPSubmission(SamplSubmission):\n \"\"\"A submission for logP challenge.\n\n Parameters\n ----------\n file_path : str\n The path to the submission file\n\n Raises\n ------\n IgnoredSubmission\n If the submission ID is among the ignored submissions.\n\n \"\"\"\n\n # The D3R challenge IDs that are handled by this class.\n CHALLANGE_IDS = {1559}\n\n # The IDs of the submissions that will be ignored in the analysis.\n TEST_SUBMISSIONS = {}\n\n # Section of the submission file.\n SECTIONS = {'Predictions', 'Name', 'Software', 'Category', 'Method'}\n\n # Sections in CSV format with columns names.\n CSV_SECTIONS = {'Predictions': (\"Molecule ID\", \"logP mean\", \"logP SEM\", \"logP model uncertainty\")}\n\n\n def __init__(self, file_path, user_map, method_map):\n super().__init__(file_path, user_map, method_map)\n\n file_name = os.path.splitext(os.path.basename(file_path))[0]\n file_data = file_name.split('-')\n\n # Check if this is a type III submission\n self.submission_type = file_data[2]\n assert self.submission_type in ['logP']\n\n self.file_name, self.index = file_data[3:]\n self.index = int(self.index)\n\n # Load predictions.\n sections = self._load_sections(file_path) # From parent-class.\n self.data = sections['Predictions'] # This is a pandas DataFrame.\n self.name = sections['Name'][0]\n self.category = sections['Category'][0] # New section for logP challenge.\n\n def compute_logP_statistics(self, experimental_data, stats_funcs):\n data = self._create_comparison_dataframe('logP mean', 
self.data, experimental_data)\n\n # Create lists of stats functions to pass to compute_bootstrap_statistics.\n stats_funcs_names, stats_funcs = zip(*stats_funcs.items())\n bootstrap_statistics = compute_bootstrap_statistics(data.as_matrix(), stats_funcs, n_bootstrap_samples=10000) #10000\n\n # Return statistics as dict preserving the order.\n return collections.OrderedDict((stats_funcs_names[i], bootstrap_statistics[i])\n for i in range(len(stats_funcs)))\n\n def compute_logP_model_uncertainty_statistics(self,experimental_data):\n\n # Create a dataframe for data necessary for error slope analysis\n expt_logP_series = experimental_data[\"logP mean\"]\n expt_logP_SEM_series = experimental_data[\"logP SEM\"]\n pred_logP_series = self.data[\"logP mean\"]\n pred_logP_SEM_series = self.data[\"logP SEM\"]\n pred_logP_mod_unc_series = self.data[\"logP model uncertainty\"]\n\n # Concatenate the columns into a single dataframe.\n data_exp = pd.concat([expt_logP_series, expt_logP_SEM_series], axis=1)\n data_exp = data_exp.rename(index=str, columns={\"logP mean\": \"logP mean (expt)\",\n \"logP SEM\": \"logP SEM (expt)\"})\n\n data_mod_unc = pd.concat([data_exp, pred_logP_series, pred_logP_SEM_series, pred_logP_mod_unc_series], axis=1)\n data_mod_unc = data_mod_unc.rename(index=str, columns={\"logP mean (calc)\": \"logP mean (calc)\",\n \"logP SEM\": \"logP SEM (calc)\",\n \"logP model uncertainty\": \"logP model uncertainty\"})\n #print(\"data_mod_unc:\\n\", data_mod_unc)\n\n # Compute QQ-Plot Error Slope (ES)\n calc = data_mod_unc.loc[:, \"logP mean (calc)\"].values\n expt = data_mod_unc.loc[:, \"logP mean (expt)\"].values\n dcalc = data_mod_unc.loc[:, \"logP model uncertainty\"].values\n dexpt = data_mod_unc.loc[:, \"logP SEM (expt)\"].values\n n_bootstrap_samples = 1000 #1000\n\n X, Y, error_slope, error_slope_std, slopes = getQQdata(calc, expt, dcalc, dexpt, boot_its=n_bootstrap_samples)\n #print(X)\n #print(Y)\n #print(\"ES:\", error_slope)\n #print(\"ES std:\", error_slope_std)\n #print(\"Bootstrapped Error Slopes:\", slopes)\n QQplot_data = [X, Y, error_slope]\n\n # Compute 95% confidence intervals of Error Slope\n percentile = 0.95\n percentile_index = int(np.floor(n_bootstrap_samples * (1 - percentile) / 2)) - 1\n\n #for stats_func_idx, samples_statistics in enumerate(bootstrap_samples_statistics):\n samples_statistics = np.asarray(slopes)\n samples_statistics.sort()\n stat_lower_percentile = samples_statistics[percentile_index]\n stat_higher_percentile = samples_statistics[-percentile_index + 1]\n confidence_interval = (stat_lower_percentile, stat_higher_percentile)\n\n model_uncertainty_statistics = [error_slope, confidence_interval, samples_statistics]\n\n\n return model_uncertainty_statistics, QQplot_data\n\n\n# =============================================================================\n# UTILITY FUNCTIONS\n# =============================================================================\n\n\ndef load_submissions(directory_path, user_map, method_map):\n \"\"\"Load submissions from a specified directory using a specified user map.\n Optional argument:\n ref_ids: List specifying submission IDs (alphanumeric, typically) of\n reference submissions which are to be ignored/analyzed separately.\n Returns: submissions\n \"\"\"\n submissions = []\n for file_path in glob.glob(os.path.join(directory_path, '*.csv')):\n try:\n submission = logPSubmission(file_path, user_map, method_map)\n\n except IgnoredSubmissionError:\n continue\n submissions.append(submission)\n return submissions\n\n\n\nclass 
logPSubmissionCollection:\n \"\"\"A collection of logP submissions.\"\"\"\n\n LOGP_CORRELATION_PLOT_BY_METHOD_PATH_DIR = 'logPCorrelationPlots'\n LOGP_CORRELATION_PLOT_WITH_SEM_BY_METHOD_PATH_DIR = 'logPCorrelationPlotsWithSEM'\n LOGP_CORRELATION_PLOT_BY_LOGP_PATH_DIR = 'error_for_each_logP.pdf'\n ABSOLUTE_ERROR_VS_LOGP_PLOT_PATH_DIR = 'AbsoluteErrorPlots'\n\n\n def __init__(self, submissions, experimental_data, output_directory_path, logP_submission_collection_file_path, ignore_refcalcs = True):\n\n\n # Check if submission collection file already exists.\n if os.path.isfile(logP_submission_collection_file_path):\n print(\"Analysis will be done using the existing logP_submission_collection.csv file.\")\n\n self.data = pd.read_csv(logP_submission_collection_file_path)\n print(\"\\n SubmissionCollection: \\n\")\n print(self.data)\n\n # Populate submission.data dataframes parsing sections of collection file.\n for submission in submissions:\n data = []\n\n receipt_ID = submission.receipt_id\n if submission.reference_submission and ignore_refcalcs:\n continue\n df_collection_of_each_submission = self.data.loc[self.data[\"receipt_id\"] == receipt_ID ]\n\n # Transform into Pandas DataFrame.\n submission.data = pd.DataFrame()\n submission.data[\"logP mean\"] = df_collection_of_each_submission[\"logP (calc)\"]\n submission.data[\"logP SEM\"] = df_collection_of_each_submission[\"logP SEM (calc)\"]\n submission.data[\"Molecule ID\"] = df_collection_of_each_submission[\"Molecule ID\"]\n submission.data[\"logP model uncertainty\"] = df_collection_of_each_submission[\"logP model uncertainty\"]\n\n submission.data.set_index(\"Molecule ID\", inplace=True)\n\n # Transform into Pandas DataFrame.\n self.output_directory_path = output_directory_path\n\n #print(\"submission.data:\\n\", submission.data)\n\n else: # Build collection dataframe from the beginning.\n # Build full logP collection table.\n data = []\n\n # Submissions for logP.\n for submission in submissions:\n if submission.reference_submission and ignore_refcalcs:\n continue\n print(\"submission.data:\\n\", submission.data)\n\n\n for mol_ID, series in submission.data.iterrows():\n #print(\"mol_ID:\", mol_ID)\n #print(\"series:\\n\", series)\n\n #mol_ID = series[1][\"Molecule ID\"]\n\n #pKa_mean_exp = experimental_data.loc[experimental_data[\"pKa ID\"] == pKa_ID, 'pKa mean'].values[0]\n logP_mean_exp = experimental_data.loc[mol_ID, 'logP mean']\n logP_SEM_exp = experimental_data.loc[mol_ID, 'logP SEM']\n\n #pKa_mean_pred = submission.data.loc[submission.data[\"pKa ID\"] == pKa_ID, 'pKa mean'].values[0]\n logP_mean_pred = submission.data.loc[mol_ID, \"logP mean\"]\n logP_SEM_pred = submission.data.loc[mol_ID, \"logP SEM\"]\n logP_model_uncertainty = submission.data.loc[mol_ID, \"logP model uncertainty\"]\n\n data.append({\n 'receipt_id': submission.receipt_id,\n 'participant': submission.participant,\n 'name': submission.name,\n 'category': submission.category,\n 'reassigned category': submission.reassigned_category,\n 'Molecule ID': mol_ID,\n 'logP (calc)': logP_mean_pred,\n 'logP SEM (calc)': logP_SEM_pred,\n 'logP (exp)': logP_mean_exp,\n 'logP SEM (exp)': logP_SEM_exp,\n '$\\Delta$logP error (calc - exp)': logP_mean_pred - logP_mean_exp,\n 'logP model uncertainty': logP_model_uncertainty\n })\n\n # Transform into Pandas DataFrame.\n self.data = pd.DataFrame(data=data)\n self.output_directory_path = output_directory_path\n\n print(\"\\n SubmissionCollection: \\n\")\n print(self.data)\n\n # Create general output directory.\n 
os.makedirs(self.output_directory_path, exist_ok=True)\n\n        # Save collection.data dataframe in a CSV file.\n        self.data.to_csv(logP_submission_collection_file_path)\n\n    def generate_correlation_plots(self):\n        # logP correlation plots.\n        output_dir_path = os.path.join(self.output_directory_path,\n                                       self.LOGP_CORRELATION_PLOT_BY_METHOD_PATH_DIR)\n        os.makedirs(output_dir_path, exist_ok=True)\n        for receipt_id in self.data.receipt_id.unique():\n            # Skip NULL0 submission\n            if receipt_id == \"NULL0\":\n                continue\n\n            data = self.data[self.data.receipt_id == receipt_id]\n            title = '{} ({})'.format(receipt_id, data.name.unique()[0])\n\n            plt.close('all')\n            plot_correlation(x='logP (exp)', y='logP (calc)',\n                             data=data, title=title, kind='joint')\n            plt.tight_layout()\n            # plt.show()\n            output_path = os.path.join(output_dir_path, '{}.pdf'.format(receipt_id))\n            plt.savefig(output_path)\n\n    def generate_correlation_plots_with_SEM(self):\n        # logP correlation plots.\n        output_dir_path = os.path.join(self.output_directory_path,\n                                       self.LOGP_CORRELATION_PLOT_WITH_SEM_BY_METHOD_PATH_DIR)\n        os.makedirs(output_dir_path, exist_ok=True)\n        for receipt_id in self.data.receipt_id.unique():\n\n            # Skip NULL0 submission\n            if receipt_id == \"NULL0\":\n                continue\n\n            data = self.data[self.data.receipt_id == receipt_id]\n            title = '{} ({})'.format(receipt_id, data.name.unique()[0])\n\n            plt.close('all')\n            plot_correlation_with_SEM(x_lab='logP (exp)', y_lab='logP (calc)',\n                                      x_err_lab='logP SEM (exp)', y_err_lab='logP SEM (calc)',\n                                      data=data, title=title)\n            plt.tight_layout()\n            # plt.show()\n            output_path = os.path.join(output_dir_path, '{}.pdf'.format(receipt_id))\n            plt.savefig(output_path)\n\n    def generate_molecules_plot(self):\n        # Correlation plot by molecules.\n        plt.close('all')\n        # sort_values expects booleans in 'ascending', not the string \"True\".\n        data_ordered_by_mol_ID = self.data.sort_values([\"Molecule ID\"], ascending=[True])\n        sns.set(rc={'figure.figsize': (8.27, 11.7)})\n        sns.violinplot(y='Molecule ID', x='$\\\\Delta$logP error (calc - exp)', data=data_ordered_by_mol_ID,\n                       inner='point', linewidth=1, width=1.2)\n        plt.tight_layout()\n        # plt.show()\n        plt.savefig(os.path.join(self.output_directory_path, self.LOGP_CORRELATION_PLOT_BY_LOGP_PATH_DIR))\n\n    def generate_absolute_error_vs_molecule_ID_plot(self):\n        \"\"\"\n        For each method a bar plot is generated so that the absolute error of each molecule can be compared.\n        \"\"\"\n        # Setup output directory\n        output_dir_path = os.path.join(self.output_directory_path,\n                                       self.ABSOLUTE_ERROR_VS_LOGP_PLOT_PATH_DIR)\n        os.makedirs(output_dir_path, exist_ok=True)\n\n        # Calculate absolute errors.\n        self.data[\"absolute error\"] = np.absolute(self.data.loc[:, \"$\\\\Delta$logP error (calc - exp)\"])\n\n        # Create a separate plot for each submission.\n        for receipt_id in self.data.receipt_id.unique():\n            data = self.data[self.data.receipt_id == receipt_id]\n            title = '{} ({})'.format(receipt_id, data.name.unique()[0])\n\n            plt.close('all')\n            barplot(df=data, x_label=\"Molecule ID\", y_label=\"absolute error\", title=title)\n            output_path = os.path.join(output_dir_path, '{}.pdf'.format(receipt_id))\n            plt.savefig(output_path)\n\n\ndef generate_statistics_tables(submissions, stats_funcs, directory_path, file_base_name,\n                               sort_stat=None, ordering_functions=None,\n                               latex_header_conversions=None, ignore_refcalcs = True):\n    stats_names = list(stats_funcs.keys())\n    ci_suffixes = ('', '_lower_bound', '_upper_bound')\n\n    # Collect the records for the DataFrames.\n    statistics_csv = []\n    statistics_latex = []\n    statistics_plot = []\n\n
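    # Illustrative note (sketch only, not used by the pipeline): with, e.g.,\n    # stats_names == ['RMSE', 'MAE'], the per-method CSV columns expand as\n    #     [name + suffix for name in stats_names for suffix in ci_suffixes]\n    # i.e. ['RMSE', 'RMSE_lower_bound', 'RMSE_upper_bound',\n    #       'MAE', 'MAE_lower_bound', 'MAE_upper_bound'].\n\n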
    # Collect the records for QQ Plot\n    # Dictionary of receipt ID: [X, Y, error_slope]\n    QQplot_dict = {}\n\n    for i, submission in enumerate(submissions):\n        receipt_id = submission.receipt_id\n        category = submission.category\n        reassigned_category = submission.reassigned_category\n\n        # Pull submission type\n        submission_type = 'Standard'\n        if submission.reference_submission:\n            submission_type = 'Reference'\n\n        # Ignore reference calculation, if applicable\n        if submission.reference_submission and ignore_refcalcs:\n            continue\n\n        print('\\rGenerating bootstrap statistics for submission {} ({}/{})'\n              ''.format(receipt_id, i + 1, len(submissions)), end='')\n\n        # NOTE: experimental_data is the module-level global populated in __main__.\n        bootstrap_statistics = submission.compute_logP_statistics(experimental_data, stats_funcs)\n\n        # Compute error slope\n        error_slope_bootstrap_statistics, QQplot_data = submission.compute_logP_model_uncertainty_statistics(experimental_data)\n        #print(\"error_slope_bootstrap_statistics:\\n\")\n        #print(error_slope_bootstrap_statistics)\n\n        # Add data to the QQplot dictionary\n        QQplot_dict[receipt_id] = QQplot_data\n\n        # Add error slope and CI to bootstrap_statistics\n        bootstrap_statistics.update({'ES': error_slope_bootstrap_statistics})\n        #print(\"bootstrap_statistics:\\n\", bootstrap_statistics)\n\n        # Organize data to construct CSV and PDF versions of statistics tables\n        record_csv = {}\n        record_latex = {}\n        for stats_name, (stats, (lower_bound, upper_bound), bootstrap_samples) in bootstrap_statistics.items():\n            # For CSV and JSON we put confidence interval in separate columns.\n            for suffix, info in zip(ci_suffixes, [stats, lower_bound, upper_bound]):\n                record_csv[stats_name + suffix] = info\n\n            # For the PDF, print bootstrap CI in the same column.\n            stats_name_latex = latex_header_conversions.get(stats_name, stats_name)\n            record_latex[stats_name_latex] = '{:.2f} [{:.2f}, {:.2f}]'.format(stats, lower_bound, upper_bound)\n\n            # For the violin plot, we need all the bootstrap statistics series.\n            for bootstrap_sample in bootstrap_samples:\n                statistics_plot.append(dict(ID=receipt_id, name=submission.name, category=category,\n                                            statistics=stats_name_latex, value=bootstrap_sample))\n\n        statistics_csv.append({'ID': receipt_id, 'name': submission.name, 'category': category, 'reassigned_category': reassigned_category, 'type': submission_type, **record_csv})\n        escaped_name = submission.name.replace('_', '\\\\_')\n        statistics_latex.append({'ID': receipt_id, 'name': escaped_name, 'category': category, 'reassigned_category': reassigned_category, 'type': submission_type, **record_latex})\n    print()\n    print(\"statistics_csv:\\n\", statistics_csv)\n    print()\n\n\n    # Write QQplot_dict to a pickle file for plotting later\n    #print(\"QQplot_dict:\\n\", QQplot_dict)\n    # NOTE: output_directory_path is also a module-level global set in __main__.\n    QQplot_directory_path = os.path.join(output_directory_path, \"QQPlots\")\n    os.makedirs(QQplot_directory_path, exist_ok=True)\n    QQplot_dict_filename = os.path.join(QQplot_directory_path, 'QQplot_dict.pickle')\n\n    with open(QQplot_dict_filename, 'wb') as outfile:\n        pickle.dump(QQplot_dict, outfile)\n\n
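    # Reading the pickle back elsewhere looks like this (illustrative sketch; the\n    # same pattern is used by generate_QQplots_for_model_uncertainty below):\n    #     with open(QQplot_dict_filename, 'rb') as handle:\n    #         QQplot_dict = pickle.load(handle)\n    #     X, Y, error_slope = QQplot_dict[some_receipt_id]\n\n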
    # Convert dictionary to Dataframe to create tables/plots easily.\n    statistics_csv = pd.DataFrame(statistics_csv)\n    statistics_csv.set_index('ID', inplace=True)\n    statistics_latex = pd.DataFrame(statistics_latex)\n    statistics_plot = pd.DataFrame(statistics_plot)\n\n    # Sort by the given statistics.\n    if sort_stat is not None:\n        statistics_csv.sort_values(by=sort_stat, inplace=True)\n        statistics_latex.sort_values(by=latex_header_conversions.get(sort_stat, sort_stat),\n                                     inplace=True)\n\n    # Reorder columns that were scrambled by going through dictionaries.\n    stats_names_csv = [name + suffix for name in stats_names for suffix in ci_suffixes]\n    #print(\"stats_names_csv:\", stats_names_csv)\n    stats_names_latex = [latex_header_conversions.get(name, name) for name in stats_names]\n    #print(\"stats_names_latex:\", stats_names_latex)\n    statistics_csv = statistics_csv[['name', \"category\", \"reassigned_category\", \"type\"] + stats_names_csv + [\"ES\", \"ES_lower_bound\", \"ES_upper_bound\"]]\n    statistics_latex = statistics_latex[['ID', 'name'] + stats_names_latex + [\"ES\"]]  # Add error slope (ES)\n\n    # Create CSV and JSON tables (correct LaTex syntax in column names).\n    os.makedirs(directory_path, exist_ok=True)\n    file_base_path = os.path.join(directory_path, file_base_name)\n    with open(file_base_path + '.csv', 'w') as f:\n        statistics_csv.to_csv(f)\n    with open(file_base_path + '.json', 'w') as f:\n        statistics_csv.to_json(f, orient='index')\n\n    # Create LaTex table.\n    latex_directory_path = os.path.join(directory_path, file_base_name + 'LaTex')\n    os.makedirs(latex_directory_path, exist_ok=True)\n    with open(os.path.join(latex_directory_path, file_base_name + '.tex'), 'w') as f:\n        f.write('\\\\documentclass{article}\\n'\n                '\\\\usepackage[a4paper,margin=0.005in,tmargin=0.5in,lmargin=0.5in,rmargin=0.5in,landscape]{geometry}\\n'\n                '\\\\usepackage{booktabs}\\n'\n                '\\\\usepackage{longtable}\\n'\n                '\\\\pagenumbering{gobble}\\n'\n                '\\\\begin{document}\\n'\n                '\\\\begin{center}\\n'\n                '\\\\scriptsize\\n')\n        statistics_latex.to_latex(f, column_format='|ccccccccc|', escape=False, index=False, longtable=True)\n        f.write('\\\\end{center}\\n'\n                '\\nNotes\\n\\n'\n                '- RMSE: Root mean square error\\n\\n'\n                '- MAE: Mean absolute error\\n\\n'\n                '- ME: Mean error\\n\\n'\n                '- R2: R-squared, square of Pearson correlation coefficient\\n\\n'\n                '- m: slope of the line fit to predicted vs experimental logP values\\n\\n'\n                '- $\\\\tau$: Kendall rank correlation coefficient\\n\\n'\n                '- ES: error slope calculated from the QQ Plots of model uncertainty predictions\\n\\n'\n                '- Mean and 95\\\\% confidence intervals of RMSE, MAE, ME, R2, and m were calculated by bootstrapping with 10000 samples.\\n\\n'\n                '- 95\\\\% confidence intervals of ES were calculated by bootstrapping with 1000 samples.'\n                #'- Some logP predictions were submitted after the submission deadline to be used as a reference method.\\n\\n'\n                '\\\\end{document}\\n')\n\n    # Violin plots by statistics across submissions.\n    plt.close('all')\n    fig, axes = plt.subplots(ncols=len(stats_names), figsize=(12, 0.375 * len(submissions)))\n    for ax, stats_name in zip(axes, stats_names):\n        stats_name_latex = latex_header_conversions.get(stats_name, stats_name)\n        data = statistics_plot[statistics_plot.statistics == stats_name_latex]\n        # Plot ordering submission by statistics.\n        ordering_function = ordering_functions.get(stats_name, lambda x: x)\n        order = sorted(statistics_csv[stats_name].items(), key=lambda x: ordering_function(x[1]))\n        order = [receipt_id for receipt_id, value in order]\n        sns.violinplot(x='value', y='ID', data=data, ax=ax,\n                       order=order, palette='PuBuGn_r', linewidth=0.5, width=1)\n        ax.set_xlabel(stats_name_latex)\n        ax.set_ylabel('')\n        sns.set_style(\"whitegrid\")\n    plt.tight_layout()\n    # plt.show()\n    plt.savefig(file_base_path + '_bootstrap_distributions.pdf')\n\n\n\n\ndef generate_performance_comparison_plots(statistics_filename, directory_path, ignore_refcalcs = False):\n    # Read statistics table\n    statistics_file_path = os.path.join(directory_path, statistics_filename)\n    df_statistics = pd.read_csv(statistics_file_path)\n
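    # Column layout expected from generate_statistics_tables (illustrative):\n    #   ID, name, category, reassigned_category, type,\n    #   RMSE, RMSE_lower_bound, RMSE_upper_bound, ..., ES, ES_lower_bound, ES_upper_bound\n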
#print(\"\\n df_statistics \\n\", df_statistics)\n\n # RMSE comparison plot\n barplot_with_CI_errorbars(df=df_statistics, x_label=\"ID\", y_label=\"RMSE\", y_lower_label=\"RMSE_lower_bound\",\n y_upper_label=\"RMSE_upper_bound\", figsize=(28,10)) # figsize=(22,10)\n plt.savefig(directory_path + \"/RMSE_vs_method_plot.pdf\")\n\n # RMSE comparison plot with each category colored separately\n barplot_with_CI_errorbars_colored_by_label(df=df_statistics, x_label=\"ID\", y_label=\"RMSE\",\n y_lower_label=\"RMSE_lower_bound\",\n y_upper_label=\"RMSE_upper_bound\", color_label = \"reassigned_category\", figsize=(28,10))\n plt.ylim(0.0, 7.0)\n plt.savefig(directory_path + \"/RMSE_vs_method_plot_colored_by_method_category.pdf\")\n\n # Do same graph with colorizing by reference calculation\n if not ignore_refcalcs:\n barplot_with_CI_errorbars_colored_by_label(df=df_statistics, x_label=\"ID\", y_label=\"RMSE\",\n y_lower_label=\"RMSE_lower_bound\",\n y_upper_label=\"RMSE_upper_bound\", color_label = \"type\", figsize=(28,10))\n plt.ylim(0.0, 7.0)\n plt.savefig(directory_path + \"/RMSE_vs_method_plot_colored_by_type.pdf\")\n\n # MAE comparison plot\n # Reorder based on MAE value\n df_statistics_MAE = df_statistics.sort_values(by=\"MAE\", inplace=False)\n\n barplot_with_CI_errorbars(df=df_statistics_MAE, x_label=\"ID\", y_label=\"MAE\", y_lower_label=\"MAE_lower_bound\",\n y_upper_label=\"MAE_upper_bound\", figsize=(28,10))\n plt.savefig(directory_path + \"/MAE_vs_method_plot.pdf\")\n\n # MAE comparison plot with each category colored separately\n barplot_with_CI_errorbars_colored_by_label(df=df_statistics_MAE, x_label=\"ID\", y_label=\"MAE\",\n y_lower_label=\"MAE_lower_bound\",\n y_upper_label=\"MAE_upper_bound\", color_label=\"reassigned_category\",\n figsize=(28, 10))\n plt.ylim(0.0, 7.0)\n plt.savefig(directory_path + \"/MAE_vs_method_plot_colored_by_method_category.pdf\")\n\n # Do same graph with colorizing by reference calculation\n if not ignore_refcalcs:\n # MAE comparison plot with each category colored separately\n barplot_with_CI_errorbars_colored_by_label(df=df_statistics_MAE, x_label=\"ID\", y_label=\"MAE\",\n y_lower_label=\"MAE_lower_bound\",\n y_upper_label=\"MAE_upper_bound\", color_label=\"type\",\n figsize=(28, 10))\n plt.ylim(0.0, 7.0)\n plt.savefig(directory_path + \"/MAE_vs_method_plot_colored_by_type.pdf\")\n\n\n # Kendall's Tau comparison plot\n # Reorder based on Kendall's Tau value\n df_statistics_tau = df_statistics.sort_values(by=\"kendall_tau\", inplace=False, ascending=False)\n\n barplot_with_CI_errorbars(df=df_statistics_tau, x_label=\"ID\", y_label=\"kendall_tau\",\n y_lower_label=\"kendall_tau_lower_bound\",\n y_upper_label=\"kendall_tau_upper_bound\", figsize=(28, 10))\n plt.savefig(directory_path + \"/kendalls_tau_vs_method_plot.pdf\")\n\n # Kendall's Tau comparison plot with each category colored separately\n barplot_with_CI_errorbars_colored_by_label(df=df_statistics_tau, x_label=\"ID\", y_label=\"kendall_tau\",\n y_lower_label=\"kendall_tau_lower_bound\",\n y_upper_label=\"kendall_tau_upper_bound\", color_label=\"reassigned_category\",\n figsize=(28, 10))\n plt.savefig(directory_path + \"/kendalls_tau_vs_method_plot_colored_by_method_category.pdf\")\n\n\n # Do same graph with colorizing by reference calculation\n if not ignore_refcalcs:\n # MAE comparison plot with each category colored separately\n barplot_with_CI_errorbars_colored_by_label(df=df_statistics_tau, x_label=\"ID\", y_label=\"kendall_tau\",\n y_lower_label=\"kendall_tau_lower_bound\",\n 
y_upper_label=\"kendall_tau_upper_bound\", color_label=\"type\",\n figsize=(28, 10))\n plt.savefig(directory_path + \"/kendalls_tau_vs_method_plot_colored_by_type.pdf\")\n\n\n\n # R-squared comparison plot\n # Reorder based on R-squared\n df_statistics_R2 = df_statistics.sort_values(by=\"R2\", inplace=False, ascending=False)\n\n barplot_with_CI_errorbars(df=df_statistics_R2, x_label=\"ID\", y_label=\"R2\",\n y_lower_label=\"R2_lower_bound\",\n y_upper_label=\"R2_upper_bound\", figsize=(28, 10))\n plt.ylim(0, 1.0)\n plt.savefig(directory_path + \"/Rsquared_vs_method_plot.pdf\")\n\n # R-squared comparison plot with each category colored separately\n barplot_with_CI_errorbars_colored_by_label(df=df_statistics_R2, x_label=\"ID\", y_label=\"R2\",\n y_lower_label=\"R2_lower_bound\",\n y_upper_label=\"R2_upper_bound\", color_label=\"reassigned_category\",\n figsize=(28, 10))\n plt.ylim(0, 1.0)\n plt.savefig(directory_path + \"/Rsquared_vs_method_plot_colored_by_method_category.pdf\")\n\n\n # Do same graph with colorizing by reference calculation\n if not ignore_refcalcs:\n # MAE comparison plot with each category colored separately\n barplot_with_CI_errorbars_colored_by_label(df=df_statistics_R2, x_label=\"ID\", y_label=\"R2\",\n y_lower_label=\"R2_lower_bound\",\n y_upper_label=\"R2_upper_bound\", color_label=\"type\",\n figsize=(28, 10))\n plt.ylim(0, 1.0)\n plt.savefig(directory_path + \"/Rsquared_vs_method_plot_colored_by_type.pdf\")\n\n\n\n # Plot RMSE, MAE, Kendall's Tau, and R-squared comparison plots for each category separately\n #category_list = [\"Physical\",\"Empirical\", \"Mixed\", \"Other\"]\n category_list = [\"Physical (MM)\", \"Empirical\", \"Mixed\", \"Physical (QM)\"] # Reassigned categories\n\n # New labels for file naming for reassigned categories\n reassigned_category_path_label_dict = {\"Physical (MM)\": \"Physical_MM\",\n \"Empirical\": \"Empirical\",\n \"Mixed\": \"Mixed\",\n \"Physical (QM)\": \"Physical_QM\"}\n\n\n for category in category_list:\n print(\"Reassigned category: \",category)\n #print(\"df_statistics.columns:\\n\", df_statistics.columns)\n\n # Take subsection of dataframe for each category\n df_statistics_1category = df_statistics.loc[df_statistics['reassigned_category'] == category]\n df_statistics_MAE_1category = df_statistics_MAE.loc[df_statistics_MAE['reassigned_category'] == category]\n df_statistics_tau_1category = df_statistics_tau.loc[df_statistics_tau['reassigned_category'] == category]\n df_statistics_R2_1category = df_statistics_R2.loc[df_statistics_R2['reassigned_category'] == category]\n\n # RMSE comparison plot for each category\n barplot_with_CI_errorbars(df=df_statistics_1category, x_label=\"ID\", y_label=\"RMSE\", y_lower_label=\"RMSE_lower_bound\",\n y_upper_label=\"RMSE_upper_bound\", figsize=(12, 10))\n plt.title(\"Method category: {}\".format(category), fontdict={'fontsize': 22})\n plt.ylim(0.0,7.0)\n plt.savefig(directory_path + \"/RMSE_vs_method_plot_for_{}_category.pdf\".format(reassigned_category_path_label_dict[category]))\n\n # MAE comparison plot for each category\n barplot_with_CI_errorbars(df=df_statistics_MAE_1category, x_label=\"ID\", y_label=\"MAE\",\n y_lower_label=\"MAE_lower_bound\",\n y_upper_label=\"MAE_upper_bound\", figsize=(12, 10))\n plt.title(\"Method category: {}\".format(category), fontdict={'fontsize': 22})\n plt.ylim(0.0, 7.0)\n plt.savefig(directory_path + \"/MAE_vs_method_plot_for_{}_category.pdf\".format(reassigned_category_path_label_dict[category]))\n\n # Kendall's Tau comparison plot for each category\n 
        barplot_with_CI_errorbars(df=df_statistics_tau_1category, x_label=\"ID\", y_label=\"kendall_tau\",\n                                  y_lower_label=\"kendall_tau_lower_bound\",\n                                  y_upper_label=\"kendall_tau_upper_bound\", figsize=(12, 10))\n        plt.title(\"Method category: {}\".format(category), fontdict={'fontsize': 22})\n        plt.savefig(directory_path + \"/kendalls_tau_vs_method_plot_for_{}_category.pdf\".format(reassigned_category_path_label_dict[category]))\n\n        # R-squared comparison plot for each category\n        barplot_with_CI_errorbars(df=df_statistics_R2_1category, x_label=\"ID\", y_label=\"R2\",\n                                  y_lower_label=\"R2_lower_bound\",\n                                  y_upper_label=\"R2_upper_bound\", figsize=(12, 10))\n        plt.title(\"Method category: {}\".format(category), fontdict={'fontsize': 22})\n        plt.ylim(0, 1.0)\n        plt.savefig(directory_path + \"/Rsquared_vs_method_plot_for_{}_category.pdf\".format(reassigned_category_path_label_dict[category]))\n\n\n    # Create plots for Physical methods (both MM and QM methods)\n\n    df_statistics_MM = df_statistics.loc[df_statistics['reassigned_category'] == \"Physical (MM)\"]\n    df_statistics_QM = df_statistics.loc[df_statistics['reassigned_category'] == \"Physical (QM)\"]\n    df_statistics_physical = pd.concat([df_statistics_MM, df_statistics_QM])\n\n    # RMSE comparison plot\n    # Reorder based on RMSE value\n    df_statistics_physical_RMSE = df_statistics_physical.sort_values(by=\"RMSE\", inplace=False)\n\n    # RMSE comparison plot with each category colored separately\n    barplot_with_CI_errorbars_colored_by_label(df=df_statistics_physical_RMSE, x_label=\"ID\", y_label=\"RMSE\",\n                                               y_lower_label=\"RMSE_lower_bound\",\n                                               y_upper_label=\"RMSE_upper_bound\", color_label=\"reassigned_category\",\n                                               figsize=(28, 10))\n    plt.ylim(0.0, 5.0)\n    plt.savefig(directory_path + \"/RMSE_vs_method_plot_physical_methods_colored_by_method_category.pdf\")\n\n    # Repeat the same plot, colored by reference-calculation type\n    if not ignore_refcalcs:\n        # RMSE comparison plot colored by submission type\n        barplot_with_CI_errorbars_colored_by_label(df=df_statistics_physical_RMSE, x_label=\"ID\", y_label=\"RMSE\",\n                                                   y_lower_label=\"RMSE_lower_bound\",\n                                                   y_upper_label=\"RMSE_upper_bound\", color_label=\"type\",\n                                                   figsize=(28, 10))\n        plt.ylim(0.0, 5.0)\n        plt.savefig(directory_path + \"/RMSE_vs_method_plot_physical_methods_colored_by_type.pdf\")\n\n    # MAE comparison plot\n    # Reorder based on MAE value\n    df_statistics_physical_MAE = df_statistics_physical.sort_values(by=\"MAE\", inplace=False)\n\n    # MAE comparison plot with each category colored separately\n    barplot_with_CI_errorbars_colored_by_label(df=df_statistics_physical_MAE, x_label=\"ID\", y_label=\"MAE\",\n                                               y_lower_label=\"MAE_lower_bound\",\n                                               y_upper_label=\"MAE_upper_bound\", color_label=\"reassigned_category\",\n                                               figsize=(28, 10))\n    plt.ylim(0.0, 5.0)\n    plt.savefig(directory_path + \"/MAE_vs_method_plot_physical_methods_colored_by_method_category.pdf\")\n\n    # Repeat the same plot, colored by reference-calculation type\n    if not ignore_refcalcs:\n        # MAE comparison plot colored by submission type\n        barplot_with_CI_errorbars_colored_by_label(df=df_statistics_physical_MAE, x_label=\"ID\", y_label=\"MAE\",\n                                                   y_lower_label=\"MAE_lower_bound\",\n                                                   y_upper_label=\"MAE_upper_bound\", color_label=\"type\",\n                                                   figsize=(28, 10))\n        plt.ylim(0.0, 5.0)\n        plt.savefig(directory_path + \"/MAE_vs_method_plot_physical_methods_colored_by_type.pdf\")\n\n    # Kendall's Tau comparison plot\n    # Reorder based on Tau value\n    df_statistics_physical_tau = df_statistics_physical.sort_values(by=\"kendall_tau\", inplace=False, 
ascending=False)\n\n    # Kendall's Tau comparison plot with each category colored separately\n    barplot_with_CI_errorbars_colored_by_label(df=df_statistics_physical_tau, x_label=\"ID\", y_label=\"kendall_tau\",\n                                               y_lower_label=\"kendall_tau_lower_bound\",\n                                               y_upper_label=\"kendall_tau_upper_bound\", color_label=\"reassigned_category\",\n                                               figsize=(28, 10))\n    plt.savefig(directory_path + \"/kendall_tau_vs_method_plot_physical_methods_colored_by_method_category.pdf\")\n\n    # Repeat the same plot, colored by reference-calculation type\n    if not ignore_refcalcs:\n        # Kendall's Tau comparison plot colored by submission type\n        barplot_with_CI_errorbars_colored_by_label(df=df_statistics_physical_tau, x_label=\"ID\", y_label=\"kendall_tau\",\n                                                   y_lower_label=\"kendall_tau_lower_bound\",\n                                                   y_upper_label=\"kendall_tau_upper_bound\", color_label=\"type\",\n                                                   figsize=(28, 10))\n        plt.savefig(directory_path + \"/kendall_tau_vs_method_plot_physical_methods_colored_by_type.pdf\")\n\n\n    # R-squared comparison plot\n    # Reorder based on R-squared value\n    df_statistics_physical_R2 = df_statistics_physical.sort_values(by=\"R2\", inplace=False, ascending=False)\n\n    # R-squared comparison plot with each category colored separately\n    barplot_with_CI_errorbars_colored_by_label(df=df_statistics_physical_R2, x_label=\"ID\", y_label=\"R2\",\n                                               y_lower_label=\"R2_lower_bound\",\n                                               y_upper_label=\"R2_upper_bound\", color_label=\"reassigned_category\",\n                                               figsize=(28, 10))\n    plt.ylim(0, 1.0)\n    plt.savefig(directory_path + \"/Rsquared_vs_method_plot_physical_methods_colored_by_method_category.pdf\")\n\n    # Repeat the same plot, colored by reference-calculation type\n    if not ignore_refcalcs:\n        # R-squared comparison plot colored by submission type\n        barplot_with_CI_errorbars_colored_by_label(df=df_statistics_physical_R2, x_label=\"ID\", y_label=\"R2\",\n                                                   y_lower_label=\"R2_lower_bound\",\n                                                   y_upper_label=\"R2_upper_bound\", color_label=\"type\",\n                                                   figsize=(28, 10))\n        plt.ylim(0, 1.0)\n        plt.savefig(directory_path + \"/Rsquared_vs_method_plot_physical_methods_colored_by_type.pdf\")\n\n\n\n\ndef generate_QQplots_for_model_uncertainty(input_file_name, directory_path):\n\n    # Read QQplot data points from Pickle file\n    QQplot_dict_filename = os.path.join(directory_path, input_file_name)\n    with open(QQplot_dict_filename, 'rb') as handle:\n        QQplot_dict = pickle.load(handle)\n\n    # Iterate through dictionary to create QQ Plot for each submission\n    for submission_ID, data in QQplot_dict.items():\n        X, Y, slope = data\n        QQplot_output_filename = os.path.join(directory_path, \"{}_QQ.pdf\".format(submission_ID))\n        makeQQplot(X, Y, slope, title=submission_ID, xLabel=\"Expected fraction within range\",\n                   yLabel=\"Fraction of predictions within range\", fileName=QQplot_output_filename,\n                   uncLabel='Model Unc.', leg=[0.05, 0.975, \"upper left\", 1], ax1=None)\n        # leg=[1.02, 0.98, 2, 1]\n\n    # Replot first item of the dictionary to fix style\n    #submission_ID = list(QQplot_dict.keys())[0] # first submission ID\n    #print(\"Submission ID:\", submission_ID)\n    #data = QQplot_dict.get(submission_ID)\n    #X, Y, slope = data\n    #makeQQplot(X, Y, slope, title=submission_ID, xLabel=\"Expected fraction within range\",\n    #           yLabel=\"Fraction of predictions within range\", fileName=QQplot_output_filename,\n    #           uncLabel='Model Unc.', leg=[0.05, 0.95, \"upper left\", 1], ax1=None)\n\n    print(\"QQ Plots for model uncertainty generated.\")\n\n\n# =============================================================================\n# MAIN\n# 
=============================================================================\n\nif __name__ == '__main__':\n\n    sns.set_style('whitegrid')\n    sns.set_context('paper')\n\n    # Read experimental data.\n    with open(EXPERIMENTAL_DATA_FILE_PATH, 'r') as f:\n        # experimental_data = pd.read_json(f, orient='index')\n        names = ('Molecule ID', 'logP mean', 'logP SEM',\n                 'Assay Type', 'Experimental ID', 'Isomeric SMILES')\n        experimental_data = pd.read_csv(f, names=names, skiprows=1)\n\n    # Convert numeric values to dtype float ('logP mean' and 'logP SEM' only;\n    # coercing the remaining text columns would turn them into NaN).\n    for col in experimental_data.columns[1:3]:\n        experimental_data[col] = pd.to_numeric(experimental_data[col], errors='coerce')\n\n\n    experimental_data.set_index(\"Molecule ID\", inplace=True)\n    experimental_data[\"Molecule ID\"] = experimental_data.index\n    print(\"Experimental data: \\n\", experimental_data)\n\n    # Import user map.\n    with open(USER_MAP_FILE_PATH, 'r') as f:\n        user_map = pd.read_csv(f)\n\n    # Import method map\n    with open(METHOD_MAP_FILE_PATH, 'r') as f:\n        method_map = pd.read_csv(f)\n\n    # Configuration: statistics to compute.\n    stats_funcs = collections.OrderedDict([\n        ('RMSE', rmse),\n        ('MAE', mae),\n        ('ME', me),\n        ('R2', r2),\n        ('m', slope),\n        ('kendall_tau', kendall_tau)\n    ])\n    ordering_functions = {\n        'ME': lambda x: abs(x),\n        'R2': lambda x: -x,\n        'm': lambda x: abs(1 - x),\n        'kendall_tau': lambda x: -x\n    }\n    latex_header_conversions = {\n        'R2': 'R$^2$',\n        'RMSE': 'RMSE',\n        'MAE': 'MAE',\n        'ME': 'ME',\n        'kendall_tau': '$\\\\tau$'\n    }\n\n    # ==========================================================================================\n    # Analysis of standard blind submissions WITHOUT reference calculations\n    # ==========================================================================================\n\n    # Load submissions data.\n    submissions_logP = load_submissions(LOGP_SUBMISSIONS_DIR_PATH, user_map, method_map)\n\n    # Perform the analysis\n\n    output_directory_path = './analysis_outputs'\n    logP_submission_collection_file_path = '{}/logP_submission_collection.csv'.format(output_directory_path)\n\n    collection_logP = logPSubmissionCollection(submissions_logP, experimental_data,\n                                               output_directory_path, logP_submission_collection_file_path)\n\n    # Generate plots and tables.\n    for collection in [collection_logP]:\n        collection.generate_correlation_plots()\n        collection.generate_correlation_plots_with_SEM()\n        collection.generate_molecules_plot()\n        collection.generate_absolute_error_vs_molecule_ID_plot()\n\n    import shutil\n\n    if os.path.isdir('{}/StatisticsTables'.format(output_directory_path)):\n        shutil.rmtree('{}/StatisticsTables'.format(output_directory_path))\n\n\n    for submissions, property_name in zip([submissions_logP], ['logP']):\n        generate_statistics_tables(submissions, stats_funcs, directory_path=output_directory_path + '/StatisticsTables',\n                                   file_base_name='statistics', sort_stat='RMSE',\n                                   ordering_functions=ordering_functions,\n                                   latex_header_conversions=latex_header_conversions)\n\n    # Generate RMSE, MAE, Kendall's Tau comparison plots.\n    statistics_directory_path = os.path.join(output_directory_path, \"StatisticsTables\")\n    generate_performance_comparison_plots(statistics_filename=\"statistics.csv\", directory_path=statistics_directory_path)\n\n    # Generate QQ-Plots for model uncertainty predictions\n    QQplot_directory_path = os.path.join(output_directory_path, \"QQPlots\")\n    generate_QQplots_for_model_uncertainty(input_file_name=\"QQplot_dict.pickle\", directory_path=QQplot_directory_path)\n\n
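    # The second pass below repeats the pipeline with reference calculations included.\n    # A possible refactor (sketch only, not applied here): both passes differ only in\n    # the output directory and the ignore_refcalcs flag, so they could share a helper:\n    #     def run_analysis(output_directory_path, ignore_refcalcs): ...\n    #     run_analysis('./analysis_outputs', ignore_refcalcs=True)\n    #     run_analysis('./analysis_outputs_withrefs', ignore_refcalcs=False)\n\n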
    #==========================================================================================\n    # Repeat analysis WITH reference calculations\n    #==========================================================================================\n    # Load submissions data.\n    submissions_logP = load_submissions(LOGP_SUBMISSIONS_DIR_PATH, user_map, method_map)\n\n    # Perform the analysis\n\n    output_directory_path = './analysis_outputs_withrefs'\n    logP_submission_collection_file_path = '{}/logP_submission_collection.csv'.format(output_directory_path)\n\n    collection_logP = logPSubmissionCollection(submissions_logP, experimental_data,\n                                               output_directory_path, logP_submission_collection_file_path, ignore_refcalcs = False)\n\n    # Generate plots and tables.\n    for collection in [collection_logP]:\n        collection.generate_correlation_plots()\n        collection.generate_correlation_plots_with_SEM()\n        collection.generate_molecules_plot()\n        collection.generate_absolute_error_vs_molecule_ID_plot()\n\n    import shutil\n\n    if os.path.isdir('{}/StatisticsTables'.format(output_directory_path)):\n        shutil.rmtree('{}/StatisticsTables'.format(output_directory_path))\n\n\n    for submissions, property_name in zip([submissions_logP], ['logP']):\n        generate_statistics_tables(submissions, stats_funcs, directory_path=output_directory_path + '/StatisticsTables',\n                                   file_base_name='statistics', sort_stat='RMSE',\n                                   ordering_functions=ordering_functions,\n                                   latex_header_conversions=latex_header_conversions, ignore_refcalcs = False)\n\n\n    # Generate RMSE, MAE, and Kendall's Tau comparison plots.\n    statistics_directory_path = os.path.join(output_directory_path, \"StatisticsTables\")\n    generate_performance_comparison_plots(statistics_filename=\"statistics.csv\", directory_path=statistics_directory_path)\n\n    # Generate QQ-Plots for model uncertainty predictions\n    QQplot_directory_path = os.path.join(output_directory_path, \"QQPlots\")\n    generate_QQplots_for_model_uncertainty(input_file_name=\"QQplot_dict.pickle\", directory_path=QQplot_directory_path)\n","sub_path":"physical_properties/logP/analysis_with_reassigned_categories/logP_analysis.py","file_name":"logP_analysis.py","file_ext":"py","file_size_in_byte":74990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"224398652","text":"# -*- coding: utf-8 -*-\n\nfrom rest_framework import serializers\n\nfrom django.utils.translation import ugettext_lazy as _\n\n\ndef validate_user(user1, user2):\n    if user1.id != user2.id:\n        raise serializers.ValidationError({\"user\": _(\"You do not have authorization.\")})\n\n\ndef validate_image(my_picture):\n    if my_picture.image is None:\n        raise serializers.ValidationError({\"image\": _(\"Image was not found.\")})\n\n\nclass MyUserDetailSerializer(serializers.Serializer):\n    def get_cleaned_data(self, user):\n        self.cleaned_data = {\n            'username': user.username,\n            'name': user.last_name + user.first_name,\n            'birth': user.birth,\n            'gender': user.gender,\n            'email': user.email,\n        }\n        return self.cleaned_data\n\n\ndef _count_result_types(pictures):\n    # Shared tally for the three identical counting loops previously repeated in\n    # MyStaticListSerializer. Result types may arrive as ints or strings (compare\n    # _validate_result below), so both are normalized to string keys '0'..'4'.\n    counts = {str(type_num): 0 for type_num in range(5)}\n    for picture in pictures:\n        validate_image(picture)\n        type_key = str(picture.result.type)\n        if type_key in counts:\n            counts[type_key] += 1\n    return counts\n\n
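# Example (illustrative): given two pictures whose result.type values are 0 and 3,\n# _count_result_types returns {'0': 1, '1': 0, '2': 0, '3': 1, '4': 0}.\n\n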
class MyStaticListSerializer(serializers.Serializer):\n    def get_cleaned_data(self, same_ages, all_ages, gender_ages):\n        return {\n            'same_ages_type': _count_result_types(same_ages),\n            'all_users_type': _count_result_types(all_ages),\n            'gender_users_type': _count_result_types(gender_ages),\n        }\n\n\nclass MyStaticDetailSerializer(serializers.Serializer):\n    def _validate_result(self, result_type):\n        if result_type not in [0, 1, 2, 3, 4, '0', '1', '2', '3', '4']:\n            raise serializers.ValidationError({\"result\": _(\"Result was not found.\")})\n\n    def get_cleaned_data(self, my_results, statistic_id):\n        self.cleaned_data = {'images': {}}\n        self._validate_result(statistic_id)\n        for my_result in my_results:\n            self._validate_result(my_result.type)\n            self.cleaned_data['images'].update({\n                my_result.id: 'uploads/' + my_result.image.name,\n            })\n        self.cleaned_data.update({'type': statistic_id})\n        return self.cleaned_data\n\n\nclass MyHistoryListSerializer(serializers.Serializer):\n    def get_cleaned_data(self, user, my_pictures):\n        pictures = {}\n        for my_picture in my_pictures:\n            validate_user(user, my_picture.user)\n            validate_image(my_picture)\n            pictures[my_picture.id] = 'uploads/' + my_picture.image.name\n        return pictures\n\n\nclass MyHistoryDetailSerializer(serializers.Serializer):\n    def get_cleaned_data(self, user, my_picture):\n        validate_user(user, my_picture.user)\n        validate_image(my_picture)\n        self.cleaned_data = {\n            'name': my_picture.image.name,\n            'created_date': my_picture.created_date,\n            'type': my_picture.result.type,\n        }\n        return self.cleaned_data\n\n\nclass MyGraphListSerializer(serializers.Serializer):\n    def get_cleaned_data(self, user, my_pictures):\n        info = {}\n        for my_picture in my_pictures:\n            validate_user(user, my_picture.user)\n            validate_image(my_picture)\n            info[my_picture.id] = {\n                'create_date': my_picture.created_date,\n                'birth': my_picture.user.birth,\n                'type': my_picture.result.type,\n                'percentage': my_picture.result.percentage,\n            }\n        return info\n\n\nclass MyGraphDetailSerializer(serializers.Serializer):\n    def get_cleaned_data(self, user, my_picture):\n        validate_user(user, my_picture.user)\n        validate_image(my_picture)\n        self.cleaned_data = {\n            'image': 'uploads/' + my_picture.image.name,\n            'result_image': 'uploads/' + my_picture.result.image.name,\n            'result_type': my_picture.result.type,\n            'user': my_picture.user.last_name + my_picture.user.first_name,\n        }\n        return self.cleaned_data\n","sub_path":"userInfo/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"120403949","text":"from torch import nn\nimport torch\nimport numpy as np\n\n
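# Loss sketch (for orientation; matches forward() below):\n#     total_loss = recon_loss + kl_weight * kl_loss\n# where recon_loss = MSE(mel) + MSE(mel_postnet) + BCE(gate), and kl_weight follows\n# the logistic / linear / constant annealing schedule in kl_anneal_function.\n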
__init__(self, hparams):\n super(Tacotron2Loss_VAE, self).__init__()\n self.anneal_function = hparams.anneal_function\n self.lag = hparams.anneal_lag\n self.k = hparams.anneal_k\n self.x0 = hparams.anneal_x0\n self.upper = hparams.anneal_upper\n \n def kl_anneal_function(self, anneal_function, lag, step, k, x0, upper):\n if anneal_function == 'logistic':\n return float(upper/(upper+np.exp(-k*(step-x0))))\n elif anneal_function == 'linear':\n if step > lag:\n return min(upper, step/x0)\n else:\n return 0\n elif anneal_function == 'constant': # simple beta-VAE\n return 0.001\n\n\n def forward(self, model_output, targets, step): # NOTE model ouput==y_pred in train.py\n mel_target, gate_target = targets[0], targets[1]\n mel_target.requires_grad = False\n gate_target.requires_grad = False\n gate_target = gate_target.view(-1, 1)\n\n mel_out, mel_out_postnet, gate_out, _, mu, logvar, _, _ = model_output # NOTE mu and logvar are both nan, mel_out, mel_out_postnet are all 0 and gate_out is all 1000\n gate_out = gate_out.view(-1, 1)\n mel_loss = nn.MSELoss()(mel_out, mel_target) + \\\n nn.MSELoss()(mel_out_postnet, mel_target)\n gate_loss = nn.BCEWithLogitsLoss()(gate_out, gate_target)\n # print(f'Log var: {logvar}, \\nMu: {mu}, \\nMu power: {mu.pow(2)}, \\nLog var exp: {logvar.exp()}')\n # print(f'mel_out: {mel_out}, \\nmel_out_posnet: {mel_out_postnet}, \\ngate_out: {gate_out}')\n kl_loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n kl_weight = self.kl_anneal_function(self.anneal_function, self.lag, step, self.k, self.x0, self.upper)\n \n recon_loss = mel_loss + gate_loss\n total_loss = recon_loss + kl_weight*kl_loss\n # print(f'Total loss:{total_loss}, Recon loss: {recon_loss}, KL loss: {kl_loss}, KL weight: {kl_weight}')\n return total_loss, recon_loss, kl_loss, kl_weight\n","sub_path":"loss_function.py","file_name":"loss_function.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"99322523","text":"\"\"\"CPU functionality.\"\"\"\n\nimport sys\n\nclass CPU:\n \"\"\"Main CPU class.\"\"\"\n\n def __init__(self):\n \"\"\"Construct a new CPU.\"\"\"\n self.ram = [number for number in range(256)]\n self.reg = [0] * 8\n self.pc = 0\n self.sp = 7\n self.reg[self.sp] = 0xF4 \n self.LDI = 0b10000010\n self.PRN = 0b01000111\n self.HLT = 0b00000001\n self.ADD = 0b10100000\n self.MUL = 0b10100010\n self.PUSH = 0b01000101\n self.POP = 0b01000110\n self.CALL = 0b01010000\n self.RET = 0b00010001\n self.program = [\n # Default program\n # From print8.ls8\n 0b10000010, # LDI R0,8\n 0b00000000,\n 0b00001000,\n 0b01000111, # PRN R0\n 0b00000000,\n 0b00000001, # HLT\n ]\n\n def ram_read(self,MAR):\n\n return self.ram[MAR]\n\n def ram_write(self, MAR, MDR):\n\n self.ram[MAR] = MDR\n\n def halt(self):\n\n sys.exit()\n\n def ldi(self,LDI,value):\n\n self.reg[LDI] = value\n\n def prn(self,value):\n\n print(self.reg[value])\n\n\n def load(self, program_route=None):\n \"\"\"Load a program into memory.\"\"\"\n\n address = 0\n\n if not program_route:\n\n program = self.program\n\n else:\n\n program = []\n\n with open(program_route) as f:\n\n for line in f:\n\n if line[0].isdigit():\n\n program.append(int(line[:8].split('#',1)[0],2))\n\n for instruction in program:\n self.ram[address] = instruction\n address += 1\n\n print(program)\n\n def alu(self, op, reg_a, reg_b):\n \"\"\"ALU operations.\"\"\"\n\n if op == \"ADD\":\n self.reg[reg_a] += self.reg[reg_b]\n #elif op == \"SUB\": etc\n elif op == \"MUL\":\n 
self.reg[reg_a] *= self.reg[reg_b]\n else:\n raise Exception(\"Unsupported ALU operation\")\n\n def push_to_stack(self,value):\n\n # Decrement the SP\n\n self.reg[self.sp] -= 1 \n\n # Copy the register value into SP's location\n # Get the reg num to push\n\n # reg_num = self.ram[self.pc + 1]\n\n # # Get the value to push\n\n # value = self.reg[reg_num]\n\n # # Copy the value to the SP's location\n\n # top_of_the_stack = self.reg[self.sp]\n\n # self.ram_write(top_of_the_stack,value)\n \n # In one line:\n self.ram_write(self.reg[self.sp],value)\n\n \n\n def pop_from_stack(self):\n\n # Copy the value from SP's location to the register\n # Get the register number to pop into\n \n # reg_num = self.ram[self.pc +1]\n\n # # Get the top of the stack address\n\n # top_of_stack_addr = self.reg[self.sp]\n\n # # Get the value of the top of the stack\n\n # value = self.ram_read(top_of_stack_addr)\n\n # # Store the value into the register\n\n # self.reg[reg_num] = value\n\n # In fewer lines:\n\n # Increment the SP\n \n top_of_stack_addr = self.reg[self.sp]\n\n self.reg[self.sp] += 1\n\n return self.ram_read(top_of_stack_addr)\n\n\n def trace(self):\n \"\"\"\n Handy function to print out the CPU state. You might want to call this\n from run() if you need help debugging.\n \"\"\"\n\n print(f\"TRACE: %02X | %02X %02X %02X |\" % (\n self.pc,\n #self.fl,\n #self.ie,\n self.ram_read(self.pc),\n self.ram_read(self.pc + 1),\n self.ram_read(self.pc + 2)\n ), end='')\n\n for i in range(8):\n print(\" %02X\" % self.reg[i], end='')\n\n print()\n\n def run(self):\n \"\"\"Run the CPU.\"\"\"\n \n running = True\n\n while running:\n\n IR = self.ram_read(self.pc)\n\n if IR == self.LDI:\n\n operand_a = self.ram_read(self.pc + 1)\n operand_b = self.ram_read(self.pc + 2)\n\n self.ldi(operand_a,operand_b)\n\n self.pc += 3\n \n elif IR == self.PRN:\n\n operand_a = self.ram_read(self.pc + 1)\n self.prn(operand_a)\n\n self.pc += 2\n\n elif IR == self.HLT:\n\n self.halt()\n\n elif IR == self.ADD:\n\n self.alu('ADD',self.ram_read(self.pc + 1),self.ram_read(self.pc + 2))\n self.pc += 3\n\n elif IR == self.MUL:\n\n self.alu('MUL',self.ram_read(self.pc + 1),self.ram_read(self.pc + 2))\n self.pc += 3\n\n elif IR == self.PUSH:\n\n self.push_to_stack(self.reg[self.ram[self.pc + 1]])\n\n self.pc += 2\n\n elif IR == self.POP:\n\n self.reg[self.ram_read(self.pc +1)] = self.pop_from_stack()\n\n self.pc += 2\n\n elif IR == self.CALL:\n\n # Push to the stack the next instruction after call\n self.push_to_stack(self.pc + 2)\n\n # The PC is set to the address stored in the given register.\n self.pc = self.reg[self.ram_read(self.pc + 1)]\n\n elif IR == self.RET:\n\n # Pop the value from the top of the stack and store it in the PC.\n self.pc = self.pop_from_stack() \n\n else:\n\n print(f'Unknown instruction {IR}')\n \n self.pc += 1\n \n ","sub_path":"ls8/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":5454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"186577008","text":"# experiment result wrapper\n\nimport uuid\nimport h5py\n\nfrom git import Repo\nfrom pymongo import MongoClient\n\n\ndef save(result, name, comment):\n result.name = name\n result.comment = comment\n result.save_meta()\n result.save_data()\n return result.id\n\n\nclass ExperimentResult(object):\n def __init__(self, test_scores, changes, w_history, delta_history,\n primal_objective_curve, objective_curve, timestamps,\n base_iter_history, **kwargs):\n # generated data like scores per iteration, model 
parameters\n        # stored in hdf5 file\n        self.test_scores = test_scores\n        self.changes = changes\n        self.w_history = w_history\n        self.delta_history = delta_history\n        self.primal_objective_curve = primal_objective_curve\n        self.objective_curve = objective_curve\n        self.timestamps = timestamps\n        self.base_iter_history = base_iter_history\n\n        # meta information, comments, parameters\n        # this will be stored in mongodb\n        self.meta = kwargs\n        # GitPython does not expand '~', so the absolute path is used here\n        # (consistent with the HDF5 path in save_data below).\n        repo = Repo('/home/dmitry/Documents/Thesis/latent_ssvm')\n        self.meta['commit_hash'] = repo.head.commit.hexsha\n        self.meta['name'] = ''\n        self.meta['comment'] = ''\n        # unique experiment identifier\n        self.meta['id'] = uuid.uuid1().hex\n\n    def save_meta(self):\n        client = MongoClient()\n        client['lSSVM']['base'].insert(self.meta)\n        client.close()  # close() releases the connection (disconnect() was removed in pymongo 3)\n\n    def save_data(self):\n        f = h5py.File('/home/dmitry/Documents/Thesis/latent_ssvm/notebooks/experiment_data.hdf5', 'a')\n        grp = f[self.meta['dataset_name']].create_group(self.meta['id'])\n        grp.create_dataset(\"test_scores\", data=self.test_scores)\n        grp.create_dataset(\"changes\", data=self.changes)\n        grp.create_dataset(\"w_history\", data=self.w_history)\n        grp.create_dataset(\"delta_history\", data=self.delta_history)\n        grp.create_dataset(\"primal_objective_curve\", data=self.primal_objective_curve)\n        grp.create_dataset(\"objective_curve\", data=self.objective_curve)\n        grp.create_dataset(\"timestamps\", data=self.timestamps)\n        grp.create_dataset(\"base_iter_history\", data=self.base_iter_history)\n        f.close()\n        return grp.id.id\n\n    @property\n    def name(self):\n        return self.meta['name']\n\n    @name.setter\n    def name(self, name_):\n        self.meta['name'] = name_\n\n    @property\n    def comment(self):\n        return self.meta['comment']\n\n    @comment.setter\n    def comment(self, comment_):\n        self.meta['comment'] = comment_\n\n    @property\n    def id(self):\n        return self.meta['id']\n
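\n\n# Usage sketch (illustrative; 'dataset_name' must be supplied via kwargs because\n# save_data() groups the HDF5 datasets under it):\n#     result = ExperimentResult(..., dataset_name='some_dataset')\n#     save(result, 'run-1', 'baseline run')  # returns the generated uuid\n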
\"D:\\\\Source\\\\cpp\\\\CP_JJS\\\\cjjs_py_out.bin\" )\r\n \r\n self.e_exec = Entry( frame, width = 40 )\r\n self.e_exec.grid( row = 2, column = 1 )\r\n self.e_exec.insert( END, 'D:\\\\Source\\\\cpp\\\\CP_JJS\\\\chain jjs.exe' )\r\n \r\n self.e_sets = Entry( frame, width = 40 )\r\n self.e_sets.grid( row = 3, column = 1 )\r\n self.e_sets.insert( END, 'D:\\\\source\\\\cpp\\\\CP_JJS\\\\cjjs.txt' )\r\n\r\n ## browse buttons\r\n self.btn_browse1 = Button( frame, text=\"...\", command=self.browse1 )\r\n self.btn_browse1.grid( row=1, column=2 )\r\n\r\n self.btn_browse2 = Button( frame, text=\"...\", command=self.browse2 )\r\n self.btn_browse2.grid( row=2, column=2 )\r\n \r\n self.btn_browse3 = Button( frame, text=\"...\", command=self.browse3 )\r\n self.btn_browse3.grid( row=3, column=2)\r\n\r\n ## ========== \r\n ## methods\r\n ## ==========\r\n def sineBias( self ):\r\n print( \"Plotting...\" )\r\n #===============\r\n filename = self.e_data.get()\r\n flei = open( filename, \"rb\" )\r\n # initializing array size\r\n size, = struct.unpack( 'i', flei.read(4) )\r\n \r\n #print( \"before read \")\r\n print() # skip newline escape character\r\n flei.read(1) \r\n #print( \"after read \")\r\n #print( flei.tell() )\r\n \r\n #print( size )\r\n self.lbl['text'] = str(size) + \" points\"\r\n # initializing function values\r\n time = array.array('d')\r\n time.fromfile( flei, size )\r\n print( \"Absciss array initialized\" )\r\n euler = array.array('d')\r\n euler.fromfile( flei, size )\r\n print( \"ordinate array initialized\" )\r\n flei.close()\r\n\r\n #for i in range( 10 ):\r\n # print( euler[i] );\r\n # print( time[i] );\r\n\r\n # plotting\r\n fig = plt.figure(1)\r\n ax = fig.add_subplot( 111 )\r\n ax.plot( time, euler, 'ro')\r\n leg = ax.legend(( '1st', '2nd' ), 'best' )\r\n ax.grid(True)\r\n #ax.set_xlim(-1, 50 )\r\n #ax.set_ylim(-1, 10 )\r\n ax.set_xlabel(\"U\")\r\n ax.set_ylabel(\"I\" )\r\n ax.set_title(\"Plot\")\r\n print( \"before plot show\" )\r\n plt.show()\r\n print(\"plot 1 close \")\r\n plt.close()\r\n #================\r\n## plt.figure( 1 )\r\n## xax = [1,2,3]\r\n## ax1 = plt.subplot(121)\r\n## plt.plot( xax, [1,2,3], \"g^\" )\r\n## ax2 = plt.subplot( 122, sharex = ax1 )\r\n## plt.plot( xax, [3,2,1], \"ro\" )\r\n## plt.show()\r\n\r\n def JJsystem( self ):\r\n print( \"josephson junctions system simulation has started\" )\r\n #========================\r\n flei = open( \"D:\\\\Source\\\\cpp\\\\CP_JJS\\\\cjjs_py_out_fidot.bin\", \"rb\" )\r\n # initializing array size\r\n size, = struct.unpack( 'i', flei.read(4) )\r\n \r\n flei.read(1) # skip newline escape character\r\n \r\n #print( size )\r\n # initializing function values\r\n time = array.array('d')\r\n time.fromfile( flei, size )\r\n euler = array.array('d')\r\n euler.fromfile( flei, size )\r\n #hune = array.array('d')\r\n #hune.fromfile( flei, size )\r\n #phase3 = array.array('d')\r\n #phase3.fromfile( flei, size )\r\n \r\n flei.close()\r\n\r\n # plotting\r\n fig = plt.figure(2)\r\n ax = fig.add_subplot( 111 )\r\n ax.plot( time, euler, 'r-')#, time, hune, 'g-', time, phase3, 'b-' )\r\n #leg = ax.legend(( '1st', '2nd', '3rd' ), 'best' )\r\n ax.grid(True)\r\n ax.set_xlabel(\"time\")\r\n ax.set_xlim(-1, 20 )\r\n #ax.set_ylabel(\"phase\" )\r\n ax.set_title(\"Plot\")\r\n plt.show()\r\n print(\"plot 2 close\")\r\n plt.close()\r\n #===================\r\n## plt.figure( 2 )\r\n## xax = [1,2,3]\r\n## ax1 = plt.subplot(211)\r\n## plt.plot( xax, [1,2,3], \"g^\" )\r\n## ax2 = plt.subplot( 212, sharex = ax1 )\r\n## plt.plot( xax, [3,2,1], \"ro\" )\r\n## 
plt.show()\r\n\r\n\r\n    def sineLaunch( self ):\r\n        filename = self.e_exec.get()\r\n        self.lbl['text'] = 'Now computing...'\r\n        # repaint the label before the blocking subprocess call, otherwise the\r\n        # 'Now computing...' text never becomes visible\r\n        self.lbl.update_idletasks()\r\n        call( [filename] )\r\n        self.lbl['text'] = 'Done'\r\n\r\n    def jjsLaunch( self ):\r\n        call( ['D:\\\\source\\\\cpp\\\\tasks\\\\release\\\\Chain jjs.exe'] )\r\n\r\n    def sineBiasSettings( self ):\r\n        print( \"Settings file has been opened\" )\r\n        filename = self.e_sets.get()\r\n        startfile( filename )\r\n\r\n    def JJsystemSettings( self ):\r\n        print( \"start jjsystem settings file\" )\r\n        startfile( \"e:\\\\source code\\\\JJsystem.txt\" )\r\n\r\n    def browse1( self ):\r\n        pathname = self.e_data.get()\r\n        pathname = split(pathname)\r\n        #self.lbl['text'] = pathname[0]\r\n        result = askopenfilename( defaultextension='.txt', parent=self.container,\r\n                                  filetypes=[(\"Binary file\", \".bin\")], initialdir=pathname[0] )\r\n        if not (result == \"\"):\r\n            self.lbl['text'] = result\r\n            self.e_data.delete( 0, END )\r\n            self.e_data.insert( END, result )\r\n\r\n    def browse2( self ):\r\n        pathname = self.e_exec.get()\r\n        pathname = split(pathname)\r\n        #self.lbl['text'] = pathname[0]\r\n        result = askopenfilename( defaultextension='.txt', parent=self.container,\r\n                                  filetypes=[(\"Executable\", \".exe\")], initialdir=pathname[0] )\r\n        if not (result == \"\"):\r\n            #result.replace( \"/\", \"\\\\\" )\r\n            self.lbl['text'] = result\r\n            self.e_exec.delete( 0, END )\r\n            self.e_exec.insert( END, result )\r\n\r\n    def browse3( self ):\r\n        pathname = self.e_sets.get()\r\n        pathname = split(pathname)\r\n        #self.lbl['text'] = pathname[0]\r\n        result = askopenfilename( defaultextension='.txt', parent=self.container,\r\n                                  filetypes=[(\"Text file\", \".txt\")], initialdir=pathname[0] )\r\n        if not (result == \"\"):\r\n            self.lbl['text'] = result\r\n            self.e_sets.delete( 0, END )\r\n            self.e_sets.insert( END, result )\r\n\r\nroot = Tk()\r\nroot.resizable( False, False )\r\nroot.title( \"Josephson junctions\" )\r\nmyapp = LaunchPanel(root)\r\n\r\nroot.mainloop()\r\n","sub_path":"Python/cp/launch_panel.py","file_name":"launch_panel.py","file_ext":"py","file_size_in_byte":7726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"505476541","text":"import dash\r\nimport dash_html_components as html\r\nimport dash_core_components as dcc\r\nimport pandas as pd\r\nimport plotly.graph_objs as go\r\nimport plotly.express as px\r\n\r\n'''\r\nThis file builds an interactive map that shows annual CO2 emissions per capita for each\r\ncountry. 
The bar on the right provides the color scale for reading the map.\r\n'''\r\n\r\nfilename = '../Data/co2/co-emissions-per-capita.csv'\r\ndf = pd.read_csv(filename, delimiter=',')\r\ndf = df[df['Year'] == 2017]\r\n\r\nmap_co2_layout = html.Div([\r\n    dcc.Graph(\r\n        id='life-exp-vs-gdp',\r\n        style={\r\n            'height': '250px'\r\n        },\r\n        figure={\r\n            'data': [go.Choropleth(\r\n                locations = df['Code'],\r\n                z = df['percapita'],\r\n                text = df['Country'],\r\n                colorscale = 'Reds',\r\n                autocolorscale=False,\r\n                reversescale=False,\r\n                marker_line_color='darkgray',\r\n                marker_line_width=0.5,\r\n                colorbar_tickprefix = 't',\r\n                colorbar_title = 'Annual CO2 emission',\r\n            )],\r\n            'layout': go.Layout(\r\n                title = 'Annual 2017 CO2 emission',\r\n                titlefont = {\r\n                    'size': 30\r\n                },\r\n                autosize=True,\r\n\r\n                margin=go.layout.Margin(\r\n                    l=5,\r\n                    r=5,\r\n                    b=0,\r\n                    t=50,\r\n                    pad=3\r\n                )\r\n            )\r\n        }\r\n    )\r\n])\r\n\r\n","sub_path":"dash/components/map_co2.py","file_name":"map_co2.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"334922156","text":"import botpie\r\nimport random\r\n\r\nmessage = botpie.utils.argvstr()\r\nbot = botpie.Bot(\"ImportantBot\")\r\n\r\n@bot.command(\"tracer\")\r\ndef greeter():\r\n    greets = [\"hiya!\", \"heya!\", \"hi!\", \"hoiya!\"]\r\n    return random.choice(greets)\r\n\r\nresult = bot.inspectstr(message)\r\n\r\nif result:\r\n    print(result)","sub_path":"examples/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"200198932","text":"#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom bigdl.nn.criterion import *\nfrom bigdl.optim.optimizer import Adam\nfrom bigdl.util.common import *\nfrom pyspark.ml.linalg import Vectors\nfrom pyspark.sql import SparkSession\nfrom zoo.common.nncontext import *\nfrom zoo.pipeline.api.net import TFNet\nfrom zoo.pipeline.nnframes import *\n\nimport tensorflow as tf\n\n# This is a simple example showing how to train and run inference with a TensorFlow model\n# using NNFrames on a Spark DataFrame. 
It can also be used as part of a Spark ML Pipeline.\n\nif __name__ == '__main__':\n\n    sparkConf = init_spark_conf().setAppName(\"testNNClassifer\").setMaster('local[1]')\n    sc = init_nncontext(sparkConf)\n    spark = SparkSession \\\n        .builder \\\n        .getOrCreate()\n\n    with tf.Graph().as_default():\n        input1 = tf.placeholder(dtype=tf.float32, shape=(None, 2))\n        hidden = tf.layers.dense(input1, 4)\n        output = tf.sigmoid(tf.layers.dense(hidden, 1))\n        with tf.Session() as sess:\n            sess.run(tf.global_variables_initializer())\n            net = TFNet.from_session(sess, [input1], [output], generate_backward=True)\n\n    df = spark.createDataFrame(\n        [(Vectors.dense([2.0, 1.0]), 1.0),\n         (Vectors.dense([1.0, 2.0]), 0.0),\n         (Vectors.dense([2.0, 1.0]), 1.0),\n         (Vectors.dense([1.0, 2.0]), 0.0)],\n        [\"features\", \"label\"])\n\n    print(\"before training:\")\n    NNModel(net).transform(df).show()\n\n    classifier = NNClassifier(net, MSECriterion()) \\\n        .setBatchSize(4) \\\n        .setOptimMethod(Adam()) \\\n        .setLearningRate(0.1) \\\n        .setMaxEpoch(10)\n\n    nnClassifierModel = classifier.fit(df)\n\n    print(\"After training: \")\n    res = nnClassifierModel.transform(df)\n    res.show(10, False)\n\n# expected output:\n#\n# before training:\n# +---------+-----+------------+\n# | features|label|  prediction|\n# +---------+-----+------------+\n# |[2.0,1.0]|  1.0|[0.46490368]|\n# |[1.0,2.0]|  0.0|[0.51738966]|\n# |[2.0,1.0]|  1.0|[0.46490368]|\n# |[1.0,2.0]|  0.0|[0.51738966]|\n# +---------+-----+------------+\n#\n# After training:\n# +---------+-----+----------+\n# |features |label|prediction|\n# +---------+-----+----------+\n# |[2.0,1.0]|1.0  |1.0       |\n# |[1.0,2.0]|0.0  |0.0       |\n# |[2.0,1.0]|1.0  |1.0       |\n# |[1.0,2.0]|0.0  |0.0       |\n# +---------+-----+----------+\n","sub_path":"pyzoo/zoo/examples/nnframes/tensorflow/SimpleTraining.py","file_name":"SimpleTraining.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"442710554","text":"#!/usr/local/Cellar/python@3.9/3.9.1_6/bin/python3\n\"\"\"Featurize the cactus_cefprox protomer pair from SMILES strings and xyzmq structures.\"\"\"\n\nfrom numpy import savetxt\n\n#from DmimData.data import DMD\nfrom helpers import featurize\n\n\ndef main():\n    \"\"\" main execution sequence \"\"\"\n    smis = [\n        'CC(C)OC(=O)OC(C)OC(=O)C1=C(CSC2[N+]1([H])C(=O)C2NC(=O)C(=NOC)C3=CSC(=N3)N)COC',\n        'CC(C)OC(=O)OC(C)OC(=O)C1=C(CSC2N1C(=O)C2NC(=O)C(=NOC)C3=CSC(=[N+]3[H])N)COC'\n    ]\n    structures = []\n    with open('cactus_cefprox_pA.xyzmq', 'r') as f:\n        structures.append(f.read())\n    with open('cactus_cefprox_pB.xyzmq', 'r') as f:\n        structures.append(f.read())\n\n    X_cust = featurize(smis, structures, ['hac', 'c', 'adb', 'asv', 'ctv', 'hbam', 'hbd'], ['pmi1', 'pmi2', 'pmi3', 'rmd02'])\n    X_mqn = featurize(smis, structures, 'all', [])\n    X_md3d = featurize(smis, structures, [], 'all')\n    X_comb = featurize(smis, structures, 'all', 'all')\n\n    savetxt('cactus_cefprox_X_CUST.txt', X_cust)\n    savetxt('cactus_cefprox_X_MQN.txt', X_mqn)\n    savetxt('cactus_cefprox_X_MD3D.txt', X_md3d)\n    savetxt('cactus_cefprox_X_COMB.txt', X_comb)\n\n
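\n# Usage note (inferred from the calls in main(); helpers.featurize itself is not\n# shown in this file): featurize(smiles, structures, mqn_selection, md3d_selection)\n# appears to return one feature matrix per call, where each selection is either a\n# list of descriptor names, [] for none, or 'all' for an entire descriptor family.\n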
Flask(__name__)\n\n@app.route(\"/\")\ndef hello():\n\treturn render_template('index.html')\n\ndef query(filename):\n\tf = open(filename)\n\t## to be implemented\n\n@app.route(\"/draw\", methods=['POST'])\ndef process_draw_data():\n\tdata = json.loads(request.data)\n\tkeyword = data['keyword']\n\timage = data['image']\n\theader, encoded = image.split(\",\", 1)\n\timage_content = b64decode(encoded)\n\twith open(\"query/query.png\", \"wb\") as f:\n\t\tf.write(image_content)\n\n\tquery(\"query/query.png\")\n\tdata_back = {\n\t\t\"image\": [],\n\t\t\"rank\": []\n\t}\n\treturn json.dumps(data_back)\n\n@app.route('/upload', methods=['POST'])\ndef process_upload_data():\n\tfile = request.files[\"file\"]\n\tfile.save(\"query/query.png\")\n\tquery(\"query/query.png\")\n\tdata_back = {\n\t\t\"image\": [],\n\t\t\"rank\": []\n\t}\n\treturn json.dumps(data_back)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"315966348","text":"import logging\nimport traceback\nimport time\n\nimport ffmpeg\n\nfrom modules import core\n\n\ndef start_stream(cap_props: core.types.CapProps):\n stream = ffmpeg.input(\n \"pipe:\",\n format=\"rawvideo\",\n pix_fmt=\"rgb24\",\n s=f\"{cap_props['frameWidth']}x{cap_props['frameHeight']}\",\n )\n stream = ffmpeg.output(\n stream,\n core.Config.Uri.PUBLISH_VIDEO,\n vcodec=\"mpeg1video\",\n framerate=cap_props[\"fps\"],\n s=f\"640x480\",\n format=\"mpegts\",\n video_bitrate=\"800k\",\n loglevel=core.Config.FFmpeg.LOGLEVEL,\n )\n stream = ffmpeg.run_async(stream, pipe_stdin=True)\n return stream\n\n\ndef restart_stream(cap_props: core.types.CapProps, intervall: int = 4):\n logging.error(\"FFMPEG Error\")\n logging.error(traceback.format_exc())\n while True:\n time.sleep(intervall)\n logging.info(\"Restarting Stream\")\n try:\n yield start_stream(cap_props)\n break\n except Exception:\n pass\n","sub_path":"services/coral-app/modules/core/src/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"226776152","text":"\"\"\"\nImplementation of endpoints, API version 1.\n\nCopyright CNRS 2023\nAuthors: Andrew P. 
Davison, Onur Ates, Shailesh Appukuttan, Hélissande Fragnaud and Corentin Fragnaud\nLicence: MIT (see LICENSE)\n\n\"\"\"\n\nfrom typing import Annotated\nfrom pydantic import HttpUrl, PositiveInt\n\nfrom fastapi import Query, HTTPException, APIRouter, status\n\nfrom ..metadata import title, description\nfrom ..data_models import (\n IOModule,\n Segment,\n AnalogSignal,\n SpikeTrain,\n BlockContainer,\n)\nfrom ..data_handler import load_blocks\n\nrouter = APIRouter()\n\n\n@router.get(\"/\")\nasync def info():\n \"\"\"Return information about the API.\"\"\"\n return {\n \"title\": title,\n \"description\": description.strip(),\n \"version\": 1.7,\n }\n\n\n@router.get(\"/blockdata/\")\nasync def get_block_data(\n url: Annotated[\n HttpUrl, Query(description=\"Location of a data file that can be read by Neo.\")\n ],\n type: Annotated[\n IOModule,\n Query(\n description=(\n \"Specify a specific Neo IO module that should be used to open the data file.\"\n \"If not provided, Neo will try to determine which module to use.\"\n )\n ),\n ] = None,\n) -> BlockContainer:\n \"\"\"\n Return metadata about all the blocks in a data file,\n including metadata about the segments within each block,\n but without any information about the data contained within each segment.\n \"\"\"\n # here `url` is a Pydantic object, which we convert to a string\n blocks = load_blocks(str(url), type)\n return BlockContainer.from_neo(blocks, url)\n\n\n@router.get(\"/segmentdata/\")\nasync def get_segment_data(\n url: Annotated[\n HttpUrl, Query(description=\"Location of a data file that can be read by Neo.\")\n ],\n segment_id: Annotated[\n int,\n Query(\n description=\"Index of the segment for which metadata should be returned.\"\n ),\n ],\n block_id: Annotated[\n int,\n Query(\n description=\"Index of the block for which metadata should be returned.\"\n ),\n ] = 0,\n type: Annotated[\n IOModule,\n Query(\n description=(\n \"Specify a specific Neo IO module that should be used to open the data file.\"\n \"If not provided, Neo will try to determine which module to use.\"\n )\n ),\n ] = None,\n) -> Segment:\n \"\"\"\n Return information about an individual Segment within a block,\n including metadata about the signals contained in the segment,\n but not the signal data themselves.\n \"\"\"\n try:\n block = load_blocks(str(url), type)[block_id]\n except IndexError:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"IndexError on block_id\", # todo: improve this message in next API version\n )\n try:\n segment = block.segments[segment_id]\n except IndexError:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"IndexError on segment_id\", # todo: improve this message in next API version\n )\n return Segment.from_neo(segment, url)\n\n\n@router.get(\"/analogsignaldata/\")\nasync def get_analogsignal_data(\n url: Annotated[\n HttpUrl, Query(description=\"Location of a data file that can be read by Neo.\")\n ],\n segment_id: Annotated[\n int,\n Query(description=\"Index of the segment in which the analog signal is found.\"),\n ],\n analog_signal_id: Annotated[\n int, Query(description=\"Index of the signal within the segment.\")\n ],\n block_id: Annotated[\n int,\n Query(\n description=\"Index of the block for which metadata should be returned.\"\n ),\n ] = 0,\n type: Annotated[\n IOModule,\n Query(\n description=(\n \"Specify a specific Neo IO module that should be used to open the data file.\"\n \"If not provided, Neo will try to determine which module to use.\"\n )\n ),\n ] = 
None,\n down_sample_factor: Annotated[\n PositiveInt,\n Query(\n description=(\n \"Factor by which data should be downsampled prior to loading. \"\n \"Useful for faster loading of large files. Accepts positive integer values.\"\n )\n ),\n ] = 1,\n) -> AnalogSignal:\n \"\"\"Get an analog signal from a given segment, including both data and metadata.\"\"\"\n try:\n block = load_blocks(str(url), type)[block_id]\n except IndexError:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"IndexError on block_id\", # todo: improve this message in next API version\n )\n try:\n segment = block.segments[segment_id]\n except IndexError:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"IndexError on segment_id\", # todo: improve this message in next API version\n )\n if len(segment.analogsignals) > 0:\n container = segment.analogsignals\n else:\n container = segment.irregularlysampledsignals\n try:\n signal = container[analog_signal_id]\n except IndexError:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"IndexError on analog_signal_id\", # todo: improve this message in next API version\n )\n try:\n asig = AnalogSignal.from_neo(signal, down_sample_factor)\n except (ValueError, OSError) as err:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=str(err),\n )\n return asig\n\n\n@router.get(\"/spiketraindata/\")\nasync def get_spiketrain_data(\n url: Annotated[\n HttpUrl, Query(description=\"Location of a data file that can be read by Neo.\")\n ],\n segment_id: Annotated[\n int,\n Query(\n description=\"Index of the segment for which spike trains should be returned.\"\n ),\n ],\n block_id: Annotated[\n int,\n Query(\n description=\"Index of the block for which metadata should be returned.\"\n ),\n ] = 0,\n type: Annotated[\n IOModule,\n Query(\n description=(\n \"Specify a specific Neo IO module that should be used to open the data file.\"\n \"If not provided, Neo will try to determine which module to use.\"\n )\n ),\n ] = None,\n) -> dict[str, SpikeTrain]:\n \"\"\"Get the spike trains from a given segment, including both data and metadata.\"\"\"\n try:\n block = load_blocks(str(url), type)[block_id]\n except IndexError:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"IndexError on block_id\", # todo: improve this message in next API version\n )\n try:\n segment = block.segments[segment_id]\n except IndexError:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"IndexError on segment_id\", # todo: improve this message in next API version\n )\n return {str(i): SpikeTrain.from_neo(st) for i, st in enumerate(segment.spiketrains)}\n","sub_path":"api/resources/v1.py","file_name":"v1.py","file_ext":"py","file_size_in_byte":7214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"385079885","text":"import pygame\nfrom pygame.locals import *\n\nPIXELS_PER_GRID = 20\n\nclass Snake:\n def __init__(self, game, startingCoordinate, startingDirection):\n self.game = game\n\n self.chain = [startingCoordinate]\n for i in range(10):\n n = [startingCoordinate[0]-i-1, startingCoordinate[1]]\n self.chain.append(n)\n\n self.direction = startingDirection\n self.nextDirection = startingDirection\n self.speed = 500\n\n self.speedtimer = 0\n\n def on_loop(self, deltaTicks):\n self.speedtimer += deltaTicks\n if self.speedtimer > self.speed:\n self.speedtimer = 0\n self.update()\n\n def update(self):\n\n # Update direction\n 
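# nextDirection is a buffered input: setDirection() below accepts only perpendicular\n # turns, and the buffer is latched exactly once per movement tick here\n 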
self.direction = self.nextDirection\n\n # Move snake body\n\n for i in reversed(range(len(self.chain))):\n if i!= 0:\n self.chain[i][0] = self.chain[i-1][0]\n self.chain[i][1] = self.chain[i-1][1]\n\n # Move snake head\n\n if self.direction == 0:\n self.chain[0][0] += 1\n if self.direction == 1:\n self.chain[0][1] += 1\n if self.direction == 2:\n self.chain[0][0] += -1\n if self.direction == 3:\n self.chain[0][1] += -1\n\n # Check head in wall\n\n if self.chain[0][0] >= self.game.gridSize[0]:\n self.kill()\n if self.chain[0][1] >= self.game.gridSize[1]:\n self.kill()\n if self.chain[0][0] < 0:\n self.kill()\n if self.chain[0][1] < 0:\n self.kill()\n\n # Check head in other snake's body\n\n collided = self.game.snakeCollision(self)\n\n if type(collided) == Snake:\n self.kill(collided)\n\n # Check head in food\n\n if self.game.getFoodAt(self.chain[0]) != None:\n self.chain.append([self.chain[-1][0], self.chain[-1][1]])\n\n\n\n def on_render(self, screen):\n for i in range(len(self.chain)):\n rect = (self.chain[i][0] * PIXELS_PER_GRID, self.chain[i][1] * PIXELS_PER_GRID, PIXELS_PER_GRID, PIXELS_PER_GRID)\n if i == 0:\n pygame.draw.rect(screen, [100, 100, 100], rect, 0)\n else:\n pygame.draw.rect(screen, [75, 75, 75], rect, 0)\n\n def setDirection(self, direction):\n if (self.direction - direction)%2 != 0:\n self.nextDirection = direction\n\n def kill(self, killedBy = None):\n if killedBy == self:\n print(\"I SUICIDED\")\n elif killedBy == None:\n print(\"Killed by the world\")\n else:\n print(\"I HAVE BEEN MURDERED\")\n\nclass Food:\n def __init__(self, game, coord):\n self.coord = coord\n self.game = game\n\n self.game.food[coord] = self\n\n def on_render(self,screen):\n pygame.draw.circle(screen, [150, 50, 49],\n [int(self.coord[0]*PIXELS_PER_GRID+PIXELS_PER_GRID/2), int(self.coord[1]*PIXELS_PER_GRID+PIXELS_PER_GRID/2)],\n int(PIXELS_PER_GRID/2))\n\nclass Game:\n def __init__(self, gridSize):\n self._running = True\n self._display_surf = None\n self.size = self.width, self.height = PIXELS_PER_GRID*gridSize[0], PIXELS_PER_GRID*gridSize[1]\n self.gridSize = gridSize\n\n self.ticksPassed = 0\n self.deltaTicks = 0\n self.food = {}\n\n Food(self, (5,5))\n\n def on_init(self):\n pygame.init()\n self._display_surf = pygame.display.set_mode(self.size, pygame.HWSURFACE | pygame.DOUBLEBUF)\n self._running = True\n\n self.ownSnake = Snake(self, [10,0], 0)\n self.otherSnakes = []#Snake(self, [5,2], 0)]\n\n def snakeCollision(self, snake):\n coordinate = snake.chain[0]\n\n # Check if collision with ownSnake\n\n for i in range(len(self.ownSnake.chain)):\n if i != 0 or snake != self.ownSnake:\n if self.ownSnake.chain[i][0] == coordinate[0] and self.ownSnake.chain[i][1] == coordinate[1]:\n return self.ownSnake\n\n # Check collision with one of the other snakes\n\n for otherSnake in self.otherSnakes:\n for i in range(len(otherSnake.chain)):\n if i != 0 or snake != otherSnake:\n if otherSnake.chain[i][0] == coordinate[0] and otherSnake.chain[i][1] == coordinate[1]:\n return otherSnake\n\n return False\n\n def getFoodAt(self, coordinate):\n coordinate = tuple(coordinate)\n return self.food.get(coordinate, None)\n\n def on_event(self, event):\n if event.type == pygame.QUIT:\n self._running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_a:\n self.ownSnake.setDirection(2)\n if event.key == pygame.K_w:\n self.ownSnake.setDirection(3)\n if event.key == pygame.K_d:\n self.ownSnake.setDirection(0)\n if event.key == pygame.K_s:\n self.ownSnake.setDirection(1)\n\n\n if event.key == pygame.K_j:\n 
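# IJKL steers the second snake; on_init() leaves otherSnakes empty (the second\n # Snake is commented out), so these handlers are inert by default\n 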
self.otherSnakes[0].setDirection(2)\n if event.key == pygame.K_i:\n self.otherSnakes[0].setDirection(3)\n if event.key == pygame.K_l:\n self.otherSnakes[0].setDirection(0)\n if event.key == pygame.K_k:\n self.otherSnakes[0].setDirection(1)\n\n\n def on_loop(self):\n self.deltaTicks = pygame.time.get_ticks() - self.ticksPassed\n self.ticksPassed = pygame.time.get_ticks()\n\n self.ownSnake.on_loop(self.deltaTicks)\n\n for i in self.otherSnakes:\n i.on_loop(self.deltaTicks)\n\n def on_render(self):\n self._display_surf.fill([0, 0, 0])\n\n self.ownSnake.on_render(self._display_surf)\n for i in self.otherSnakes:\n i.on_render(self._display_surf)\n\n for i in self.food.values():\n i.on_render(self._display_surf)\n\n pygame.display.flip()\n\n def on_cleanup(self):\n pygame.quit()\n\n def on_execute(self):\n if self.on_init() == False:\n self._running = False\n\n while (self._running):\n for event in pygame.event.get():\n self.on_event(event)\n self.on_loop()\n self.on_render()\n\n self.on_cleanup()\n\n\n\n\n\n\nif __name__ == \"__main__\":\n game = Game([60, 40])\n game.on_execute()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"384641829","text":"import tensorflow as tf\n\n\nclass DataProvider():\n\n def __init__(self, tfrecords_file, batch_size, is_shuffle):\n self.tfrecords_file = tfrecords_file\n self.batch_size = batch_size\n self.is_shuffle = is_shuffle\n\n def get_batch(self):\n dataset = tf.data.TFRecordDataset(self.tfrecords_file)\n dataset = dataset.map(self.parse_example)\n if self.is_shuffle:\n dataset = dataset.shuffle(buffer_size=10000)\n padded_shapes = ([None], [1])\n self.dataset = dataset.padded_batch(self.batch_size, padded_shapes=padded_shapes)\n\n def parse_example(self, serialized_example):\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'file': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.string),\n 'frame': tf.FixedLenFeature([], tf.string),\n }\n )\n\n frame = features['frame']\n label = features['label']\n\n frame = tf.decode_raw(frame, tf.float32)\n label = tf.decode_raw(label, tf.int32)\n\n return frame, label","sub_path":"data_provider.py","file_name":"data_provider.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"446856789","text":"import pygame\nimport random\n\n# This is a minesweeper game that can be solved manually or by an AI\n# December 4, 2019\n__author__ = \"Tate Staples\"\n\n# todo: do thing with adding sums of neighbors\n# todo fix werid fill thing\n\nwindowWidth = 800\nwindowHeight = 800\nwindow = pygame.display.set_mode((windowWidth, windowHeight))\n\nrow_count = 30\ncol_count = 30\nbomb_count = 100\n\n# A bunch of color rgb values\nWHITE = (255, 255, 255)\nYELLOW = (255, 255, 0)\nBLUE = (0, 0, 255)\nRED = (255, 0, 0)\nPINK = (255, 192, 203)\nORANGE = (255, 165, 0)\nLIGHT_BLUE = (135, 206, 235)\nGREEN = (0, 255, 0)\nBROWN = (165, 42, 42)\nBLACK = (0, 0, 0)\n\nbomb_cords = [(random.randint(0, row_count-1), random.randint(0, col_count-1)) for i in range(bomb_count)]\nrun = True\n\n# establish value of each square\nboard_vals = []\nflags = []\nfor r in range(row_count):\n row_values = []\n row_flags = []\n for c in range(col_count):\n row_flags.append(False)\n if (r, c) in bomb_cords:\n row_values.append(\"bomb\")\n else:\n surrounding_bombs = 0\n for i in 
range(-1, 2):\n                for j in range(-1, 2):\n                    if (r+i, c+j) in bomb_cords:\n                        surrounding_bombs += 1\n            row_values.append(surrounding_bombs)\n    board_vals.append(row_values)\n    flags.append(row_flags)\n\n# create starter display\nfor r in range(row_count+1):\n    for c in range(col_count+1):\n        row_height = windowHeight // row_count\n        col_width = windowWidth // col_count\n        x_cord = col_width * c\n        y_cord = row_height * r\n        if (r, c) in bomb_cords and False:  # this is used for debugging\n            pygame.draw.rect(window, PINK, (x_cord, y_cord, col_width - 1, row_height - 1))\n        else:\n            pygame.draw.rect(window, LIGHT_BLUE, (x_cord, y_cord, col_width-1, row_height-1))\npygame.display.update()\n\n\n# converts clicked location to row/col\ndef get_square(cords):\n    global windowWidth, windowHeight, row_count, col_count\n    x, y = cords\n    row_height = windowHeight // row_count\n    col_width = windowWidth // col_count\n    row = y // row_height\n    col = x // col_width\n    return row, col\n\n\n# flags a spot that the player thinks is a bomb\ndef flag(spot):\n    global flags\n    row, col = spot\n    row_height = windowHeight // row_count\n    col_width = windowWidth // col_count\n    x_cord = col_width * col\n    y_cord = row_height * row\n    if flags[row][col]:\n        pygame.draw.rect(window, LIGHT_BLUE, (x_cord, y_cord, col_width - 1, row_height - 1))\n        flags[row][col] = False\n    else:\n        pygame.draw.rect(window, RED, (x_cord, y_cord, col_width-1, row_height-1))\n        flags[row][col] = True\n\n\n# reveals a clicked square\nrevealed = []  # list of revealed coordinates\ndef reveal(cords):\n    global board_vals, revealed, windowWidth, windowHeight, row_count, col_count, run\n    row, col = cords\n    val = board_vals[row][col]\n    if val == \"bomb\":\n        print(\"You hit a bomb, LOSER!\")\n        print(cords)\n        for row in calculate_probs():\n            for val in row:\n                print(round(val, 1), end=\"\\t\")\n            print()\n        run = False\n        return\n        #quit()\n    revealed.append((row, col))\n\n    row_height = windowHeight // row_count\n    col_width = windowWidth // col_count\n    x_cord = col_width * col\n    y_cord = row_height * row\n\n    text_font = 'Comic Sans MS'\n    # points = pixels * 72 / 96\n    pixels = row_height if row_height < col_width else col_width\n    text_size = int(pixels * 72 / 48)\n    font = pygame.font.SysFont(text_font, text_size)\n    score_surface = font.render(str(val), False, RED)\n\n    window.blit(score_surface, (x_cord, y_cord))\n\n    if val == 0:  # this section flood-reveals the surrounding empty area\n        for neighbor in get_neighbors(cords):\n            if neighbor not in revealed and neighbor not in locked:\n                try:\n                    reveal(neighbor)\n                except Exception as e:\n                    print(f\"Exception: {e}\")\n\n\n# returns a list of all surrounding boxes\ndef get_neighbors(cords):\n    neighbors = []\n    row, col = cords\n    for i in range(-1, 2):\n        for j in range(-1, 2):\n            if i == 0 and j == 0:\n                continue\n            new_row = row + i\n            new_col = col + j\n            if new_col < 0 or new_col >= len(board_vals[0]):  # if not in a valid column\n                continue\n            if new_row < 0 or new_row >= len(board_vals):  # if not in a valid row\n                break\n            neighbors.append((new_row, new_col))\n    return neighbors\n\n\nlocked = []  # list of spots that aren't worth checking anymore\nknown_bombs = []  # list of previously identified bombs\ndef calculate_probs():\n    global revealed, board_vals, known_bombs, locked\n    # step 1: give every square the uniform default probability\n    bomb_probs = []\n    try:\n        default_value = bomb_count / (row_count * col_count - len(revealed) - len(known_bombs))\n    except ZeroDivisionError:  # this occurs when you win\n        print(\"over\")\n        return\n    for row in range(row_count):  # creates initial probabilities\n        row_probs = []\n        for 
col in range(col_count):\n            row_probs.append(default_value)\n        bomb_probs.append(row_probs)\n\n    # step 2: mark locked squares (fully resolved, not worth recalculating)\n    for r, c in locked:\n        bomb_probs[r][c] = 2.0\n\n    # step 3: establish already found bombs\n    for r, c in known_bombs:\n        bomb_probs[r][c] = 1.0\n\n    # step 4: establish simple probs from the revealed numbers\n    for row, col in revealed:\n        val = board_vals[row][col]\n        bomb_probs[row][col] = 2.0  # can't be an already-revealed number\n        bomb_probs = update_probs((row, col), val, bomb_probs, default_value)\n\n    # step 5: use known bombs\n    is_bomb = True\n    while is_bomb:\n        is_bomb = False\n        for row, r in enumerate(bomb_probs):\n            for col, c in enumerate(r):\n                if c == 1.0 and (row, col) not in known_bombs:\n                    is_bomb = True\n                    known_bombs.append((row, col))\n                    flag((row, col))  # this is weird late game\n                    neighbors = get_neighbors((row, col))\n                    for neighbor in neighbors:\n                        if neighbor in revealed:\n                            val = board_vals[neighbor[0]][neighbor[1]]\n                            bomb_probs = update_probs(neighbor, val, bomb_probs, default_value)\n    return bomb_probs\n\n\n# calculates probabilities\ndef update_probs(cords, val, bomb_probs, default_val):\n    global known_bombs, locked\n    neighbors = get_neighbors(cords)\n    amount_of_known = 0  # amount of revealed squares\n    amount_of_unknown = 0  # amount of uncertain squares\n    amount_of_bombs = 0  # amount of already found bombs\n    blanks = []\n    for neighbor in neighbors:  # get a count of each type\n        r, c = neighbor\n        if neighbor in revealed or bomb_probs[r][c] == 0 or neighbor in locked:  # if the neighbor is a known\n            amount_of_known += 1\n        elif bomb_probs[r][c] == 1.0 or (r, c) in known_bombs:  # if there is a bomb there, 100% chance\n            amount_of_bombs += 1\n        else:\n            amount_of_unknown += 1\n            blanks.append(neighbor)\n    try:\n        prob = (val - amount_of_bombs) / amount_of_unknown  # formula for probability\n    except ZeroDivisionError:  # occurs when nothing unknown around it\n        locked.append(cords)  # this saves this coordinate so it doesn't have to be recalculated every time\n        revealed.remove(cords)\n        return bomb_probs\n    for spot in blanks:\n        r, c = spot\n        if bomb_probs[r][c] < prob or bomb_probs[r][c] == default_val or prob == 0:  # checks if it should override current prob\n            bomb_probs[r][c] = prob\n    return bomb_probs\n\n\n# AI to solve\ndef solve():\n    global revealed, bomb_count, board_vals, row_count, col_count, run\n\n    while run:  # main loop\n        bomb_probs = calculate_probs()  # solves for all probabilities\n        if bomb_probs is None:  # prevents a bug that happens occasionally\n            break\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                quit()\n        pygame.time.delay(0)  # this can be raised if you want the AI to slow down so people can watch\n\n        # finds the lowest value\n        spot = None  # initializes value\n        minimum = 1\n        for r, row in enumerate(bomb_probs):\n            low = min(row)\n            i = row.index(low)\n            if low < minimum:\n                minimum = low\n                spot = r, i\n\n        if spot is not None:  # prevents possible exception\n            reveal(spot)  # clicks the lowest spot\n        pygame.display.update()  # updates the display\n\n\ndef user_input():\n    global run\n    while run:\n        has_clicked = False\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                run = False\n            if event.type == pygame.MOUSEBUTTONDOWN:\n                clicked = get_square(pygame.mouse.get_pos())\n                reveal(clicked)\n                has_clicked = True\n            keys = pygame.key.get_pressed()\n            if keys[pygame.K_SPACE]:\n                location = pygame.mouse.get_pos()\n                square = get_square(location)\n                flag(square)\n                has_clicked = True\n            if keys[pygame.K_RETURN]:\n                solve()\n                run = False\n        if has_clicked:\n            
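# redraw only when something actually changed (click, flag, or solve),\n            # instead of flipping the display on every pass of the input loop\n            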
pygame.display.update()\n\n\n# main method that takes input and dispatches to the chosen solve mode\ndef main():\n    choice = input(\"AI or player? \")  # user input\n    pygame.init()\n\n    if 'ai' in choice.lower():\n        solve()\n    else:\n        user_input()\n\n\nif __name__ == \"__main__\":\n    main()\n\n    # this is so the app doesn't quit when over\n    pygame.display.update()\n    while True:\n        pygame.time.delay(1000)\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                quit()\n            if event.type == pygame.KEYDOWN:\n                print(\"Test\")\n                main()\n","sub_path":"Minesweeper.py","file_name":"Minesweeper.py","file_ext":"py","file_size_in_byte":10011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"614617141","text":"##!/usr/bin/env python\n## -*- coding: utf-8 -*-\n\ndef printa_liste(words_list, occurrency_list):\n    x = 0\n    for parola in words_list:\n        print(parola)\n        print(occurrency_list[x])\n        x += 1\n\n\ni = 0\nwords_list = []\noccurrency_list = []\n\nwhile i < 1:\n    i += 1\n    articolo = \"contenuto_articolo_\" + str(i) + \".txt\"\n    contenuto = open(articolo, 'r')\n    for line in contenuto:\n        for word in line.split():\n            if word in words_list:\n                word_index = words_list.index(word)\n                occurrency_list[word_index] += 1\n            else:\n                words_list.append(word)\n                occurrency_list.append(1)\n    contenuto.close()\n    printa_liste(words_list, occurrency_list)\n\n\n\n\n\n\n\n\n\n\n","sub_path":"save_content_in_list.py","file_name":"save_content_in_list.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"292164402","text":"from django.views.generic import TemplateView\nfrom django.shortcuts import render\n\n\nclass MultiFormView(TemplateView):\n    forms = {}\n    # Stores the forms that receive the logged-in user\n    pass_user = []\n\n    def get_context_data(self):\n        context = super().get_context_data()\n\n        for form_name, form in self.forms.items():\n            form_kwargs = self.get_form_kwargs(form_name)\n            form_kwargs['auto_id'] = form_name + '_%s'\n\n            if form_kwargs.get('instance'):\n                context[form_name + '_instance'] = form_kwargs.get('instance')\n\n            context['form_' + form_name] = form(**form_kwargs)\n\n        get_view_data_method = self.get_method('get_view_data')\n\n        if get_view_data_method:\n            context.update(get_view_data_method())\n\n        return context\n\n    def get_form_kwargs(self, form_name):\n        form_kwargs = {}\n        form_base_instance_method = self.get_method('get_instance')\n        form_instance_method = self.get_method(\n            'get_{}_instance'.format(form_name))\n        form_kwargs_method = self.get_method('get_{}_kwargs'.format(form_name))\n\n        if form_base_instance_method:\n            form_kwargs['instance'] = form_base_instance_method(form_name)\n        if form_instance_method:\n            form_kwargs['instance'] = form_instance_method()\n        if form_name in self.pass_user:\n            form_kwargs['user'] = self.request.user\n        if form_kwargs_method:\n            form_kwargs.update(form_kwargs_method())\n\n        return form_kwargs\n\n    def get_current_form(self):\n        self.form_name = self.request.POST['form_name']\n        return self.forms[self.form_name]\n\n    def get_method(self, method_name):\n        return getattr(self, method_name, None)\n\n    def get(self, request, **kwargs):\n        self.kwargs = kwargs\n        return render(request, self.template_name, self.get_context_data())\n\n    def post(self, request, **kwargs):\n        self.kwargs = kwargs\n        current_form = self.get_current_form()\n        form_kwargs = self.get_form_kwargs(self.form_name)\n        form = current_form(request.POST, request.FILES, **form_kwargs)\n\n        if 
form.is_valid():\n            form_valid_method = self.get_method(self.form_name + '_form_valid')\n            form_base_valid_method = self.get_method('form_valid')\n\n            if form_valid_method:\n                return form_valid_method(form)\n            elif form_base_valid_method:\n                return form_base_valid_method(form, self.form_name)\n\n        context = self.get_context_data()\n        context['form_' + self.form_name] = form\n        return render(request, self.template_name, context)\n","sub_path":"exchange_core/base_views.py","file_name":"base_views.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"398854693","text":"import os\nimport sys\nimport copy\nimport argparse\n\nfrom avalon import io\nfrom avalon.tools import publish\n\nimport pyblish.api\nimport pyblish.util\n\nfrom pype.api import Logger\nimport pype\nfrom pype.hosts import celaction\n\nlog = Logger().get_logger(\"Celaction_cli_publisher\")\n\npublish_host = \"celaction\"\n\nPUBLISH_PATH = os.path.join(pype.PLUGINS_DIR, publish_host, \"publish\")\n\nPUBLISH_PATHS = [\n    PUBLISH_PATH,\n    os.path.join(pype.PLUGINS_DIR, \"ftrack\", \"publish\")\n]\n\n\ndef cli():\n    parser = argparse.ArgumentParser(prog=\"celaction_publish\")\n\n    parser.add_argument(\"--currentFile\",\n                        help=\"Pass file to Context as `currentFile`\")\n\n    parser.add_argument(\"--chunk\",\n                        help=(\"Render chunks on farm\"))\n\n    parser.add_argument(\"--frameStart\",\n                        help=(\"Start of frame range\"))\n\n    parser.add_argument(\"--frameEnd\",\n                        help=(\"End of frame range\"))\n\n    parser.add_argument(\"--resolutionWidth\",\n                        help=(\"Width of resolution\"))\n\n    parser.add_argument(\"--resolutionHeight\",\n                        help=(\"Height of resolution\"))\n\n    celaction.kwargs = parser.parse_args(sys.argv[1:]).__dict__\n\n\ndef _prepare_publish_environments():\n    \"\"\"Prepares environments based on request data.\"\"\"\n    env = copy.deepcopy(os.environ)\n\n    project_name = os.getenv(\"AVALON_PROJECT\")\n    asset_name = os.getenv(\"AVALON_ASSET\")\n\n    io.install()\n    project_doc = io.find_one({\n        \"type\": \"project\"\n    })\n    av_asset = io.find_one({\n        \"type\": \"asset\",\n        \"name\": asset_name\n    })\n    parents = av_asset[\"data\"][\"parents\"]\n    hierarchy = \"\"\n    if parents:\n        hierarchy = \"/\".join(parents)\n\n    env[\"AVALON_PROJECT\"] = project_name\n    env[\"AVALON_ASSET\"] = asset_name\n    env[\"AVALON_TASK\"] = os.getenv(\"AVALON_TASK\")\n    env[\"AVALON_WORKDIR\"] = os.getenv(\"AVALON_WORKDIR\")\n    env[\"AVALON_HIERARCHY\"] = hierarchy\n    env[\"AVALON_PROJECTCODE\"] = project_doc[\"data\"].get(\"code\", \"\")\n    env[\"AVALON_APP\"] = f\"hosts.{publish_host}\"\n    env[\"AVALON_APP_NAME\"] = \"celaction_local\"\n\n    env[\"PYBLISH_HOSTS\"] = publish_host\n\n    os.environ.update(env)\n\n\ndef main():\n    # prepare all environments\n    _prepare_publish_environments()\n\n    # Registers pype's Global pyblish plugins\n    pype.install()\n\n    for path in PUBLISH_PATHS:\n        path = os.path.normpath(path)\n\n        if not os.path.exists(path):\n            continue\n\n        log.info(f\"Registering path: {path}\")\n        pyblish.api.register_plugin_path(path)\n\n    pyblish.api.register_host(publish_host)\n\n    # Register project-specific plugins\n    project_name = os.environ[\"AVALON_PROJECT\"]\n    project_plugins_paths = os.getenv(\"PYPE_PROJECT_PLUGINS\", \"\")\n    for path in project_plugins_paths.split(os.pathsep):\n        plugin_path = os.path.join(path, project_name, \"plugins\")\n        if os.path.exists(plugin_path):\n            pyblish.api.register_plugin_path(plugin_path)\n\n    return publish.show()\n\n\nif __name__ == \"__main__\":\n    cli()\n    result 
= main()\n    sys.exit(not bool(result))\n","sub_path":"pype/hosts/celaction/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"422585383","text":"#-*- coding:utf8\nimport numpy as np\nimport os\nimport argparse\nfrom my_function import get_model,get_preprocess\nimport cv2\nimport pandas as pd\nfrom keras.preprocessing import image\nfrom PIL import Image as pil_image\n\ndef own_zoom_large(imgs = None, zoom_range = 64, target_size = (448,448)):\n    imgs = imgs[0,:,:,:]\n    img_size = imgs.shape[0]\n    offset = zoom_range #np.random.randint(zoom_range)\n    im = imgs[offset:(img_size-offset), offset:(img_size-offset), :]\n    im = cv2.resize(im, target_size)\n    return np.expand_dims(im, axis=0)\n\ndef own_zoom_little(imgs = None, zoom_range = 64, target_size = (448,448)):\n    imgs = imgs[0,:,:,:]\n    raw_shape = imgs.shape[0]\n    data = np.zeros((raw_shape, raw_shape, 3),dtype=np.uint8)\n    random_num = 32 #np.random.randint(zoom_range)\n    offset = int(random_num / 2)\n    img_tem = imgs\n    img_tem = cv2.resize(img_tem,(512-offset*2,512-offset*2))\n    data[offset:(512-offset),offset:(512-offset), 0] = img_tem[:,:,0]\n    data[offset:(512-offset),offset:(512-offset), 1] = img_tem[:,:,1]\n    data[offset:(512-offset),offset:(512-offset), 2] = img_tem[:,:,2]\n    im = cv2.resize(data, target_size)\n    return np.expand_dims(im, axis=0)\n\n# Rough implementation; to be reworked once the data format settles\ndef predict(args):\n    model_path = args.model_path\n    model_name = args.model_name # name of the model to use\n    class_name = args.class_name\n    class_num = args.class_num\n    img_prefix = args.img_prefix\n    img_size = args.img_size\n    model = get_model(model_name = args.model_name, input_shape = (img_size,img_size,3), class_num = args.class_num)\n    preprocess_input = get_preprocess(args)\n    model.load_weights(model_path)\n\n    fin = pd.read_csv(args.question_file,sep='\\t',header=None)\n    fin.columns = ['idx', 'label', 'image']\n    Data=fin\n    IndexList = Data.index\n\n    finout = open(args.result_file,'w')\n    finout1 = open(args.result_file+'.1','w')\n    finout2 = open(args.result_file+'.2','w')\n    finout3 = open(args.result_file+'.3','w')\n    finout4 = open(args.result_file+'.4','w')\n    finout5 = open(args.result_file+'.5','w')\n\n    for pos in IndexList:\n        name = Data.at[pos,'image']\n        fn = img_prefix + '/' + name\n        if not os.path.isfile(fn):\n            print('cannot find the image at '+fn)\n            break\n        imgs = image.load_img(fn)\n        imgs = image.img_to_array(imgs)\n        imgs = np.expand_dims(imgs, axis=0)\n        imgs = preprocess_input(imgs)\n        imgs_mirror = imgs[:, :, ::-1, :]\n\n        imgs_large_1 = own_zoom_large(imgs,zoom_range=16)\n        imgs_large_2 = own_zoom_large(imgs,zoom_range=32)\n        imgs_large_3 = own_zoom_large(imgs,zoom_range=48)\n        imgs_large_4 = own_zoom_large(imgs,zoom_range=64)\n        imgs_mirror_large_1 = own_zoom_large(imgs_mirror,zoom_range=16)\n        imgs_mirror_large_2 = own_zoom_large(imgs_mirror,zoom_range=32)\n        imgs_mirror_large_3 = own_zoom_large(imgs_mirror,zoom_range=48)\n        imgs_mirror_large_4 = own_zoom_large(imgs_mirror,zoom_range=64)\n\n        imgs = np.expand_dims(cv2.resize(imgs[0,:,:,:], (448,448)) , axis=0)\n        imgs_mirror = np.expand_dims(cv2.resize(imgs_mirror[0,:,:,:], (448,448)), axis=0)\n\n        result_ori = model.predict([np.concatenate([imgs,imgs_large_1,imgs_large_2,imgs_large_3,imgs_large_4])])\n        result_mir = model.predict([np.concatenate([imgs_mirror,imgs_mirror_large_1,imgs_mirror_large_2,imgs_mirror_large_3,imgs_mirror_large_4])])\n\n        result1 = np.mean([result_ori[0,:],result_mir[0,:]],0)\n        result2 = 
np.mean([result_ori[1,:],result_mir[1,:]],0)\n        result3 = np.mean([result_ori[2,:],result_mir[2,:]],0)\n        result4 = np.mean([result_ori[3,:],result_mir[3,:]],0)\n        result5 = np.mean([result_ori[4,:],result_mir[4,:]],0)\n\n        result_all = (result1 + result2 + result3 + result4 + result5) / 5.\n\n        # write out the prediction results\n        result_str = name + ','\n        result_str1 = name + ','\n        result_str2 = name + ','\n        result_str3 = name + ','\n        result_str4 = name + ','\n        result_str5 = name + ','\n        strs = [str('%.7f'%(result_all[j])) for j in range(0, class_num)]\n        strs1 = [str('%.7f'%(result1[j])) for j in range(0, class_num)]\n        strs2 = [str('%.7f'%(result2[j])) for j in range(0, class_num)]\n        strs3 = [str('%.7f'%(result3[j])) for j in range(0, class_num)]\n        strs4 = [str('%.7f'%(result4[j])) for j in range(0, class_num)]\n        strs5 = [str('%.7f'%(result5[j])) for j in range(0, class_num)]\n        for ii in range(0, len(strs)):\n            data = strs[ii]\n            data1 = strs1[ii]\n            data2 = strs2[ii]\n            data3 = strs3[ii]\n            data4 = strs4[ii]\n            data5 = strs5[ii]\n            result_str += data[0:8]\n            result_str1 += data1[0:8]\n            result_str2 += data2[0:8]\n            result_str3 += data3[0:8]\n            result_str4 += data4[0:8]\n            result_str5 += data5[0:8]\n\n            if(ii != len(strs)-1):\n                result_str += ';'\n                result_str1 += ';'\n                result_str2 += ';'\n                result_str3 += ';'\n                result_str4 += ';'\n                result_str5 += ';'\n        result_str += '\\n'\n        result_str1 += '\\n'\n        result_str2 += '\\n'\n        result_str3 += '\\n'\n        result_str4 += '\\n'\n        result_str5 += '\\n'\n        finout.write(result_str)\n        finout1.write(result_str1)\n        finout2.write(result_str2)\n        finout3.write(result_str3)\n        finout4.write(result_str4)\n        finout5.write(result_str5)\n    finout.close()\n    finout1.close()\n    finout2.close()\n    finout3.close()\n    finout4.close()\n    finout5.close()\n\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description=\"command for evaluation\")\n    parser.add_argument('--gpus', type=str, default='0', help='the gpus to be used, e.g. \"0,1,2,3\"')\n    parser.add_argument('--model-name', type=str, default='InceptionResNetV2', help='the name of the model')\n    parser.add_argument('--class-name', type=str, default='collar_design_labels', help='the name of the class')\n    parser.add_argument('--class-num', type=int, default=5, help='the class number of your task')\n    parser.add_argument('--model-path', type=str, default='/home/leiji/disk1t/zhengjiexin/keras_model/save_model/InceptionResNetV2/sleeve_length_labels/weights.0007.hdf5', help='the path of the saved model')\n    parser.add_argument('--img-prefix', type=str, default='/home/leiji/disk1t/zhengjiexin/keras_image/4-29/round2_Images_bbox/round2/testA/bbox_new', help='the path of test data')\n    parser.add_argument('--question-file', type=str, default='question.csv', help='the path of question file')\n    parser.add_argument('--result-file', type=str, default='result.csv', help='the result file')\n    parser.add_argument('--img-size', type=int, default=448, help='the shape of the input image')\n    args = parser.parse_args()\n\n    os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpus\n    from cls_model import *\n    import keras\n\n    predict(args)\n\n\n\n","sub_path":"final/keras_model/code_new/eval_zoom.py","file_name":"eval_zoom.py","file_ext":"py","file_size_in_byte":7058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"398102474","text":"\n# coding: utf-8\n\n# # Parsing HTML with BeautifulSoup\n\n# In[2]:\n\n# import packages\nimport requests\n\n\n# In[3]:\n\n# import packages\nfrom bs4 import BeautifulSoup\n\n\n# In[4]:\n\n# Assign the url\nurl = 
'https://www.python.org/~guido/'\n\n\n# In[5]:\n\nr = requests.get(url)\n\n\n# In[6]:\n\nhtml_doc = r.text\n\n\n# In[8]:\n\nsoup = BeautifulSoup(html_doc, 'lxml')\n\n\n# In[12]:\n\npretty_soup = soup.prettify()\n\n\n# In[13]:\n\nprint(pretty_soup)\n\n\n# In[ ]:\n\n\n\n","sub_path":"Parsing HTML with BeautifulSoup.py","file_name":"Parsing HTML with BeautifulSoup.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"532625909","text":"import sys\nimport csv\nimport os\n\nclass CoupleClusterObject:\n def __init__(self, songName1,songName2,algo,embedding,perc):\n self.songName1 = songName1\n self.songName2 = songName2\n self.algo= algo\n self.embedding=embedding\n self.perc= perc\n\n\ndef generateCSV():\n\n with open('./Percentage_1/'+algo+'/'+n_couple_str+'.csv', 'w') as csvfile:\n filewriter = csv.writer(csvfile, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n filewriter.writerow(['N','Song1','Song2','Algo','Embedding','Perc'])\n i=0\n for l in range(0,len(arrayPerc)):\n filewriter.writerow([i,arrayPerc[i].songName1,arrayPerc[i].songName2\n ,arrayPerc[i].algo,arrayPerc[i].embedding,arrayPerc[i].perc])\n i=i+1\n\ndef read_CSV(filename, folderPath, song1, song2):\n print(\"FILENAME\"+filename)\n path=folderPath+filename\n with open(path) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f'Column names are {\", \".join(row)}')\n line_count += 1\n else:\n print(row[1], row[2])\n song1CSVf=row[1]\n song2CSVf=row[2]\n if (song1CSVf==song1 and song2CSVf==song2):\n print(\"entra\")\n cnt=int(row[3])\n print(\"CNT:\",cnt)\n perc=((69-cnt)/69)*100\n percDef=100-perc\n return percDef\n\n line_count += 1\n\n print(f'Processed {line_count} lines.')\n\n\nn_couple= sys.argv[2]\nn_couple_str= str(n_couple)\nalgo=sys.argv[1]\nn_couple= int(n_couple)\narrayPerc=[]\n\npath = \"./tableCouple.csv\"\nwith open(path) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f'Column names are {\", \".join(row)}')\n line_count += 1\n else:\n #print(row[1], row[2])\n if (line_count==n_couple+1):\n print(\"entra\")\n song1CSV = row[1]\n song2CSV = row[2]\n\n line_count += 1\n\nprint(f'Processed {line_count} lines.')\n\nprint(song1CSV)\nprint(song2CSV)\n\nfolderPath='./Results_1_General/'+algo+'/'\nfor file in os.listdir(folderPath):\n try:\n if file.endswith((\".csv\")):\n print(\"CSV file found:\\t\", file)\n #Per ogni file nella cartella,leggilo\n percDef = read_CSV(file, folderPath, song1CSV, song2CSV)\n print(\"PERCDEF: \",percDef)\n percDef = \"{:.2f}\".format(percDef)\n percDef=float(percDef)\n if (file == 'coupleCnt_eng_50.csv'):\n embedding = 50\n if (file== 'coupleCnt_eng_100.csv'):\n embedding = 100\n if (file== 'coupleCnt_eng_150.csv'):\n embedding = 150\n if (file == 'coupleCnt_eng_200.csv'):\n embedding = 200\n if (file == 'coupleCnt_eng_300.csv'):\n embedding = 300\n arrayPerc.append(CoupleClusterObject(song1CSV,song2CSV,algo,embedding,percDef))\n\n except Exception as e:\n raise e\n print(\"No files found here!\")\n\n\nfor i in range(0,len(arrayPerc)):\n print(arrayPerc[i].perc)\n\ngenerateCSV()","sub_path":"AnalisysScript/plotPerc.py","file_name":"plotPerc.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"203423323","text":"from 
A5CompleteCode import cookies\nfrom sqlalchemy import create_engine, desc, and_, or_, not_\nfrom sqlalchemy.sql import select, func\nimport pprint\nfrom A5CompleteCode import cookies, orders, users, line_items\nimport sqlalchemy as sqal \n\nengine = create_engine('postgresql+psycopg2://@localhost:' '5432/mydb')\nconnection = engine.connect()\npp = pprint.PrettyPrinter(indent=2)\n'''\ndef get_orders_by_customer(customer_name):\n columns=[orders.c.order_id, users.c.username, users.c.phone, cookies.c.cookie_name,\\\n line_items.c.quantity, line_items.c.extended_cost]\n cust_orders = select(columns)\n cust_orders = cust_orders.select_from(users.join(orders).join(line_items).join(cookies))\n cust_orders = cust_orders.where(users.c.username == customer_name)\n result = connection.execute(cust_orders).fetchall()\n return result\nprint(get_orders_by_customer(\"cakeeater\"))\n'''\n\n\n''' now we re going to add another dimension to the parameters about shipped '''\ndef get_orders_by_customer(customer_name, shipped=None, details=False):\n columns=[orders.c.order_id, users.c.username, users.c.phone, cookies.c.cookie_name,\\\n line_items.c.quantity, line_items.c.extended_cost]\n cust_orders = select(columns)\n cust_orders = cust_orders.select_from(users.join(orders).join(line_items).join(cookies))\n cust_orders = cust_orders.where(users.c.username == customer_name)\n if shipped is not None:\n cust_orders = cust_orders.where(orders.c.shipped == shipped)\n result = connection.execute(cust_orders).fetchall()\n return result\n\n#print(get_orders_by_customer('cakeeater'))\n\n#print(get_orders_by_customer('cakeeater', details=True)) \n\n#print(get_orders_by_customer('cakeeater', shipped=True))\n\nprint(get_orders_by_customer('cakeeater', shipped=\"shipped\")) \n\n#get_orders_by_customer('cakeeater', shipped=False, details=True)\n ","sub_path":"SqlAlchemy/B91ChainingQuery.py","file_name":"B91ChainingQuery.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"76624314","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/icnews/acquire/tests/test_adqnews.py\n# Compiled at: 2008-10-06 10:31:17\nimport unittest\nfrom icnews.acquire.tests.base import ICNewsAcquireTestCase\n\nclass TestAdqnews(ICNewsAcquireTestCase):\n \"\"\"Testing the product setup\"\"\"\n __module__ = __name__\n\n def afterSetUp(self):\n \"\"\"Ran before every unit test\"\"\"\n self.qi = self.portal.portal_quickinstaller\n self.catalog = self.portal.portal_catalog\n self.types = self.portal.portal_types\n\n def test_type_installed(self):\n \"\"\"Test that the Adqnews type is instaled.\"\"\"\n adqnews_fti = getattr(self.types, 'Adqnews')\n self.assertEquals('Adqnews', adqnews_fti.title)\n\n def test_fields(self):\n \"\"\"Test fields\"\"\"\n self.setRoles(('Manager', ))\n self.portal.invokeFactory('Adqnews', 'adqnews1')\n fields = ['title', 'source', 're', 'description', 'encoding', 'store']\n object_fields = self.portal.adqnews1.schema.fields()\n object_fields = [ i.getName() for i in object_fields ]\n for field in fields:\n self.failUnless(field in object_fields)\n\n def test_global_allow(self):\n \"\"\"Test that Adqnews is globally allowed\"\"\"\n adqnews_fti = getattr(self.types, 'Adqnews')\n self.failUnless(adqnews_fti.global_allow)\n\n\ndef test_suite():\n suite = unittest.TestSuite()\n 
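# makeSuite gathers every test_* method on the case class into the suite\n    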
suite.addTest(unittest.makeSuite(TestAdqnews))\n return suite","sub_path":"pycfiles/icnews.acquire-0.9_dev_r371-py2.4/test_adqnews.py","file_name":"test_adqnews.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"304808460","text":"import json\n\nimport pymongo\nimport requests\n\nfrom config import *\n\n\ndef getFundsTransferVisaCardDetails(firstName, lastName):\n client = pymongo.MongoClient(mongo_url)\n db = client.main\n users = db.user\n\n specificUser = users.find_one({\"name\": {\"first\": firstName, \"last\": lastName}})\n\n # If user does not exist - send None as response\n if (not specificUser):\n return None\n\n url = base_url + \"paai/fundstransferattinq/v5/cardattributes/fundstransferinquiry\"\n headers = {\"Accept\": \"application/json\"}\n body = {}\n payload = json.loads('''\n {\n \"primaryAccountNumber\": \"''' + specificUser[\"accountNumber\"] + '''\",\n \"retrievalReferenceNumber\": \"330000550000\",\n \"systemsTraceAuditNumber\": \"451006\"\n }\n ''')\n timeout = 10\n\n response = requests.post(url,\n cert=(certificate, privateKey),\n headers=headers,\n auth=(user_id, password),\n # data = body,\n json=payload,\n timeout=timeout)\n\n data = response.json()\n return data\n","sub_path":"FundsTransferInquiry.py","file_name":"FundsTransferInquiry.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"214681527","text":"import random\n\nperson_score_var = random.randint(1,10) + random.randint (1,10)\ncomp_score_var = random.randint(1,10) + random.randint (1,10) \n#dealing two cards 1-10 not using Ace\n \nwhile(1):\n print('Your score is:', person_score_var)\n #display score\n response = input(\"Type 'H' for Hit or 'S' for Stand\\n\")\n #user's input\n if response == \"Hit\" or response == \"hit\" or response ==\"h\" or response == \"H\":\n person_score_var = person_score_var + random.randint(1,10)\n #user adds another card 1-10 \n if response == \"stand\" or response == \"Stand\" or response == \"S\" or response == \"s\":\n break\n if person_score_var > 21:\n print('Your score is:', person_score_var, 'Bust! You lose.')\n print('Dealer score is:', comp_score_var)\n #automatically lose when user goes over 21, displays Dealer's score along with user's\n break\n \nwhile(1):\n if comp_score_var < 16:\n comp_score_var = comp_score_var + random.randint (1,10)\n #dealer gains another card \n elif comp_score_var > 21:\n print('Dealer score is:', comp_score_var, 'Bust! 
Dealer loses.')\n #dealer busts\n print('Your Score is: ',person_score_var)\n break\n else:\n break\n #two breaks needed because 16-21 \n \nif person_score_var <= 21 and comp_score_var <= 21:\n if person_score_var > comp_score_var:\n print(\"You Win!!!\")\n print(\"Your Score is: \",person_score_var)\n print(\"Dealer Score is: \",comp_score_var)\n elif person_score_var < comp_score_var:\n print(\"Dealer Wins.\")\n print(\"Your Score is: \",person_score_var)\n print(\"Dealer Score is: \",comp_score_var)\n else:\n print(\"Tie\")\n print(\"Your Score is: \",person_score_var)\n print(\"Dealer Score is: \",comp_score_var)\n","sub_path":"Presentation_3/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"625571565","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n#\n# Copyright 2017-2018 Martin Olejar\n# Copyright 2019-2020 NXP\n#\n# SPDX-License-Identifier: BSD-3-Clause\n\n\"\"\"Module that allows human-friendly interpretation of HAB logs.\"\"\"\n\nimport struct\n\nfrom .error_codes import HabStatusInfo, HabErrorReason\n\n\n########################################################################################################################\n# i.MX6 HAB Log Parser\n########################################################################################################################\ndef parse_mx6_log(data: bytes) -> str:\n \"\"\"Parses the HAB log data for i.MX6 devices.\n\n :param data: Data retrieved from the device\n :return: String representation of the data\n \"\"\"\n log_single_desc = {\n 0x00010000: \"BOOTMODE - Internal Fuse\",\n 0x00010001: \"BOOTMODE - Serial Bootloader\",\n 0x00010002: \"BOOTMODE - Internal/Override\",\n 0x00010003: \"BOOTMODE - Test Mode\",\n 0x00020000: \"Security Mode - Fab\",\n 0x00020033: \"Security Mode - Return\",\n 0x000200F0: \"Security Mode - Open\",\n 0x000200CC: \"Security Mode - Closed\",\n 0x00030000: \"DIR_BT_DIS = 0\",\n 0x00030001: \"DIR_BT_DIS = 1\",\n 0x00040000: \"BT_FUSE_SEL = 0\",\n 0x00040001: \"BT_FUSE_SEL = 1\",\n 0x00050000: \"Primary Image Selected\",\n 0x00050001: \"Secondary Image Selected\",\n 0x00060000: \"NAND Boot\",\n 0x00060001: \"USDHC Boot\",\n 0x00060002: \"SATA Boot\",\n 0x00060003: \"I2C Boot\",\n 0x00060004: \"ECSPI Boot\",\n 0x00060005: \"NOR Boot\",\n 0x00060006: \"ONENAND Boot\",\n 0x00060007: \"QSPI Boot\",\n 0x00061003: \"Recovery Mode I2C\",\n 0x00061004: \"Recovery Mode ECSPI\",\n 0x00061FFF: \"Recovery Mode NONE\",\n 0x00062001: \"MFG Mode USDHC\",\n 0x00070000: \"Device INIT Call\",\n 0x000700F0: \"Device INIT Pass\",\n 0x00070033: \"Device INIT Fail\",\n 0x000800F0: \"Device READ Data Pass\",\n 0x00080033: \"Device READ Data Fail\",\n 0x000A00F0: \"Plugin Image Pass\",\n 0x000A0033: \"Plugin Image Fail\",\n 0x000C0000: \"Serial Downloader Entry\",\n 0x000E0000: \"ROMCP Patch\"\n }\n\n log_double_desc = {\n 0x00080000: \"Device READ Data Call\",\n 0x00090000: \"HAB Authentication Status Code:\",\n 0x000A0000: \"Plugin Image Call\",\n 0x000B0000: \"Program Image Call\",\n 0x000D0000: \"Serial Downloader Call\"\n }\n\n ret_msg = ''\n log_loop = 0\n while log_loop < 64:\n log_value = struct.unpack_from('I', data, log_loop * 4)[0]\n\n if log_value == 0x0:\n break\n\n if log_value in log_single_desc:\n ret_msg += \" %02d. 
(0x%08X) -> %s\\n\" % (log_loop, log_value, log_single_desc[log_value])\n # TODO remove unused code> if log_value & 0xffff0000 == 0x00060000: boot_type = log_value & 0xff\n elif log_value in log_double_desc:\n ret_msg += \" %02d. (0x%08X) -> %s\\n\" % (log_loop, log_value, log_double_desc[log_value])\n log_loop += 1\n log_data = struct.unpack_from('I', data, log_loop * 4)[0]\n if log_value == 0x00090000:\n ret_msg += \" %02d. (0x%08X) -> HAB Status Code: 0x%02X %s\\n\" % \\\n (log_loop, log_data, log_data & 0xff, HabStatusInfo.desc(log_data & 0xff))\n ret_msg += \" HAB Reason Code: 0x%02X %s\\n\" % \\\n ((log_data >> 8) & 0xff, HabErrorReason.desc((log_data >> 8) & 0xff))\n else:\n ret_msg += \" %02d. (0x%08X) -> Address: 0x%08X\\n\" % (log_loop, log_data, log_data)\n else:\n ret_msg += \" Log Buffer Code not found\\n\"\n\n log_loop += 1\n\n return ret_msg\n\n\n########################################################################################################################\n# i.MX7 HAB Log Parser\n########################################################################################################################\ndef parse_mx7_log(data: bytes) -> str:\n \"\"\"Parses the HAB log data for i.MX7 devices.\n\n :param data: Data retrieved from the device\n :return: String representation of the data\n \"\"\"\n log_all_desc = {\n 0x10: \"BOOTMODE - Internal Fuse\",\n 0x11: \"BOOTMODE - Serial Bootloader \",\n 0x12: \"BOOTMODE - Internal/Override \",\n 0x13: \"BOOTMODE - Test Mode \",\n 0x20: \"Security Mode - Fab \",\n 0x21: \"Security Mode - Return \",\n 0x22: \"Security Mode - Open \",\n 0x23: \"Security Mode - Closed \",\n 0x30: \"DIR_BT_DIS = 0 \",\n 0x31: \"DIR_BT_DIS = 1 \",\n 0x40: \"BT_FUSE_SEL = 0 \",\n 0x41: \"BT_FUSE_SEL = 1 \",\n 0x50: \"Primary Image Selected \",\n 0x51: \"Secondary Image Selected \",\n 0x60: \"NAND Boot \",\n 0x61: \"USDHC Boot \",\n 0x62: \"SATA Boot \",\n 0x63: \"I2C Boot \",\n 0x64: \"ECSPI Boot \",\n 0x65: \"NOR Boot \",\n 0x66: \"ONENAND Boot \",\n 0x67: \"QSPI Boot \",\n 0x70: \"Recovery Mode I2C \",\n 0x71: \"Recovery Mode ECSPI \",\n 0x72: \"Recovery Mode NONE \",\n 0x73: \"MFG Mode USDHC \",\n 0xB1: \"Plugin Image Pass \",\n 0xBF: \"Plugin Image Fail \",\n 0xD0: \"Serial Downloader Entry \",\n 0xE0: \"ROMCP Patch \",\n 0x80: \"Device INIT Call \",\n 0x81: \"Device INIT Pass \",\n 0x91: \"Device READ Data Pass \",\n 0xA0: \"HAB Authentication Status Code: \",\n 0x90: \"Device READ Data Call \",\n 0xB0: \"Plugin Image Call \",\n 0xC0: \"Program Image Call \",\n 0xD1: \"Serial Downloader Call \",\n 0x8F: \"Device INIT Fail \",\n 0x9F: \"Device READ Data Fail \"\n }\n\n log_error_desc = {\n 0x8F: \"Device INIT Fail \",\n 0x9F: \"Device READ Data Fail \",\n 0xBF: \"Plugin Image Fail \"\n }\n\n log_tick_desc = {\n 0x80: \"Device INIT Call \",\n 0x81: \"Device INIT Pass \",\n 0x8F: \"Device INIT Fail \",\n 0x91: \"Device READ Data Pass \",\n 0x9F: \"Device READ Data Fail \",\n 0xB0: \"Plugin Image Call \",\n 0xC0: \"Program Image Call \"\n }\n\n log_address_desc = {\n 0x90: \"Device READ Data Call \",\n 0xB0: \"Plugin Image Call \",\n 0xC0: \"Program Image Call \",\n 0xD1: \"Serial Downloader Call \"\n }\n\n log_hab_desc = {\n 0xA0: \"HAB Authentication Status Code \"\n }\n\n ret_msg = ''\n log_loop = 0\n while log_loop < 64:\n log_value_full = struct.unpack_from('I', data, log_loop * 4)[0]\n log_value = (log_value_full >> 24) & 0xff\n\n if log_value == 0x0:\n break\n\n if log_value in log_all_desc:\n ret_msg += \" %02d. 
(0x%08X) -> %s\\n\" % (log_loop, log_value_full, log_all_desc[log_value])\n else:\n ret_msg += \" %02d. Log Buffer Code not found\\n\"\n if log_value in log_address_desc:\n log_loop += 1\n log_data = struct.unpack_from('I', data, log_loop * 4)[0]\n ret_msg += \" %02d. (0x%08X) -> Address: 0x%08X\\n\" % (log_loop, log_data, log_data)\n if log_value in log_hab_desc:\n log_loop += 1\n log_data = struct.unpack_from('I', data, log_loop * 4)[0]\n ret_msg += \" %02d. (0x%08X) -> HAB Status Code: 0x%02X %s\\n\" % \\\n (log_loop, log_data, log_data & 0xff, HabStatusInfo.desc(log_data & 0xff))\n ret_msg += \" HAB Reason Code: 0x%02X %s\\n\" % \\\n ((log_data >> 8) & 0xff, HabErrorReason.desc((log_data >> 8) & 0xff))\n if log_value in log_error_desc:\n ret_msg += \" Error Code: 0x%06X\\n\" % (log_value_full & 0xffffff)\n if log_value in log_tick_desc:\n log_loop += 1\n log_data = struct.unpack_from('I', data, log_loop * 4)[0]\n ret_msg += \" %02d. (0x%08X) -> Tick: 0x%08X\\n\" % (log_loop, log_data, log_data)\n\n log_loop = log_loop + 1\n\n return ret_msg\n\n\n########################################################################################################################\n# i.MXRT HAB Log Parser\n########################################################################################################################\ndef parse_mxrt_log(_data: bytes) -> str:\n \"\"\"Parses the HAB log data for i.MX RT devices.\n\n Function is not implemented yet.\n\n :param _data: Data retrieved from the device\n :return: String representation of the data\n \"\"\"\n raise NotImplementedError()\n","sub_path":"spsdk/sdp/hab_logs.py","file_name":"hab_logs.py","file_ext":"py","file_size_in_byte":8517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"110149343","text":"class Person:\n\tdef __init__(self, firstName, lastName, idNumber):\n\t\tself.firstName = firstName\n\t\tself.lastName = lastName\n\t\tself.idNumber = idNumber\n\tdef printPerson(self):\n\t\tprint(\"Name:\", self.lastName + \",\", self.firstName)\n\t\tprint(\"ID:\", self.idNumber)\n\nclass Student(Person):\n def __init__(self,firstName, lastName , idNumber , scores):\n self.firstName=firstName\n self.lastName= lastName\n self.idNumber=idNumber\n self.scores=scores\n def calculate(self):\n self.total=sum(scores)\n self.avg= self.total/len(scores)\n if 90<=self.avg<=100:\n return 'O'\n elif 80<=self.avg<90:\n return 'E'\n elif 70<=self.avg<80:\n return 'A'\n elif 55<=self.avg<70:\n return 'P'\n elif 40<=self.avg<55:\n return 'D'\n elif self.avg<40:\n return 'T'\n\n\n\nline = input().split()\nfirstName = line[0]\nlastName = line[1]\nidNum = line[2]\nnumScores = int(input()) # not needed for Python\nscores = list( map(int, input().split()) )\ns = Student(firstName, lastName, idNum, scores)\ns.printPerson()\nprint(\"Grade:\", s.calculate())\n","sub_path":"inheritance.py","file_name":"inheritance.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"278152721","text":"# -*- coding: utf-8 -*-\nimport pygame, sys, random, math\nfrom pygame.locals import *\nimport os\nimport pygame_textinput\nimport pygame.gfxdraw\n\npos_x = 100\npos_y = 100\nos.environ['SDL_VIDEO_WINDOW_POS'] = \"%d,%d\" % (pos_x,pos_y) #设置窗口起始位置\n\npygame.init()\n\nFPS = 50\nfpsClock = pygame.time.Clock()\n#设置颜色\nBLACK = (0, 0, 0)\nBLUE = (0, 0, 255)\nNAVYBLUE = ( 60, 60, 100)\n\nYELLOW_D = (120, 120,0)\nWHITE_D = (128, 128, 128)\nRED_D = 
(120, 0, 0)\nGREEN_D = (0, 120, 0)\nYELLOW = (255, 255,0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nGREEN = (0, 200, 0)\nMAROON = (200,0,200)\n\nblockWidth =100 \n# g_ClickCount = -1\ng_TotalSecond = 0\ng_TotalPieNums = 4\n\ng_USERNAME = \"psy\"\n\n\ntextinput = pygame_textinput.TextInput(initial_string=g_USERNAME, font_size=24, text_color=WHITE, font_family=\"simhei\")\n\ncurSurface = pygame.display.set_mode((630, 560), 0, 32)\npygame.display.set_caption(\"汉诺塔游戏\")\n\n# fontObj = pygame.font.Font('simsunb.ttf', 32)\nnumberFontObj = pygame.font.SysFont(\"simhei\", 36)\nresultFontObj = pygame.font.SysFont(\"simhei\", 22)\n\nfontObj = pygame.font.SysFont(\"simhei\", 50)\nheadTextObj = fontObj.render(\"汉诺塔游戏\", True, WHITE, NAVYBLUE)\nheadRectObj = headTextObj.get_rect()\nheadRectObj.center = (300, 50)\n\ninfoFontObj = pygame.font.SysFont(\"simhei\", 16)\n# infoTextObj = infoFontObj.render(\"成绩\", True, YELLOW, NAVYBLUE)\n# infoRectObj = infoTextObj.get_rect()\n# infoRectObj.center = (580, 120)\n# infoTextObj2 = infoFontObj.render(\"0步\", True, YELLOW, NAVYBLUE)\n# infoRectObj2 = infoTextObj2.get_rect()\n# infoRectObj2.center = (580, 150)\n\ninfoAreaTop = 100\ninfoAreaLeft = 530\ninfoTextObj3 = infoFontObj.render(\"计 时\", True, YELLOW, NAVYBLUE)\ninfoRectObj3 = infoTextObj3.get_rect()\ninfoRectObj3.top = infoAreaTop + 10\ninfoRectObj3.left = infoAreaLeft\n\ninfoTextObj4 = infoFontObj.render(\"0秒\", True, YELLOW, NAVYBLUE)\ninfoRectObj4 = infoTextObj4.get_rect()\ninfoRectObj4.top = infoRectObj3.bottom + 10\ninfoRectObj4.left = infoAreaLeft\n# infoRectObj4.center = (580, 230)\n\ninfoHelpText = infoFontObj.render(\"按回车开始\", True, YELLOW, NAVYBLUE)\ninfoHelpRect = infoHelpText.get_rect()\ninfoHelpRect.top = infoRectObj4.bottom + 20\ninfoHelpRect.left = infoAreaLeft\n# infoHelpRect.center = (575, 270)\n\nnameText = infoFontObj.render(\"玩家:\", True, WHITE, NAVYBLUE)\nnameRect = nameText.get_rect()\nnameRect.top = infoHelpRect.bottom + 30\nnameRect.left = infoAreaLeft\n# nameRect.center = (570, 320)\n\nrankTmpText = infoFontObj.render(\"排名\", True, YELLOW, NAVYBLUE)\nrankTmpRect = rankTmpText.get_rect()\nrankTmpRect.left = infoAreaLeft\nrankTmpRect.top = nameRect.bottom + 80\n\nLevel3Text = infoFontObj.render(\"三阶\", True, WHITE, NAVYBLUE)\nLevel3Rect = Level3Text.get_rect()\nLevel3Rect.top = 440\nLevel3Rect.left = infoAreaLeft\n\nLevel4Text = infoFontObj.render(\"四阶\", True, WHITE, NAVYBLUE)\nLevel4Rect = Level4Text.get_rect()\nLevel4Rect.top = Level3Rect.bottom+10\nLevel4Rect.left = infoAreaLeft\n\nLevel5Text = infoFontObj.render(\"五阶\", True, WHITE, NAVYBLUE)\nLevel5Rect = Level5Text.get_rect()\nLevel5Rect.top = Level4Rect.bottom+10\nLevel5Rect.left = infoAreaLeft\n\nlstRankInfo = []\nlstRankNameAndTime = {'3':[], '4':[], '5':[]}\ndef getRankInfo(flag = \"read\"):\n global lstRankInfo, lstRankNameAndTime\n if flag == 'read':\n lstRankNameAndTime = {'3':[], '4':[], '5':[]}\n if os.path.exists(r'rankHannuota.dat'):\n with open(r'rankHannuota.dat', encoding='utf-8' ) as f:\n icount = 0\n \n for irank in f.readlines():\n \n irank = irank.strip()\n if len(irank) == 0:\n break\n tmplevel,tmpsecond, tmpname = irank.split(',')\n # print(tmplevel, type(g_TotalPieNums))\n lstRankNameAndTime[tmplevel].append([int(tmpsecond), tmpname])\n \n lstRankInfo = []\n for ikey in lstRankNameAndTime:\n if int(ikey) == g_TotalPieNums:\n lstRankNameAndTime[ikey].sort()\n for iitem in lstRankNameAndTime[ikey][:3]:\n icount += 1\n rankTmpText = infoFontObj.render(iitem[1]+\":\"+str(iitem[0])+\"秒\", True, YELLOW, 
NAVYBLUE)\n rankTmpRect = rankTmpText.get_rect()\n rankTmpRect.left = infoAreaLeft\n rankTmpRect.top = nameRect.bottom + 80 +icount*25\n # rankTmpRect.center = (570, 400+icount*30)\n lstRankInfo.append([rankTmpText, rankTmpRect])\n # print(lstRankNameAndTime)\n \n elif flag==\"write\":\n lstRankNameAndTime[str(g_TotalPieNums)].append([g_TotalSecond-1, g_USERNAME])\n lstRankNameAndTime[str(g_TotalPieNums)].sort()\n # print(lstRankNameAndTime,'write')\n rankInfoStr = \"\"\n for ikey in lstRankNameAndTime:\n for item in lstRankNameAndTime[ikey]:\n rankInfoStr += ikey + \",\" + str(item[0]) + \",\" + item[1] + '\\n'\n with open(r'rankHannuota.dat', 'w', encoding='utf-8' ) as f:\n f.write(rankInfoStr)\n\n getRankInfo()\n\n\ndef genRectPos():\n # global g_NumberRect\n dipanWidth = 120\n dipanHeight = 40\n dipanLeft = 40\n dipanTop = 430\n\n zhuziWidth = 5\n zhuziHeight = 220\n # zhuziLeft = dipanLeft +(dipanWidth - zhuziWidth)//2\n zhuziTop = dipanTop - zhuziHeight\n\n lap = 10\n pieWidth = dipanWidth - lap*2\n pieHeight = 20\n\n left = 30\n top = 90\n width = 480\n height = 430\n rect = pygame.Rect(left, top, width, height)\n for i in range(3):\n tmprect = pygame.Rect(left + width//3*i, top, width//3, height)\n g_BGRECT.append(tmprect)\n g_BGRECT.append(rect)\n\n for i in range(3):\n tmpleft = dipanLeft+i*(dipanWidth+50)\n tmp_Dipan = pygame.Rect(tmpleft, dipanTop, dipanWidth, dipanHeight)\n g_PieRect.append(tmp_Dipan)\n\n zhuziLeft = tmpleft +(dipanWidth - zhuziWidth)//2\n tmp_Dipan = pygame.Rect(zhuziLeft, zhuziTop, zhuziWidth, zhuziHeight)\n g_PieRect.append(tmp_Dipan)\n\n A_PieRect.append(g_PieRect[0])\n B_PieRect.append(g_PieRect[2])\n C_PieRect.append(g_PieRect[4])\n\n for i in range(g_TotalPieNums):\n tmpleft = dipanLeft + (i+1)*lap\n pieWidth = dipanWidth - lap*2*(i+1)\n tmptop = dipanTop - (i+1)*(pieHeight+2)\n tmp_Dipan = pygame.Rect(tmpleft, tmptop, pieWidth, pieHeight)\n A_PieRect.append(tmp_Dipan)\n \n#绘制背景\ndef drawBackGround():\n # print(nameRect)\n curSurface.fill(NAVYBLUE)\n pygame.draw.rect(curSurface, YELLOW, g_BGRECT[-1], 2)\n\n for i in range(3):\n if g_ClickFlag[i] == 1:\n pygame.draw.rect(curSurface, RED, g_BGRECT[i], 2) #第一根柱子背景\n else:\n pygame.draw.rect(curSurface, YELLOW, g_BGRECT[i], 2) #第一根柱子背景\n\n # lstColor = [YELLOW, RED, WHITE, MAROON]\n for i in range(len(g_PieRect)):\n pygame.draw.rect(curSurface, GREEN, g_PieRect[i])\n\n for item in A_PieRect[1:]:\n pygame.draw.rect(curSurface, WHITE_D, item)\n for item in B_PieRect[1:]:\n pygame.draw.rect(curSurface, WHITE_D, item)\n for item in C_PieRect[1:]:\n pygame.draw.rect(curSurface, WHITE_D, item)\n \n curSurface.blit(headTextObj, headRectObj)\n curSurface.blit(infoTextObj3, infoRectObj3)\n curSurface.blit(infoTextObj4, infoRectObj4)\n curSurface.blit(infoHelpText, infoHelpRect)\n curSurface.blit(nameText, nameRect)\n curSurface.blit(rankTmpText, rankTmpRect)\n curSurface.blit(Level3Text, Level3Rect)\n curSurface.blit(Level4Text, Level4Rect)\n curSurface.blit(Level5Text, Level5Rect)\n\n curSurface.blit(textinput.get_surface(), (infoAreaLeft, nameRect.bottom+10))\n for irankinfo in lstRankInfo:\n curSurface.blit(irankinfo[0], irankinfo[1])\n\n\ndef calcClickFlag(pos):\n global g_TotalPieNums, g_FirstClick, g_SecondClick, g_ClickFlag, g_GAMEOVER\n if g_GAMEOVER:\n if Level3Rect.collidepoint(pos):\n g_TotalPieNums = 3\n StartGameSet()\n elif Level4Rect.collidepoint(pos):\n g_TotalPieNums = 4\n StartGameSet()\n elif Level5Rect.collidepoint(pos):\n g_TotalPieNums = 5\n StartGameSet()\n return\n # 当柱子所在的背景被点击的时候,改变相应的标记\n 
tmpPie = [A_PieRect, B_PieRect, C_PieRect]\n for i in range(3):\n # print(i, '---', g_ClickFlag.count(1), len(tmpPie[i]))\n if g_BGRECT[i].collidepoint(pos):\n if g_ClickFlag.count(1) == 0 and len(tmpPie[i])==1: #第一次点击时,底盘上没有饼\n break\n if g_ClickFlag[i] == 1:\n if i == g_FirstClick and g_ClickFlag.count(1)==1:#当只有1个被选中时,才能取消第1次点击的\n g_FirstClick = -1\n g_ClickFlag[i] = 0\n elif i == g_SecondClick:\n g_SecondClick = -1\n g_ClickFlag[i] = 0\n else:\n if g_ClickFlag.count(1) < 2: #如果连续点击三根柱子,则第三根不记录\n g_ClickFlag[i] = 1\n if g_FirstClick == -1:\n g_FirstClick = i\n elif g_SecondClick == -1:\n g_SecondClick = i\n\n # print(g_FirstClick, g_SecondClick, g_ClickFlag)\n MovePie()\n isGameOver()\n # print(\"===\", g_FirstClick, g_SecondClick, g_ClickFlag)\n # g_ClickFlag = [0,0,0]\n # g_FirstClick, g_SecondClick = -1, -1\ng_sn_arr = []\ndef hannuota_genarr(arr, flagarr):\n global g_sn_arr\n [n1, n2, n3]=arr\n [a,b,c] = flagarr\n if n1==0 and n2==0:\n # print('over')\n return\n else:\n hannuota_genarr([n1-1, n2, n3], [a, c, b])\n # print([a,c])\n g_sn_arr.append([a,c])\n hannuota_genarr([n1-1, n2, n3], [b, a, c])\n\ndef autoMove():\n import time\n global g_sn_arr, g_FirstClick, g_SecondClick\n g_sn_arr = []\n hannuota_genarr([g_TotalPieNums,0,0], [0,1,2])\n # print(g_sn_arr)\n for item in g_sn_arr:\n g_FirstClick = item[0]\n g_SecondClick = item[1]\n MovePie()\n # pygame.time.wait(200)\n\n g_FirstClick, g_SecondClick = -1, -1\n\ndef MovePieAnimation(firstPie, left, top):\n if firstPie.left < left:\n firstPie.left += 1\n else:\n firstPie.left -= 1\n\n if firstPie.top < top:\n firstPie.top += 1\n else:\n firstPie.top -= 1\n\n\ndef MovePie():\n global g_FirstClick, g_SecondClick, g_ClickFlag, g_firstPie, g_basePie, g_animation\n \n if g_FirstClick == -1 or g_SecondClick == -1:\n return\n \n # 每一个柱子数组都是从大到小排列\n tmpPie = [A_PieRect, B_PieRect, C_PieRect]\n \n firstPie = tmpPie[g_FirstClick][-1] #移动的饼\n secondPie = tmpPie[g_SecondClick][-1] #要移动到其上的饼(含底盘)\n\n if firstPie.width > secondPie.width: #大盘不能放在小盘上\n g_ClickFlag = [0,0,0]\n g_FirstClick, g_SecondClick = -1, -1\n return \n \n # print(firstPie)\n #--------------------------------------\n firstPie.left = secondPie.left + (secondPie.width-firstPie.width)//2\n firstPie.top = secondPie.top - firstPie.height-2\n\n tmpPie[g_FirstClick].remove(firstPie)\n tmpPie[g_SecondClick].append(firstPie)\n \n g_ClickFlag = [0,0,0]\n g_FirstClick, g_SecondClick = -1, -1\n #---------------------------------------\n g_firstPie = firstPie\n g_basePie = secondPie\n # g_animation = True\n\n\n \n \ng_firstPie = ''\ng_basePie = ''\ng_animation = False\n\ng_GAMEOVER = True\ng_PieRect = []\nA_PieRect = []\nB_PieRect = []\nC_PieRect = []\ng_BGRECT = []\ng_ClickFlag = [0,0,0]\ng_FirstClick = -1\ng_SecondClick = -1\n\ndef isGameOver():\n global g_GAMEOVER\n if len(C_PieRect) == g_TotalPieNums+1:\n g_GAMEOVER = True\n\ndef StartGameSet(flag_qishi=0): \n global g_TotalSecond, g_GAMEOVER, g_FirstClick, g_SecondClick, g_ClickFlag, g_PieRect, A_PieRect, B_PieRect, C_PieRect, g_BGRECT\n g_PieRect = []\n A_PieRect = []\n B_PieRect = []\n C_PieRect = []\n g_BGRECT = []\n g_ClickFlag = [0,0,0]\n g_FirstClick = -1\n g_SecondClick = -1\n if flag_qishi == 0:\n g_GAMEOVER = True\n pygame.time.set_timer(COUNTTIMER, 0)\n else:\n g_GAMEOVER = False\n pygame.time.set_timer(COUNTTIMER, 1000)\n\n g_TotalSecond = 0\n genRectPos() \n getRankInfo()\n \n\nCOUNTTIMER = pygame.USEREVENT\npygame.time.set_timer(COUNTTIMER, 0)\nStartGameSet()\n\n# genRectPos()\n# getRankInfo()\n\nwhile True:\n 
drawBackGround()\n # if g_startMoveFlag:\n # MoveTileToBlank()\n if g_animation:\n MovePieAnimation()\n \n events = pygame.event.get()\n for event in events:\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == MOUSEBUTTONUP:\n calcClickFlag(event.pos)\n # numberClick(event.pos)\n\n elif event.type == COUNTTIMER:\n # drawBackGround(1)\n # pygame.time.set_timer(COUNTTIMER, 0) \n\n infoTextObj4 = infoFontObj.render(str(g_TotalSecond)+\"秒\", True, YELLOW, NAVYBLUE)\n g_TotalSecond += 1\n infoRectObj4 = infoTextObj4.get_rect()\n infoRectObj4.top = infoRectObj3.bottom + 10\n infoRectObj4.left = infoAreaLeft\n \n if g_GAMEOVER and g_TotalSecond>2:\n pygame.time.set_timer(COUNTTIMER, 0) #全部选完关闭计时\n getRankInfo('write')\n\n\n elif event.type == KEYUP:\n print(event.key, chr(event.key)=='↑', pygame.key.get_mods())\n \n if event.key == 13: #重新开始\n StartGameSet(1)\n elif event.key == 32:\n autoMove()\n # pass\n \n # # textinput.update(events)\n if textinput.update(events):\n g_USERNAME = textinput.get_text()\n print(textinput.get_text())\n\n \n pygame.display.update()\n fpsClock.tick(FPS)","sub_path":"ex12_hannuota.py","file_name":"ex12_hannuota.py","file_ext":"py","file_size_in_byte":14145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"417278046","text":"ENCODER_MAX_TIME_STEP = 246\nDECODER_MAX_TIME_STEP = 13\n\nDICT_SIZE = 2391\nEMBED_DIM = 128\n\nBATCH_SIZE = 512\nRNN_SIZE = 256\nRNN_LAYER = 2\nLEARNING_RATE = 1e-3\nKEEP_PROB = 0.75\n\nEPOCH = 75\nVALID_MODEL = False\nLOAD_MODEL = False\n\nDOWN_SAMPLE = True\nADD_NOISE = True","sub_path":"final/src/model2/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"111380532","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 8 10:40:23 2020\n查询St_code条件下所有交易信息\n@author: Borisli\n\"\"\"\n#查询库\nfrom pymongo import MongoClient\nclient = MongoClient('mongodb://112.12.60.2:27017')\nST_CODE='688599.SH'\nmydb=client[\"ptest\"]\nmycollection=mydb[\"dailytest\"]\nrs_stcode = mycollection.find({'ts_code':ST_CODE})\nprint (rs_stcode.count())\nfor i in rs_stcode:\n print (i)","sub_path":"strategies/mongodb_findbystcode.py","file_name":"mongodb_findbystcode.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"437974847","text":"#coding=utf-8\nfrom flask import Flask, jsonify, request, render_template,session\nimport pymongo\nimport pandas as pd\nimport datetime \nimport time\nimport numpy as np\n\n# Model\nfrom data_model.manager import *\nfrom data_model.channel import *\nfrom data_model.webhook import *\nfrom data_model.user import *\nfrom data_model.tags import *\n\n\n# line bot 相關元件\nfrom linebot import LineBotApi\nfrom linebot.models import *\nfrom linebot.exceptions import LineBotApiError\n\nclass User:\n def __init__(self):\n self.client = pymongo.MongoClient(\"mongodb://james:wolf0719@cluster0-shard-00-01-oiynz.azure.mongodb.net:27017/?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=true&w=majority\")\n self.col_user = self.client.ufs.users\n self.col_point_logs = self.client.ufs.point_logs\n self.col_user_log = self.client.ufs.user_log\n\n # 取得單一帳號資料\n def get_once(self,user_id,channel_id):\n find = {\n \"user_id\": user_id,\n \"channel_id\": channel_id\n }\n userdata = self.col_user.find_one(find)\n del userdata[\"_id\"]\n return 
userdata\n #確認帳號存在\n def chk_once(self, user_id, channel_id):\n find = {\n \"user_id\": user_id,\n \"channel_id\": channel_id\n }\n cursor = self.col_user.find(find) \n if(cursor.count() == 0):\n return False\n else:\n return True\n # 新增使用者\n def add_once(self,user_id,channel_id):\n jsondata = {\n \"user_id\":user_id,\n \"channel_id\":channel_id,\n \"point\":0,\n \"created_datetime\":datetime.datetime.now(),\n \"last_datetime\":datetime.datetime.now()\n }\n channel = Channel()\n channel_info = channel.get_channel(channel_id)\n channel_access_token = channel_info['channel_access_token']\n line_bot_api = LineBotApi(channel_access_token)\n profile = line_bot_api.get_profile(user_id)\n jsondata['name'] = profile.display_name\n jsondata['avator'] = profile.picture_url\n jsondata['status_message'] = profile.status_message\n \n self.col_user.insert_one(jsondata)\n\n # 新增LOG\n User().set_user_log(user_id,channel_id,\"新增帳號\")\n\n return True\n\n \n def update_user_main(self,user_id,channel_id,data):\n find = {\n \"user_id\":user_id,\n \"channel_id\":channel_id,\n \n }\n data[\"last_datetime\"] =datetime.datetime.now()\n self.col_user.update_one(find,{\"$set\":data})\n return True\n # 設定使用者參數\n def set_user_tag(self,user_id,channel_id,tag):\n find = {\n \"user_id\":user_id,\n \"channel_id\":channel_id\n }\n tag = {\n \"tag\":tag,\n \"date\":datetime.datetime.now()\n }\n self.col_user.update_one(find,{\"$push\":{\"tags\":tag}})\n # 更新最後操作時間和 log\n data = {}\n data[\"last_datetime\"] =datetime.datetime.now()\n self.col_user.update_one(find,{\"$set\":data})\n User().set_user_log(user_id,channel_id,\"設定 Tag:{}\".format(tag))\n\n # 設定 tag\n tags = Tags()\n # 如果是在追蹤清單中\n if tags.chk_once(channel_id,tag) == True:\n tag_limit = tags.chk_limit(channel_id,user_id,tag)\n # 如果額度還夠\n if tag_limit == True:\n # 執行動作\n tags.do_tag_act(channel_id, user_id,tag)\n tags.set_tag_log(channel_id, user_id,tag)\n\n return True\n # 取得使用者有使用到的 TAG\n def get_user_tags(self,user_id,channel_id):\n find = {\n \"user_id\":user_id,\n \"channel_id\":channel_id\n }\n user_data = self.col_user.find_one(find)\n res = []\n if \"tags\" in user_data:\n for t in user_data[\"tags\"]:\n if t['tag'] not in res:\n res.append(t['tag'])\n return res\n \n # 取得所有人\n def get_all_users(self,channel_id):\n find = {\n \"channel_id\":channel_id\n }\n datalist = []\n for d in self.col_user.find(find):\n del d[\"_id\"]\n datalist.append(d)\n return list(datalist)\n\n\n #============================================================================\n #\n # \n # 點數控制\n #\n # \n # =================================================================\n\n # 新增點數\n def add_point(self,user_id,channel_id,point,point_note):\n user_data = User.get_once(self,user_id,channel_id)\n # print(user_data)\n old_point = 0\n if 'point' in user_data:\n old_point = user_data['point']\n new_point = int(old_point) + int(point)\n # 建立 log\n log_data = {\n \"user_id\":user_id,\n \"channel_id\":channel_id,\n 'original':old_point,\n \"point\":point,\n \"act\":\"add\",\n \"update_datetime\":datetime.datetime.now(),\n \"balance_point\":new_point,\n \"point_note\":point_note\n }\n self.col_point_logs.insert_one(log_data)\n # 回寫主表\n find = {\n \"user_id\":user_id,\n \"channel_id\":channel_id\n }\n self.col_user.update_one(find,{\"$set\":{\"point\":new_point}})\n\n # 更新最後操作時間和 log\n data = {}\n data[\"last_datetime\"] =datetime.datetime.now()\n self.col_user.update_one(find,{\"$set\":data})\n log = \"新增點數({0}):{1}\".format(point_note,point)\n User().set_user_log(user_id,channel_id,log)\n 
return new_point\n\n\n # 扣除點數\n def deduct_point(self,user_id,channel_id,point,point_note):\n user_data = User.get_once(self,user_id,channel_id)\n old_point = user_data['point']\n new_point = old_point - point\n # 建立 log\n log_data = {\n \"user_id\":user_id,\n \"channel_id\":channel_id,\n 'original':old_point,\n \"point\":point,\n \"act\":\"deduct\",\n \"update_datetime\":datetime.datetime.now(),\n \"balance_point\":new_point,\n \"point_note\":point_note\n }\n self.col_point_logs.insert_one(log_data)\n # 回寫主表\n find = {\n \"user_id\":user_id,\n \"channel_id\":channel_id\n }\n self.col_user.update_one(find,{\"$set\":{\"point\":new_point}})\n\n # 更新最後操作時間和 log\n data = {}\n data[\"last_datetime\"] = datetime.datetime.now()\n self.col_user.update_one(find,{\"$set\":data})\n log = \"扣除點數({0}):{1}\".format(point_note,point)\n User().set_user_log(user_id,channel_id,log)\n return new_point\n\n # 取得交易紀錄\n def get_point_logs(self,user_id,channel_id):\n find = {\n \"user_id\":user_id,\n \"channel_id\":channel_id\n }\n logs_data = self.col_point_logs.find(find).sort(\"update_datetime\",-1)\n datalist = []\n for row in logs_data:\n del row[\"_id\"]\n datalist.append(row)\n \n return list(datalist)\n # 取得累績總點數\n def lifetime_record(self,user_id,channel_id):\n find = {\n \"user_id\":user_id,\n \"channel_id\":channel_id,\n \"act\":\"add\"\n }\n pipeline = [\n {'$match':find},\n {'$group': {'_id': \"$user_id\", 'point': {'$sum': '$point'}}},\n ]\n if self.col_point_logs.count_documents(find) == 0:\n return 0\n else:\n res = self.col_point_logs.aggregate(pipeline)\n for data in res:\n print(data)\n return data['point']\n\n def set_user_log(self, user_id,channel_id,log_msg):\n log_data = {}\n log_data['log_note'] = log_msg\n log_data['datetime'] = datetime.datetime.now()\n log_data['user_id'] = user_id\n log_data['channel_id'] = channel_id\n self.col_user_log.insert_one(log_data)\n return True\n \n def get_user_log(self,user_id,channel_id):\n find = {\n \"user_id\": user_id,\n \"channel_id\": channel_id\n }\n logs_data = self.col_user_log.find(find).sort(\"datetime\",-1)\n datalist = []\n for row in logs_data:\n del row[\"_id\"]\n datalist.append(row)\n \n return list(datalist)\n\n\n\n\n\n \n","sub_path":"Flask-3/wwwroot/data_model/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":8421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"636682391","text":"import pandas as pd\r\nimport numpy as np\r\nimport json\r\n\r\nimport os\r\nwd = \"C:\\\\Users\\\\Daniel\\\\Google Drive\\\\Projects\\\\2020.01 Yale's Most Popular Courses\\\\raw-data\"\r\nos.chdir(wd)\r\n\r\ndef attempt_rating(x, name):\r\n try:\r\n return x['same_both'][name]\r\n except:\r\n try: \r\n return x['same_class'][name]\r\n except:\r\n try:\r\n return x['same_professors'][name]\r\n except:\r\n return np.nan\r\n \r\ndef get_rating(x):\r\n return attempt_rating(x, 'rating')\r\n\r\ndef get_workload(x):\r\n return attempt_rating(x, 'workload')\r\n\r\n# do basic processing\r\nct = pd.read_json(\"coursetable_202001.json\")\r\nct = ct[['subject', 'number', 'section', 'times', 'locations_summary', 'areas', 'skills', 'average']]\r\nct = ct.rename(columns = {'locations_summary': 'locations'})\r\nct['times'] = [elem['summary'] for elem in ct['times']]\r\nct['rating'] = ct.average.apply(get_rating)\r\nct['workload'] = ct.average.apply(get_workload)\r\n\r\ncoursetable = ct.drop(columns = ['average', 'areas', 'skills'])\r\ncoursetable.to_csv('coursetable.csv', 
index=False)","sub_path":"scripts/process_coursetable.py","file_name":"process_coursetable.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"293018080","text":"import logging\n\nimport utils\nfrom strategy.Strategy import Strategy\n\n\nclass UnderdogBaseball(Strategy):\n\n def __init__(self, *args, **kwargs):\n super(UnderdogBaseball, self).__init__(*args, **kwargs)\n self.name = \"UnderdogBaseball\"\n self._sport = \"Baseball\"\n self._leagues = [\"MLB\"]\n\n\n def add_fixture(self, fixture, moneyline):\n away_odd = utils.convert_decimal_to_american(moneyline.get('away'))\n home_odd = utils.convert_decimal_to_american(moneyline.get('home'))\n if max(home_odd, away_odd) < 150:\n logging.info(\"Adding fixture: %s\" % (fixture,))\n self._fixtures.append(fixture)\n else:\n logging.debug(\"Trash => \", fixture.home, home_odd, fixture.away, away_odd, moneyline)\n\n","sub_path":"strategy/UnderdogBaseball.py","file_name":"UnderdogBaseball.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"394734430","text":"import random\nprint('Введите слово из списка букв: А Е Н О С Т')\nfoo = ['А','Е','Н','О','С','Т']\ny = random.sample(foo, 4)\nword = (''.join(y))\n\n\noption = []\ni = 1\nD = 0\nB = 0\nwhile word != option:\n print()\n print('Попытка №', i)\n option = input()\n option = option.upper()\n T = 0\n for y in option:\n if y == word[0] or y == word[1] or \\\n y == word[2] or y == word[3]:\n T += 1\n i += 1\n D = 0\n B = 0\n if i > 10:\n print('Вы проиграли:', word)\n break\n if word[0] == option[0]:\n D += 1\n else:\n B += 1\n if word[1] == option[1]:\n D += 1\n else:\n B += 1\n if word[2] == option[2]:\n D += 1\n else:\n B += 1\n if word[3] == option[3]:\n D += 1\n else:\n B += 1\n print('На \"своем месте\":', D)\n print('На \"чужем месте\":', T - D)\nif word == option:\n print('Вы выиграли!')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"138722247","text":"import numpy as np\n\n# either this or use queue for the replay buffer\n# np is more efficient, thus will speed up training by a bit\n\nclass ReplayBuffer:\n \n def __init__(self, \n observation_Space, \n action_Space, \n size, \n batch):\n \n # State buffers\n self.init_state_buffy = np.zeros([size, observation_Space], dtype=np.float32)\n self.next_state_buffy = np.zeros([size, observation_Space], dtype=np.float32)\n \n # Action buffer\n self.action_buffy = np.zeros([size, action_Space], dtype=np.float32)\n \n # reward and done buffer\n self.reward_buffy = np.zeros([size], dtype=np.float32)\n self.done_buffy = np.zeros([size], dtype=np.float32)\n \n # boiler plate varibles\n self.size_max = size\n self.batch = batch\n self.pointer = 0\n self.size = 0\n \n # expects the data as np arrays\n def store_transition(self,\n init_state,\n next_state,\n action,\n reward,\n done):\n \n # save the transition \n self.init_state_buffy[self.pointer] = init_state\n self.next_state_buffy[self.pointer] = next_state\n self.action_buffy[self.pointer] = action\n self.reward_buffy[self.pointer] = reward\n self.done_buffy[self.pointer] = done \n \n # move the pointer and increase size\n self.pointer = (self.pointer + 1) % self.size_max\n self.size = min(self.size + 1, self.size_max)\n \n # returns dictionary based on batch 
size\n def get_sample(self):\n \n indexes = np.random.choice(self.size, \n size = self.batch,\n replace = False)\n \n sample = dict(init_state = self.init_state_buffy[indexes],\n next_state = self.next_state_buffy[indexes],\n action = self.action_buffy[indexes],\n reward = self.reward_buffy[indexes],\n done = self.done_buffy[indexes])\n \n return sample\n \n def __len__(self) -> int:\n return self.size\n\n","sub_path":"agents/common/ReplayBuffer.py","file_name":"ReplayBuffer.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"626884116","text":"import argparse\nimport os\nimport random\nimport shutil\nimport time\nimport warnings\nimport sys\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.multiprocessing as mp\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n# import torchvision.models as models\nimport torch.nn.functional as F\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\n\nfrom util import mixup_data, mixup_criterion\nfrom apex import amp, optimizers\nfrom apex.parallel import DistributedDataParallel as DDP\nimport models\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: resnet18)')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=90, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--base_lr', default=0.1, type=float,\n metavar='base_lr', help='base learning rate (default=0.1)', dest='base_lr')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--world-size', default=-1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. 
')\nparser.add_argument(\"--local_rank\", default=0, type=int)\nparser.add_argument('--sync_bn', action='store_true',\n help='enabling apex sync BN.')\nparser.add_argument('--deterministic', action='store_true')\nparser.add_argument('--opt-level', type=str)\nparser.add_argument('--keep-batchnorm-fp32', type=str, default=None)\nparser.add_argument('--loss-scale', type=str, default=None)\nparser.add_argument('--alpha', default=0.7, type=float, help='interpolation strength (uniform=1., ERM=0.)')\n\nbest_acc1 = 0\nargs = parser.parse_args()\n#base_learning_rate = args.base_lr * args.batch_size / 256.\n# base_learning_rate *= torch.cuda.device_count()\n\ndef main():\n cudnn.benchmark = True\n best_prec1 = 0\n if args.deterministic:\n cudnn.benchmark = False\n cudnn.deterministic = True\n torch.manual_seed(args.local_rank)\n torch.set_printoptions(precision=10)\n\n \n # handle distributed traininc\n args.distributed = False\n if 'WORLD_SIZE' in os.environ:\n args.distributed = int(os.environ['WORLD_SIZE']) > 1\n args.gpu = 0\n args.world_size = 1\n if args.distributed:\n args.gpu = args.local_rank\n torch.cuda.set_device(args.gpu)\n torch.distributed.init_process_group(backend='nccl',\n init_method='env://')\n args.world_size = torch.distributed.get_world_size()\n\n assert torch.backends.cudnn.enabled, \"Amp requires cudnn backend to be enabled.\"\n\n global base_learning_rate\n global best_acc1\n base_learning_rate = args.base_lr * float(args.batch_size*args.world_size)/256.\n\n\n # create model\n if args.pretrained:\n print(\"=> using pre-trained model '{}'\".format(args.arch))\n model = models.__dict__[args.arch](pretrained=True)\n else:\n print(\"=> creating model '{}'\".format(args.arch))\n model = models.__dict__[args.arch]()\n\n model = model.cuda()\n\n # define loss function (criterion) and optimizer\n cel = nn.CrossEntropyLoss()\n criterion = lambda pred, target, lam: (-F.log_softmax(pred, dim=1) * torch.zeros(pred.size()).cuda().scatter_(1, target.data.view(-1, 1), lam.view(-1, 1))).sum(dim=1).mean()\n parameters_bias = [p[1] for p in model.named_parameters() if 'bias' in p[0]]\n parameters_scale = [p[1] for p in model.named_parameters() if 'scale' in p[0]]\n parameters_others = [p[1] for p in model.named_parameters() if not ('bias' in p[0] or 'scale' in p[0])]\n optimizer = torch.optim.SGD(\n [{'params': parameters_bias, 'lr': args.base_lr/10.},\n {'params': parameters_scale, 'lr': args.base_lr/10.},\n {'params': parameters_others}],\n lr=base_learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n # Initialize Amp. 
Amp accepts either values or strings for the optional override arguments,\n # for convenient interoperation with argparse.\n model, optimizer = amp.initialize(model, optimizer,\n opt_level=args.opt_level,\n keep_batchnorm_fp32=args.keep_batchnorm_fp32,\n loss_scale=args.loss_scale)\n\n #torch.cuda.set_device(args.gpu)\n model = DDP(model, delay_allreduce=True)\n #model = torch.nn.DataParallel(model)\n \n\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_acc1 = checkpoint['best_acc1']\n if args.gpu is not None:\n # best_acc1 may be from a checkpoint from a different GPU\n best_acc1 = best_acc1.to(args.gpu)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n cudnn.benchmark = True\n\n # Data loading code\n traindir = os.path.join(args.data, 'train')\n valdir = os.path.join(args.data, 'val')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n\n if args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n else:\n train_sampler = None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\n num_workers=args.workers, pin_memory=True, sampler=train_sampler)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(valdir, transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n if args.evaluate:\n validate(val_loader, model, criterion, args)\n return\n\n sgdr = CosineAnnealingLR(optimizer, args.epochs, eta_min=0, last_epoch=-1)\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n adjust_learning_rate(optimizer, epoch, args)\n\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch, args)\n\n # evaluate on validation set\n acc1 = validate(val_loader, model, cel, args)\n\n # remember best acc@1 and save checkpoint\n is_best = acc1 > best_acc1\n best_acc1 = max(acc1, best_acc1)\n\n if args.local_rank == 0:\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'best_acc1': best_acc1,\n 'optimizer' : optimizer.state_dict(),\n }, is_best)\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, args):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (inputs, targets) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n inputs = inputs.cuda(args.gpu, non_blocking=True)\n targets = targets.cuda(args.gpu, non_blocking=True)\n\n inputs, targets_a, targets_b, lam = mixup_data(inputs, targets, args.alpha, use_cuda=True)\n\n # compute output\n output = 
model(inputs)\n loss_func = mixup_criterion(targets_a, targets_b, lam)\n loss = loss_func(criterion, output)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, targets, topk=(1, 5))\n losses.update(loss.item(), inputs.size(0))\n top1.update(acc1[0], inputs.size(0))\n top5.update(acc5[0], inputs.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n #loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if args.local_rank == 0 and i % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5))\n\n\ndef validate(val_loader, model, criterion, args):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, (input, target) in enumerate(val_loader):\n if args.gpu is not None:\n input = input.cuda(args.gpu, non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(input)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(acc1[0], input.size(0))\n top5.update(acc5[0], input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if args.local_rank == 0 and i % args.print_freq == 0:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n i, len(val_loader), batch_time=batch_time, loss=losses,\n top1=top1, top5=top5))\n\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg\n\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'model_best.pth.tar')\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n # \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n # lr = args.base_lr * (0.1 ** (epoch // 30))\n # for param_group in optimizer.param_groups:\n # param_group['lr'] = lr\n\n for param_group in optimizer.param_groups:\n if param_group['initial_lr'] == base_learning_rate:\n print(\"adjust non-scalar lr.\")\n lr = base_learning_rate * (0.1 ** (epoch // 30))\n param_group['lr'] = lr\n else:\n print(\"adjust scalar lr.\")\n scalar_lr = param_group['initial_lr'] * (0.1 ** (epoch // 30))\n param_group['lr'] = scalar_lr\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions 
for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"imagenet/imagenet_train.py","file_name":"imagenet_train.py","file_ext":"py","file_size_in_byte":14970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"244307009","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Feb 11 2019\n\n@author: \"Anirban Das\"\n\"\"\"\nimport datetime\nimport boto3\nfrom image_resizer import image_resizer\n\ns3_client = boto3.client('s3')\n\ndef function_handler(event, context):\n record = event['Records'][0]\n event_time = record['eventTime']\n invoke_time = datetime.datetime.utcnow().isoformat()\n print('Got event{}'.format(event))\n image_resizer(s3_client, event, invoke_time, event_time)\n return\n","sub_path":"Cloud_pipelines/AWS/Thumbnail-Pipeline/thumbnail_pipeline.py","file_name":"thumbnail_pipeline.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"424133455","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 16 10:16:41 2019\n\n@author: Noory333\n\"\"\"\nimport numpy as np\nimport helpers\nfrom scipy import linalg\nimport pickle\n\npath_to_results = \"../Results/\"\n\n#%%\nprotein = '1pek'\npath_to_dNdS = path_to_results + \"dNdS/\" + protein + \"_Ne2_S-SI\" \npath_to_ssFit = path_to_results + \"site_specific_fitness/\"+ protein +\"_Ne2_S-SD_ssFit/\"\n\ndef Q_matrix(F, GTR, Neff):\n\t'''\n creates a 61x61 instantaneous rate matrix based on the fitness vector (F)\n and the mutation model (GTR) and Neff \n\t'''\n\tQ = np.zeros((61,61))\n\tfor codon1 in range(0,61):\n\t\tfor codon2 in range(0,61):\n\t\t\tnuc_diff = helpers.NucDiff(helpers.Codon[codon1], helpers.Codon[codon2])\n\t\t\tif len(nuc_diff) == 2:\n\t\t\t\tSij = 2*Neff*(F[codon2] - F[codon1])\n\t\t\t\tn1 = helpers.Nucleotide[nuc_diff[0]]\n\t\t\t\tn2 = helpers.Nucleotide[nuc_diff[1]]\n\t\t\t\tif abs(Sij) <= 1e-8:\n\t\t\t\t Q[codon1, codon2] = GTR[n1,n2]\n\t\t\t\telse: \n\t\t\t\t Q[codon1, codon2] = GTR[n1,n2] * (Sij)/(1 - np.exp(-1.*Sij))\n\tfor i in range(0, 61):\n\t\tQ[i,i] = -np.sum(Q[i])\n\treturn Q\n\n\ndef IndicatorMatrix():\n '''\n creates indicator matrices specifying if a substitution is synonymous (IS)\n or nonsynonymous (IN)\n '''\n IN = np.zeros((61, 61))\n IS = np.zeros((61, 61))\n for r in range(61):\n for c in range(61):\n diff = helpers.NucDiff(helpers.Codon[r], helpers.Codon[c])\n if len(diff) == 2:\n oldAA = helpers.Codon_AA[helpers.Codon[r]]\n newAA = helpers.Codon_AA[helpers.Codon[c]]\n if oldAA == newAA:\n IS[r, c] = 1\n else:\n IN[r, c] = 1\n return (IN, IS)\n\ndef expected_dN_dS(pi, Q, M, IN):\n\tKn = np.sum(np.multiply(np.dot(np.diag(pi),Q),IN))\n\tLn = np.sum(np.multiply(np.dot(np.diag(pi),M),IN))\n\tdN_dS = Kn / Ln\n\treturn dN_dS, Kn, Ln\n\n\nif protein == '1qhw':\n pi_nuc = [0.19675, 0.31761, 0.28032, 0.20532] \n GTR = helpers.MutationMatrix(pi_nuc, 4.49765,1,1,1,1,4.49765)\n num_taxa = int(14)\n length = int(300)\n\nelif protein == '2ppn':\n pi_nuc = [0.19246, 0.24559, 0.29365, 0.26830]\n GTR = helpers.MutationMatrix(pi_nuc, 
2.50275 ,1,1,1,1,2.50275)\n num_taxa = int(14)\n length = int(107)\n\nelif protein == '1pek':\n pi_nuc = [0.20853, 0.34561, 0.25835, 0.18750]\n GTR = helpers.MutationMatrix(pi_nuc, 0.90382 ,1,1,1,1, 0.90382)\n num_taxa = int(12)\n length = int(279)\n \n \nNeff = int(1e2)\nIN , IS = IndicatorMatrix()\nM = Q_matrix(np.ones(61), GTR, Neff)\n\n \n#Calculate neutral frequencies based on GTR model \nF_neutral = np.ones((61))\nQ_neutral = Q_matrix(F_neutral, GTR, Neff)\nP_neutral = linalg.expm( np.multiply(Q_neutral, 40 ) )\np_neutral = P_neutral[0]\n\n\n#%%############################################################################\n#### calculate E[dN/dS] for S-SI ####\n###############################################################################\nfull_Kn = np.zeros((50,length)); full_Ln = np.zeros((50,length))\nfor trial in range(1, 51):\n KN = []; LN = []\n print(trial)\n full_ssFit = pickle.load(open(path_to_ssFit + 'ssFit_seqfile'+str(trial)+'.pkl', 'rb'))\n ssFit = np.mean(full_ssFit, axis = 1)\n for site in range(length):\n F = ssFit[site]\n \n #calculate stationary freq\n pi = [p_neutral[x] * np.exp(2*Neff*F[x]) for x in range(0,61) ]\n pi = pi/np.sum(pi)\n \n #calculate transition matrix \n Q = Q_matrix(F, GTR, Neff)\n \n #calculate dN/dS\n dnds, Kn, Ln = expected_dN_dS(pi, Q, M, IN)\n KN.append(Kn); LN.append(Ln)\n full_Kn[trial-1] = KN\n full_Ln[trial-1] = LN\n\nnp.savetxt(path_to_dNdS + \"_Kn.csv\", full_Kn, delimiter=\" \")\nnp.savetxt(path_to_dNdS + \"_Ln.csv\", full_Ln, delimiter=\" \")\n\n","sub_path":"scripts/calc_dNdS_S-SI.py","file_name":"calc_dNdS_S-SI.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"398130216","text":"from discord.ext import commands\nimport discord\n\n\nclass Benutzerinfo(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n @commands.command(name=\"benutzerinfo\")\n async def benutzerinfo_command(self, ctx, member: discord.Member):\n embed = discord.Embed(title='Benutzerinfo für {}'.format(member.name),\n description='Benutzerinfo für {}'.format(\n member.mention),\n color=0x69E82C)\n embed.add_field(name='Server beigetreten',\n value=member.joined_at.strftime('%d/%m/%Y'),\n inline=True)\n embed.add_field(name='Discord beigetreten',\n value=member.created_at.strftime('%d/%m/%Y'),\n inline=True)\n embed.add_field(name=f\"Rollen ({len(member.roles)})\",\n value=\" \".join([role.mention for role in member.roles]))\n\n rollen = ''\n for role in member.roles:\n if not role.is_default():\n rollen += '{} \\r\\n'.format(role.mention)\n embed.add_field(name='Höchste Rolle', value=member.top_role.mention, inline=True),\n embed.add_field(name=\"Benutzer ID\", value=member.id, inline=True),\n embed.set_thumbnail(url=member.avatar_url)\n embed.set_footer(text='Benutzerinfo')\n await ctx.send(embed=embed)\n\n\ndef setup(client):\n client.add_cog(Benutzerinfo(client))\n","sub_path":"cogs/benutzerinfo.py","file_name":"benutzerinfo.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"59175382","text":"import cv2\nimport os\nimport numpy as np\n\n\n\ndef showImage(img_in, img_name='image'):\n cv2.imshow(img_name, img_in)\n cv2.waitKey(0)\n cv2.destroyWindow(img_name)\n return\n\ndef showFrame(frame, name='preview'):\n cv2.imshow(name, frame)\n\ndef startWebcamFeed(name='preview'):\n cv2.namedWindow(name)\n vc = cv2.VideoCapture(0)\n\n if not vc.isOpened():\n 
raise RuntimeError(\"No webcam found or webcam is busy.\")\n\n return vc\n\n\ndef endWebcamFeed(name='preview'):\n cv2.destroyWindow(name)\n\n\ndef getColor(name):\n if name == 'red':\n return (0,0,255)\n if name == 'green':\n return (0,255,0)\n if name == 'blue':\n return (255,0,0)\n if name == 'black':\n return (255,255,255)\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"535352156","text":"import os.path as op\nimport os\nimport ez_setup\nimport sys\nez_setup.use_setuptools()\n#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages, Extension\n#from distutils.core import setup, Extension\n\nb = op.abspath(op.join('.','clib/src'))\ngfdir = op.abspath(op.join('.','gridfield'))\nallsources = os.listdir(b)\n\n\n# c extension for accessing SELFE files\nelio = [op.join(b,c) for c in allsources if op.splitext(c)[1] in ['.c']]\nelio = Extension('elio',\n elio, \n include_dirs = [b],\n )\n\n# Ugh, does distutils provide an easy wa yto derive this \n# location from the environment a la configure?\nvtkincl = '/usr/include/vtk-5.2'\n#vb = op.abspath(op.join('.','src/vis'))\n\nswig_opts = ['-c++', '-I%s' % b, '-I%s' % vtkincl, '-classic']\n\n# c++ gridfield core extension\ncsources = [c for c in allsources if op.splitext(c)[1] in ['.cc', '.cpp', '.c++', '.c']]\ncsources = [op.join(b,c) for c in csources]\nsources = [op.join(gfdir,'core.i')] + csources\npygridfield = Extension('_core',\n sources, \n include_dirs = [b],\n libraries = ['netcdf_c++', 'netcdf'],\n swig_opts = swig_opts\n )\n\next_modules = [elio, pygridfield]\n\n#try:\n # only build vtk if the vtk modules are installed\n# import vtk\n\n # vtk-based visualization\n # not sure how to avoid recompiling and relinking the gridfield objs\n # Very wasteful, but I'm having trouble instructing python\n # to use the symbols from _gridfield.so\n# gfsources = [c for c in csources if 'output' not in c and 'netcdf' not in c and 'stuebe' not in c]\n# vsources = [op.join('gridfield','gfvis.i'), \n# op.join(vb,'vtkGridField.cxx')] + gfsources\n# vtkgridfield = Extension('_gfvis',\n# vsources,\n# include_dirs = [vtkincl, vb, b],\n# libraries = ['vtkFiltering', 'vtkCommon','vtkRendering', 'vtkIO', 'vtkCommonPythonD'],\n# swig_opts = swig_opts\n# )\n# ext_modules = [vtkgridfield] + ext_modules\n\n#except ImportError:\n# print \"VTK or the VTK python bindings do not appear to be installed....skipping the vis module\"\n# pass\n\n# Workaround for SWIG/C++ bug\n# Specifying \"-c++\" above should be enough to get .cpp SWIG output, but it isn't\n# http://mail.python.org/pipermail/distutils-sig/2005-November/005387.html\ncustom_opts = {\n 'build_ext' : {\n 'swig_opts':' '.join(swig_opts),\n 'debug':True\n }\n}\n \n# Run the setup function\nsetup (name = 'gridfield',\n version = '0.5',\n author = \"Bill Howe\",\n description = \"\"\"Convenient Manipulation of Unstructured Grids\"\"\",\n ext_modules = ext_modules,\n packages = find_packages(),\n options = custom_opts\n )\n","sub_path":"pygridfields/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"426061664","text":"import sys\nimport logging\n\nfrom qcloud_cos import CosConfig\nfrom qcloud_cos import CosS3Client\n\ndef bottleSEND(file):\n logging.basicConfig(level=logging.INFO, stream=sys.stdout)\n\n secret_id = 
'AKIDmU1RccS3UltpZ88knfMtETYUEmDSgoAL' # 替换为用户的 secretId\n secret_key = 'OghXUIzVp0zmOpcGleT2QceZKtHWZS9J' # 替换为用户的 secretKey\n region = 'ap-chengdu' # 替换为用户的 Region\n config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key)\n # 2. 获取客户端对象\n client = CosS3Client(config)\n # 3.创建桶子\n '''\n response = client.create_bucket(\n sBucket='examplebucket-1250000000'\n '''\n with open(file, 'rb') as fp:\n response = client.put_object(\n Bucket='lzw-1301082773',\n Body=fp,\n Key=file,\n StorageClass='STANDARD',\n EnableMD5=False\n )\n print(response['ETag'])","sub_path":"wechat/utils/bottle.py","file_name":"bottle.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"36496793","text":"from Person import *\nfrom PhoneBook import *\nfrom Table import *\nfrom Node import *\nfrom BST import *\n\nclass Table:\n def __init__(self, data):\n self.data = data # Liste de Dictionnaires\n # Un attribut de la table par case du Dictionnaire\n\n def printTable(self):\n for dico in self.data:\n print(dico)\n\n def addEntity(self):\n dico = {}\n self.data.append(dico)\n if len(self.data) == 1: # Cas de création de la table\n finished = 0\n print(\"Let's start with the attributes in your table\")\n print(\"First attribute?\")\n a = str(input())\n print(\"Value ?\")\n b = input()\n dico[a] = b\n print(\"Other attributes? 0 for yes, 1 for no\")\n finished = int(input())\n while finished == 0:\n print(\"Attribute ?\")\n a = str(input())\n print(\"Value ?\")\n b = input()\n dico[a] = b\n print(\"Other attributes? 0 for yes, 1 for no\")\n finished = int(input())\n else: # Cas où la liste dispose déjà d'au moins un dictionnaire\n for key in self.data[0].keys():\n print(str(key) + \": \")\n dico[key] = input()\n\n def buildAllBST(self):\n allBST = [] # liste de tous les arbres\n for att in self.data[0].keys(): # Pour chaque argument, on crée un arbre\n a = BST()\n for dico in self.data: # Pour chacun des Dictionnaires, on ajoute l'argument 'att' dans l'arbre\n node = Node(dico[att], self.data.index(dico))\n a.addNode(node)\n allBST.append(a)\n return allBST\n\n def whichAtt(self): # Demande et renvoie l'index de l'attribut voulu\n done = False\n result = -1\n while done == False:\n print(\"What is the desired attribute ?\\n\")\n i = 0\n for att in self.data[0].keys():\n print(\"For the \" + str(att) + \", enter \" + str(att))\n i += 1\n result = input()\n for att in self.data[0].keys():\n if result == att:\n done = True\n return result\n\n def buildBST(self, att):\n a = BST()\n for dico in self.data:\n node = Node(dico[att], self.data.index(dico))\n a.addNode(node)\n return a\n\n def delBST(self, tree):\n del(tree.head)\n\n def rebuildBSTUser(self):\n att = self.whichAtt()\n tree = self.buildBST(att)\n return tree\n\n def reBuildBST(self, tree):\n tree1 = self.buildBST(att)\n\n def printAllTrees(self, allTrees): # allTrees est une liste des arbres (fournie par buildAllBST)\n for arbre in myTableTrees:\n print(arbre.affInfixe())\n","sub_path":"Table.py","file_name":"Table.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"337722589","text":"#!/usr/bin/python3\n\"\"\"A Student Class\n\"\"\"\n\n\nclass Student:\n \"\"\"A Student superclass\n \"\"\"\n def __init__(self, first_name, last_name, age):\n \"\"\"inilization of Student\n Arguments:\n first_name (str): first name of Student.\n last_name (str): last name of 
Student.\n age (int): age of Student\n \"\"\"\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n\n def to_json(self, attrs=None):\n \"\"\" A JSON method that retrieves a\n dictionary representation of a Student instance\n Arguments:\n attrs (list): list of attribute names\n Returns:\n a dictionary representation of a Student instance\n \"\"\"\n dictionary = {}\n if attrs is None:\n return self.__dict__\n for key, value in self.__dict__.items():\n if key in attrs:\n dictionary[key] = value\n return dictionary\n","sub_path":"0x0B-python-input_output/12-student.py","file_name":"12-student.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"346365095","text":"#!/usr/bin/env python\nimport matplotlib.pyplot as plt\nimport sys\nimport os\nimport numpy as np\nimport matplotlib.patches as mpatches\nimport math\nimport matplotlib.ticker as ticker\n\nif len(sys.argv) < 2:\n print('Require data directory')\n sys.exit(0)\n\nmethod = 'subset'\n\ndef plot_figure(dirname, method, ax, snapshot_point, title, ylim, xlim):\n subset_data ={}\n ax.set_prop_cycle('color', colors)\n for r in snapshot_point:\n filename = dirname + \"/result90unhash_\" + method + \"V1Round\" + str(r) + \".txt\"\n buff = []\n # print(filename)\n f = open(filename,'r',errors='replace')\n line=f.readlines()\n a=line[0].strip().split(\" \")\n for j in range(len(a)):\n buff.append(int(float(a[j])))\n subset_data[r]=sorted(buff)\n f.close()\n\n for i, d in subset_data.items():\n ax.grid(True)\n ax.plot(d) #, label=\"round\"+str(i)\n tick_spacing = 50\n ax.yaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))\n ax.set_ylim(ylim)\n ax.set_xlim(xlim)\n name = title.split('/')[-1]\n ax.set_title(name)\n ax.legend()\n\ndef get_y_lim(dirname, method, snapshot_point, min_y, max_y):\n num_node = 0\n for r in snapshot_point:\n filename = dirname + \"/result90unhash_\" + method + \"V1Round\" + str(r) + \".txt\"\n with open(filename,'r') as f:\n line=f.readlines()\n a=line[0].strip().split(\" \")\n num_node = len(a)\n for j in range(len(a)):\n n = int(float(a[j]))\n if max_y is None or n > max_y:\n max_y = n\n if min_y is None or n < min_y:\n min_y = n\n return min_y, max_y, num_node\n\n\ndatadir_list = []\nfor i in range(1, len(sys.argv)):\n datadir_list.append(sys.argv[i])\n\nsnapshot_point = [0,8,16,32,64,96]\n\nnum_row = 1\nnum_col = len(datadir_list)\nmin_y = None \nmax_y = None\nfor dirname in datadir_list:\n dirpath = dirname\n min_y, max_y, num_node = get_y_lim(dirpath, method, snapshot_point, min_y, max_y)\n\nylim = [200,650]#[min_y, max_y]\nxlim = [0, num_node]\n# print(ylim)\n# print(num_row, num_col)\n\ndata_dirname = datadir_list\nnum_row = 2\nnum_col = 3\nnum_exp = len(data_dirname)\nprint('num_exp', num_exp)\nif num_exp <= 1:\n num_row = 1\n num_col = 1\nelif num_exp <= 2:\n num_row = 1\n num_col = 2\nelif num_exp <= 4:\n num_row = 2\n num_col = 2\nelif num_exp <=6:\n num_row = 2\n num_col = 3\nelif num_exp <=8:\n num_row = 2\n num_col = 4 \nelse:\n print('Warn. 
More room to plot')\n sys.exit(0)\n\nfig, axs = plt.subplots(ncols=num_col, nrows=num_row, constrained_layout=False, figsize=(18,9))\ncolormap = plt.cm.nipy_spectral\ncolors = [colormap(i) for i in np.linspace(0, 0.9, len(snapshot_point))]\npatches = []\n\nfor i in range(len(snapshot_point)):\n p = mpatches.Patch(color=colors[i], label=str(snapshot_point[i]))\n patches.append(p) \n\nmax_patch = mpatches.Patch(color='red', label='max')\nmin_patch = mpatches.Patch(color='green', label='min')\nmean_patch = mpatches.Patch(color='blue', label='mean')\n\n# print('h',len(axs), num_exp)\ni = 0\nc = 0\nr = 0\nfor dirname in data_dirname:\n c = int(i / num_col)\n r = i % num_col\n #print(c, )\n dirpath = dirname\n \n if num_row ==1 and num_col == 1:\n plot_figure(dirpath, method, axs, snapshot_point, dirname, ylim, xlim)\n elif len(axs) == num_exp:\n plot_figure(dirpath, method, axs[i], snapshot_point, dirname, ylim, xlim)\n else:\n plot_figure(dirpath, method, axs[c, r], snapshot_point, dirname, ylim, xlim)\n i += 1\n if i >= num_row* num_col:\n break\n\n\nfig.legend(loc='lower center', handles=patches, fontsize='small', ncol= math.ceil(len(patches)/2))\nplt.show()\n#plt.savefig(\"subset64_2.png\")\nprint(\"finish\")\n","sub_path":"analysis/custom_plot.py","file_name":"custom_plot.py","file_ext":"py","file_size_in_byte":3776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"156411729","text":"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport math\nimport oneflow as flow\nfrom oneflow.python.nn.module import Module\nfrom oneflow.python.oneflow_export import oneflow_export, experimental_api\nfrom oneflow.python.nn.modules.utils import _pair\nfrom oneflow.python.nn.common_types import _size_2_t\nfrom oneflow.python.nn import init\n\n\n@oneflow_export(\"nn.ConvTranspose2d\")\n@experimental_api\nclass ConvTranspose2d(Module):\n r\"\"\"\n \n Applies a 2D transposed convolution operator over an input image composed of several input planes.\n\n This module can be seen as the gradient of Conv2d with respect to its input.\n It is also known as a fractionally-strided convolution or\n a deconvolution (although it is not an actual deconvolution operation).\n\n Args:\n in_channels (int): Number of channels in the input image\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int or tuple): Size of the convolving kernel\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding\n will be added to both sides of each dimension in the input. Default: 0\n output_padding (int or tuple, optional): Additional size added to one side\n of each dimension in the output shape. Default: 0\n groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1\n bias (bool, optional): If ``True``, adds a learnable bias to the output. 
Default: ``True``\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n\n Shape:\n - Input: :math:`(N, C_{in}, H_{in}, W_{in})`\n - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where\n\n .. math::\n H_{out} = (H_{in} - 1) \\times \\text{stride}[0] - 2 \\times \\text{padding}[0] + \\text{dilation}[0] \n\n \\times (\\text{kernel_size}[0] - 1) + \\text{output_padding}[0] + 1\n .. math::\n W_{out} = (W_{in} - 1) \\times \\text{stride}[1] - 2 \\times \\text{padding}[1] + \\text{dilation}[1]\n \n \\times (\\text{kernel_size}[1] - 1) + \\text{output_padding}[1] + 1\n\n Attributes:\n weight (Tensor): the learnable weights of the module of shape\n :math:`(\\text{in_channels}, \\frac{\\text{out_channels}}{\\text{groups}},`\n :math:`\\text{kernel_size[0]}, \\text{kernel_size[1]})`.\n The values of these weights are sampled from\n :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where\n :math:`k = \\frac{groups}{C_\\text{out} * \\prod_{i=0}^{1}\\text{kernel_size}[i]}`\n bias (Tensor): the learnable bias of the module of shape (out_channels)\n If :attr:`bias` is ``True``, then the values of these weights are\n sampled from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where\n :math:`k = \\frac{groups}{C_\\text{out} * \\prod_{i=0}^{1}\\text{kernel_size}[i]}`\n\n Examples::\n\n >>> import numpy as np\n >>> import oneflow.experimental as flow\n >>> import oneflow.experimental.nn as nn\n >>> flow.enable_eager_execution()\n\n >>> m = nn.ConvTranspose2d(16, 33, 3, stride=2)\n >>> # non-square kernels and unequal stride and with padding\n >>> m = nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))\n >>> m = m.to(\"cuda\")\n >>> input = flow.Tensor(np.random.randn(20, 16, 50, 100), device=flow.device(\"cuda\"))\n >>> output = m(input)\n >>> output.size()\n flow.Size([20, 33, 93, 100])\n\n .. _cross-correlation:\n https://en.wikipedia.org/wiki/Cross-correlation\n\n .. 
_link:\n https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: _size_2_t,\n stride: _size_2_t = 1,\n padding: _size_2_t = 0,\n output_padding: _size_2_t = 0,\n groups: int = 1,\n bias: bool = True,\n dilation: int = 1,\n padding_mode: str = \"zeros\",\n ) -> None:\n super().__init__()\n\n assert padding_mode == \"zeros\"\n assert groups == 1, \"grouped ConvTranspose2d is not supported yet\"\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n output_padding = _pair(output_padding)\n dilation = _pair(dilation)\n self.groups = groups\n self.weight = flow.nn.Parameter(\n flow.Tensor(in_channels, out_channels // groups, *kernel_size)\n )\n self.bias = None\n self._bias_add_op = None\n if bias:\n self.bias = flow.nn.Parameter(flow.Tensor(out_channels // groups))\n self._bias_add_op = (\n flow.builtin_op(\"bias_add\")\n .Input(\"a\")\n .Input(\"b\")\n .Output(\"out\")\n .Attr(\"axis\", 1)\n .Build()\n )\n\n self._op = (\n flow.builtin_op(\"deconv2d\")\n .Input(\"in\")\n .Input(\"weight\")\n .Attr(\"filters\", out_channels)\n .Attr(\"padding_before\", padding)\n .Attr(\"data_format\", \"channels_first\")\n .Attr(\"kernel_size\", kernel_size)\n .Attr(\"strides\", stride)\n .Attr(\"dilation_rate\", dilation)\n .Attr(\"output_padding\", output_padding)\n .Attr(\"groups\", groups)\n .Output(\"out\")\n .Build()\n )\n self.reset_parameters()\n\n def reset_parameters(self) -> None:\n init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n if self.bias is not None:\n fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / math.sqrt(fan_in)\n init.uniform_(self.bias, -bound, bound)\n\n def forward(self, x):\n res = self._op(x, self.weight)[0]\n if self._bias_add_op is not None:\n res = self._bias_add_op(res, self.bias)[0]\n return res\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod(raise_on_error=True)\n","sub_path":"oneflow/python/nn/modules/deconv.py","file_name":"deconv.py","file_ext":"py","file_size_in_byte":6835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"103658310","text":"from __future__ import print_function\n\nimport boto3\nimport json\nimport logging\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nses = boto3.client('ses')\nemail_address = 'mkukreja1@gmail.com'\n\ndef lambda_handler(event, context):\n subject = 'New File uploaded to S3'\n body_text = 'A new file has been uploaded to S3. 
Here are the details: %s' % (json.dumps(event))\n ses.send_email(Source=email_address,\n Destination={'ToAddresses': [email_address]},\n Message={'Subject': {'Data': subject}, 'Body': {'Text': {'Data': body_text}}})\n logger.info('Email has been sent')\n return {\n 'statusCode': 200,\n 'body': json.dumps('Hello from Lambda!')\n }\n","sub_path":"aws-analytics/s3-event-notification/s3-notify.py","file_name":"s3-notify.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"306085346","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 23 12:28:23 2020\n\n@author: safa\n\"\"\"\ndef AVLExecution(ConfNo,Alpha,BaseAVLInputFileName,Directory,GeomData,Ref): \n import os\n os.chdir(Directory)\n\n \n######################################################### \n ## BASE AVL FORMAT\n BaseFormatFile = open(Directory+'\\\\'+BaseAVLInputFileName)\n BaseFormatData = list()\n \n for line in BaseFormatFile:\n BaseFormatData.append(line)\n \n BaseFormatFile.close()\n \n NewConfData = BaseFormatData.copy()\n Space = ' ' \n ## Wing Root Section\n NewConfData[25] = '0.0'+Space+'0.0'+Space+'0.0'+\\\n Space+str(GeomData['Croot'])+Space+'0.0'+Space+'0.0'+Space+'0.0' \n ## Wing Tip Section\n NewConfData[35] = str(GeomData['Xtip'])+Space+\\\n str(GeomData['Span']/2)+Space+'1.0'+\\\n Space+str(GeomData['Ctip'])+Space+'0.0'+Space+'0.0'+Space+'0.0' \n \n ## New Configuration Input File\n NewInpFileName = 'AVLInputConf_'+str(ConfNo)+'.avl'\n NewInpFile = open(Directory+'\\\\In\\\\'+NewInpFileName,'w+')\n for line in NewConfData:\n NewInpFile.write(line+'\\n')\n NewInpFile.close()\n \n#################################################################\n InpFileName = r'In\\AVLInputConf_'+str(ConfNo)+'.avl'\n OutFileName = r'Out\\AVLOutConf_'+str(ConfNo)+'Alpha'+str(Alpha)\n RunFileName = r'Run\\AVLRunConf_'+str(ConfNo)+'Alpha'+str(Alpha)+'.run'\n RunFile = open(RunFileName,'w+')\n \n AVLRunCommand = ['load',InpFileName,'OPER','A A ',str(Alpha),'X','st',OutFileName]\n for line in AVLRunCommand:\n RunFile.write(line+'\\n')\n RunFile.close()\n \n CmdCommand = \"avl.exe < \"+RunFileName\n os.system(CmdCommand)\n","sub_path":"Windows/ConfSearch/NewConfPar.py","file_name":"NewConfPar.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"409153681","text":"import math\n\nimport torch\n\nfrom falkon.cuda import initialization\nfrom falkon.cuda.cusolver_gpu import *\nfrom falkon.utils import devices\nfrom falkon import la_helpers\nfrom falkon.utils.cuda_helpers import copy_to_device, copy_to_host\nfrom falkon.utils.helpers import choose_fn, sizeof_dtype\n# noinspection PyUnresolvedReferences\nfrom falkon.ooc_ops.cuda import parallel_potrf\nfrom falkon.options import FalkonOptions, CholeskyOptions\nfrom .ooc_utils import calc_block_sizes\nfrom ..utils.devices import DeviceInfo\nfrom ..utils.tensor_helpers import create_fortran, is_f_contig, copy_same_stride\n\n__all__ = (\"gpu_cholesky\",)\n\n\ndef _ic_cholesky(A, upper, device, cusolver_handle):\n \"\"\"Cholesky factorization of matrix `A` on the GPU\n\n Uses the cuSOLVER library for implementation of the POTRF function.\n\n Parameters:\n -----------\n A : [n, n] CPU or GPU array (column-contiguous)\n The (positive definite) matrix which should be factorized\n upper : bool\n Whether we need to factorize the upper or lower portion of `A`. 
The other side\n of the matrix will not be touched.\n device : int\n The GPU device on which to run the factorization\n cusolver_handle\n Pointer to the cuSOLVER context, which needs to be initialized before calling\n the function.\n\n Returns:\n --------\n A : [n, n] CPU or GPU array (column-contiguous)\n The factorization of A which overwrites the upper (or lower) triangular part\n of the matrix A. This is not a copy of the original matrix.\n \"\"\"\n # Check library initialization\n if cusolver_handle is None:\n raise RuntimeError(\"CuSolver must be initialized \"\n \"before running in-core Cholesky.\")\n if not is_f_contig(A):\n raise RuntimeError(\"Cholesky input must be F-contiguous\")\n\n uplo = 'U' if upper else 'L'\n n = A.shape[0]\n\n tc_device = torch.device(\"cuda:%d\" % (device))\n # Choose functions by dtype\n potrf_buf_size = choose_fn(A.dtype, cusolverDnDpotrf_bufferSize, cusolverDnSpotrf_bufferSize,\n \"POTRF Buffer size\")\n potrf_fn = choose_fn(A.dtype, cusolverDnDpotrf, cusolverDnSpotrf, \"POTRF\")\n\n # noinspection PyUnresolvedReferences\n with torch.cuda.device(tc_device):\n # Copy A to device memory\n if A.is_cuda:\n Agpu = A\n else:\n Agpu = create_fortran(A.shape, A.dtype, tc_device)\n copy_to_device(n, n, A, 0, 0, Agpu, 0, 0)\n\n # Create workspace buffer\n potrf_bsize = potrf_buf_size(\n handle=cusolver_handle, uplo=uplo, n=n, A=Agpu.data_ptr(), lda=n)\n potrf_wspace = create_fortran((potrf_bsize,), A.dtype, tc_device)\n dev_info = torch.tensor(4, dtype=torch.int32, device=tc_device)\n\n # Run cholesky\n potrf_fn(handle=cusolver_handle,\n uplo=uplo, n=n, A=Agpu.data_ptr(), lda=n,\n workspace=potrf_wspace.data_ptr(), Lwork=potrf_bsize, devInfo=dev_info)\n torch.cuda.synchronize()\n\n # Copy back to CPU\n if not A.is_cuda:\n copy_to_host(n, n, Agpu, 0, 0, A, 0, 0)\n del Agpu\n del potrf_wspace, dev_info\n return A\n\n\ndef _parallel_potrf_runner(A: torch.Tensor, opt: CholeskyOptions, gpu_info) -> torch.Tensor:\n num_gpus = len(gpu_info)\n N = A.shape[0]\n dt = A.dtype\n # Calculate the maximum block size such that we don't run out of GPU\n # RAM on **any** available GPU. 
We need a total of 2 whole columns and 1 tile:\n # block-size^2 * ((N / block-size) * 2 + 1) floats\n # (plus the cuSOLVER buffer which is small).\n # block_size < (sqrt((2*N)^2 + 4R) - 2*N) / 2\n dts = sizeof_dtype(dt)\n avail_ram = min([g.actual_free_mem for g in gpu_info]) / dts\n max_block_size = (math.sqrt(4 * N ** 2 + 4 * avail_ram) - 2 * N) / 2\n max_block_size = int(math.floor(max_block_size))\n if max_block_size < 1:\n raise RuntimeError(\n \"Cannot run parallel POTRF with minimum \"\n \"available memory of %.2fMB\" % (avail_ram * dts / 2 ** 20))\n\n block_sizes = calc_block_sizes(\n max_block_size, num_gpus, N, opt.chol_par_blk_multiplier)\n block_allocations = []\n cur_n = 0\n for i, bs in enumerate(block_sizes):\n block_allocations.append(\n (cur_n, cur_n + bs, bs, i % num_gpus, i)\n )\n cur_n += bs\n\n device_info = []\n for g in range(num_gpus):\n device_info.append(\n (0.0, initialization.cusolver_handle(g), g)\n )\n\n parallel_potrf(device_info, block_allocations, A)\n return A\n\n\n\"\"\"\nGPU Cholesky, we implement use cuSOLVER as a backend for POTRF.\n\n - In-core: Can do upper or lower, must be Fortran\n - Out of core: Can only do lower, Fortran\n\n\"\"\"\n\n\ndef can_do_ic(A: torch.Tensor, device: DeviceInfo):\n # noinspection PyUnresolvedReferences\n avail_ram = device.actual_free_mem\n # The multiplier here is a bit tricky since setting it too high results\n # in hard-to-debug cuda errors\n avail_ram *= 0.85\n\n if A.is_cuda:\n needed_ram = 100 * 8 # Not very much indeed\n else:\n needed_ram = A.shape[0] * A.shape[1] * sizeof_dtype(A.dtype)\n\n return avail_ram >= needed_ram\n\n\ndef gpu_cholesky(A: torch.Tensor, upper: bool, clean: bool, overwrite: bool, opt: FalkonOptions) -> torch.Tensor:\n \"\"\"\n Parameters\n -----------\n A : torch.Tensor\n 2D positive-definite matrix of size (n x n) that will be factorized as\n A = U.T @ U (if `upper` is True) or A = L @ L.T if `upper`\n is False.\n upper : bool\n Whether the triangle which should be factorized is the upper or lower of `A`.\n clean : bool\n Whether the \"other\" triangle of the output matrix (the one that\n does not contain the factorization) will be filled with zeros or\n not.\n overwrite : bool\n Whether to overwrite matrix A or to output the result in a new\n buffer.\n opt : FalkonOptions\n Options forwarded for block calculation, and other knobs in the out-of-core\n parallel POTRF implementation. Useful options are the ones defined in\n :class:`~falkon.options.CholeskyOptions` .\n\n Notes\n ------\n The factorization will always be the 'lower' version of the factorization\n which could however end up on the upper-triangular part of the matrix\n in case A is not Fortran contiguous to begin with.\n \"\"\"\n # Handle 'overwrite' option immediately so that its usage is reflected in memory\n # availability (in case A is on GPU).\n if not overwrite:\n # We could change the stride to be more favorable to the POTRF requirements\n # but it gets complicated. 
We leave such decisions to the user!\n A = copy_same_stride(A, pin_memory=True)\n\n # Decide which version of the algo we run: can be in-core or parallel.\n # (Note that the original OOC version is not going to run).\n\n # Determine GPU free RAM\n gpu_info = [v for k, v in devices.get_device_info(opt).items() if k >= 0]\n for g in gpu_info:\n g.actual_free_mem = min((g.free_memory - 300 * 2 ** 20) * 0.95,\n opt.max_gpu_mem * 0.95)\n\n if A.is_cuda:\n try:\n device = [d for d in gpu_info if d.Id == A.device.index][0]\n except IndexError:\n # This should never happen!\n raise RuntimeError(\"Device of matrix A (%s) is not recognized\" % (A.device))\n else:\n device = max(gpu_info, key=lambda g: g.actual_free_mem)\n ic = can_do_ic(A, device) and not opt.chol_force_ooc\n if opt.chol_force_in_core and not ic:\n raise RuntimeError(\"Cannot run in-core POTRF but `chol_force_in_core` was specified.\")\n\n f_order = is_f_contig(A)\n transposed = False\n if not f_order:\n A = A.T\n upper = not upper\n transposed = True\n # Now A is always in f_order. So we can only allow upper=False (ooc)\n if upper:\n # Can do only in-core!\n if not ic:\n raise ValueError(\"GPU POTRF is only implemented on the \"\n \"lower triangle for Fortran-ordered matrices (or on the upper \"\n \"triangle for C-ordered matrices)\")\n if not ic and A.is_cuda:\n _msg = \"Cannot run out-of-core POTRF on CUDA matrix 'A'.\"\n if opt.chol_force_ooc:\n _msg += \" Set the `chol_force_ooc` option to `False` to allow in-core POTRF.\"\n raise ValueError(_msg)\n\n # Handle different implementations for POTRF: in-core and out-of-core\n if ic:\n if opt.debug:\n print(\"Using in-core POTRF\")\n _ic_cholesky(A, upper, device=device.Id,\n cusolver_handle=initialization.cusolver_handle(device.Id))\n else:\n if opt.debug:\n print(\"Using parallel POTRF\")\n _parallel_potrf_runner(A, opt, gpu_info)\n\n # Perform cleaning of the 'other side' of the matrix\n if clean:\n la_helpers.zero_triang(A, upper=not upper)\n # Undo previous matrix transformations\n if transposed:\n A = A.T\n\n return A\n","sub_path":"falkon/ooc_ops/ooc_potrf.py","file_name":"ooc_potrf.py","file_ext":"py","file_size_in_byte":9065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"162311332","text":"from sklearn.neural_network import MLPRegressor\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport random\r\n\r\nk=random.randint(1, 5);\r\nl=random.randint(1, 5);\r\n\r\ndef MSE(predicted_data_y, actual_data_y):\r\n # local accumulator so repeated calls do not share state across models\r\n assert len(predicted_data_y) == len(actual_data_y)\r\n squares = []\r\n for i in range(0, len(predicted_data_y)):\r\n squares.append((predicted_data_y[i] - actual_data_y[i])**2)\r\n return sum(squares)/len(predicted_data_y)\r\n\r\ndef f(x):\r\n return ((k/l)*(x/l)**(k-1) * np.exp(-(x/l)**(k)))\r\n\r\ndef g(x):\r\n return (f(x) + (f(x) + np.random.normal(f(x), abs(f(x)/10)))/2)\r\n\r\nall_data_x = np.arange(0, 6, 0.1)\r\nall_data = [(i, g(i)) for i in all_data_x]\r\n\r\npolydeg = int(input(\"Poly degree?\"))\r\n\r\ntotal_start = time.time()\r\n\r\ntraining_data = all_data[2:-2]\r\ntest_data = []\r\ntest_data.extend(tuple(all_data[0:2]))\r\ntest_data.extend(tuple(all_data[-2:]))\r\ngraph_min = min([i[0] for i in test_data])\r\ngraph_max = max([i[0] for i in test_data])\r\n\r\nx = np.array([i[0] for i in training_data]).reshape(-1, 1)\r\ny = np.array([i[1] for i in training_data]).ravel()\r\npoly_x = [i[0] for i in training_data]\r\n\r\nann_start = time.time()\r\nnn = 
MLPRegressor(hidden_layer_sizes=(15),\r\n activation='tanh', solver='lbfgs')\r\n\r\nnn.fit(x, y)\r\nann_end = time.time()\r\n\r\ntest_data_x = [i[0] for i in test_data]\r\ntest_data_y = [i[1] for i in test_data]\r\n\r\nall_data_x = [i[0] for i in all_data]\r\nall_data_y = [i[1] for i in all_data]\r\n\r\ntest_graph_x = np.arange(5/6 * graph_min, 1.2*graph_max, 0.01).reshape(-1, 1)\r\nann_y = nn.predict(test_graph_x)\r\n\r\nfig = plt.figure()\r\nax1 = fig.add_subplot(111)\r\nax1.scatter(x, y, s=5, c='b', marker=\"o\", label='Training Data')\r\nax1.scatter(test_data_x, test_data_y, s=5, c='g', marker=\"o\", label='Test Data')\r\nax1.plot(test_graph_x, ann_y, c='r', label='ANN Prediction')\r\n\r\n\r\npoly_start=time.time()\r\npoly = np.poly1d(np.polyfit(poly_x, y, polydeg))\r\npoly_y=[poly(i) for i in test_graph_x]\r\nax1.plot(test_graph_x, poly_y, c='y', label='Poly Prediction')\r\npoly_end=time.time()\r\n\r\nann_all_y = [nn.predict(np.array(i).reshape(-1, 1)) for i in all_data_x]\r\npoly_all_y = [poly(i) for i in all_data_x]\r\n\r\nplt.legend()\r\ntotal_end=time.time()\r\nprint(\"ANN MODEL:\")\r\nprint(\"Mean-Squared-Error: \", sum(MSE(ann_all_y, all_data_y)))\r\nprint(\"Time Elapsed: \", 1000*(ann_end - ann_start), \"milliseconds\")\r\nprint(\"____________________________________\")\r\nprint(\"POLYNOMIAL MODEL:\")\r\nprint(\"Time Elapsed: \", (poly_end - poly_start))\r\nprint(\"Mean-Squared-Error: \", sum(MSE(poly_all_y, all_data_y)))\r\nprint(\"\")\r\nprint(\"Total Time Elapsed: \", 1000*(total_end - total_start), \" milliseconds\")\r\nplt.show()\r\n","sub_path":"General Weibull Curve Fitting.py","file_name":"General Weibull Curve Fitting.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"491468635","text":"\n# Standard Library\nimport logging\n\n# Third-Party\nimport pydf\nfrom rest_framework_json_api.filters import OrderingFilter\nfrom rest_framework_json_api.django_filters import DjangoFilterBackend\nfrom django_fsm import TransitionNotAllowed\nfrom dry_rest_permissions.generics import DRYPermissions\nfrom rest_framework import status\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\n# Django\nfrom django.core.files.base import ContentFile\nfrom django.db.models import Sum, Q, Avg\nfrom django.template.loader import render_to_string\nfrom django.utils.text import slugify\n\n# Local\n# from .filterbackends import AppearanceFilterBackend\n# from .filterbackends import OutcomeFilterBackend\n# from .filterbackends import ScoreFilterBackend\n# from .filterbackends import SongFilterBackend\n\nfrom .filtersets import RoundFilterset\nfrom .filtersets import ScoreFilterset\n\nfrom .models import Appearance\nfrom .models import Outcome\nfrom .models import Panelist\nfrom .models import Round\nfrom .models import Score\nfrom .models import Song\n\nfrom .renderers import PDFRenderer\nfrom .renderers import XLSXRenderer\nfrom .responders import PDFResponse\nfrom .responders import XLSXResponse\nfrom .renderers import DOCXRenderer\nfrom .responders import DOCXResponse\n\nfrom .serializers import AppearanceSerializer\nfrom .serializers import OutcomeSerializer\nfrom .serializers import PanelistSerializer\nfrom .serializers import RoundSerializer\nfrom .serializers import ScoreSerializer\nfrom .serializers import 
SongSerializer\n\n\nlog = logging.getLogger(__name__)\n\n\nfrom rest_framework.negotiation import BaseContentNegotiation\n\n\nclass IgnoreClientContentNegotiation(BaseContentNegotiation):\n def select_parser(self, request, parsers):\n \"\"\"\n Select the first parser in the `.parser_classes` list.\n \"\"\"\n return parsers[0]\n\n def select_renderer(self, request, renderers, format_suffix):\n \"\"\"\n Select the first renderer in the `.renderer_classes` list.\n \"\"\"\n return (renderers[0], renderers[0].media_type)\n\n\nclass AppearanceViewSet(viewsets.ModelViewSet):\n queryset = Appearance.objects.select_related(\n 'round',\n # 'group',\n # 'entry',\n ).prefetch_related(\n 'owners',\n 'songs',\n # 'statelogs',\n ).order_by('id')\n serializer_class = AppearanceSerializer\n filterset_class = None\n filter_backends = [\n DjangoFilterBackend,\n # AppearanceFilterBackend,\n ]\n permission_classes = [\n DRYPermissions,\n ]\n resource_name = \"appearance\"\n\n @action(methods=['get'], detail=True)\n def mock(self, request, pk=None, **kwargs):\n \"\"\"\n Mocks an Appearance using fake data.\n \"\"\"\n object = self.get_object()\n object.mock()\n object.save()\n serializer = self.get_serializer(object)\n return Response(serializer.data)\n\n @action(methods=['post'], detail=True)\n def start(self, request, pk=None, **kwargs):\n object = self.get_object()\n try:\n object.start(by=self.request.user)\n except TransitionNotAllowed:\n return Response(\n {'status': 'Transition conditions not met.'},\n status=status.HTTP_400_BAD_REQUEST,\n )\n object.save()\n serializer = self.get_serializer(object)\n return Response(serializer.data)\n\n @action(methods=['post'], detail=True)\n def finish(self, request, pk=None, **kwargs):\n object = self.get_object()\n try:\n object.finish(by=self.request.user)\n except TransitionNotAllowed:\n return Response(\n {'status': 'Transition conditions not met.'},\n status=status.HTTP_400_BAD_REQUEST,\n )\n object.save()\n serializer = self.get_serializer(object)\n return Response(serializer.data)\n\n @action(methods=['post'], detail=True)\n def verify(self, request, pk=None, **kwargs):\n object = self.get_object()\n try:\n object.verify(by=self.request.user)\n except TransitionNotAllowed:\n return Response(\n {'status': 'Transition conditions not met.'},\n status=status.HTTP_400_BAD_REQUEST,\n )\n object.save()\n serializer = self.get_serializer(object)\n return Response(serializer.data)\n\n\n @action(methods=['post'], detail=True)\n def complete(self, request, pk=None, **kwargs):\n object = self.get_object()\n try:\n object.complete(by=self.request.user)\n except TransitionNotAllowed:\n return Response(\n {'status': 'Transition conditions not met.'},\n status=status.HTTP_400_BAD_REQUEST,\n )\n object.save()\n serializer = self.get_serializer(object)\n return Response(serializer.data)\n\n\n @action(methods=['post'], detail=True)\n def advance(self, request, pk=None, **kwargs):\n object = self.get_object()\n try:\n object.advance(by=self.request.user)\n except TransitionNotAllowed:\n return Response(\n {'status': 'Transition conditions not met.'},\n status=status.HTTP_400_BAD_REQUEST,\n )\n object.save()\n serializer = self.get_serializer(object)\n return Response(serializer.data)\n\n\n @action(methods=['post'], detail=True)\n def scratch(self, request, pk=None, **kwargs):\n object = self.get_object()\n try:\n object.scratch(by=self.request.user)\n except TransitionNotAllowed:\n return Response(\n {'status': 'Transition conditions not met.'},\n 
status=status.HTTP_400_BAD_REQUEST,\n )\n object.save()\n serializer = self.get_serializer(object)\n return Response(serializer.data)\n\n\n @action(methods=['post'], detail=True)\n def disqualify(self, request, pk=None, **kwargs):\n object = self.get_object()\n try:\n object.disqualify(by=self.request.user)\n except TransitionNotAllowed:\n return Response(\n {'status': 'Transition conditions not met.'},\n status=status.HTTP_400_BAD_REQUEST,\n )\n object.save()\n serializer = self.get_serializer(object)\n return Response(serializer.data)\n\n\n @action(\n methods=['get'],\n detail=True,\n renderer_classes=[\n PDFRenderer,\n ],\n permission_classes=[DRYPermissions],\n content_negotiation_class=IgnoreClientContentNegotiation,\n )\n def variance(self, request, pk=None):\n appearance = Appearance.objects.get(pk=pk)\n if appearance.variance_report:\n pdf = appearance.variance_report.file\n else:\n pdf = appearance.get_variance()\n file_name = '{0} Variance Report.pdf'.format(appearance)\n return PDFResponse(\n pdf,\n file_name=file_name,\n status=status.HTTP_200_OK\n )\n\n @action(\n methods=['get'],\n detail=True,\n renderer_classes=[\n PDFRenderer,\n ],\n permission_classes=[DRYPermissions],\n content_negotiation_class=IgnoreClientContentNegotiation,\n )\n def csa(self, request, pk=None):\n \"\"\"\n Renders the Competitor Scoring Analysis in PDF\n \"\"\"\n appearance = Appearance.objects.get(pk=pk)\n if appearance.csa_report:\n pdf = appearance.csa_report.file\n else:\n pdf = appearance.get_csa()\n file_name = '{0} CSA.pdf'.format(appearance)\n return PDFResponse(\n pdf,\n file_name=file_name,\n status=status.HTTP_200_OK\n )\n\n\nclass OutcomeViewSet(viewsets.ModelViewSet):\n queryset = Outcome.objects.select_related(\n 'round',\n # 'award',\n ).prefetch_related(\n 'statelogs',\n ).order_by('id')\n serializer_class = OutcomeSerializer\n filter_backends = [\n DjangoFilterBackend,\n # OutcomeFilterBackend,\n ]\n permission_classes = [\n DRYPermissions,\n ]\n resource_name = \"outcome\"\n\n\nclass PanelistViewSet(viewsets.ModelViewSet):\n queryset = Panelist.objects.select_related(\n 'round',\n # 'user',\n ).prefetch_related(\n 'scores',\n ).order_by('id')\n serializer_class = PanelistSerializer\n filter_backends = [\n DjangoFilterBackend,\n ]\n permission_classes = [\n DRYPermissions,\n ]\n resource_name = \"panelist\"\n\n @action(\n methods=['get'],\n detail=True,\n renderer_classes=[\n PDFRenderer,\n ],\n permission_classes=[DRYPermissions],\n content_negotiation_class=IgnoreClientContentNegotiation,\n )\n def psa(self, request, pk=None):\n panelist = Panelist.objects.get(pk=pk)\n if panelist.psa_report:\n pdf = panelist.psa_report.file\n else:\n pdf = panelist.get_psa()\n file_name = '{0} PSA.pdf'.format(panelist)\n return PDFResponse(\n pdf,\n file_name=file_name,\n status=status.HTTP_200_OK\n )\n\n\nclass RoundViewSet(viewsets.ModelViewSet):\n queryset = Round.objects.select_related(\n # 'session',\n ).prefetch_related(\n 'owners',\n 'appearances',\n # 'appearances__owners',\n # 'appearances__songs',\n # 'panelists__scores',\n # 'outcomes__award',\n ).order_by('id')\n serializer_class = RoundSerializer\n filterset_class = RoundFilterset\n filter_backends = [\n DjangoFilterBackend,\n ]\n permission_classes = [\n DRYPermissions,\n ]\n resource_name = \"round\"\n\n @action(methods=['get'], detail=True)\n def mock(self, request, pk=None, **kwargs):\n object = self.get_object()\n object.mock()\n object.save()\n serializer = self.get_serializer(object)\n return Response(serializer.data)\n\n 
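# Each POST action below wraps a django-fsm state transition; when the\n # transition's conditions are not met the endpoint returns HTTP 400.\n 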
@action(methods=['post'], detail=True)\n def reset(self, request, pk=None, **kwargs):\n object = self.get_object()\n try:\n object.reset(by=self.request.user)\n except TransitionNotAllowed:\n return Response(\n {'status': 'Transition conditions not met.'},\n status=status.HTTP_400_BAD_REQUEST,\n )\n object.save()\n serializer = self.get_serializer(object)\n return Response(serializer.data)\n\n @action(methods=['post'], detail=True)\n def build(self, request, pk=None, **kwargs):\n object = self.get_object()\n try:\n object.build(by=self.request.user)\n except TransitionNotAllowed:\n return Response(\n {'status': 'Transition conditions not met.'},\n status=status.HTTP_400_BAD_REQUEST,\n )\n object.save()\n serializer = self.get_serializer(object)\n return Response(serializer.data)\n\n @action(methods=['post'], detail=True)\n def start(self, request, pk=None, **kwargs):\n object = self.get_object()\n try:\n object.start(by=self.request.user)\n except TransitionNotAllowed:\n return Response(\n {'status': 'Transition conditions not met.'},\n status=status.HTTP_400_BAD_REQUEST,\n )\n object.save()\n serializer = self.get_serializer(object)\n return Response(serializer.data)\n\n @action(methods=['post'], detail=True)\n def complete(self, request, pk=None, **kwargs):\n object = self.get_object()\n try:\n object.complete(by=self.request.user)\n except TransitionNotAllowed:\n return Response(\n {'status': 'Transition conditions not met.'},\n status=status.HTTP_400_BAD_REQUEST,\n )\n object.save()\n serializer = self.get_serializer(object)\n return Response(serializer.data)\n\n @action(methods=['post'], detail=True)\n def verify(self, request, pk=None, **kwargs):\n object = self.get_object()\n try:\n object.verify(by=self.request.user)\n except TransitionNotAllowed:\n return Response(\n {'status': 'Transition conditions not met.'},\n status=status.HTTP_400_BAD_REQUEST,\n )\n object.save()\n serializer = self.get_serializer(object)\n return Response(serializer.data)\n\n @action(methods=['post'], detail=True)\n def publish(self, request, pk=None, **kwargs):\n object = self.get_object()\n # try:\n # object.publish(by=self.request.user)\n # except TransitionNotAllowed:\n # return Response(\n # {'status': 'Transition conditions not met.'},\n # status=status.HTTP_400_BAD_REQUEST,\n # )\n object.publish(by=self.request.user)\n object.save()\n serializer = self.get_serializer(object)\n return Response(serializer.data)\n\n @action(\n methods=['get'],\n detail=True,\n renderer_classes=[\n PDFRenderer,\n ],\n permission_classes=[DRYPermissions],\n content_negotiation_class=IgnoreClientContentNegotiation,\n )\n def oss(self, request, pk=None):\n round = Round.objects.select_related(\n # 'session',\n # 'session__convention',\n ).get(pk=pk)\n if round.oss_report:\n pdf = round.oss_report.file\n else:\n pdf = round.get_oss()\n file_name = '{0} OSS.pdf'.format(round)\n return PDFResponse(\n pdf,\n file_name=file_name,\n status=status.HTTP_200_OK\n )\n\n\n @action(\n methods=['get'],\n detail=True,\n renderer_classes=[\n PDFRenderer,\n ],\n permission_classes=[DRYPermissions],\n content_negotiation_class=IgnoreClientContentNegotiation,\n )\n def legacy(self, request, pk=None):\n round = Round.objects.get(pk=pk)\n if round.legacy_oss:\n pdf = round.legacy_oss.file\n else:\n pdf = round.get_legacy_oss()\n file_name = '{0} Legacy OSS.pdf'.format(round)\n return PDFResponse(\n pdf,\n file_name=file_name,\n status=status.HTTP_200_OK\n )\n\n\n @action(\n methods=['get'],\n detail=True,\n renderer_classes=[\n PDFRenderer,\n 
],\n permission_classes=[DRYPermissions],\n content_negotiation_class=IgnoreClientContentNegotiation,\n )\n def legacyoss(self, request, pk=None):\n round = Round.objects.select_related(\n ).get(pk=pk)\n if round.legacy_oss:\n pdf = round.legacy_oss.file\n else:\n pdf = round.get_legacy_oss()\n file_name = '{0} Legacy OSS.pdf'.format(round)\n return PDFResponse(\n pdf,\n file_name=file_name,\n status=status.HTTP_200_OK\n )\n\n\n @action(\n methods=['get'],\n detail=True,\n renderer_classes=[\n PDFRenderer,\n ],\n permission_classes=[DRYPermissions],\n content_negotiation_class=IgnoreClientContentNegotiation,\n )\n def titles(self, request, pk=None):\n round = Round.objects.prefetch_related(\n 'appearances',\n ).get(pk=pk)\n pdf = round.get_titles()\n file_name = '{0} {1} {2} Titles Report'.format(\n # round.session.convention,\n round.session.get_kind_display(),\n round.get_kind_display(),\n )\n return PDFResponse(\n pdf,\n file_name=file_name,\n status=status.HTTP_200_OK\n )\n\n\n @action(\n methods=['get'],\n detail=True,\n renderer_classes=[PDFRenderer],\n permission_classes=[DRYPermissions],\n content_negotiation_class=IgnoreClientContentNegotiation,\n )\n def sa(self, request, pk=None):\n round = Round.objects.select_related(\n # 'session',\n # 'session__convention',\n ).get(pk=pk)\n if round.sa_report:\n pdf = round.sa_report.file\n else:\n pdf = round.get_sa()\n file_name = '{0} {1} {2} SA'.format(\n # round.session.convention,\n round.session.get_kind_display(),\n round.get_kind_display(),\n )\n return PDFResponse(\n pdf,\n file_name=file_name,\n status=status.HTTP_200_OK\n )\n\n\n @action(\n methods=['get'],\n detail=True,\n renderer_classes=[\n DOCXRenderer,\n ],\n permission_classes=[DRYPermissions],\n content_negotiation_class=IgnoreClientContentNegotiation,\n )\n def announcements(self, request, pk=None):\n round = Round.objects.select_related(\n ).get(pk=pk)\n docx = round.get_announcements()\n file_name = '{0} {1} {2} Announcements'.format(\n # round.session.convention,\n round.session.get_kind_display(),\n round.get_kind_display(),\n )\n return DOCXResponse(\n docx,\n file_name=file_name,\n status=status.HTTP_200_OK\n )\n\n\nclass ScoreViewSet(viewsets.ModelViewSet):\n queryset = Score.objects.select_related(\n 'song',\n 'panelist',\n ).prefetch_related(\n ).order_by('id')\n serializer_class = ScoreSerializer\n filterset_class = ScoreFilterset\n filter_backends = [\n DjangoFilterBackend,\n # ScoreFilterBackend,\n ]\n permission_classes = [\n DRYPermissions,\n ]\n resource_name = \"score\"\n\n\nclass SongViewSet(viewsets.ModelViewSet):\n queryset = Song.objects.select_related(\n 'appearance',\n ).prefetch_related(\n 'scores',\n 'scores__panelist',\n ).order_by('id')\n serializer_class = SongSerializer\n filterset_class = None\n filter_backends = [\n DjangoFilterBackend,\n # SongFilterBackend,\n ]\n permission_classes = [\n DRYPermissions,\n ]\n resource_name = \"song\"\n","sub_path":"project/apps/adjudication/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"555205335","text":"import numpy as np\nimport constants\nfrom decisions import Decider\n\nclass Encounter():\n\n def __init__(self, array_generator, rotation, response, var, config, world_buffs, boss_buffs, dur_dist):\n self._array_generator = array_generator\n self._rotation = rotation\n self._response = response\n self._var = var\n self._config = config\n self._world_buffs = world_buffs\n 
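# the world/boss buff lists are only read in run(), where they set the\n # double-dip damage multiplier passed to constants.Constant\n 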
self._boss_buffs = boss_buffs\n self._dur_dist = dur_dist\n\n def _subtime(self, sub, add_time):\n C = self._C\n running_time = self._arrays['global']['running_time']\n player = self._arrays['player']\n boss = self._arrays['boss']\n\n running_time[sub] += add_time\n player['cast_timer'][sub, :] -= add_time[:, None]\n player['spell_timer'][sub, :] -= add_time[:, None]\n player['comb_cooldown'][sub, :] -= add_time[:, None]\n player['fb_cooldown'][sub, :] -= add_time[:, None]\n boss['ignite_timer'][sub] -= add_time\n boss['tick_timer'][sub] -= add_time\n boss['scorch_timer'][sub] -= add_time\n for buff in range(C._BUFFS):\n player['buff_timer'][buff][sub, :] -= add_time[:, None]\n player['buff_cooldown'][buff][sub, :] -= add_time[:, None]\n for debuff in range(C._DEBUFFS):\n boss['debuff_timer'][debuff][sub] -= add_time\n boss['spell_vulnerability'][sub] -= add_time\n player['nightfall'][sub, :] -= add_time[:, None]\n\n def _do_cast(self, still_going, cast_array):\n C = self._C\n player = self._arrays['player']\n \n cast_ends = np.where(cast_array)[0]\n if cast_ends.size > 0:\n cst = still_going[cast_ends]\n next_hit = np.argmin(player['cast_timer'][cst, :], axis=1)\n add_time = np.min(player['cast_timer'][cst, :], axis=1)\n self._subtime(cst, add_time)\n\n if C._LOG_SIM >= 0:\n if C._LOG_SIM in cst:\n message = ' ({:6.2f}): player {:d} finished casting {:s}'\n sub_index = cst.tolist().index( C._LOG_SIM)\n message = message.format(self._arrays['global']['running_time'][C._LOG_SIM],\n next_hit[sub_index] + 1,\n C._LOG_SPELL[player['cast_type'][cst[sub_index], next_hit[sub_index]]])\n print(message)\n\n # transfer to spell\n instant_array = player['cast_type'][cst, next_hit] >= C._CAST_GCD\n no_instant = np.where(np.logical_not(instant_array))[0]\n player['spell_type'][cst[no_instant], next_hit[no_instant]] = player['cast_type'][cst[no_instant], next_hit[no_instant]]\n player['spell_timer'][cst[no_instant], next_hit[no_instant]] = C._SPELL_TIME[player['cast_type'][cst[no_instant], next_hit[no_instant]]]\n \n fire_blast = np.where(player['cast_type'][cst, next_hit] == C._CAST_FIRE_BLAST)[0]\n player['fb_cooldown'][cst[fire_blast], next_hit[fire_blast]] = C._FIRE_BLAST_COOLDOWN\n\n # apply instant spells\n combustion = np.where(player['cast_type'][cst, next_hit] == C._CAST_COMBUSTION)[0]\n player['comb_left'][cst[combustion], next_hit[combustion]] = C._COMBUSTIONS\n player['comb_stack'][cst[combustion], next_hit[combustion]] = 1\n player['comb_avail'][cst[combustion], next_hit[combustion]] -= 1\n player['comb_cooldown'][cst[combustion], next_hit[combustion]] = np.inf # temporary\n\n for buff in range(C._BUFFS):\n tbuff = np.where(player['cast_type'][cst, next_hit] == C._BUFF_CAST_TYPE[buff])[0]\n player['buff_timer'][buff][cst[tbuff], next_hit[tbuff]] = C._BUFF_DURATION[buff]\n #player['buff_avail'][buff][cst[tbuff], next_hit[tbuff]] -= 1\n player['buff_cooldown'][buff][cst[tbuff], next_hit[tbuff]] = C._BUFF_COOLDOWN[buff]\n if buff < C._DAMAGE_BUFFS:\n player['buff_ticks'][buff][cst[tbuff], next_hit[tbuff]] = 0\n # internal cooldown (MQG too)\n if buff < C._DAMAGE_BUFFS + 1:\n for buff2 in range(C._DAMAGE_BUFFS + 1):\n if buff2 == buff:\n continue\n player['buff_cooldown'][buff2][cst[tbuff], next_hit[tbuff]] =\\\n np.maximum(player['buff_cooldown'][buff2][cst[tbuff], next_hit[tbuff]], C._BUFF_DURATION[buff])\n\n # determine gcd \n gcd_array = player['gcd'][cst, next_hit] > 0.0\n yes_gcd = np.where(gcd_array)[0]\n no_gcd = np.where(np.logical_not(gcd_array))[0]\n \n # push gcd\n 
player['cast_type'][cst[yes_gcd], next_hit[yes_gcd]] = C._CAST_GCD\n player['cast_timer'][cst[yes_gcd], next_hit[yes_gcd]] =\\\n player['gcd'][cst[yes_gcd], next_hit[yes_gcd]]\n player['gcd'][cst[yes_gcd], next_hit[yes_gcd]] = 0.0\n\n # inc cast number\n self._arrays['global']['decision'][cst[no_gcd]] = True\n player['cast_number'][cst[no_gcd], next_hit[no_gcd]] += 1 # attempt at batching\n\n def _do_spell(self, still_going, spell_array):\n C = self._C\n player = self._arrays['player']\n boss = self._arrays['boss']\n\n epsilon = 1.0e-6\n \n spell_lands = np.where(spell_array)[0]\n if spell_lands.size > 0:\n spl = still_going[spell_lands]\n lnext_hit = np.argmin(player['spell_timer'][spl, :], axis=1)\n add_time = np.min(player['spell_timer'][spl, :], axis=1)\n self._subtime(spl, add_time)\n\n # reset timer\n player['spell_timer'][spl, lnext_hit] = C._LONG_TIME\n\n if C._LOG_SIM >= 0:\n if C._LOG_SIM in spl:\n message = ' ({:6.2f}): player {:d} {:s} landed '\n sub_index = spl.tolist().index( C._LOG_SIM)\n message = message.format(self._arrays['global']['running_time'][C._LOG_SIM],\n lnext_hit[sub_index] + 1,\n C._LOG_SPELL[player['spell_type'][C._LOG_SIM, lnext_hit[sub_index]]])\n message2 = 'misses '\n\n spell_hits = np.where(np.random.rand(spell_lands.size) < player['hit_chance'][spl, lnext_hit])[0]\n if spell_hits.size > 0:\n\n sph = spl[spell_hits]\n next_hit = lnext_hit[spell_hits]\n spell_type = player['spell_type'][sph, next_hit]\n\n #print(self._arrays['player']['cleaner'], self._arrays['player']['cleaner'].shape)\n the_cleaner = np.any(np.equal(next_hit.reshape(next_hit.size, 1), self._arrays['player']['cleaner']), axis=1)\n #the_cleaner = (next_hit == self._arrays['player']['cast_number'].shape[1] - 1) | (next_hit == 1)\n the_player = np.any(np.equal(next_hit.reshape(next_hit.size, 1), self._arrays['player']['target']), axis=1)\n is_play = np.where(the_player)[0]\n is_clean = np.where(the_cleaner)[0]\n not_clean = np.where(np.logical_not(the_cleaner))[0]\n \n is_dragonling = np.logical_and(self._arrays['global']['running_time'][sph] >= boss['dragonling'],\n self._arrays['global']['running_time'][sph] < boss['dragonling'] + C._DRAGONLING_DURATION).astype(np.float)\n buff_damage = C._DRAGONLING_BUFF*is_dragonling\n for buff in range(C._DAMAGE_BUFFS):\n active = (player['buff_timer'][buff][sph, next_hit] > 0.0).astype(np.float)\n ticks = player['buff_ticks'][buff][sph, next_hit]\n buff_damage += active*(C._BUFF_DAMAGE[buff] + ticks*C._BUFF_PER_TICK[buff])\n player['buff_ticks'][buff][sph, next_hit] += 1\n \n spell_damage = C._SPELL_BASE[spell_type] + \\\n C._SPELL_RANGE[spell_type]*np.random.rand(sph.size) +\\\n C._SP_MULTIPLIER[spell_type]*(player['spell_power'][sph, next_hit] + buff_damage)\n rolls = np.random.rand(sph.size)\n conditions = [np.logical_and(rolls >= ll, rolls < ul) for ll, ul in zip(C._RES_THRESH, C._RES_THRESH_UL)]\n partials = np.piecewise(rolls, conditions, C._RES_AMOUNT)\n partials[np.logical_not(C._IS_FIRE[spell_type].astype(np.bool))] = 1.0\n spell_damage *= partials\n\n spell_damage *= C._COE_MULTIPLIER*C._DAMAGE_MULTIPLIER[spell_type] # CoE + talents\n scorch = C._IS_FIRE[spell_type]*C._SCORCH_MULTIPLIER*boss['scorch_count'][sph]\n spell_damage *= 1.0 + scorch*(boss['scorch_timer'][sph] > 0.0).astype(np.float)\n pi = (player['buff_timer'][C._BUFF_POWER_INFUSION][sph, next_hit] > 0.0).astype(np.float)\n spell_damage *= 1.0 + C._POWER_INFUSION*pi\n spell_damage *= 1.0 + C._NIGHTFALL_BUFF*(boss[\"spell_vulnerability\"][sph] > 0.0).astype(np.float)\n \n 
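# overall raid-buff multiplier: 'cleaner' casters use the cleaner-specific\n # constant, every other caster the normal one\n 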
spell_damage[not_clean] *= C._NORMAL_BUFF\n spell_damage[is_clean] *= C._NORMAL_BUFF_C\n self._damage[sph] += spell_damage\n self._arrays['global']['player'][sph[is_play]] += spell_damage[is_play] \n # ADD ADDITIONAL OVERALL MULTIPLIERS TO _DAMAGE_MULTIPLIER\n\n # handle critical hit/ignite ** READ HERE FOR MOST OF THE IGNITE MECHANICS **\n comb_crit = C._PER_COMBUSTION*player['comb_stack'][sph, next_hit] - C._IS_SCORCH[spell_type] # scorch and batching\n comb_crit *= (player['comb_left'][sph, next_hit] > 0).astype(np.float)\n comb_crit *= C._IS_FIRE[spell_type]\n \n crit_chance = player['crit_chance'][sph, next_hit] + comb_crit + C._INCIN_BONUS[spell_type]\n crit_array = np.random.rand(sph.size) < crit_chance\n \n ignite_array = crit_array & C._IS_FIRE[spell_type].astype(np.bool)\n\n lcrit_clean = np.where(crit_array & C._IS_FIRE[spell_type].astype(np.bool) & the_cleaner)[0]\n lnot_crit_clean = np.where(crit_array & C._IS_FIRE[spell_type].astype(np.bool) & np.logical_not(the_cleaner))[0]\n\n gcrit_clean = sph[np.where(crit_array & C._IS_FIRE[spell_type].astype(np.bool) & the_cleaner)[0]]\n gnot_crit_clean = sph[np.where(crit_array & C._IS_FIRE[spell_type].astype(np.bool) & np.logical_not(the_cleaner))[0]]\n\n lcrit_play = np.where(crit_array & C._IS_FIRE[spell_type].astype(np.bool) & the_player)[0]\n gcrit_play = sph[np.where(crit_array & C._IS_FIRE[spell_type].astype(np.bool) & the_player)[0]]\n\n lcl_icrits = np.where(ignite_array)[0]\n gbl_icrits = sph[lcl_icrits]\n inext_hit = next_hit[lcl_icrits]\n \n # remove ignite if expired\n rem_val = np.where(boss['ignite_timer'][gbl_icrits] <= 0.0)[0]\n boss['ignite_count'][gbl_icrits[rem_val]] = 0\n boss['ignite_value'][gbl_icrits[rem_val]] = 0.0\n\n # record late crits\n rem_val = np.where(boss['ignite_timer'][gbl_icrits] < C._DECISION_POINT)[0]\n player['crit_too_late'][gbl_icrits[rem_val], :] = True\n\n # extend by 4 secs, reset timer if no more ticks (31MAR22)\n # comment this section and uncomment next section to return to classic mechanics\n new_tick = np.where((boss['tick_timer'][gbl_icrits] > C._IGNITE_TICK) & (boss['ignite_count'][gbl_icrits] > 0))[0]\n boss['tick_timer'][gbl_icrits[new_tick]] = C._IGNITE_TICK\n boss['ignite_timer'][gbl_icrits] = C._IGNITE_TIME + epsilon\n\n # add tick if 1 tick remaining\n # comment this section and uncomment next section to return to TBC mechanics\n #new_tick = np.where(boss['ignite_timer'][gbl_icrits] <= C._IGNITE_TICK)[0]\n #boss['ignite_timer'][gbl_icrits[new_tick]] += C._IGNITE_TICK\n\n ## refresh ignite to full 4 seconds\n #boss['ignite_timer'][gbl_icrits] = C._IGNITE_TIME + epsilon\n\n # if we dont have a full stack\n mod_val = np.where(boss['ignite_count'][gnot_crit_clean] < C._IGNITE_STACK)[0]\n # add to the ignite tick damage -- 1.5 x 0.2 x spell hit damage\n boss['ignite_value'][gnot_crit_clean[mod_val]] += (1.0 + C._ICRIT_DAMAGE)*C._CRIT_BUFF*C._IGNITE_DAMAGE*spell_damage[lnot_crit_clean[mod_val]]\n mod_val = np.where(boss['ignite_count'][gcrit_clean] < C._IGNITE_STACK)[0]\n # add to the ignite tick damage -- 1.5 x 0.2 x spell hit damage\n boss['ignite_value'][gcrit_clean[mod_val]] += (1.0 + C._ICRIT_DAMAGE)*C._CRIT_BUFF_C*C._IGNITE_DAMAGE*spell_damage[lcrit_clean[mod_val]]\n\n # first in stack, set the tick\n mod_val2 = np.where(boss['ignite_count'][gnot_crit_clean] == 0)[0]\n boss['tick_timer'][gnot_crit_clean[mod_val2]] = C._IGNITE_TICK\n # comment the next line to return to TBC ignite mechanics\n boss['ignite_timer'][gnot_crit_clean[mod_val2]] = C._IGNITE_TIME + epsilon\n 
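# the stack opener's ignite multiplier (including Power Infusion) is\n # snapshotted here and reused for every subsequent ignite tick\n 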
boss['ignite_multiplier'][gnot_crit_clean[mod_val2]] = C._IGNITE_BUFF*(1.0 + C._POWER_INFUSION*pi[lnot_crit_clean[mod_val2]])\n mod_val2 = np.where(boss['ignite_count'][gcrit_clean] == 0)[0]\n boss['tick_timer'][gcrit_clean[mod_val2]] = C._IGNITE_TICK\n # comment the next line to return to TBC ignite mechanics\n boss['ignite_timer'][gcrit_clean[mod_val2]] = C._IGNITE_TIME + epsilon\n boss['ignite_multiplier'][gcrit_clean[mod_val2]] = C._IGNITE_BUFF_C*(1.0 + C._POWER_INFUSION*pi[lcrit_clean[mod_val2]])\n\n\n # increment to max of five (will do nothing if already at 5)\n boss['ignite_count'][gbl_icrits] = np.minimum(boss['ignite_count'][gbl_icrits] + 1, C._IGNITE_STACK)\n\n # add crit to damage\n self._damage[gbl_icrits] += C._ICRIT_DAMAGE*spell_damage[lcl_icrits]\n self._arrays['global']['player'][gcrit_play] -= spell_damage[lcrit_play] \n self._arrays['global']['player'][gcrit_play] += spell_damage[lcrit_play]*(1.0 + C._ICRIT_DAMAGE)*C._CRIT_BUFF_C\n \n self._crit[gbl_icrits] += (1.0 + C._ICRIT_DAMAGE)*spell_damage[lcl_icrits]\n\n # check last combustion\n comb_off = np.where(player['comb_left'][gbl_icrits, inext_hit] == 1)[0]\n player['comb_cooldown'][gbl_icrits[comb_off], inext_hit[comb_off]] = C._COMBUSTION_COOLDOWN\n\n # remove from combustion\n player['comb_left'][gbl_icrits, inext_hit] = np.maximum(player['comb_left'][gbl_icrits, inext_hit] - 1, 0)\n\n # normal crit\n lcl_crits = np.where(crit_array & np.logical_not(C._IS_FIRE[spell_type]))[0]\n self._damage[sph[lcl_crits]] += C._CRIT_DAMAGE*spell_damage[lcl_crits]\n\n if C._LOG_SIM >= 0:\n if C._LOG_SIM in sph:\n sub_index = sph.tolist().index(C._LOG_SIM)\n if sub_index in lcl_crits:\n message2 = 'crits for {:5.0f} '.format((1.0 + C._CRIT_DAMAGE)*spell_damage[sub_index])\n elif sub_index in lcl_icrits:\n message2 = 'crits for {:5.0f} '.format((1.0 + C._ICRIT_DAMAGE)*spell_damage[sub_index])\n else:\n message2 = ' hits for {:5.0f} '.format(spell_damage[sub_index])\n\n # scorch\n scorch_out = sph[np.where(boss['scorch_timer'][sph] <= 0.0)[0]]\n boss['scorch_count'][scorch_out] = 0\n \n scorch_list = np.where(C._IS_SCORCH[spell_type])[0]\n if scorch_list.size:\n is_scorch = sph[scorch_list]\n snext_hit = next_hit[scorch_list]\n shit = np.where(np.random.rand(is_scorch.size) < player['hit_chance'][is_scorch, snext_hit])[0]\n boss['scorch_timer'][is_scorch[shit]] = C._SCORCH_TIME\n boss['scorch_count'][is_scorch[shit]] = np.minimum(boss['scorch_count'][is_scorch[shit]] + 1, C._SCORCH_STACK)\n \n fire = np.where(C._IS_FIRE[spell_type])[0]\n player['comb_stack'][sph[fire], next_hit[fire]] += 1\n\n if C._LOG_SIM >= 0:\n if C._LOG_SIM in spl:\n sub_index = spl.tolist().index(C._LOG_SIM)\n dam_done = ' {:7.0f}'.format(self._arrays['global']['total_damage'][C._LOG_SIM] + self._damage[C._LOG_SIM])\n message = message + message2\n buffs = player['buff_timer']\n is_sapp = 'sap' if buffs[C._BUFF_SAPP][C._LOG_SIM, lnext_hit[sub_index]] > 0.0 else ' '\n is_toep = 'toep' if buffs[C._BUFF_TOEP][C._LOG_SIM, lnext_hit[sub_index]] > 0.0 else ' '\n is_zhc = 'zhc' if buffs[C._BUFF_ZHC][C._LOG_SIM, lnext_hit[sub_index]] > 0.0 else ' '\n is_mqg = 'mqg' if buffs[C._BUFF_MQG][C._LOG_SIM, lnext_hit[sub_index]] > 0.0 else ' '\n is_pi = 'pi' if buffs[C._BUFF_POWER_INFUSION][C._LOG_SIM, lnext_hit[sub_index]] > 0.0 else ' '\n status = ' ic {:d} it {:4.2f} in {:s} id {:5.0f} sc {:d} st {:5.2f} cs {:2d} cl {:d} {:s} {:s} {:s} {:s} {:s}'\n ival = boss['tick_timer'][C._LOG_SIM]\n istat = '{:4.2f}'.format(ival) if ival > 0.0 and ival <= 2.0 else ' off'\n status = 
status.format(boss['ignite_count'][C._LOG_SIM],\n max([boss['ignite_timer'][C._LOG_SIM], 0.0]),\n istat,\n boss['ignite_value'][C._LOG_SIM],\n boss['scorch_count'][C._LOG_SIM],\n max([boss['scorch_timer'][C._LOG_SIM], 0.0]),\n player['comb_stack'][C._LOG_SIM, lnext_hit[sub_index]],\n player['comb_left'][C._LOG_SIM, lnext_hit[sub_index]],\n is_sapp,\n is_toep,\n is_zhc,\n is_mqg,\n is_pi)\n print(dam_done + message + status)\n\n def _do_tick(self, still_going, tick_array):\n C = self._C\n boss = self._arrays['boss']\n\n tick_hits = np.where(tick_array)[0]\n if tick_hits.size > 0:\n tic = still_going[tick_hits]\n add_time = boss['tick_timer'][tic]\n self._subtime(tic, add_time)\n \n ignite_expire = boss['ignite_timer'][tic] <= 0.0\n no_expire = tic[np.where(np.logical_not(ignite_expire))[0]]\n\n # new ignite mechanics (31MAR22) comment this section and uncomment block below to revert\n refresh_array = boss['ignite_timer'][tic] >= 2.0\n new_ignite = tic[np.where(refresh_array)[0]]\n no_ignite = tic[np.where(np.logical_not(refresh_array))[0]]\n boss['tick_timer'][new_ignite] = C._IGNITE_TICK\n boss['tick_timer'][no_ignite] = C._LONG_TIME\n\n ## uncomment to revert\n #yes_expire = tic[np.where(ignite_expire)[0]]\n #boss['tick_timer'][yes_expire] = C._LONG_TIME\n #boss['tick_timer'][no_expire] = C._IGNITE_TICK\n #multiplier = np.ones(no_expire.shape)\n \n scorch = C._SCORCH_MULTIPLIER*boss['scorch_count'][no_expire]\n multiplier = C._COE_MULTIPLIER*boss['ignite_multiplier'][no_expire]\n multiplier *= 1.0 + scorch*(boss['scorch_timer'][no_expire] > 0.0).astype(np.float)\n multiplier *= 1.0 + C._NIGHTFALL_BUFF*(boss[\"spell_vulnerability\"][no_expire] > 0.0).astype(np.float)\n\n rolls = np.random.rand(multiplier.size)\n conditions = [np.logical_and(rolls >= ll, rolls < ul) for ll, ul in zip(C._RES_THRESH, C._RES_THRESH_UL)]\n partials = np.piecewise(rolls, conditions, C._RES_AMOUNT)\n multiplier *= partials\n\n self._damage[no_expire] += multiplier*boss['ignite_value'][no_expire]\n self._ignite[no_expire] += multiplier*boss['ignite_value'][no_expire]\n if C._LOG_SIM >= 0:\n if C._LOG_SIM in no_expire:\n sub_index = no_expire.tolist().index(C._LOG_SIM)\n message = ' {:7.0f} ({:6.2f}): ignite ticked {:4.0f} damage done'\n print(message.format(self._arrays['global']['total_damage'][C._LOG_SIM] + self._damage[C._LOG_SIM],\n self._arrays['global']['running_time'][C._LOG_SIM],\n multiplier[sub_index]*boss['ignite_value'][C._LOG_SIM]))\n\n def _do_proc(self, still_going, proc_array):\n C = self._C\n player = self._arrays['player'] \n boss = self._arrays['boss']\n\n proc_hits = np.where(proc_array)[0]\n if proc_hits.size > 0:\n proc = still_going[proc_hits]\n next_proc = np.argmin(player['nightfall'][proc, :], axis=1)\n add_time = np.min(player['nightfall'][proc, :], axis=1)\n\n self._subtime(proc, add_time)\n\n player['nightfall'][proc, next_proc] = player[\"nightfall_period\"][next_proc]\n\n rolls = np.random.rand(proc.size)\n procs = np.where(rolls < C._NIGHTFALL_PROB)[0]\n boss[\"spell_vulnerability\"][proc[procs]] = C._NIGHTFALL_DURATION\n\n def _advance(self):\n going_array = (self._arrays['global']['running_time'] < self._arrays['global']['duration'])\n going_array &= np.logical_not(self._arrays['global']['decision'])\n still_going = np.where(going_array)[0]\n if still_going.size == 0:\n return False\n\n # cast finished\n cast_timer = np.copy(np.min(self._arrays['player']['cast_timer'][still_going, :], axis=1))\n spell_timer = np.copy(np.min(self._arrays['player']['spell_timer'][still_going, :], 
axis=1))\n tick_timer = np.copy(self._arrays['boss']['tick_timer'][still_going])\n proc_timer = np.copy(np.min(self._arrays['player']['nightfall'][still_going, :], axis=1))\n cast_array = (cast_timer < spell_timer) & (cast_timer < tick_timer) & (cast_timer < proc_timer)\n\n # casts\n self._do_cast(still_going, cast_array)\n\n # spell hits\n spell_array = np.logical_not(cast_array) & (spell_timer < tick_timer) & (spell_timer < proc_timer)\n self._do_spell(still_going, spell_array)\n\n # ticks\n tick_array = np.logical_not(cast_array | spell_array) & (tick_timer < proc_timer)\n self._do_tick(still_going, tick_array)\n \n # procs\n proc_array = np.logical_not(cast_array | spell_array | tick_array)\n self._do_proc(still_going, proc_array)\n \n return True\n \n def _apply_decisions(self, still_going, decisions, next_hit):\n C = self._C\n player = self._arrays['player']\n\n react_time = np.abs(self._response*np.random.randn(still_going.size)) \n\n player['cast_timer'][still_going, next_hit] = react_time\n player['cast_type'][still_going, next_hit] = decisions\n\n # spell is a fixed wait time\n wait = np.where(decisions == C._CAST_GCD)[0]\n player['cast_timer'][still_going[wait], next_hit[wait]] = C._CAST_TIME[C._CAST_GCD]\n \n # spell on global cooldown\n on_gcd = np.where(decisions < C._CAST_GCD)[0]\n player['cast_timer'][still_going[on_gcd], next_hit[on_gcd]] += C._CAST_TIME[decisions[on_gcd]]\n # mind quickening gem\n mqg = (player['buff_timer'][C._BUFF_MQG][still_going[on_gcd], next_hit[on_gcd]] > 0.0).astype(np.float)\n player['cast_timer'][still_going[on_gcd], next_hit[on_gcd]] /= (1.0 + C._MQG*mqg)\n \n player['gcd'][still_going[on_gcd], next_hit[on_gcd]] = np.maximum(0.0, C._GLOBAL_COOLDOWN + react_time[on_gcd] - player['cast_timer'][still_going[on_gcd], next_hit[on_gcd]])\n\n if C._LOG_SIM >= 0:\n if C._LOG_SIM in still_going:\n message = ' ({:6.2f}): player {:d} started casting {:s}'\n sub_index = still_going.tolist().index(C._LOG_SIM)\n message = message.format(self._arrays['global']['running_time'][C._LOG_SIM] + react_time[sub_index],\n next_hit[sub_index] + 1,\n C._LOG_SPELL[player['cast_type'][still_going[sub_index], next_hit[sub_index]]])\n print(message)\n self._arrays['global']['decision'] = np.zeros(self._arrays['global']['decision'].shape, dtype=np.bool)\n \n def run(self, ret_dist):\n double_dip = (1.0 + 0.1*float(\"sayges_dark_fortune_of_damage\" in self._world_buffs))\n double_dip *= (1.0 + 1.9*float(\"thaddius\" in self._boss_buffs))\n C = constants.Constant(double_dip)\n self._C = C\n\n self._arrays = self._array_generator.run(C, self._dur_dist is not None)\n\n decider = Decider(C,\n self._rotation,\n self._arrays['player']['cast_number'].shape,\n self._config)\n\n # prep for first player to \"move\"\n first_act = np.min(self._arrays['player']['cast_timer'], axis=1)\n self._arrays['global']['duration'] += first_act\n self._arrays['player']['cast_timer'] -= first_act[:, None]\n next_hit = np.argmin(self._arrays['player']['cast_timer'], axis=1)\n self._arrays['player']['cast_number'][np.arange(self._arrays['player']['cast_timer'].shape[0]), next_hit] += 1\n\n if C._LOG_SIM >= 0:\n constants.log_message()\n still_going = np.arange(self._arrays['global']['running_time'].size)\n while True:\n self._damage = np.zeros(self._arrays['global']['running_time'].size)\n self._crit = np.zeros(self._arrays['global']['running_time'].size)\n self._ignite = np.zeros(self._arrays['global']['running_time'].size)\n decisions, next_hit = decider.get_decisions(self._arrays, still_going)\n 
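# queue the chosen casts, then step the event loop until every active\n # simulation finishes or reaches its next decision point\n 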
self._apply_decisions(still_going, decisions, next_hit)\n while self._advance():\n still_going = np.where(self._arrays['global']['running_time'] < self._arrays['global']['duration'])[0]\n if self._dur_dist is None:\n self._arrays['global']['total_damage'][still_going] += self._damage[still_going]\n self._arrays['global']['crit'][still_going] += self._crit[still_going]\n self._arrays['global']['ignite'][still_going] += self._ignite[still_going]\n else:\n for sidx, stime in enumerate(self._arrays['global']['running_time']):\n self._arrays['global']['total_damage'][sidx].append((stime, self._damage[sidx], self._ignite[sidx]))\n if not still_going.size:\n break\n if self._dur_dist is None:\n #self._arrays['global']['total_damage'] -= (1 - C._RESISTANCE_MODIFIER)*self._arrays['global']['ignite']\n #self._arrays['global']['total_damage'] *= C._RESISTANCE_MODIFIER\n #self._arrays['global']['player'] *= C._RESISTANCE_MODIFIER\n #self._arrays['global']['crit'] *= C._RESISTANCE_MODIFIER\n #self._arrays['global']['ignite'] *= C._RESISTANCE_MODIFIER*C._RESISTANCE_MODIFIER\n \n if C._LOG_SIM >= 0 and self._dur_dist is None:\n print('total log damage = {:7.0f}'.format(self._arrays['global']['total_damage'][C._LOG_SIM]/self._arrays['player']['cast_number'].shape[1]/self._arrays['global']['duration'][C._LOG_SIM]))\n print('average damage = {:9.1f}'.format(self._arrays['global']['total_damage'].mean()))\n print('std damage = {:7.1f}'.format(self._arrays['global']['total_damage'].std()))\n print('crit damage = {:9.1f}'.format(self._arrays['global']['crit'].mean()))\n print('ignite damage = {:9.1f}'.format(self._arrays['global']['ignite'].mean()))\n \n dp_mage = self._arrays['global']['player']/self._arrays['global']['duration']\n solo_mage = dp_mage + self._arrays['global']['ignite']*len(self._config[\"target\"])/self._arrays['player']['cast_number'].shape[1]/self._arrays['global']['duration']\n frac_up1 = (solo_mage > 3700.0).sum()/dp_mage.size\n frac_up2 = (solo_mage > 4000.0).sum()/dp_mage.size\n frac_up3 = (solo_mage > 4400.0).sum()/dp_mage.size\n frac_up4 = (solo_mage > 4800.0).sum()/dp_mage.size\n frac_up5 = (solo_mage > 5240.0).sum()/dp_mage.size\n if ret_dist:\n return solo_mage\n npm = (977*dp_mage.size)//1000\n #npm = (900*dp_mage.size)//1000 # kill this!\n npm2 = (23*dp_mage.size)//1000\n smage = np.sort(solo_mage)\n \n return solo_mage.mean(), solo_mage.std(), smage[npm], frac_up1, frac_up2, frac_up3, frac_up4, frac_up5, smage[npm2]\n else:\n sim_size = len(self._arrays['global']['total_damage'])\n dur_dist = self._dur_dist\n cutoff = self._var*np.random.randn(len(dur_dist), sim_size)\n for didx, dur in enumerate(dur_dist):\n cutoff[didx, :] += dur\n cutoff[cutoff < 0.0] = 0.0\n \n total_dam = np.zeros((len(dur_dist), sim_size))\n ignite_dam = np.zeros((len(dur_dist), sim_size))\n for sidx in range(sim_size):\n cuts = cutoff[:, sidx]\n ptime = 0.0\n total_damage = 0.0\n total_ignite = 0.0\n for ctime, damage, ignite in self._arrays['global']['total_damage'][sidx]:\n if ptime > 0.0:\n for didx, cut in enumerate(cuts):\n if ctime > cut and ptime <= cut:\n total_dam[didx, sidx] = total_damage/cut\n ignite_dam[didx, sidx] = total_ignite/cut\n total_damage += damage\n total_ignite += ignite\n ptime = ctime\n total_dam[ctime <= cuts, sidx] = total_damage/ctime\n ignite_dam[ctime <= cuts, sidx] = total_ignite/ctime\n total_dam = total_dam.mean(axis=1)\n ignite_dam = ignite_dam.mean(axis=1)\n \n #total_dam -= (1 - C._RESISTANCE_MODIFIER)*ignite_dam\n #total_dam *= C._RESISTANCE_MODIFIER\n\n return 
total_dam\n\ndef get_damage(params, ret_dist=False, dur_dist=None):\n array_generator = constants.ArrayGenerator(params)\n encounter = Encounter(array_generator,\n params['rotation'],\n params['timing']['response'],\n params['timing']['duration']['var'],\n params['configuration'],\n params[\"buffs\"][\"world\"],\n params[\"buffs\"][\"boss\"],\n dur_dist)\n return encounter.run(ret_dist)\n\n","sub_path":"src/mechanics.py","file_name":"mechanics.py","file_ext":"py","file_size_in_byte":31870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"606213594","text":"\"\"\"runs the command line app for the mailroom as part of lesson 3\"\"\"\n\n# setup initial donor list\ndonors = {'Bill Gates': [100000.00, 5.00, 3000000.00],\n 'Paul Allen': [10.00, 1000000.00],\n 'Warren Buffet': [300000000.00],\n }\n\n\ndef thank_you():\n \"\"\"If the user (you) selects ‘Send a Thank You’, prompt for a Full Name.\n If the user types ‘list’, show them a list of the donor names and\n re-prompt. If the user types a name not in the list, add that name\n to the data structure and use it. If the user types a name in the\n list, use it. Once a name has been selected, prompt for a donation\n amount. Turn the amount into a number – it is OK at this point for\n the program to crash if someone types a bogus amount.\n Once an amount has been given, add that amount to the donation history\n of the selected user. Finally, use string formatting to compose an\n email thanking the donor for their generous donation. Print the email\n to the terminal and return to the original prompt.\n It is fine (for now) to forget new donors once the script quits running.\"\"\"\n exit_ind = False\n while not exit_ind:\n thank_you_input = input('Please select name: ')\n\n if thank_you_input.lower().strip() == 'list':\n display_donors()\n elif thank_you_input.lower().strip() == 'quit':\n exit_ind = True\n else:\n if thank_you_input not in donors:\n create_donor(thank_you_input)\n donation_amount = float(input(\"Select donation amount: \"))\n create_donation(fullname=thank_you_input, amount=donation_amount)\n send_thank_you(fullname=thank_you_input)\n exit_ind = True\n\n\ndef display_donors():\n \"\"\"diplays donors\"\"\"\n [print(donor) for donor in donors.keys()]\n\n\ndef create_donation(fullname, amount):\n \"\"\"adds a donation to the donors dict from user input\"\"\"\n donors[fullname].append(amount)\n\n\ndef create_donor(fullname):\n \"\"\"adds new donor to donors\"\"\"\n donors[fullname] = []\n\n\ndef send_thank_you(fullname):\n \"\"\"prints thank you message to terminal for donation\"\"\"\n print(f'Thank you {fullname} for your generous donation!')\n\n\ndef report():\n \"\"\"handles process for main screens report selection\n\n If the user (you) selected “Create a Report”, print a list of your donors,\n sorted by total historical donation amount.\n Include Donor Name, total donated, number of donations and average\n donation amount as values in each row. 
You do not need to print out all\n their donations, just the summary info.\n Using string formatting, format the output rows as nicely as possible.\n The end result should be tabular (values in each column should align\n with those above and below)\n After printing this report, return to the original prompt.\n At any point, the user should be able to quit their current task and\n return to the original prompt.\n From the original prompt, the user should be able to quit the script\n cleanly.\n Your report should look something like this:\n Donor Name | Total Given | Num Gifts | Average Gift\n ------------------------------------------------------------------\n William Gates, III $ 653784.49 2 $ 326892.24\n Mark Zuckerberg $ 16396.10 3 $ 5465.37\n Jeff Bezos $ 877.33 1 $ 877.33\n Paul Allen $ 708.42 3 $ 236.14\n \"\"\"\n print(f\"{'Donor Name':<26}|{'Total Given':^15}|{'Num Gifts':^11}|{'Average Gift':^15}\")\n print('-'*70)\n donor_stats = []\n for donor in donors.keys():\n donor_stats.append(summarize_donor(donor))\n donor_stats.sort(key=lambda tup: tup[1], reverse=True)\n \n for summary in donor_stats:\n print(f\"{summary[0]:<26} ${summary[1]:>13.2f} {summary[2]:>10} ${summary[3]:>14.2f}\")\n\n\ndef summarize_donor(donor_name):\n \"\"\"generates donor summary\n args:\n donor_name: donor name matching key from donors\n returns\n tuple with fields (donor name, total given, num gifts, average gift)\"\"\"\n total_given = sum(donors[donor_name])\n num_gifts = len(donors[donor_name])\n average_gift = total_given/num_gifts\n \n return(donor_name, total_given, num_gifts, average_gift)\n\n\nif __name__ == '__main__':\n # initial placeholder for input\n user_input = None\n\n # run until user specifies to get out\n while user_input != 'quit':\n user_input = input('Options:\\n'\n '\\tSend a Thank You\\n'\n '\\tCreate a Report\\n'\n '\\tquit\\n'\n 'Please input option: ')\n\n # cleans up user input to make more robust.\n user_input = user_input.lower().strip()\n\n if user_input == 'send a thank you':\n thank_you()\n elif user_input == 'create a report':\n report()\n","sub_path":"students/paul_jurek/lesson03/mailroom.py","file_name":"mailroom.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"66284957","text":"# Karimn Daniel Hernández Castorena\r\n# Programa que permite al usuario realizar diferentes acciones.\r\n\r\ndef combinarLetras(c):\r\n acu = \"\"\r\n for a in range(len(c)):\r\n if a % 2 == 0:\r\n acu += c[a].upper()\r\n else:\r\n acu += c[a].lower()\r\n print()\r\n return acu\r\n\r\n\r\ndef contieneLasVocales(cadena):\r\n xx = cadena.lower()\r\n a = \"a\" or \"á\"\r\n e = \"e\" or \"é\"\r\n i = \"i\" or \"í\"\r\n o = \"o\" or \"ó\"\r\n u = \"u\" or \"ú\"\r\n if a in xx and e in xx and i in xx and o in xx and u in xx:\r\n print()\r\n return True\r\n else:\r\n print()\r\n return False\r\n\r\n\r\ndef formarNombreUsuario(nombre, apellido, matricula):\r\n n = nombre.lower()\r\n a = apellido.lower()\r\n m = str(matricula)\r\n y = n[0:3] + a[0:3] + m[4:7]\r\n print()\r\n return y\r\n\r\n\r\ndef esCorrecto(nom):\r\n palabras = nom.split()\r\n contadorcorrecto = 0\r\n contadorincorrecto = 0\r\n for l in palabras:\r\n minusculas = l[1:]\r\n mayuscula = l[0:1]\r\n if minusculas.islower() and mayuscula.isupper():\r\n contadorcorrecto += 1\r\n else:\r\n contadorincorrecto += 1\r\n if contadorcorrecto == 3:\r\n print()\r\n return True\r\n elif contadorincorrecto >= 1:\r\n print()\r\n return 
False\r\n\r\n\r\ndef traducirTelefono(tel):\r\n x = \"\"\r\n tel = tel.lower()\r\n for letras in tel:\r\n if \"a\" == letras or \"b\" == letras or \"c\" == letras:\r\n x = x + \"2\"\r\n elif \"d\" == letras or \"e\" == letras or \"f\" == letras:\r\n x = x + \"3\"\r\n elif \"g\" == letras or \"h\" == letras or \"i\" == letras:\r\n x = x + \"4\"\r\n elif \"j\" == letras or \"k\" == letras or \"l\" == letras:\r\n x = x + \"5\"\r\n elif \"m\" == letras or \"n\" == letras or \"o\" == letras:\r\n x = x + \"6\"\r\n elif \"p\" == letras or \"q\" == letras or \"r\" == letras or \"s\" == letras:\r\n x = x + \"7\"\r\n elif \"t\" == letras or \"u\" == letras or \"v\" == letras:\r\n x = x + \"8\"\r\n elif \"w\" == letras or \"x\" == letras or \"y\" == letras or \"z\" == letras:\r\n x = x + \"9\"\r\n elif \"-\" == letras:\r\n x = x + \"-\"\r\n juas = \"01800\" + x\r\n return juas\r\n\r\n\r\ndef main():\r\n print()\r\n c = (input(\"Teclea una palabra para combinarla entre mayúsculas y minúsculas: \"))\r\n print(combinarLetras(c))\r\n print()\r\n print(\"-------------------------------------------\")\r\n print()\r\n v = (input(\"Teclea una palabra y te diré si contiene todas las vocales: \"))\r\n print(contieneLasVocales(v))\r\n print()\r\n print(\"-------------------------------------------\")\r\n print()\r\n nombre = str(input(\"Teclea tu nombre: \"))\r\n apellido = str(input(\"Teclea tu apellido: \"))\r\n matrícula = int(input(\"Teclea tu matrícula: A0\"))\r\n formarNombreUsuario(nombre, apellido, matrícula)\r\n print()\r\n print(\"-------------------------------------------\")\r\n print()\r\n print(\"Vamos a analizar si la regla de las mayúsculas es correcta.\")\r\n print()\r\n print(\"Introduce tu primer nombre y tus dos apellidos.\")\r\n print(\"(Para que la regla se cumpla la primera letra de cada palabra debe ser mayúscula)\")\r\n print()\r\n nom = str(input(\"¿Cómo te llamas? 
\"))\r\n print(esCorrecto(nom))\r\n print()\r\n print(\"-------------------------------------------\")\r\n print()\r\n print(\"Traduciremos tu teléfono.\")\r\n print(\"Introduce dos palabras, una de tres letras y otra de cuatro.\")\r\n tel = str(input(\"Teclea las palabras (01-800-XXX-XXXX): 01-800-\"))\r\n print()\r\n print(traducirTelefono(tel))\r\n print()\r\n print(\"-------------------------------------------\")\r\n print()\r\n\r\n\r\nmain()\r\n","sub_path":"Misión_08.py","file_name":"Misión_08.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"461786688","text":"#!/usr/bin/env/python\n# \n# More of a reference of using jinaj2 without actual template files.\n# This is great for a simple output transformation to standard out.\n#\n# Of course you will need to \"sudo pip install jinja2\" first!\n#\n# I like to refer to the following to remember how to use jinja2 :)\n# http://jinja.pocoo.org/docs/templates/\n#https://gist.github.com/wrunk/1317933\n#http://matthiaseisen.com/pp/patterns/p0198/\n#\n\nfrom jinja2 import Environment, FileSystemLoader\nimport os\n# Import modules for CGI handling\nimport cgi, cgitb\nimport sozluk\nimport metaboxengine_\n\n\n\n# Capture our current directory\nTHIS_DIR = os.path.dirname(os.path.abspath(__file__))\n\ndef print_html_doc():\n # Create the jinja2 environment.\n # Notice the use of trim_blocks, which greatly helps control whitespace.\n j2_env = Environment(loader=FileSystemLoader(THIS_DIR), trim_blocks=True)\n #print (j2_env.get_template('jinja_test_template.html').render( title='Hellow Gist from GutHub' ))\n title = 'Hellow Gist from GutHub'\n template=j2_env.get_template('jinja_test_template.html')\n templateVars = {\"title\": \"Test Example\",\n \"description\": \"A simple inquiry of function.\"}\n\n print (template.render( templateVars ))\n\n\t\n\t\n\n# Create instance of FieldStorage \nform = cgi.FieldStorage() \n\nif form.getvalue('text_'):\n t_flag = form.filename\nelse:\n t_flag = \"OFF\"\n\n# Get data from fields\nif form.getvalue('maths'):\n math_flag = \"ON\"\nelse:\n math_flag = \"OFF\"\n\nif form.getvalue('physics'):\n physics_flag = \"ON\"\nelse:\n physics_flag = \"OFF\"\n\nprint (\"Content-type:text/html\")\nprint()\n\n#print(a)\nprint (\"
text is : %s\" % t_flag)\nprint (\"CheckBox Maths is : %s\" % math_flag)\nprint (\"CheckBox Physics is : %s
\" % physics_flag)\nprint(sozluk.ara(\"kitap\"))\nprint(metaboxengine_.metaboxEngine.kabiliyetleri)\nif __name__ == '__main__':\n print_html_doc()\n\t\n\t","sub_path":"pyhton_ogreniyorum/cgi-bin/jinja.py","file_name":"jinja.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"79164779","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('website', '0002_auto_20150509_2021'),\n ]\n\n operations = [\n migrations.AlterModelTable(\n name='advertising',\n table='Advertising',\n ),\n ]\n","sub_path":"bdz/website/migrations/0003_auto_20150509_2032.py","file_name":"0003_auto_20150509_2032.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"616116794","text":"#!/usr/bin/python\nimport cv2\nimport sys\nfrom PyQt5.QtWidgets import QWidget, QLabel, QApplication, QPushButton, QListWidget, QProgressBar, QStatusBar, QTabWidget, QTableWidget, QTableWidgetItem, QVBoxLayout, QMainWindow, QAbstractItemView, QLineEdit, QShortcut, QMessageBox\nfrom PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot, QObject\nfrom PyQt5.QtGui import QImage, QPixmap, QKeySequence, QDoubleValidator\nimport rospy, rostopic\nfrom sensor_msgs.msg import CompressedImage, Image, LaserScan\nimport numpy as np\nfrom numpy import inf\nfrom cv_bridge import CvBridge\nimport logging\nfrom geometry_msgs.msg import Vector3Stamped, TwistStamped, Twist\nfrom rosgraph_msgs.msg import Log\nfrom sensor_msgs.msg import Imu, Temperature\nfrom mav_msgs.msg import Status, RollPitchYawrateThrust\nfrom math import *\nimport subprocess, shlex\nimport signal\nimport os\nimport message_filters\nimport rosbag\nfrom std_msgs.msg import Int32, String, Empty\nimport unicodedata\nimport time\nfrom nav_msgs.msg import Odometry\nfrom bebop_msgs.msg import CommonCommonStateBatteryStateChanged\nimport threading\n##################################################################\n\nclass App(QWidget):\n changeQImage = pyqtSignal(QImage) #Creating a signal\n changeLog = pyqtSignal(Log) #Creating a signal\n changeBatteryBar = pyqtSignal(CommonCommonStateBatteryStateChanged) #Creating a siganl\n\n checkHz = pyqtSignal(list)\n def __init__(self):\n super(QWidget, self).__init__()\n self.velocityPub = rospy.Publisher(\"/bebop/cmd_vel\", Twist, queue_size=10)\n self.takeoffPub = rospy.Publisher('/bebop/takeoff', Empty, queue_size=10)\n self.landPub = rospy.Publisher('/bebop/land', Empty, queue_size=10)\n self.goFwrd_vel = Twist()\n self.goBwrd_vel = Twist()\n self.recording = False #creating variable that keeps record if we're recording\n self.logCount = 0 #count for adding right amount of rows to Log\n self.count5 = 0 #count so we only print one ready/error takeoff msg per takeoff\n #subscriber to battery, Image, Log and LaserScan\n self.batterySub = rospy.Subscriber(\"/bebop/states/common/CommonState/BatteryStateChanged\", CommonCommonStateBatteryStateChanged, self.batteryCallback, queue_size=10)\n self.imgSub = rospy.Subscriber(\"/bebop/image_raw\", Image, self.imageCallback, queue_size = 10)\n self.logSub = rospy.Subscriber(\"/rosout_agg\", Log, self.logCallback, queue_size = 10)\n self.recording = False\n threading.Timer(0.1, self.hzFunk).start()\n 
self.initUI()\n##-------------------------------------------------------------------------------------------------------\n @pyqtSlot(QImage)\n def setImage(self, image):\n self.imageLabel.setPixmap(QPixmap.fromImage(image)) #Setting QImage in imageLabel\n\n @pyqtSlot(Log)\n def setLog(self, log):\n #Setting table\n self.logger.setItem(self.logCount,0, QTableWidgetItem(log.msg)) #printing msg to table\n self.logger.setItem(self.logCount,1, QTableWidgetItem(str(log.level))) #printing error lever table\n self.logger.setItem(self.logCount,2, QTableWidgetItem(log.name)) #printing node-name to table\n self.logger.setItem(self.logCount,3, QTableWidgetItem(str(log.header.stamp))) #printing timestamp to table\n self.logger.setRowCount(4+self.logCount) #adding new row\n self.logCount+=1\n\n @pyqtSlot(CommonCommonStateBatteryStateChanged)\n def setBatteyBar(self, status):\n self.batteryBar.setValue(status.percent) # setting battery percent\n\n @pyqtSlot(list)\n def getHz(self, list):\n self.hzLogger.setItem(0,0, QTableWidgetItem(str(list[0])))\n self.hzLogger.setItem(1,0, QTableWidgetItem(str(list[1])))\n self.hzLogger.setItem(2,0, QTableWidgetItem(str(list[2])))\n try:\n if float(self.hzLogger.item(0,0).text()) < float(self.hzLogger.item(0,1).text()):\n self.hzLogger.item(0,0).setBackground(QColor(250,0,0))\n except:\n pass\n try:\n if float(self.hzLogger.item(1,0).text()) < float(self.hzLogger.item(1,1).text()):\n self.hzLogger.item(1,0).setBackground(QColor(250,0,0))\n except:\n pass\n try:\n if float(self.hzLogger.item(2,0).text()) < float(self.hzLogger.item(2,1).text()):\n self.hzLogger.item(2,0).setBackground(QColor(250,0,0))\n except:\n pass\n #Callback for QImage\n def imageCallback(self, data):\n bridge = CvBridge()\n rgbImage = bridge.imgmsg_to_cv2(data, desired_encoding=\"passthrough\") #converting rosimage to cvimage\n h, w, ch = rgbImage.shape\n bytesPerLine = ch * w\n convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888) #converting cvimage to QImage\n p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)\n self.changeQImage.emit(p) #emitting QImage through changeQImage-signal to setImage\n #Callback for Log\n def logCallback(self, log):\n self.changeLog.emit(log) #emitting rosout_agg-info through changeLog-signal to setLog\n #Callback for battery\n def batteryCallback(self, status):\n self.changeBatteryBar.emit(status) #emitting battarystatus through changeBatteryBar-signal to setBatteyBar\n##------------------------------------------------------------------------------------------------------\n def takeOff(self):\n ret = self.checkRecStatus()\n if ret == 1048576 or ret == None: #number for ignore-action\n self.takeoffPub.publish()\n else:\n pass\n\n def land(self):\n self.landPub.publish()\n\n def goFwrd(self):\n self.goFwrd_vel.linear.x = +1\n self.velocityPub.publish(self.goFwrd_vel)\n\n def goBwrd(self):\n self.goBwrd_vel.linear.x = -1\n self.velocityPub.publish(self.goBwrd_vel)\n\n def turnRight(self):\n self.turnRight = Twist()\n self.turnRight.angular.z = -0.5\n self.velocityPub.publish(self.turnRight)\n\n def turnLeft(self):\n self.turnLeft = Twist()\n self.turnLeft.angular.z = +0.5\n self.velocityPub.publish(self.turnLeft)\n\n def hover(self):\n self.hover = Twist()\n self.hover.linear.x = 0\n self.hover.linear.y = 0\n self.hover.linear.z = 0\n self.hover.angular.z = 0\n self.velocityPub.publish(self.hover)\n\n def hzFunk(self):\n a = rostopic.ROSTopicHz(-1)\n b = rostopic.ROSTopicHz(-1)\n c = rostopic.ROSTopicHz(-1)\n 
rospy.Subscriber(\"/bebop/cmd_vel\", Twist, a.callback_hz)\n rospy.Subscriber(\"/bebop/odom\", Odometry, b.callback_hz)\n rospy.Subscriber(\"/bebop/image_raw\", Image, c.callback_hz)\n time.sleep(1.0)\n list = []\n try:\n list.append(a.get_hz()[0])\n except:\n list.append(\"No message\")\n try:\n list.append(b.get_hz()[0])\n except:\n list.append(\"No message\")\n try:\n list.append(c.get_hz()[0])\n except:\n list.append(\"No message\")\n self.checkHz.emit(list)\n\n def checkRecStatus(self):\n if self.recording == False:\n ret = self.recMessageBox(\"You are not recording!\")\n return ret\n else:\n pass\n##------------------------------------------------------------------------------------------------------------\n #FUNCTIONS FOR RECORDING\n def rec_funk(self):\n #if we're not already recording--> changing look of rec btn, setting rec to true and\n #setting selected topics to list of all selected topics in topicLogger.\n #Else --> end recording\n if self.recording == False:\n self.rec_btn.setStyleSheet(\"border-radius :40; background-color: green; border : 2px solid darkgreen;font-size: 30px;font-family: Arial\")\n self.recording = True\n selectedTopics = self.topicLogger.selectedItems()\n # if at least one topic is selected--> record trem. Else --> record all topics.\n if len(selectedTopics)>0:\n command = \"\"\n for i in selectedTopics:\n command = command + \" \" + i.text()\n command = shlex.split(\"rosbag record \" + command)\n self.rosbag_record = subprocess.Popen(command)\n else:\n command = shlex.split(\"rosbag record -a\")\n self.rosbag_record = subprocess.Popen(command)\n else:\n list_cmd = subprocess.Popen(\"rosnode list\", shell=True, stdout=subprocess.PIPE)\n list_output = list_cmd.stdout.read()\n retcode = list_cmd.wait()\n assert retcode == 0, \"List command returned %d\" % retcode\n for string in list_output.split(\"\\n\"):\n if (string.startswith(\"/record\")):\n os.system(\"rosnode kill \" + string)\n self.rec_btn.setStyleSheet(\"border-radius :40; background-color: red; border : 2px solid darkred;font-size: 30px;font-family: Arial\")\n self.topicLogger.clearSelection()\n self.recording = False\n##-----------------------------------------------------------------------------------------------------------\n def initUI(self):\n #create mainwindow\n self.setWindowTitle(\"ROS usb_cam\")\n self.setGeometry(250, 100, 1100, 687)\n #creating edge label2\n self.edgeLabel = QLabel(self)\n self.edgeLabel.setStyleSheet(\"border: 1px solid grey; background-color: #CCCCCC\")\n self.edgeLabel.move(650, 65)\n self.edgeLabel.resize(445, 358)\n #create a Image Label\n self.imageLabel = QLabel(self)\n self.imageLabel.move(4, 4)\n self.imageLabel.resize(640, 480)\n #create QTableWidget for logger\n self.logger =QTableWidget(self)\n self.logger.resize(780,180)\n self.logger.setRowCount(6)\n self.logger.setColumnCount(4)\n self.logger.setColumnWidth(0, 400)\n self.logger.setColumnWidth(1, 80)\n self.logger.setColumnWidth(2, 158)\n self.logger.setHorizontalHeaderLabels((\"Message;Severity;Node;Timestamp\").split(\";\"))\n #create QTableWidget for topics\n self.topicLogger =QTableWidget(self)\n self.topicLogger.resize(780,180)\n self.topicLogger.setRowCount(45)\n self.topicLogger.setColumnCount(2)\n self.topicLogger.setColumnWidth(0, 400)\n self.topicLogger.setColumnWidth(1, 380)\n self.topicLogger.setHorizontalHeaderLabels((\"Topic;Msgs\").split(\";\"))\n self.topicLogger.setSelectionMode(QAbstractItemView.MultiSelection)\n topics = rospy.get_published_topics(namespace='/')\n count2 = 0\n for i in 
topics:\n self.topicLogger.setItem(count2,0, QTableWidgetItem(i[0]))\n self.topicLogger.setItem(count2,1, QTableWidgetItem(i[1]))\n count2 +=1\n #creating hz-sensor logger\n self.hzLogger =QTableWidget(self)\n self.hzLogger.resize(780,180)\n self.hzLogger.setRowCount(3)\n self.hzLogger.setColumnCount(2)\n self.hzLogger.setColumnWidth(0, 500)\n self.hzLogger.setVerticalHeaderLabels((\"Velocity commands;Odometry;Image\").split(\";\"))\n self.hzLogger.setHorizontalHeaderLabels((\"Frequencies;Preferd min\").split(\";\"))\n #creating tabWidget to insert logger osv\n self.tabWidget = QTabWidget(self)\n self.tabWidget.move(310, 440)\n self.tabWidget.resize(780,240)\n self.tabWidget.addTab(self.logger, \"rosout_agg info\")\n self.tabWidget.addTab(self.topicLogger, \"Topics\")\n self.tabWidget.addTab(self.hzLogger, \"Topic frequencies\")\n #create push button - deactivate\n self.takeoff_btn = QPushButton('Takeoff', self)\n self.takeoff_btn.move(700,100)\n self.takeoff_btn.clicked.connect(self.takeOff)\n #create push button - activate\n self.land_btn = QPushButton('Land', self)\n self.land_btn.move(800,100)\n self.land_btn.clicked.connect(self.land)\n #create push button - go forward\n self.goFwrd_btn = QPushButton('Go forward', self)\n self.goFwrd_btn.move(830,200)\n self.goFwrd_btn.clicked.connect(self.goFwrd)\n #create push button - go backward\n self.goBwrd_btn = QPushButton('Go backward', self)\n self.goBwrd_btn.move(830,300)\n self.goBwrd_btn.clicked.connect(self.goBwrd)\n #create push button - turn right\n self.turnRight_btn = QPushButton('Turn right', self)\n self.turnRight_btn.move(910,250)\n self.turnRight_btn.clicked.connect(self.turnRight)\n #create push button - turn left\n self.turnLeft_btn = QPushButton('Turn left', self)\n self.turnLeft_btn.move(750,250)\n self.turnLeft_btn.clicked.connect(self.turnLeft)\n #create push button - hover\n self.hover_btn = QPushButton('HOVER', self)\n self.hover_btn.move(700,140)\n self.hover_btn.clicked.connect(self.hover)\n #create push button - recording\n self.rec_btn = QPushButton('REC', self)\n self.rec_btn.setGeometry(1000, 85, 80, 80)\n self.rec_btn.setStyleSheet(\"border-radius :40; background-color: red; border : 2px solid darkred;font-size: 30px;font-family: Arial\")\n self.rec_btn.clicked.connect(self.rec_funk)\n #creating LTU logo label\n self.label_ltu_image = QLabel(self)\n pixmap_ltu_image = QPixmap(\"./image/LTU.png\")\n pixmap_ltu_image = pixmap_ltu_image.scaled(200, 200, Qt.KeepAspectRatio, Qt.FastTransformation)\n self.label_ltu_image.setPixmap(pixmap_ltu_image)\n self.label_ltu_image.move(4, 450)\n #creating robotics team logo label\n self.robteamImage = QLabel(self)\n robteamPixmap = QPixmap(\"./image/robteam.png\")\n robteamPixmap = robteamPixmap.scaled(300, 300, Qt.KeepAspectRatio, Qt.FastTransformation)\n self.robteamImage.setPixmap(robteamPixmap)\n self.robteamImage.move(4, 600)\n # #Creating battery label and bar\n self.batteryLabel = QLabel(\"Battery\", self)\n self.batteryLabel.move(5, 0)\n self.batteryLabel.resize(50, 20)\n self.batteryBar = QProgressBar(self)\n self.batteryBar.move(5, 25)\n self.batteryBar.resize(150, 30)\n self.batteryBar.setMaximum(100)\n self.show() #showing GUI\n #creating different keybord shortcuts\n self.shortcut_fwrd = QShortcut(QKeySequence(\"w\"), self)\n self.shortcut_fwrd.activated.connect(self.goFwrd)\n self.shortcut_bwrd = QShortcut(QKeySequence(\"s\"), self)\n self.shortcut_bwrd.activated.connect(self.goBwrd)\n self.shortcut_turnRight = QShortcut(QKeySequence(\"d\"), self)\n 
self.shortcut_turnRight.activated.connect(self.turnRight)\n self.shortcut_turnLeft = QShortcut(QKeySequence(\"a\"), self)\n self.shortcut_turnLeft.activated.connect(self.turnLeft)\n self.shortcut_takeoff = QShortcut(QKeySequence(\"k\"), self)\n self.shortcut_takeoff.activated.connect(self.takeOff)\n self.shortcut_land = QShortcut(QKeySequence(\"l\"), self)\n self.shortcut_land.activated.connect(self.land)\n\n self.changeQImage.connect(self.setImage) #connecting signal to slot\n self.changeLog.connect(self.setLog) #connecting signal to slot\n self.checkHz.connect(self.getHz) #connecting signal to slot\n self.changeBatteryBar.connect(self.setBatteyBar) #connecting signal to slot\n\n #creating warning-messagebox\n def messageBox(self, text):\n self.rec_msgBox = QMessageBox()\n self.rec_msgBox.setWindowModality(False)\n self.rec_msgBox.setIcon(QMessageBox.Warning)\n self.rec_msgBox.setWindowTitle(\"Warning\")\n self.rec_msgBox.setText(text)\n self.rec_msgBox.show()\n #creating warning-message for recording\n def recMessageBox(self, text):\n self.rec_msgBox = QMessageBox()\n self.rec_msgBox.setIcon(QMessageBox.Warning)\n self.rec_msgBox.setWindowTitle(\"Warning\")\n self.rec_msgBox.setText(text)\n self.rec_msgBox.setStandardButtons(QMessageBox.Ignore | QMessageBox.Abort)\n ret = self.rec_msgBox.exec_()\n if ret == QMessageBox.Abort:\n return ret\n else:\n return ret\n##---------------------------------------------------------------------------------------------\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n rospy.init_node('image_read', anonymous=True)\n ex = App()\n sys.exit(app.exec_())\n","sub_path":"bebop_gui.py","file_name":"bebop_gui.py","file_ext":"py","file_size_in_byte":16520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"5642278","text":"import sqlite3\nfrom sqlite3 import Error\ndef create_conn():\n conn = None\n try:\n conn = sqlite3.connect('database.db')\n return conn\n except Error as e:\n print(e)\n return conn\n \n\ndef insert_accounts(values):\n conn = create_conn()\n cur = conn.cursor()\n sql = \"insert into accounts(acc_id, cust_id,acc_type,balance,message,last_updated,status) values({});\".format(values);\n \n try:\n cur.execute(sql)\n except:\n return False\n conn.commit()\n conn.close()\n return True\n \n\ndef read_accounts(condition=\"1=1\"):\n conn = create_conn()\n cur = conn.cursor()\n sql = \"select * from accounts where {};\".format(condition)\n print(sql)\n try:\n cur.execute(sql)\n except:\n print(\"Something Went wrong\")\n rows = cur.fetchall()\n \n conn.commit()\n conn.close()\n return rows\n \n\ndef update_accounts(values, condition=\"1=1\"):\n conn = create_conn()\n cur = conn.cursor()\n sql = \"update accounts set {} where {};\".format(values, condition)\n \n try:\n cur.execute(sql)\n except:\n print(\"Something Went wrong\")\n conn.commit()\n conn.close()\n \n\ndef delete_accounts(condition=\"1=1\"):\n conn = create_conn()\n cur = conn.cursor()\n sql = \"delete from accounts where {};\".format(condition)\n \n try:\n cur.execute(sql)\n except:\n print(\"Something Went wrong\")\n conn.commit()\n conn.close()\n\ndef getLastRow():\n conn = create_conn()\n cur = conn.cursor()\n sql = \"SELECT * FROM accounts ORDER BY acc_id DESC LIMIT 1;\" \n try:\n cur.execute(sql)\n except:\n print(\"Something Went wrong\")\n rows = cur.fetchall() \n conn.commit()\n conn.close()\n return 
rows","sub_path":"application/models/accounts.py","file_name":"accounts.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"147939546","text":"num = int(input('How many numbers: '))\nlst = []\nfor n in range(num):\n numbers = int(input('Enter number '))\n lst.append(numbers)\n\nprint(\"Maximum element in the list is :\", max(lst))\n\n# using sort\nlist1 = [10, 20, 4, 45, 99]\n\nlist1.sort()\n\nprint(\"Largest element is:\", list1[-1])\n\n# using max function\nlist1 = [10, 20, 4, 45, 99]\n\nprint(\"Largest element is:\", max(list1))\n","sub_path":"largest number in list.py","file_name":"largest number in list.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"153622877","text":"import re\nimport nltk\nfrom nltk import word_tokenize\n\n# word_set (an iterable of words) is assumed to be defined elsewhere in the assignment\n# 1. Find the 3 most common consonant sequences of each length (3, 4, 5)\ncon_seq_3 = [re.findall(r'([^aeiou0-9\\W]{3})', word.lower()) for word in word_set]\ntop3_3 = nltk.FreqDist(sum(con_seq_3, [])).most_common(3)\n\ncon_seq_4 = [re.findall(r'([^aeiou0-9\\W]{4})', word.lower()) for word in word_set]\ntop3_4 = nltk.FreqDist(sum(con_seq_4, [])).most_common(3)\n\ncon_seq_5 = [re.findall(r'([^aeiou0-9\\W]{5})', word.lower()) for word in word_set]\ntop3_5 = nltk.FreqDist(sum(con_seq_5, [])).most_common(3)\n\nprint(top3_3)\nprint(top3_4)\nprint(top3_5)\n\n# 2. Fill in the blank\nprint('*' * 100)\nprint([int(n) for n in re.findall(r'[0-9]+', '2009-12-31')])\n\n# 3. Create long sentences and apply stemmer\nprint('*' * 100,)\n\nmy_string = '''\nI am a bus driver and my friend is writer. My friend said that creation is harder than friendship.\nAlso happiness is better than population.'''\n\nmy_string = my_string.strip().replace('\\n', ' ')\ntokens = word_tokenize(my_string)\n\nporter = nltk.PorterStemmer() # create the Porter stemmer object\nlancaster = nltk.LancasterStemmer() # create the Lancaster stemmer object\n\nporter_stem = [porter.stem(w) for w in tokens]\nlancaster_stem = [lancaster.stem(w) for w in tokens]\n\nresult_3grams = [(a,b,c) \n for a,b,c in zip(tokens, porter_stem, lancaster_stem)\n if (a != b) or (a != c)]\n\nprint('raw', 'porter_stem', 'lancaster_stem')\nfor tup in result_3grams:\n print(tup)\n\nprint('')\nprint('Stemmers sometimes convert to lowercase. I think stemmers do not do their roles properly.... 
')","sub_path":"NLP_HW5_Justin.py","file_name":"NLP_HW5_Justin.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"203833454","text":"#!/usr/bin/env python\n\nimport glob\nimport sys\nimport os\nimport platform\nimport subprocess\n\n\ndef install_package(package):\n\n pip_cmd = [sys.executable, \"-m\", \"pip\", \"install\", \"-U\", \"--disable-pip-version-check\", package]\n cmd = \"yes w | \" + \" \".join(pip_cmd)\n subprocess.call(cmd, shell=True)\n\n\ndef main():\n print(\"Installing mcli locally\")\n\n mlcomp_dir = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))\n print(\"MCli dir: {}\".format(mlcomp_dir))\n\n dist_dir = os.path.join(mlcomp_dir, \"dist\")\n\n files = os.listdir(dist_dir)\n print(files)\n python_version = platform.python_version_tuple()\n potential_wheels = glob.glob(os.path.join(dist_dir, \"*-py{}*.whl\".format(python_version[0])))\n if not potential_wheels:\n raise Exception(\"Wheel file not exist in: {}\".format(dist_dir))\n\n if len(potential_wheels) != 1:\n raise Exception(\"Unexpected number of wheels: {}\".format(potential_wheels))\n\n install_package(potential_wheels[0])\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"mcenter_cli/bin/install_locally.py","file_name":"install_locally.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"644391045","text":"#!/usr/bin/env python\n\n'''\nCopyright 2015 William Baskin\n\n/*****************************************\n LICENSE SUMMARY\n\n This package is licensed under the \n MIT License. Please see the LICENSE.md\n file in the root folder for the \n complete license.\n\n *****************************************/\n '''\n\nimport rospy\nimport math\n\nfrom std_msgs.msg import Float32, Bool\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Twist\n\nclass Balancer(object):\n def __init__(self):\n rospy.init_node('balancer', anonymous=True)\n\n self.last_imu = None\n self.last_odom = None\n self.last_v_odom = None\n\n # PUB/SUB Setup\n self.conf_pub = rospy.Publisher('odom_confidence', Float32, queue_size=1)\n self.init_pub = rospy.Publisher('initialize_localization', Bool, queue_size=1)\n self.cmd_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)\n self.odom_sub = rospy.Subscriber('odom2odom', Odometry, self.odom_cb)\n self.imu_sub = rospy.Subscriber('imu2odom', Odometry, self.imu_cb)\n self.v_odom_sub = rospy.Subscriber('vodom2odom', Odometry, self.visual_odom_cb)\n\n # MAIN METHOD\n\n def listal(self): # listen-talk\n hz = 10\n rate = rospy.Rate(hz)\n for _ in range(0, hz*5):\n # ask other nodes to initialize for 5 seconds\n self.conf_pub.publish(-1.0)\n self.init_pub.publish(True)\n self.cmd_pub.publish(self.zero_twist())\n rate.sleep()\n\n for _ in range(0, hz*6):\n # wait 6 seconds for other nodes to initialize for 5 seconds\n self.init_pub.publish(False)\n self.conf_pub.publish(-1.0)\n self.cmd_pub.publish(self.zero_twist())\n rate.sleep()\n\n # RUN\n\n rate = rospy.Rate(10) # 10hz\n while not rospy.is_shutdown():\n if self.last_odom and self.last_imu:\n confidence = self.calculate_confidence(\n [(self.last_odom, 1.0,)], \n [(self.last_imu, 0.8,), (self.last_v_odom, 0.0,)])\n self.conf_pub.publish(confidence)\n rate.sleep()\n else:\n rospy.loginfo('balance: no odom or imu data')\n rate.sleep()\n\n\n # ERROR MONITORING\n\n def calculate_confidence(self, primary, auditor):\n # takes in 
two lists: primary data sources, auditor data sources\n # primary is usually wheel odometry (\"odom\"), or whatever the robot is using as its location\n # auditor is the list of inputs (in Odometry message format) that are not directly driving \n # robot, but are local or global references to compare odometry\n # ex. Imu\n # if the auditor data and the primary data diverge, then the confidence goes down\n # at low confidence, it is likely that there is slip/error in the primary data\n # error is measured at the x,y level\n # lists are lists of tuples (triples): (Odometry_msg, confidence_weight,)\n p_x = 0\n p_y = 0\n a_x = 0\n a_y = 0\n p_weight = 0\n a_weight = 0\n rospy.loginfo('primary: '+str(primary))\n for sensor in primary:\n p_x = p_x + sensor[0].pose.pose.position.x*sensor[1] # x coordinate (original read .y here, a copy-paste bug)\n p_y = p_y + sensor[0].pose.pose.position.y*sensor[1]\n p_weight = p_weight + sensor[1]\n\n for sensor in auditor:\n a_x = a_x + sensor[0].pose.pose.position.x*sensor[1] # x coordinate (original read .y here, a copy-paste bug)\n a_y = a_y + sensor[0].pose.pose.position.y*sensor[1]\n a_weight = a_weight + sensor[1]\n\n p_x = p_x/p_weight\n p_y = p_y/p_weight\n a_x = a_x/a_weight\n a_y = a_y/a_weight\n\n acceptable = .1 # 10 cm scores .8\n dist = math.sqrt(math.pow(p_x-a_x,2) + math.pow(p_y-a_y,2))\n return self.score(acceptable, dist)\n\n def score(self, acceptable, distance):\n a = 1\n # scale so that .8 = a * exp(b * acceptable)\n # ln( .8 / a ) / acceptable = b\n b = math.log( .8 / a ) / acceptable\n return min(1, a*math.exp(b*distance)) # 'return' was missing; cap the confidence score at 1\n\n # CALLBACKS\n\n def imu_cb(self, data):\n self.last_imu = data\n\n def odom_cb(self, data):\n self.last_odom = data\n\n def visual_odom_cb(self, data):\n self.last_v_odom = data\n\n # HELPER FUNCTIONS\n\n def zero_twist(self):\n t = Twist()\n t.linear.x = 0\n t.linear.y = 0\n t.linear.z = 0\n t.angular.x = 0\n t.angular.y = 0\n t.angular.z = 0\n return t\n\n# EXECUTION\n\nif __name__ == '__main__':\n try:\n n = Balancer()\n # listen and talk\n n.listal()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"scripts/balance.py","file_name":"balance.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"315168584","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sympy as sp\nfrom math import *\n\n# Compute the Taylor polynomial\ndef calc_2():\n\tu = sp.Symbol('u')\n\tf = input('f(u) = ')\n\tu0 = float(input('u0 = '))\n\tn = int(input('n = '))\n\tu1 = float(input('Введите левую границу - '))\n\tu2 = float(input('Введите правую границу - '))\n\tz=0\n\tg=0\n\tp=1\n\tdelta = 0.002 # Step size\n\tarea_f = 0\n\tarea_g = 0\n\tDif=[]\n\twhile (z<=n):\n\t\tDif.append(sp.Derivative(f, u, z).doit().subs({u:u0}))\n\t\tz+=1\n\tz=0\n\twhile (z<=n):\n\t\tif (z==0):\n\t\t\tg = g + Dif[z]\n\t\telse:\n\t\t\tp=p*z\n\t\t\tg = g + Dif[z]/p*((u-u0)**z)\n\t\tz+=1\n\tprint(g)\n\n\tU = np.arange(u1, u2+delta, delta)\n\n\tdef calc_f(u):\n\t\tglobal f\n\t\treturn eval(f)\n\n\tdef calc_g(u):\n\t\tglobal g\n\t\treturn eval(str(g))\n\n\ty_f = np.array([calc_f(i) for i in U])\n\ty_g = np.array([calc_g(i) for i in U])\n\tsubtrac = np.array([abs(calc_f(i) - calc_g(i)) for i in U])\n\n\tD = np.array([delta for i in U])\n\n\tarea_f = round(np.dot(y_f, D.T), 3)\n\tprint('Площадь f', area_f)\n\tarea_g = round(np.dot(y_g, D.T), 3)\n\tprint('Площадь g', area_g)\n\n\t# Mean approximation error\n\terror = round(np.dot(subtrac, D.T)/(u2+delta-u1), 3)\n\n\tprint('Погрешность - ', error)\n\n\n\tplt.figure(figsize=(14, 8))\n\n\tplt.plot(U, y_f, 
label=f)\n\tplt.plot(U, y_g, label=r'$P(u) - Многочлен Тейлора$')\n\tplt.plot(u0, Dif[0], label='Погрешность = '+str(error))\n\n\tplt.legend(loc='best', fontsize=12)\n\tplt.grid(True)\n\tplt.show()\n\nif __name__ == \"__main__\":\n print(\"This module is not for direct call!\")","sub_path":"Project/project_calc_2.py","file_name":"project_calc_2.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"90333734","text":"\"\"\"\nLeft-Denotation.\n\"\"\"\n\nfrom .denotation import Denotation\nfrom .errors import LedDenotationError, ParseletNotRegistered\n\n\nclass Led(Denotation):\n \"\"\"\n Left-Denotation.\n\n The specification of how an operator consumes to the right with a left-context.\n \"\"\"\n\n def power(self, parser, token):\n \"\"\"Return power for a given token.\"\"\"\n\n parselet = self._parselet(parser, token)\n power = parselet.power\n\n return power\n\n def eval(self, parser, token, left):\n \"\"\"Receive from left, evaluate and return result.\"\"\"\n\n parselet = self._parselet(parser, token)\n result = parselet.led(parser, token, left)\n\n return result\n\n def _parselet(self, parser, token):\n \"\"\"Find and return a stored parselet for a given token type.\"\"\"\n\n try:\n parselet = super()._get_parselet(token)\n except ParseletNotRegistered:\n ctx = parser.context()\n raise LedDenotationError(ctx, token)\n\n return parselet\n","sub_path":"final_task/pycalc/specification/led.py","file_name":"led.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"260047136","text":"# ==============================================================================\n# Author: Robin Chan, University of Wuppertal\n# Contact: rchan@uni-wuppertal.de\n# GitHub: https://github.com/robin-chan\n# ==============================================================================\n\n\"\"\"\nCreate heatmaps of non-detection at object-level and at pixel-level\nOutput: prediction images with class colors\n\"\"\"\n\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom skimage.measure import label\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom labels import labels\nfrom globals import *\n\n#####################################################################################\n#\n# function: create and save heatmap\n#\n#####################################################################################\n\ndef save_heat(array1,array2,title,filename,vmin,vmax):\n fig, (ax1,ax2) = plt.subplots(1, 2, figsize=(6, 2.15))\n divider1 = make_axes_locatable(ax1)\n divider2 = make_axes_locatable(ax2)\n cax1 = divider1.append_axes(\"right\", size=\"3%\", pad=0.1)\n cax2 = divider2.append_axes(\"right\", size=\"3%\", pad=0.1)\n ax1.grid(False)\n ax2.grid(False)\n ax1.text(0.5, 1.08, \"Bayes\", ha=\"center\",transform=ax1.transAxes,fontsize=17)\n ax2.text(0.5, 1.08, \"ML\", ha=\"center\",transform=ax2.transAxes,fontsize=17)\n ax1.tick_params(axis='both', which='both', length=0)\n ax2.tick_params(axis='both', which='both', length=0)\n plt.setp(ax1.get_xticklabels(), visible=False)\n plt.setp(ax1.get_yticklabels(), visible=False)\n plt.setp(ax2.get_xticklabels(), visible=False)\n plt.setp(ax2.get_yticklabels(), visible=False)\n heatplot1 = ax1.imshow(array1, cmap='RdBu_r',interpolation='None', vmin=vmin, vmax=vmax)\n heatplot2 = ax2.imshow(array2, cmap='RdBu_r',interpolation='None', 
vmin=vmin, vmax=vmax)\n plt.tight_layout()\n cbar1=fig.colorbar(heatplot1, cax=cax1, ticks=range(0,int(vmax)+1,2), format=\"%.0f\")\n cbar2=fig.colorbar(heatplot2, cax=cax2, ticks=range(0,int(vmax)+1,2), format=\"%.0f\")\n cbar1.ax.tick_params(labelsize=14)\n cbar2.ax.tick_params(labelsize=14)\n #fig.suptitle(\"Non-detection \" + title) ### optional: add title to graphic\n #fig.subplots_adjust(top=0.80)\n fig.savefig(\"heat/\" + filename + \".pdf\", bbox_inches='tight',transparent=True)\n\n#####################################################################################\n#\n# Setup\n#\n#####################################################################################\n\n# ------------------------------------------------------------------------------------\n# Choose data folder\n# ------------------------------------------------------------------------------------\n\nfolder = work_dir + \"out/graphics/\"\n\n# ------------------------------------------------------------------------------------\n# Initialization\n# ------------------------------------------------------------------------------------\n\nos.chdir(work_dir + \"out/graphics/\")\nos.environ['PATH'] = os.environ['PATH'] + ':/Library/TeX/texbin'\nplt.rc('font', size=10, family='serif')\nplt.rc('text', usetex=True)\nif not os.path.exists(\"heat\"): os.makedirs(\"heat\")\ngt_list = sorted(os.listdir(ground_truth_dir))\nml_list = sorted(os.listdir(work_dir + \"out/predictions/ML/\"))\nbayes_list = sorted(os.listdir(work_dir + \"out/predictions/B/\"))\nn = len(gt_list)\nshape = resolution[0]*resolution[1] ### height times width, number of pixel-positions\n\n#####################################################################################\n#\n# Main\n#\n#####################################################################################\n\nprint(\"########################################################################\")\nfor k in class_indices:\n\n # ----------------------------------------------------------------------------------------\n # ML heatmaps\n # ----------------------------------------------------------------------------------------\n\n print(\"Creating ML heatmaps of class \" + labels[k].name)\n heatmap_px_ml = np.zeros(shape)\n heatmap_obj_ml = np.zeros(shape)\n\n for im in range(n):\n\n # load components\n ml_comp = np.load(\"merged-comp-arrays/ML-\" + labels[k].name + \"/\" + ml_list[im] + \".npy\").flatten()\n gt_array = np.asarray(Image.open(ground_truth_dir + gt_list[im]).resize((resolution[0], resolution[1])))\n gt_comp = label(((gt_array == labels[k].color).all(axis=2)).astype(int)).flatten()\n\n # count non-detected pixels (false negatives)\n count = np.multiply((ml_comp != 0).astype(int), (gt_comp != 0).astype(int))\n count = count + (gt_comp != 0).astype(int)\n count[count != 1] = 0\n heatmap_px_ml += count\n\n # count non-detected objects (false negatives object-wise, no overlap with prediction)\n for inst in range(1, np.max(gt_comp)+1):\n check = np.multiply(ml_comp, (gt_comp == inst).astype(int))\n if np.count_nonzero(check) == 0:\n heatmap_obj_ml += (gt_comp == inst).astype(int)\n\n heatmap_px_ml.resize((resolution[1],resolution[0]))\n heatmap_obj_ml.resize((resolution[1],resolution[0]))\n\n # ----------------------------------------------------------------------------------------\n # Bayes heatmaps\n # ----------------------------------------------------------------------------------------\n\n print(\"Creating Bayes heatmaps of class \" + labels[k].name)\n heatmap_px_bay = np.zeros(shape)\n 
heatmap_obj_bay = np.zeros(shape)\n\n for im in range(n):\n\n # load components\n bayes_comp = np.load(\"merged-comp-arrays/B-\" + labels[k].name + \"/\" + bayes_list[im] + \".npy\").flatten()\n gt_array = np.asarray(Image.open(ground_truth_dir + gt_list[im]).resize((resolution[0], resolution[1])))\n gt_comp = label(((gt_array == labels[k].color).all(axis=2)).astype(int)).flatten()\n\n # count non-detected pixels (false negatives)\n count = np.multiply((bayes_comp != 0).astype(int), (gt_comp != 0).astype(int))\n count = count + (gt_comp != 0).astype(int)\n count[count != 1] = 0\n heatmap_px_bay += count\n\n # count non-detected objects\n for inst in range(1, np.max(gt_comp)+1):\n check = np.multiply(bayes_comp, (gt_comp == inst).astype(int))\n if np.count_nonzero(check) == 0:\n heatmap_obj_bay += (gt_comp == inst).astype(int)\n\n heatmap_px_bay.resize((resolution[1],resolution[0]))\n heatmap_obj_bay.resize((resolution[1],resolution[0]))\n\n # ----------------------------------------------------------------------------------------\n # Save heatmaps\n # ----------------------------------------------------------------------------------------\n \n vmin = np.min(heatmap_px_bay); vmax = np.max(heatmap_px_bay)\n save_heat(heatmap_px_bay,heatmap_px_ml,\"pixel-wise of \" + labels[k].name,labels[k].name + \"-px\",vmin,vmax)\n save_heat(heatmap_obj_bay, heatmap_obj_ml,\"object-wise of \" + labels[k].name, labels[k].name + \"-obj\",vmin,vmax)\n\n##############################################################################################\n\nprint(\"DONE!\")\n\n\n","sub_path":"scripts-graphics/heat.py","file_name":"heat.py","file_ext":"py","file_size_in_byte":7142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"317469554","text":"#!/usr/bin/env python3\n\n\"\"\"\na) Write a Python program that receives a line of input from the user. Split the line on white space. Now you should have a bunch of words in a list.\n\nWrite a loop that iterates over those words and sums all of the owrds that represent integers.\n[Hello 123 and how are 456 and 37]\n\nb) Place a loop around (a) so that you can input other lines as well.\n\nc) Provide a way of having the user exit the program.\n\nd) When the user chooses to exit, ask the user if they want a total of all the lines.\n\"\"\"\n\ndef addWdNum(wdLst):\n\tnumTotal = 0\t\n\tfor word in words:\n\t\tdig = word.isdigit()\n\t\tprint(dig)\n\t\tif dig == True:\n\t\t\tnum = int(word)\n\t\t\tnumTotal += num\n\treturn numTotal\n\nfinalTotal = 0\nwhile True:\n\tprint(\"If no more entries, enter 'exit'\")\n\tdataIn = input(\"Enter sentence:\")\n\twords = dataIn.split()\n\tcurSum = addWdNum(words)\n\tfinalTotal += curSum\n\tif dataIn.lower() == \"exit\":\n\t\tbreak\n\tprint(\"Sum:\", curSum)\n\nprint()\nfinalPrint = input(\"Would you like the final sum? 
(y/n):\")\nif finalPrint.lower() == \"y\":\n\tprint(\"This is the final total of the numbers:\", finalTotal, \"\\n\")\nelse:\n\tprint(\"Goodbye\\n\")\n","sub_path":"Exercises/01_intro_Python3/chpt3.py","file_name":"chpt3.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"413381434","text":"class Solution:\n\n def smallerNumbersThanCurrent(self, nums):\n nums2 = sorted(nums)\n cn = {}\n for i in range(len(nums2)):\n if nums2[i] not in cn:\n cn[nums2[i]] = i\n\n print(cn)\n output = [0]*len(nums)\n for i in range(len(nums)):\n output[i] = cn[nums[i]]\n return output\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))\n","sub_path":"datastructs/arrays/smaller_numbers_than_current.py","file_name":"smaller_numbers_than_current.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"431252844","text":"#!usr/bin/env python3\r\n#-*-coding: utf-8 -*-\r\n#Filename:Error_deal_test.py\r\n\r\ndef foo():\r\n r=some_function() #??? What's this?\r\n if r==(-1):\r\n return (-1)\r\n return r\r\n\r\ndef bar():\r\n r=foo()\r\n if r==(-1):\r\n print('Error')\r\n else:\r\n pass\r\n#The error deal insitute\r\ntry:\r\n print('try...')\r\n r=10/0\r\n print('result: ',r)\r\nexcept ZeroDivisionError as e:\r\n print('except: ',e)\r\nfinally:\r\n print('fianlly...Error')\r\nprint('END')\r\n\"\"\"\r\nimport logging\r\n#Try...except...finally...end\r\n#This error will not affect the run of the rest code\r\ndef foo(s):\r\n return 10/int(s)\r\ndef bar(s):\r\n return foo(s)*2\r\ndef main():\r\n try:\r\n bar('0')\r\n except Exception as e:\r\n logging.exception(e)\r\nmain()\r\nprint('END')\r\n\"\"\"\r\n#Use raise to throw out an Error\r\nclass FooError(ValueError):\r\n pass\r\ndef foo(s):\r\n n=int(s)\r\n if n==0:\r\n raise FooError('invalid value: %s'%s)\r\n return 10/n\r\nfoo('0')\r\n\r\n##Debug\r\n'''\r\n#The first method is to print the error 1st\r\ndef foo(s):\r\n n=int(s)\r\n print('>>> n=%d'%n)\r\n return 10/n\r\ndef main():\r\n foo('0')\r\nmain() '''\r\n\r\n#Use the assert to instead of print 2nd\r\ndef foo(s):\r\n n=int(s)\r\n assert n!=0,'n is zero!'\r\n return 10/n\r\n\r\ndef main():\r\n foo('0')\r\n\r\n#Use the logging to instead of print 3rd\r\nimport logging\r\nlogging.basicConfig(level=logging.INFO)\r\ns='0'\r\nn=int(s)\r\nlogging.info('n=%d'%n)\r\nprint(10/n)\r\n","sub_path":"Error_deal_test.py","file_name":"Error_deal_test.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"542076436","text":"import requests\nimport json\ndef main(x):\n url = \"https://covid-19-coronavirus-statistics.p.rapidapi.com/v1/stats\"\n\n querystring = {\"country\":x}\n\n headers = {\n 'x-rapidapi-host': \"covid-19-coronavirus-statistics.p.rapidapi.com\",\n 'x-rapidapi-key': \"9e27549a4cmsh0589e1006cbae50p151d08jsnc050a6748ebf\"\n }\n\n response1 = requests.request(\"GET\", url, headers=headers, params=querystring)\n a = json.loads(requests.get(url,headers=headers,params=querystring).text)\n data = a[\"data\"].get(\"lastChecked\")\n \n data1 = a[\"data\"].get(\"covid19Stats\")\n \n count = 0\n count1 = 0\n count2 = 0\n for i in range(len(data1)):\n count += data1[i].get(\"confirmed\")\n count1 += data1[i].get(\"deaths\")\n count2 += data1[i].get(\"recovered\")\n\n update = \"Update lần cuối: \" + 
data\n country = \"\\nQuốc Gia: \" + data1[0].get(\"country\")\n case1 = \"\\nSố ca nhiễm bệnh: \" + str(count)\n case2 = \"\\nSố ca tử vong: \" + str(count1)\n case3 = \"\\nSố ca hồi phục: \" + str(count2)\n soure = \"\\nNguồn: https://rapidapi.com/KishCom/api/covid-19-coronavirus-statistics/endpoints\"\n total = update + country + case1 + case2 + case3 + soure\n return total\n","sub_path":"BotDeeptry123/covid.py","file_name":"covid.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"595385537","text":"import config\n\ndef menu(message, bot, con=\"send\"):\n input_array = [{\"text\": \"Каталог\", \"query\": \"c=buy\"},\n {\"text\": \"Новое объявление\", \"query\": \"c=type\"},\n {\"text\": \"Кабинет\", \"query\": \"c=cab\"}]\n keyboard = bot.get_keyboard(input_array=input_array, grid=1)\n text = \"Главное меню\"\n bot.send_message(text=text, message=message, con=con, keyboard=keyboard, parse_mode=\"HTML\")\ndef cabinet_menu(call,bot, con=\"send\"):\n input_array = [{\"text\": \"Мои объявление\", \"query\": \"c=post\"},\n {\"text\": \"Аккаунт\", \"query\": \"c=acc\"},\n {\"text\": \"Cделки\", \"query\": \"c=deal\"},\n {\"text\": \"Вывод денег\", \"query\": \"c=epay\"},\n {\"text\": \"Назад\", \"query\": \"c=menu\"}]\n keyboard = bot.get_keyboard(input_array=input_array, grid=2)\n text = \"Личный кабинет\"\n bot.send_message(text=text, message=call, con=con, keyboard=keyboard, parse_mode=\"HTML\")\n","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"132338597","text":"class Status:\n def __init__(self, json_response):\n self.current = None\n self.options = {\n True: {\n 'status': 'available',\n 'color': '0 255 0'\n },\n 'xa': {\n 'status': 'away',\n 'color': '255 255 0'\n },\n 'dnd': {\n 'status': 'do_not_disturb',\n 'color': '255 0 0'\n },\n False: {\n 'status': 'offline',\n 'color': '0 0 0'\n }\n }\n self.determine_status(json_response)\n\n def determine_status(self, response):\n if response['presence']:\n presence = response['presence']\n try:\n self.current = self.options[presence['show']]\n except KeyError:\n self.current = self.options[presence['is_online']]\n\n try:\n self.current['message'] = self.options['status']\n except KeyError:\n self.current['message'] = None\n else:\n self.current = self.options[False]\n \n def get_color(self):\n return self.current['color']\n","sub_path":"status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"91456112","text":"#!/usr/bin/env python\n\"\"\"\n================================================================================\n:mod:`human` -- Utility functions to represent values in a more human format\n================================================================================\n\n.. module:: human\n :synopsis: Utility functions to represent values in a more human format\n\n.. inheritance-diagram:: human\n\n\"\"\"\n\n# Script information for the file.\n__author__ = \"Philippe T. Pinard\"\n__email__ = \"philippe.pinard@gmail.com\"\n__version__ = \"0.1\"\n__copyright__ = \"Copyright (c) 2012 Philippe T. 
Pinard\"\n__license__ = \"GPL v3\"\n\n# Standard library modules.\nimport re\n\n# Third party modules.\n\n# Local modules.\n\n# Globals and constants variables.\n_REGEX_CAMEL_CASE = re.compile('([a-z0-9])([A-Z])')\n\ndef human_time(time_s):\n \"\"\"\n Converts a time in seconds to a string using days, hours, minutes and seconds.\n \"\"\"\n time_s = int(time_s) # Ensure int\n\n out = []\n\n days = time_s // 86400\n if days == 1:\n out.append('%i day' % days)\n time_s -= days * 86400\n elif days >= 1:\n out.append('%i days' % days)\n time_s -= days * 86400\n\n hours = time_s // 3600\n if hours >= 1:\n out.append('%i hr' % hours)\n time_s -= hours * 3600\n\n minutes = time_s // 60\n if minutes >= 1:\n out.append('%i min' % minutes)\n time_s -= minutes * 60\n\n if time_s >= 1:\n out.append('%i sec' % time_s)\n\n return ' '.join(out)\n\ndef camelcase_to_words(text):\n return _REGEX_CAMEL_CASE.sub(r'\\1 \\2', text)\n","sub_path":"pymontecarlo/util/human.py","file_name":"human.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"446514167","text":"import requests\nimport feedparser\nfrom bs4 import BeautifulSoup\nfrom time import time\n\ndef get_item_id(link):\n parts = link.split(\"/\")\n return parts[len(parts)-2]\n\ndef run():\n url = \"http://www.iran-newspaper.com/newspaper/feed\"\n feed = feedparser.parse(url)\n\n documents = []\n\n for item in feed.entries:\n link = item.link\n title = item.title\n internal_id = get_item_id(link)\n news_page_content = requests.get(link).content\n soup_page = BeautifulSoup(news_page_content, 'html.parser')\n content = soup_page.find(id=\"item_block\" + get_item_id(link)).find(id=\"newsbody\").text\n print(\"crawling news:\\ntitle: {0}\\nlink: {1}\".format(title, link))\n documents.append({\n \"title\": title,\n \"link\": link,\n \"internal_id\": internal_id,\n \"content\": content,\n \"type\": \"newspaper\",\n \"main_class\": \"iran\",\n \"tags\": [],\n \"date\": time()\n })\n return documents\n","sub_path":"py/crawlers/newspaper_iran.py","file_name":"newspaper_iran.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"68110882","text":"#!/usr/bin/env python\n\n\nimport rospy\nimport cv2\n\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom sensor_msgs.msg import CameraInfo, Image\nfrom ximea import xiapi\nfrom yolo_opencv import YoloDetector\nimport time\n\n\nclass CamerasSn:\n MyCam = \"25790759\" # 25794659/25790759\n\n\nclass Camera:\n def __init__(self, name, path):\n self.cam = xiapi.Camera(path)\n self.name = name\n self.path = path\n\n self.cv_bridge = CvBridge()\n self.last_img = xiapi.Image()\n self.image_topic = \"ximea_camera/\" + self.name + \"/image_raw\"\n self.image_pub = rospy.Publisher(self.image_topic, Image, queue_size=10)\n self._started = False\n\n def open(self):\n self.cam.open_device_by_SN(self.path)\n self.cam.set_downsampling(\"XI_DWN_4x4\")\n\n self.start()\n # self.cam.set_limit_bandwidth_mode(\"XI_ON\")\n # self.cam.set_limit_bandwidth(1)\n\n def close(self):\n if self._started:\n self.stop()\n\n self.cam.close_device()\n\n def start(self):\n self.cam.set_exposure(100000)\n self.cam.set_imgdataformat(\"XI_RGB24\")\n self.cam.set_downsampling(\"XI_DWN_4x4\")\n\n # self.cam.set_acq_timing_mode(\"XI_ACQ_TIMING_MODE_FRAME_RATE\")\n # self.cam.set_framerate(10)\n\n self.cam.start_acquisition()\n self._started = True\n\n def stop(self):\n 
self._started = False\n self.cam.stop_acquisition()\n\n def publish_img(self):\n try:\n if not self._started:\n return\n\n self.cam.get_image(self.last_img)\n img = self.last_img.get_image_data_numpy()\n\n cv2.imshow(\"Image\", img)\n stamp = rospy.Time.now()\n msg = self.cv_bridge.cv2_to_imgmsg(img, encoding=\"bgr8\")\n cv2.imshow(\"Image\", img)\n\n msg.header.stamp = stamp\n self.image_pub.publish(msg)\n except Exception as ex:\n rospy.logwarn(\"publish_img: %s\", str(ex))\n\n def get_cvimage(self):\n if not self._started:\n return\n\n self.cam.get_image(self.last_img)\n img = self.last_img.get_image_data_numpy()\n return (img)\n\n\ndef main():\n node_name = \"ximea_camera1\"\n\n camera = Camera('MyCam', CamerasSn.MyCam)\n detector = YoloDetector()\n\n try:\n camera.open()\n except Exception as ex:\n rospy.logwarn(\"open camera exception: %s\", str(ex))\n\n # rospy.init_node(node_name)\n # rate = rospy.Rate(20)\n while True:\n # camera.publish_img()\n cv_image = camera.get_cvimage()\n cv_image = detector.get_prediction(cv_image)\n cv2.imshow(\"object detection\", cv_image)\n cv2.waitKey(delay=int(100))\n\n # rate.sleep()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"my_ximea_cam.py","file_name":"my_ximea_cam.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"81951532","text":"print('hhh')\nb='123'\nb=list(b)\nprint(b)\n\n\ndef factorial(num):\n if num==1:\n return 1\n else:\n return num*factorial(num-1)\n\ndef hannotower(n,x,y,z):\n if n==1:\n print(x,'-->',z)\n else:\n hannotower(n-1,x,z,y)\n print(x,'-->',z)\n hannotower(n-1,y,x,z)\n\nif __name__=='__main__':\n num=int(input('please input a number:'))\n result=factorial(num)\n print('%d的阶乘是:%d'%(num,result))\n n=int(input('层数:'))\n hannotower(n,'x','y','z')","sub_path":"04test2.py","file_name":"04test2.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"21612372","text":"# ----------------------------------------------------------------------\n# Numenta Platform for Intelligent Computing (NuPIC)\n# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from\n# Numenta, Inc. a separate commercial license for this software code, the\n# following terms and conditions apply:\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n# See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see http://www.gnu.org/licenses.\n#\n# http://numenta.org/licenses/\n# ----------------------------------------------------------------------\n\n# Python imports\n\n\n# Common 3rd party imports\nfrom enthought.traits.api import *\nfrom enthought.traits.ui.api import *\nimport numpy\nfrom PIL import Image\nimport wx\n\n# NuPIC Imports\nimport nupic\nfrom nupic.analysis.inspectors.region.tabs import RegionInspectorTab\nfrom nupic.ui.enthought.editors import ImageEditor\n\n_kScaling = 2\n_kInputCoverageScaling = 4\n_kBorderPix = 1\n\n_kImgCaptionStr = \"Input Coverage of Learned Coincidences\"\n_kWantWinnersOnlyLabelStr = \"Only Show Coverage of Winners\"\n_kShowUnderOverCoverageStr = \"Show Under(green) and Over(red) Coverage of Input\"\n\ndef _getSelf(region):\n return region.getSelf()\n\n################################################################################\nclass InputCoverageTab(RegionInspectorTab):\n \"\"\"Displays all master coincidences.\n\n This is expected to be useful in cloning, where there aren't too many.\n \"\"\"\n\n ####################################################################\n @staticmethod\n def isRegionSupported(region):\n \"\"\"Return True if the tab is appropriate for this region, False otherwise.\n\n @return isRegionSupported True if this is a supported region.\n \"\"\"\n return ('CLARegion' in region.type) and (not region.getParameter('disableSpatial'))\n\n ####################################################################\n def __init__(self, region):\n \"\"\"InputCoverageTab constructor.\n\n @param region The RuntimeRegion.\n \"\"\"\n # Call superclass. This will init, among other things, self.region...\n super(InputCoverageTab, self).__init__(region)\n\n # --------------------------------------------------------------------\n # Init members, just to be pretty. 
Many of these are actually set in\n # self.switchRegion().\n self._regionRef = None\n\n self._inputShape = (-1, -1)\n self._columnsShape = (-1, -1)\n\n self.switchRegion(self.region, update=False)\n\n self.add_trait('inputCoverageImg', Instance(Image.Image))\n self.add_trait('wantWinnersOnly', Bool(True))\n self.add_trait('showUnderOverCoverage', Bool(False))\n self.add_trait('_spacer1', Str())\n\n # View\n viewItems = []\n viewItems.extend((\n Group(\n Item('_spacer1', style='readonly', show_label=False, springy=True),\n Item('inputCoverageImg',\n editor=ImageEditor(\n width=self._inputShape[1]*_kInputCoverageScaling,\n height=self._inputShape[0]*_kInputCoverageScaling,\n caption=_kImgCaptionStr,\n nearestNeighbor=True,\n ),\n show_label=False),\n Item('_spacer1', style='readonly', show_label=False, springy=True),\n orientation='horizontal',\n ),\n Item('wantWinnersOnly', label=_kWantWinnersOnlyLabelStr),\n Item('showUnderOverCoverage', label=_kShowUnderOverCoverageStr),\n ))\n\n self.traits_view = View(*viewItems, title='InputCoverage', scrollable=True)\n\n ####################################################################\n def update(self, methodName=None, elementName=None, args=None, kwargs=None):\n \"\"\"Called automatically in response to runtime engine activity.\n\n Extra arguments (optional) are passed by the wrapped methods,\n and they can be used to avoid unnecessary updating.\n\n @param methodName -- Class method that was called.\n @param elementName -- Name of RuntimeElement.\n @param args -- Positional arguments passed to the method.\n @param kwargs -- Keyword arguments passed to the method.\n \"\"\"\n\n if methodName and methodName != 'run':\n return\n if not self._regionRef.hasRunInference():\n return\n\n if self.wantWinnersOnly or self.showUnderOverCoverage:\n outputs = numpy.array(self.region.getParameter('spatialPoolerOutput'))\n outputs = (outputs != 0)\n\n # Make an image representing coverage over the input space...\n inputHeight, inputWidth = self._inputShape\n columnsHeight, columnsWidth = self._columnsShape\n\n # ------------------------------------------------------------------\n # Generate color image showing where we have over or under-coverage of the\n # actual input\n if self.showUnderOverCoverage:\n coincCoverage = numpy.zeros(inputHeight*inputWidth, 'float32')\n activeCols = outputs.nonzero()[0]\n for col in activeCols:\n coincCoverage += self._regionRef.getSfdrLearnedRow(col)\n coincCoverage = coincCoverage > 0\n\n # Which inputs were covered?\n actInputs = numpy.array(self.region.getParameter('spatialPoolerInput')) > 0\n\n numpyImage = numpy.zeros((inputHeight*inputWidth, 3), 'uint8')\n\n # White for explained inputs\n numpyImage[actInputs & coincCoverage] = (255, 255, 255)\n\n # Green for under-coverage\n numpyImage[actInputs & numpy.logical_not(coincCoverage)] = (0, 255, 0)\n\n # Red for over -coverage\n numpyImage[coincCoverage & numpy.logical_not(actInputs)] = (255, 0, 0)\n numpyImage = numpyImage.reshape((inputHeight, inputWidth, 3))\n inputCoverageImg = Image.fromarray(numpyImage, 'RGB')\n\n\n # ------------------------------------------------------------------\n # Generate gray scale image showing where we have coverage\n else:\n inputCoverageArrayFlat = numpy.zeros(self._inputShape, 'float64').reshape(-1)\n for i in xrange(columnsWidth * columnsHeight):\n if (not self.wantWinnersOnly) or (outputs[i]):\n inputCoverageArrayFlat += self._regionRef.getSfdrLearnedRow(i)\n\n if True:\n inputCoverageArrayFlat *= 255 / inputCoverageArrayFlat.max()\n 
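# rescale so the most-covered input pixel maps to 255 before the uint8 cast below\n 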
inputCoverageArray = inputCoverageArrayFlat.astype('uint8').reshape(self._inputShape)\n else:\n # Non-linear...\n neededForShowing = inputCoverageArrayFlat.max() * .40\n toShow = inputCoverageArrayFlat >= neededForShowing\n inputCoverageArray = numpy.zeros(inputCoverageArrayFlat.shape, 'uint8')\n inputCoverageArray[toShow] = 255\n inputCoverageArray = inputCoverageArray.reshape(self._inputShape)\n\n inputCoverageImg = Image.fromarray(inputCoverageArray, 'L')\n\n # Resize the image\n self.inputCoverageImg = inputCoverageImg.resize(\n (inputCoverageImg.size[0]*_kInputCoverageScaling,\n inputCoverageImg.size[1]*_kInputCoverageScaling),\n Image.NEAREST\n )\n\n ####################################################################\n def switchRegion(self, region, update=True):\n \"\"\"Switch to a different region within the same region or multiregion.\n\n @param region The RuntimeRegion to switch to.\n @param update If True, we'll call self.update().\n \"\"\"\n\n # Save the region, plus a reference to the real region itself...\n self.region = region\n self._regionRef = _getSelf(self.region)\n\n # ----------------------------------------------------------------------\n # Store a few parameters to the region that will be useful in our\n # visualization...\n self._inputShape = self._regionRef.inputShape\n self._columnsShape = self._regionRef.coincidencesShape\n\n if update:\n self.update()\n\n ####################################################################\n def _wantWinnersOnly_changed(self):\n \"\"\"Handle when the wantWinnersOnly changes.\"\"\"\n self.update()\n\n ####################################################################\n def _showUnderOverCoverage_changed(self):\n \"\"\"Handle when the showUnderOverCoverage changes.\"\"\"\n\n if self.showUnderOverCoverage:\n self.wantWinnersOnly = True\n\n self.update()","sub_path":"analysis/inspectors/region/tabs/InputCoverageTab.py","file_name":"InputCoverageTab.py","file_ext":"py","file_size_in_byte":8408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"359577492","text":"\"\"\"\n Write a program that maps a list of words\n into a list of integers representing\n the lengths of the corresponding words.\n\"\"\"\n\n\ndef map_words_numbers():\n words = [\"Rocky\", \"Balboa\", \"Apollo\", \"Creed\"]\n numbers = []\n for i in words:\n numbers.append(len(i))\n print(numbers)\n\n\nmap_words_numbers()","sub_path":"exercises/46/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"575623806","text":"class Solution:\n def maxArea(self, height: List[int]) -> int:\n h1 = 0\n h2 = len(height) - 1\n # [1,8,6,2,5,4,8,3,7]\n # \n maxArea = 0\n while h1 != h2:\n # print(height[h1], height[h2])\n if height[h1] < height[h2]:\n maxArea = max(maxArea, (h2-h1)*(height[h1]))\n h1 += 1\n elif height[h2] <= height[h1]:\n maxArea = max(maxArea, (h2-h1)*(height[h2]))\n h2 -= 1\n return maxArea","sub_path":"Blind75/array/11containerWithMostWater.py","file_name":"11containerWithMostWater.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"5341335","text":"import json\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\nCHROMEDRIVER_PATH = '/Users/gg/Documents/my_stuff/automate_with_python/going_headless/chromedriver'\n\n\ndef 
enable_download_in_headless_chrome(browser, download_dir):\n #add missing support for chrome \"send_command\" to selenium webdriver\n browser.command_executor._commands[\"send_command\"] = (\"POST\", '/session/$sessionId/chromium/send_command')\n\n params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior': 'allow', 'downloadPath': download_dir}}\n browser.execute(\"send_command\", params)\n\noptions = Options()\noptions.set_headless(headless=False)\noptions.add_argument(\"--no-sandbox\")\noptions.add_argument(\"start-maximized\")\noptions.add_argument(\"disable-infobars\")\noptions.add_argument(\"--disable-extensions\")\ndriver = webdriver.Chrome(executable_path=CHROMEDRIVER_PATH,\n options=options\n ) \nenable_download_in_headless_chrome(driver, \"/Users/gg/Movies/hotstar/\")\n\n# driver = webdriver.Chrome(CHROMEDRIVER_PATH)\ndriver.get('https://www.youtube.com/watch?v=l_MyUGq7pg')\ntime.sleep(5)\ntimings = driver.execute_script(\"return window.performance.getEntries();\")\ntimee = timings\nwith open('youtube.json', 'w') as json_file:\n json.dump(timee, json_file, indent=4)\n\n","sub_path":"youtube/youtube2json.py","file_name":"youtube2json.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"413664365","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.models import User\n\nfrom .models import *\nfrom .forms import *\n\ndef get_question_type(question_id):\n \"\"\"returns the type of question and the route to the appropriate admin page\"\"\"\n question = Question.objects.get_subclass(pk=question_id)\n link = \"/admin/questions/\"\n if isinstance(question, ProgrammingFunction):\n subclass = \"Function Programming Question\"\n link += \"programmingfunction/\"\n elif isinstance(question, Programming):\n subclass = \"Programming Question\"\n link += \"programming/\"\n elif isinstance(question, BuggyFunction):\n subclass = \"Function Debugging Question\"\n link += \"buggyfunction/\"\n elif isinstance(question, Buggy):\n subclass = \"Debugging Question\"\n link += \"buggy/\"\n else:\n subclass = \"Parsons Problem\"\n link += \"question/\"\n return (subclass, link)\n\n### INLINES ###\n\nclass ProfileInline(admin.StackedInline):\n model = Profile\n can_delete = False\n verbose_name_plural = 'Profile'\n fk_name = 'user'\n\nclass ProgramTestCaseInline(admin.StackedInline):\n model = TestCaseProgram\n form = TestCaseProgramForm\n extra = 1\n\nclass FunctionTestCaseInline(admin.StackedInline):\n model = TestCaseFunction\n form = TestCaseFunctionForm\n extra = 1\n\n### CUSTOM ADMINS ###\n\nclass CustomUserAdmin(UserAdmin):\n inlines = (ProfileInline, )\n\n def get_inline_instances(self, request, obj=None):\n if not obj:\n return list()\n return super(CustomUserAdmin, self).get_inline_instances(request, obj)\n\nadmin.site.unregister(User)\nadmin.site.register(User, CustomUserAdmin)\n\nclass CustomGenericQuestionAdmin(admin.ModelAdmin):\n list_display = ('title', 'type_display')\n search_fields = ('title',)\n\n def type_display(self, obj):\n subclass, _ = get_question_type(obj.pk)\n return subclass\n\n type_display.short_description = \"Question Type\"\n\n@admin.register(Question)\nclass CustomQuestionAdmin(CustomGenericQuestionAdmin):\n\n def render_change_form(self, request, context, *args, **kwargs):\n self.change_form_template = 'admin/change_form_with_help_text.html'\n\n if context['original']:\n pk = context['original'].pk\n 
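# resolve the concrete Question subclass so the form can warn when this admin page is the wrong editor for it\n 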
question = Question.objects.get_subclass(pk=pk)\n subclass, link = get_question_type(pk)\n is_correct_type = not isinstance(question, Buggy) and not isinstance(question, Programming)\n else:\n subclass = ''\n link = ''\n is_correct_type = True\n\n extra = {\n 'is_correct_type': is_correct_type,\n 'error_message_part_1': 'This page is intended for editing Parsons Problems only. Please go to the ',\n 'subclass': subclass,\n 'link': link,\n 'error_message_part_2': ' page to edit this question.',\n 'help_text': 'To define which blocks are displayed, write the program in the correct order in the solution box. Use 2 spaces for each level of indentation. A distractor can be added by ending the line with #distractor'\n }\n context.update(extra)\n return super(CustomQuestionAdmin, self).render_change_form(request, context, *args, **kwargs)\n\n\n@admin.register(Programming)\nclass CustomProgramQuestionAdmin(CustomGenericQuestionAdmin):\n inlines = [ProgramTestCaseInline, ]\n\n def render_change_form(self, request, context, *args, **kwargs):\n self.change_form_template = 'admin/change_form_with_help_text.html'\n\n if context['original']:\n pk = context['original'].pk\n question = Question.objects.get_subclass(pk=pk)\n subclass, link = get_question_type(pk)\n is_correct_type = not isinstance(question, ProgrammingFunction)\n else:\n subclass = ''\n link = ''\n is_correct_type = True\n\n extra = {\n 'is_correct_type': is_correct_type,\n 'error_message_part_1': 'This page is intended for editing program-type programming questions only. Please go to the ',\n 'subclass': subclass,\n 'link': link,\n 'error_message_part_2': ' page to edit this question.',\n 'help_text': 'When creating test cases, test input (stdin) and expected output (stdout) will always be strings so you do not need to put quotes around them. Any quotes you enter will be escaped.'\n }\n context.update(extra)\n return super(CustomProgramQuestionAdmin, self).render_change_form(request, context, *args, **kwargs)\n\n\n@admin.register(Buggy)\nclass CustomBuggyAdmin(CustomGenericQuestionAdmin):\n\n def render_change_form(self, request, context, *args, **kwargs):\n self.change_form_template = 'admin/change_form_with_help_text.html'\n \n if context['original']:\n pk = context['original'].pk\n question = Question.objects.get_subclass(pk=pk)\n subclass, link = get_question_type(pk)\n is_correct_type = not isinstance(question, BuggyFunction)\n else:\n subclass = ''\n link = ''\n is_correct_type = True\n\n extra = {\n 'is_correct_type': is_correct_type,\n 'error_message_part_1': 'This page is intended for editing program-type debugging questions only. Please go to the ',\n 'subclass': subclass,\n 'link': link,\n 'error_message_part_2': ' page to edit this question.',\n 'help_text': 'The buggy program is the one that will be shown to the user. It is important that the correct solution works and is different in some way to the buggy program.\\nPlease indent using four spaces (not tabs).'\n }\n context.update(extra)\n return super(CustomBuggyAdmin, self).render_change_form(request, context, *args, **kwargs)\n\n\n@admin.register(ProgrammingFunction)\nclass CustomFunctionQuestionAdmin(CustomGenericQuestionAdmin):\n inlines = [FunctionTestCaseInline, ]\n\n def render_change_form(self, request, context, *args, **kwargs):\n self.change_form_template = 'admin/change_form_with_help_text.html'\n\n extra = {\n 'is_correct_type': True,\n 'error_message_part_1': 'This page is intended for editing function-type programming questions only. 
Please go to the ',\n 'subclass': \"\",\n 'link': \"\",\n 'error_message_part_2': ' page to edit this question.',\n 'help_text': 'Remember to tell the user the function name somewhere in the question text.\\nWhen creating test cases, test input (stdin) and expected output (stdout) will always be strings so you do not need to put quotes around them. Any quotes you enter will be escaped.\\nHowever, for function params and expected return, the value needs to be valid Python so quotes around strings are necessary.\\nFunction params are comma separated.'\n }\n context.update(extra)\n return super(CustomFunctionQuestionAdmin, self).render_change_form(request, context, *args, **kwargs)\n \n\n@admin.register(BuggyFunction)\nclass CustomBuggyFunctionQuestionAdmin(CustomGenericQuestionAdmin):\n def render_change_form(self, request, context, *args, **kwargs):\n self.change_form_template = 'admin/change_form_with_help_text.html'\n\n extra = {\n 'is_correct_type': True,\n 'error_message_part_1': 'This page is intended for editing function-type debugging questions only. Please go to the ',\n 'subclass': \"\",\n 'link': \"\",\n 'error_message_part_2': ' page to edit this question.',\n 'help_text': 'The buggy program is the one that will be shown to the user. It is important that the correct solution works and is different in some way to the buggy program.\\nPlease indent using four spaces (not tabs).'\n }\n context.update(extra)\n return super(CustomBuggyFunctionQuestionAdmin, self).render_change_form(request, context, *args, **kwargs)\n\n\n### FOR DEV PURPOSES ONLY ###\n# @admin.register(TestCase)\n# class TestCaseAdmin(admin.ModelAdmin):\n# form = TestCaseForm\n\n# @admin.register(TestCaseProgram)\n# class TestCaseProgramAdmin(admin.ModelAdmin):\n# form = TestCaseProgramForm\n\n# @admin.register(TestCaseFunction)\n# class TestCaseFunctionAdmin(admin.ModelAdmin):\n# form = TestCaseFunctionForm\n\n# admin.site.register(SkillArea)\n# admin.site.register(Token)\n# admin.site.register(Badge)\n# admin.site.register(Earned)\n# admin.site.register(Attempt)\n# admin.site.register(Skill)\n# admin.site.register(LoginDay)","sub_path":"prototype/questions/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":8625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"456474013","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom scipy.stats import multivariate_normal as normal\n\n\nclass EM:\n \n \n data = []\n cl_data = None\n km_data = None\n \n k_count = 0 \n n_count = 0\n\n means = None\n covs = None\n gammas = None\n \n priori = None\n n = None\n \n \n def execute(self, _means, _data, _km_data):\n \n self.data = _data \n \n self.km_data = _km_data \n \n self.k_count = len(self.km_data)\n self.n_count = len(self.data) \n \n \n \n \n self.covs = [None] * self.k_count \n self.priori = [0] * self.k_count \n \n \n self.initialize(self.gammas,self.n, _means, self.covs, self.priori)\n \n \n for i in range(0,10): \n self.expectation(self.gammas, self.priori, self.means, self.covs)\n self.maximization(self.n, self.covs, self.means, self.priori, self.gammas)\n \n self.cl_data = self.maxPosteriori(self.data,self.gammas)\n \n return self.means,self.covs,self.priori,self.gammas, self.cl_data\n \n \n def init_gammas(self):\n \n gammas = [ [] for i in range(self.k_count) ]\n \n \n for k in range(0, self.k_count) :\n for n,x in enumerate(self.data): \n gammas[k].append( 1.0 if x in self.km_data[k] else 0.0 ) \n \n \n return gammas\n\n\n\n \n def 
initialize(self, gammas, n, means, covs, priori):\n self.gammas = self.init_gammas() \n self.n = self.calc_n(self.gammas) \n self.means = means\n \n for k in range(self.k_count): \n self.covs[k] = self.covariance(k, self.n, self.means[k], self.gammas)\n self.priori[k] = self.pi(self.n[k])\n\n\n\n \n def expectation(self, gammas, pi, means, covs): \n \n gama_norm = [0] * self.n_count\n #calc. gamas\n for k in range(0, self.k_count): \n for n,x in enumerate(self.data): \n gammas[k][n] = pi[k] * normal.pdf( x, mean = means[k], cov = covs[k] )\n gama_norm[n] += gammas[k][n]\n \n \n \n #normalizing\n for k in range(0, self.k_count):\n for n in range(0, self.n_count):\n gammas[k][n] = gammas[k][n] / gama_norm[n]\n \n self.n = self.calc_n(gammas) \n\n\n \n def maximization(self, n, covs, means, pi, gammas):\n for k in range(0, self.k_count):\n means[k] = self.mi(k,n[k], gammas) \n covs[k] = self.covariance(k, n, means[k], gammas)\n pi[k] = self.pi(n[k])\n \n\n \n def calc_n(self, gammas):\n n = [0] * self.k_count\n \n for k,g in enumerate(gammas):\n for gamma in g:\n n[k] += gamma \n return n\n\n\n \n def covariance(self, k, _n, mean, gammas):\n \n result = np.zeros(shape = ( len(mean),len(mean) ))\n \n for n,x in enumerate(self.data):\n variance_vec = ( np.matrix(x) - np.matrix(mean) ) \n result += (gammas[k][n] / _n[k]) * ( variance_vec.T * variance_vec )\n\n \n \n return result\n\n\n \n def mi(self, k, _n, gammas):\n result = np.array([0.0] * len(self.data[0]))\n\n \n for n,x in enumerate(self.data): \n result += gammas[k][n] * np.array(x)\n \n \n result = result / _n \n \n return tuple(result)\n \n \n \n def pi(self, n):\n return n / float( self.n_count )\n \n\n def maxPosteriori(self,data,gammas):\n cl_data = [ [] for i in range(self.k_count) ]\n \n for n,point in enumerate(data): \n biggest_k = 0\n for k in range(self.k_count):\n if gammas[k][n] > gammas[biggest_k][n]:\n biggest_k = k \n cl_data[biggest_k].append(point)\n \n return cl_data\n \n \n \n \n","sub_path":"gmm.py","file_name":"gmm.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"646012059","text":"from collections import defaultdict\n\n\ndef tree():\n return defaultdict(tree)\n\n\ndef make_metric_tree(metrics):\n metric_tree = tree()\n\n for metric in metrics:\n parts = metric.split('.')\n mapping = metric_tree\n\n # We'll create keys as they are referenced. 
See:\n # https://en.wikipedia.org/wiki/Autovivification\n for part in parts:\n mapping = mapping[part]\n\n return metric_tree\n","sub_path":"envoy/datadog_checks/envoy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"8572928","text":"# a simple plotting example\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nx = np.linspace(-3,3,50)\r\ny1 = 2*x + 1\r\nm = x**2\r\n\r\nplt.figure()\r\n\r\nplt.plot(x,y1,color = 'red',linewidth = 1.0,linestyle = '--')\r\nplt.plot(x,m)\r\n\r\nplt.xlim((-1,2))\r\nplt.ylim((-2,3))\r\nplt.xlabel('i am x')\r\nplt.ylabel('i am y')\r\n\r\n#new_ticks = np.linspace(-1,2,5)\r\n#print(new_ticks)\r\n#plt.xticks(np.linspace(-1,2,5))\r\nplt.yticks([-1,0,1],\r\n [r'$medium\\ \\alpha\\ well$','well','well done'])\r\n\r\n\r\nax = plt.gca()\r\nax.spines['right'].set_color('none')\r\nax.spines['top'].set_color('none')\r\nax.xaxis.set_ticks_position('bottom')\r\nax.yaxis.set_ticks_position('left')\r\nax.spines['bottom'].set_position(('data',0))\r\nax.spines['left'].set_position(('data',0))\r\n\r\n\r\nplt.show()\r\n\r\n\r\n","sub_path":"practicePy/01画图.py","file_name":"01画图.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"48220155","text":"import redis\nfrom casexml.apps.case.exceptions import IllegalCaseId\nfrom corehq.form_processor.backends.sql.dbaccessors import CaseAccessorSQL\nfrom corehq.form_processor.backends.sql.update_strategy import SqlCaseUpdateStrategy\nfrom corehq.form_processor.casedb_base import AbstractCaseDbCache\nfrom corehq.form_processor.exceptions import CaseNotFound\nfrom corehq.form_processor.models import CommCareCaseSQL\n\n\nclass CaseDbCacheSQL(AbstractCaseDbCache):\n case_model_classes = (CommCareCaseSQL,)\n case_update_strategy = SqlCaseUpdateStrategy\n\n def __init__(self, domain=None, strip_history=False, deleted_ok=False,\n lock=False, wrap=True, initial=None, xforms=None):\n super(CaseDbCacheSQL, self).__init__(domain, strip_history, deleted_ok, lock, wrap, initial, xforms)\n if not self.wrap:\n raise ValueError('CaseDbCacheSQL does not support unwrapped models')\n\n def _validate_case(self, case):\n if self.domain and case.domain != self.domain:\n raise IllegalCaseId(\"Bad case id\")\n elif case.is_deleted:\n if not self.deleted_ok:\n raise IllegalCaseId(\"Case [%s] is deleted \" % case.case_id)\n\n def _get_case(self, case_id):\n try:\n if self.lock:\n try:\n case, lock = CommCareCaseSQL.get_locked_obj(_id=case_id)\n except redis.RedisError:\n case = CaseAccessorSQL.get_case(case_id)\n else:\n self.locks.append(lock)\n else:\n case = CaseAccessorSQL.get_case(case_id)\n except CaseNotFound:\n return None\n\n return case\n\n def _iter_cases(self, case_ids):\n return iter(CaseAccessorSQL.get_cases(case_ids))\n\n def get_cases_for_saving(self, now):\n cases = self.get_changed()\n\n for case in cases:\n if case.is_saved():\n modified = CaseAccessorSQL.case_modified_since(case.case_id, case.server_modified_on)\n assert not modified, (\n \"Aborting because the case has been modified\"\n \" by another process. 
{}\".format(case.case_id)\n )\n case.server_modified_on = now\n return cases\n\n def get_reverse_indexed_cases(self, case_ids):\n return CaseAccessorSQL.get_reverse_indexed_cases(self.domain, case_ids)\n","sub_path":"corehq/form_processor/backends/sql/casedb.py","file_name":"casedb.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"178114266","text":"#!/usr/bin/python\n\nn = int(raw_input().strip())\ncommands = []\nresult = []\n\nfor i in range(n):\n commands.append(raw_input().strip())\n\nfor cmd in commands:\n\n args = cmd.split(' ')\n if len(args) == 3:\n index = int(args[1])\n value = int(args[2])\n elif len(args) == 2:\n value = int(args[1])\n\n if 'insert' in cmd:\n result.insert(index, value) \n elif 'print' in cmd:\n print(result)\n elif 'remove' in cmd:\n result.remove(value)\n elif 'append' in cmd:\n result.append(value)\n elif 'sort' in cmd:\n result.sort()\n elif 'pop' in cmd:\n result.pop()\n elif 'reverse' in cmd:\n result.reverse()\n\n","sub_path":"python/lists/p0.py","file_name":"p0.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"374466576","text":"__author__ = 'arvie'\n\nimport os\nimport sys\nimport scipy.io as spio\n\nsys.path.append('/u/arvie/Deep_Learning_Tutorials/')\n\nfrom Finshed_Code.data import *\nfrom Finshed_Code.MDN import *\nfrom Finshed_Code.NN_Layers import *\nfrom Finshed_Code.RegressionMLP import *\n\n\nremove_data = False\n\n\ndef run_MDN(data, num_artic_values):\n batch_size=20\n rng = np.random.RandomState(1234)\n\n layer_params = MDNParams()\n layer_params.num_components = 3\n layer_params.hidden_layers_n_in = [num_artic_values, num_artic_values]\n layer_params.hidden_layers_n_out = [num_artic_values, num_artic_values]\n layer_params.hidden_layers_activation_types = ['tanh', 'tanh']\n #layer_params.hidden_layers_activation_types = ['relu', 'relu']\n\n #layer_params.hidden_layers_n_in = [10]\n #layer_params.hidden_layers_n_out = [10]\n #layer_params.hidden_layers_activation_types = ['tanh']\n\n learning_rate = 0.005\n L1_reg = 0.00\n L2_reg = 0.01\n n_epochs = 500\n mdn_model = MDN(data, rng, layer_params, batch_size)\n mdn_model.train(learning_rate, L1_reg, L2_reg, n_epochs)\n mdn_test_score = mdn_model.infere()\n\n return mdn_test_score\n\n\ndef run_RegNN(data, num_artic_values):\n\n batch_size=20\n rng = np.random.RandomState(1234)\n srng = T.shared_randomstreams.RandomStreams(rng.randint(123456))\n\n layer_params = RegressionMLPParams()\n layer_params.hidden_layers_n_in = [num_artic_values, num_artic_values]\n layer_params.hidden_layers_n_out = [num_artic_values, num_artic_values]\n #layer_params.hidden_layers_activation_types = ['tanh', 'tanh']\n layer_params.hidden_layers_activation_types = ['relu', 'relu']\n\n #layer_params.hidden_layers_n_in = [10]\n #layer_params.hidden_layers_n_out = [10]\n #layer_params.hidden_layers_activation_types = ['tanh']\n\n layer_params.use_drop_out = True\n layer_params.drop_out_rate = 0.5\n\n model = RegressionMLP(data, rng, srng, layer_params, batch_size)\n model.train(0.005, 0.00, 0.01, 500)\n regNN_test_score = model.infere()\n\n return regNN_test_score\n\n\n\ndef call_both(data_in_path, file):\n print(file)\n\n # load in data\n (linear_inversions, correlations, v1_training, v1_tuning, v1_testing, v2_training, v2_tuning, v2_testing) = load_mat_file(data_in_path+file)\n\n #print(linear_inversions)\n\n 
#print(len(v1_training[0]))\n #print(len(v2_training[0]))\n #print(type(v1_training))\n\n\n MDN_RMSEs = []\n MDN_RMSEs_validation = []\n RegNN_RMSEs = []\n\n\n # work for each articularoty value\n num_artic_values = v2_training.shape[0]\n for artic_idx in range(num_artic_values):\n y_train = v2_training[artic_idx, :]\n y_test = v2_testing[artic_idx, :]\n Y_valid = v2_tuning[artic_idx, :]\n\n print('STARTING MDN THEANO for artic # '+str(artic_idx))\n data = (v1_training.T, y_train.T, v1_tuning.T, Y_valid.T, v1_testing.T, y_test.T)\n\n mdn_test_score = run_MDN(data, num_artic_values)\n\n MDN_RMSEs.append(mdn_test_score)\n print('Final MDN RMSE for artic # '+str(artic_idx)+' is '+str(mdn_test_score))\n\n\n print('STARTING RegressionNN THEANO for artic # '+str(artic_idx))\n data = (v1_training.T, y_train.T, v1_tuning.T, Y_valid.T, v1_testing.T, y_test.T)\n\n regNN_test_score = run_RegNN(data, num_artic_values)\n\n RegNN_RMSEs.append(regNN_test_score)\n print('Final Reg for artic # '+str(artic_idx)+' is '+str(regNN_test_score))\n\n\n print('Average RMSE for RegNN: '+str(sum(RegNN_RMSEs)/float(len(RegNN_RMSEs))))\n print('Average RMSE for MDN: '+str(sum(MDN_RMSEs)/float(len(MDN_RMSEs))))\n\n\n\n name = file[1:-4]\n\n #path = '/u/arvie/'\n path = data_in_path\n spio.savemat(path+name+'-RegNN_MDN_out.mat', mdict={'RegNN_RMSEs': RegNN_RMSEs, 'MDN_RMSEs': MDN_RMSEs, 'linear_inversions': linear_inversions, 'correlations': correlations})\n\n\n if remove_data:\n print('Removing '+name)\n os.remove(data_in_path+file)\n\n print('Done: '+name)\n\n\nif __name__ == \"__main__\":\n\n sys.path.append('/u/arvie/Deep_Learning_Tutorials/')\n\n #default_example_path = '/ais/clspace5/u/arvie/Parkinsons/Params/subj_independent/ctrls_and_subjs/' #used for testing\n #call_both(default_example_path, DCCA_200_200_tanh_0.001_0.0005_0.9_0.95.mat)\n\n\n\n if len(sys.argv) > 1:\n data_in_path = sys.argv[1]\n else:\n data_in_path = '/ais/clspace5/u/arvie/Parkinsons/Params/subj_independent/ctrls_and_subjs/'\n\n\n for file in os.listdir(data_in_path):\n if file.endswith('.mat'):\n if not file.endswith('-RegNN_MDN_out.mat'):\n print(data_in_path+file)\n try:\n call_both(data_in_path, file)\n except:\n print('Could not do '+file)\n","sub_path":"Finshed_Code/Parkinsons_Experiment_1.py","file_name":"Parkinsons_Experiment_1.py","file_ext":"py","file_size_in_byte":4747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"570685333","text":"#!/usr/bin/env python\n\n\"\"\"\nproc.py: Image Perspective Alignment\n\nFrom the article on: https://www.learnopencv.com/image-alignment-feature-based-using-opencv-c-python/\n\nusage:\n proc --ref \"../imgs/21543780.png\" --img \"../imgs/*.png\" --out \"../aligned/\"\n\n\"\"\"\nimport glob\nimport os\nimport sys\nimport getopt\n\nfrom proc.Align import Align\nfrom proc.Compositor import Compositor\n\n\ndef usage():\n \"\"\"How to call this script\"\"\"\n print(\"proc.py --ref --img --out \")\n\n\ndef get_opt():\n \"\"\"Process Arguments to this script\"\"\"\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"hr:i:o:\", [\"help\", \"ref=\", \"img=\", \"out=\"])\n except getopt.GetoptError as err:\n # print help information and exit:\n print(err) # will print something like \"option -a not recognized\"\n usage()\n sys.exit(2)\n\n reference_filename = None\n images_filename = None\n out_directory = None\n # Process options\n for o, a in opts:\n if o in (\"-h\", \"--help\"):\n usage()\n sys.exit()\n elif o in (\"-r\", \"--ref\"):\n 
reference_filename = a\n elif o in (\"-i\", \"--img\"):\n images_filename = a\n elif o in (\"-o\", \"--out\"):\n out_directory = a\n else:\n assert False, \"unhandled option\"\n # Missing Parameters\n if reference_filename is None or images_filename is None or out_directory is None:\n usage()\n sys.exit()\n\n return reference_filename, images_filename, out_directory\n\n\nif __name__ == '__main__':\n ref_filename, imgs_filename, out_dir = get_opt()\n\n assert os.path.exists(ref_filename), \"Reference image not found\"\n assert os.path.exists(out_dir), \"Output directory does not exist\"\n\n imgs = glob.glob(imgs_filename)\n assert len(imgs) > 0, \"Must have at least one image to convert.\"\n\n compositor = Compositor.get_additive()\n\n aligner = Align(ref_filename, imgs, out_dir, compositor)\n\n aligned_dic = aligner.align()\n\n","sub_path":"img-align/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"365750040","text":"#!/usr/bin/env python3\n\"\"\"Convert a sequence of images into a CloudVolume archive.\n\nThe volume is broken into \"layers\", or small non-overlapping sub-sequences of\nimages that can be added to CloudVolume in parallel.\n\nFunctions\n---------\nparse_args\n Parse command line arguments.\nload_layer\n Load a sequence of images.\ninitialize_cloudvolume\n Create a new CloudVolume archive.\nwrite_layer\n Write a layer to CloudVolume.\nmain\n Create and write data to a new CloudVolume archive.\n\nDependencies\n------------\ncloud-volume\ndill\nmpi4py\nnumpy\nscikit-image\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport re\nimport sys\n\nfrom cloudvolume import CloudVolume\nfrom mpi4py import MPI\nimport numpy as np\nimport skimage.io as io\n\n\nCOMM = MPI.COMM_WORLD\nRANK = COMM.Get_rank()\nSIZE = COMM.Get_size()\n\nLOGGER = logging.getLogger('img_to_cloudvolume.py')\nsyslog = logging.StreamHandler()\nformatter = logging.Formatter('%(asctime)s %(name)s Rank %(rank)s : %(message)s')\nsyslog.setFormatter(formatter)\nLOGGER.setLevel(logging.INFO)\nLOGGER.addHandler(syslog)\nLOGGER = logging.LoggerAdapter(LOGGER, {'rank': str(RANK)})\n\n\ndef parse_args():\n \"\"\"Parse command line arguments.\"\"\"\n p = argparse.ArgumentParser()\n\n p.add_argument('--input', type=str,\n help='path to the directory holding the images')\n p.add_argument('--output', type=str,\n help='path to write the CloudVolume')\n p.add_argument('--mode', type=str, choices=['image', 'segmentation'],\n default='image', help='write mode for configuring the info file')\n p.add_argument('--ext', type=str, default='.tif',\n help='extension of the images to load')\n p.add_argument('--resolution', type=int, nargs='*', default=[10, 10, 10],\n help='resolution of the dataset')\n p.add_argument('--mip', type=int, default=0,\n help='number of mip levels')\n p.add_argument('--chunk-size', type=int, nargs='*', default=[64, 64, 64],\n help='size of each CloudVolume block file')\n p.add_argument('--z-step', type=int, default=None)\n p.add_argument('--factor', type=int, nargs='*', default=[2, 2, 2],\n help='factor to scale between mip levels')\n p.add_argument('--flip-xy', action='store_true',\n help='pass to transplose the X and Y axes')\n p.add_argument('--memory-limit', type=float, default=10000,\n help='max memory available to CloudVolume')\n p.add_argument('--offset', type=int, nargs='*', default=[0, 0, 0],\n help='offset 
into the volume from the upper-left corner')\n p.add_argument('--quiet', action='store_true',\n help='pass to disable logging')\n\n return p.parse_args()\n\n\ndef load_layer(imagelist, z_start, z_end):\n \"\"\"Load a sequence of images.\n\n Parameters\n ----------\n imagelist : list of str\n Sorted list of filepaths pointing to images to load.\n z_start : int\n The index of the first image in the layer.\n\n Returns\n -------\n layer : numpy.ndarray\n The (n, h, w) ndarray containing the n sequential images in the layer.\n \"\"\"\n # Set up the array for preallocation\n layer = None\n\n # Load each image in the layer and insert it into the array.\n for i, img in enumerate(imagelist[z_start:z_end]):\n img = io.imread(img,\n plugin='tifffile' if '.tif' in os.path.splitext(imagelist[0])[1] else None)\n if layer is None:\n layer = np.zeros((int(z_end - z_start),) + img.shape,\n dtype=img.dtype)\n layer[i] += img\n LOGGER.info('Loaded images with shape {}.'.format(layer.shape))\n return layer\n\n\ndef initialize_cloudvolume(path, mode, dtype, resolution, offset, volume_size,\n chunk_size, mip, factor):\n \"\"\"Create a new CloudVolume archive.\n\n Parameters\n ----------\n path : str\n Filepath to the location to write the archive.\n mode : {'image','segmentation'}\n Write mode for configuring the info file.\n dtype : str\n The data type of the images to write.\n resolution : tuple of int\n Imaging resolution of the images in each dimension.\n offset : tuple of int\n Offset within the volume to the start of the archive.\n volume_size : tuple of int\n The dimensions of the volume in pixels.\n chunk_size : tuple of int\n The size of each CloudVolume block in pixels.\n mip : int\n The number of mip levels to include.\n factor : tuple of int\n The factor of change in each dimension across mip levels.\n\n Returns\n -------\n cv_args : dict\n The parameters needed to re-access the CloudVolume archive.\n \"\"\"\n # Set the parameters of the info file.\n if mode == 'image':\n info = CloudVolume.create_new_info(\n num_channels=1,\n layer_type=mode,\n data_type=str(dtype),\n encoding='raw',\n resolution=resolution,\n voxel_offset=offset,\n volume_size=volume_size,\n chunk_size=chunk_size,\n max_mip=0,\n factor=factor\n )\n elif mode == 'segmentation':\n info = CloudVolume.create_new_info(\n num_channels=1,\n layer_type=mode,\n data_type='uint32',\n encoding='compressed_segmentation',\n resolution=resolution,\n voxel_offset=offset,\n volume_size=list(volume_size),\n chunk_size=chunk_size,\n max_mip=0,\n factor=factor\n )\n else:\n raise ValueError('Cannot write layer of type {}. 
Must be one of [\"image\", \"segmentation\"]')\n\n # Set up and initialize the CloudVolume object\n cv_args = dict(\n bounded=True, fill_missing=True, autocrop=False,\n cache=False, compress_cache=None, cdn_cache=False,\n progress=False, info=info, provenance=None,\n compress=(mode == 'segmentation'), non_aligned_writes=True,\n parallel=1)\n\n if mode == 'segmentation':\n for i in range(1, mip + 1):\n info['scales'][i]['compressed_segmentation_block_size'] = \\\n info['scales'][0]['compressed_segmentation_block_size']\n\n cv = CloudVolume(path, mip=0, **cv_args)\n\n # Create the info file.\n LOGGER.info('Initializing image layer with config {}'.format(cv_args))\n cv.commit_info()\n return cv_args\n\n\ndef write_layer(path, mode, layer, flip_xy, z_start, mip, factor):\n \"\"\"Write a layer to CloudVolume.\n\n Parameter\n ---------\n path : str\n Filepath to the location to write the archive.\n layer : numpy.ndarray\n Image data to write to the archive.\n flip_xy : bool\n If True, order ``layer`` as [Y, X, Z]. Otherwise, order ``layer`` as\n [X, Y, Z].\n z_start\n The starting index of ``layer`` within the archive.\n mip\n The number of mip levels to compute.\n factor\n The factor by which to reduce each mip level along each dimension.\n cv_args\n Arguments used to access the CloudVolume archive.\n \"\"\"\n # Transpose the axes to match the CloudVolume order\n if flip_xy:\n layer = np.transpose(layer, axes=[1, 2, 0])\n else:\n layer = np.transpose(layer, axes=[2, 1, 0])\n\n cv_args = dict(\n bounded=True, fill_missing=True, autocrop=False,\n cache=False, compress_cache=None, cdn_cache=False,\n progress=False, info=None, provenance=None,\n compress=(mode == 'segmentation'), non_aligned_writes=True,\n parallel=1)\n\n # Set the volume for each mip level\n for m in range(1):\n LOGGER.info('Writing images {}-{} to MIP level {}'.format(z_start, z_start + layer.shape[-1], mip))\n # Access the CloudVolume\n cv = CloudVolume(path, mip=m, **cv_args)\n\n # Compute the index of this layer in the CloudVolume archive\n offset = cv.mip_voxel_offset(m)\n step = np.power(np.array(factor), m)\n cv_z_start = int(z_start // step[2] + offset[2])\n cv_z_end = int(min(cv_z_start + layer.shape[-1], cv.shape[-2]))\n\n # Set the layer\n cv[:, :, cv_z_start:cv_z_end] = layer\n\n # Reduce the size of the layer to match the next mip level\n layer = layer[::factor[0], ::factor[1], ::factor[2]]\n\n\ndef img2cv(input, output, mode='image', ext='.tif', resolution=(10, 10, 10),\n mip=0, chunk_size=(64, 64, 64), z_step=None, factor=(2, 2, 2),\n flip_xy=False, memory_limit=10000, offset=(0, 0, 0), quiet=False):\n \"\"\"Create and write data to a new CloudVolume archive.\n\n Parameters\n ----------\n input : str or list of str\n The path to a file containing images or a list of image filepaths.\n output : str\n Path to create the CloudVolume layer at.\n mode : {'image','segmentation'}\n Type of CloudVolume layer to create.\n ext : str\n The image extension to search for if ``input`` is a directory of\n images.\n resolution : tuple of int\n The imaging resolution of the images (e.g., in microns, nanometers).\n chunk_size : tuple of int\n The size of each block file.\n z_step : int, optional\n The step between entries along the z-axis.\n factor : tuple of int\n Scale factor for MIP levels.\n flip_xy : bool\n If True, reorder axes to be (Y, X, Z) instead of (X, Y, Z).\n memory_limit : int\n Max amount of memory allowed to be used by CloudVolume.\n offset : tuple of int\n Offset of this volume from the origin if handling a 
subvolume.\n quiet : bool\n If True, suppress informational logs.\n \"\"\"\n if quiet:\n LOGGER.logger.removeHandler(syslog)\n noop = logging.NullHandler()\n LOGGER.logger.addHandler(noop)\n\n outpath = os.path.abspath(output)\n\n if os.path.isdir(outpath) and mode not in os.path.basename(outpath):\n outpath = outpath + '/{}'.format(mode)\n\n if not re.search(r'^file://.+$', outpath):\n outpath = 'file://' + outpath\n\n # On rank 0, initialize the CloudVolume info file, and load in the list of\n # images to insert into the archive.\n if RANK == 0:\n if isinstance(input, str):\n imagelist = sorted(glob.glob(os.path.join(input, '*' + ext)))\n elif isinstance(input, list):\n imagelist = input\n img = io.imread(imagelist[0],\n plugin='tifffile' if '.tif' in os.path.splitext(imagelist[0])[1] else None)\n dtype = img.dtype\n volume_shape = (len(imagelist),) + img.shape[:2]\n del img\n LOGGER.info('Converting {} with shape {} to CloudVolume'.format(input, volume_shape))\n LOGGER.info('Initialized CloudVolume image layer at {}'.format(outpath))\n initialize_cloudvolume(\n outpath,\n mode,\n dtype,\n resolution,\n offset,\n volume_shape[::-1],\n chunk_size,\n mip,\n factor)\n LOGGER.info('Broadcasting image list {}.'.format(imagelist))\n else:\n imagelist = None\n volume_shape = None\n\n # Send the CloudVolume parameters and list of images to all MPI ranks.\n imagelist = COMM.bcast(imagelist, root=0)\n volume_shape = COMM.bcast(volume_shape, root=0)\n\n # Iterate over layers of the volume. Each rank will load and write one\n # layer at a time. If there are fewer ranks than layers, increment to\n # n_ranks + rank and load the layer at that index.\n layer_idx = int(RANK * chunk_size[-1])\n while layer_idx < volume_shape[0]:\n # Compute the index of the first image in this layer, including any\n # offset from the volume origin.\n layer_shape = int(min(layer_idx + chunk_size[-1], len(imagelist)))\n LOGGER.info('Loading images {}-{}.'.format(layer_idx, layer_idx + layer_shape))\n layer = load_layer(imagelist, layer_idx, layer_shape)\n\n if mode == 'segmentation':\n layer = layer.astype(np.uint32)\n\n # Write the layer to the archive.\n LOGGER.info('Writing images {}-{}'.format(layer_idx, layer_idx + layer_shape))\n write_layer(\n outpath,\n mode,\n layer,\n flip_xy,\n layer_idx,\n mip,\n factor)\n\n # Increment to the next known layer that does not overlap with any\n # other rank.\n layer_idx += int(SIZE * chunk_size[-1])\n LOGGER.info('Done')\n\n\ndef main():\n \"\"\"Command line entry point to convert images to a CloudVolume layer.\"\"\"\n args = parse_args()\n img2cv(args.input,\n args.output,\n mode=args.mode,\n ext=args.ext,\n resolution=args.resolution,\n mip=args.mip,\n chunk_size=args.chunk_size,\n z_step=args.z_step,\n factor=args.factor,\n flip_xy=args.flip_xy,\n memory_limit=args.memory_limit,\n offset=args.offset,\n quiet=args.quiet)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"happyneuron/io/img_to_cloudvolume.py","file_name":"img_to_cloudvolume.py","file_ext":"py","file_size_in_byte":13059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"261541034","text":"from finansez.api.responses.base import BaseResponse\n\n\nclass StatByCategoryResponse(BaseResponse):\n def __init__(self, data):\n self.data = data\n \"\"\":type: list\"\"\"\n\n def get_response(self):\n result = {\n 'income': [],\n 'spend': [],\n }\n\n for row in self.data:\n target = 'income' if row['is_income'] else 'spend'\n result[target].append({\n 'id': 
str(row['id']),\n 'name': row['name'],\n 'sum': float(row['sum'].normalize())\n })\n\n return result\n","sub_path":"finansez/finansez/api/responses/stat_by_category.py","file_name":"stat_by_category.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"425083857","text":"import numpy as np\nimport scipy.io as sio\nimport math\n\n# define how much features we select\nFEATURE_NUM = 300\n\ndef pre_process(matrix):\n F = matrix.T\n variances = {} # select the top FEATURE_NUM variances of 784 features\n for i in range(len(F)): #compute variances of each feature\n variances[i] = np.var(F[i])\n variances = sorted(variances.items(), key=lambda x: x[1], reverse=True)\n variances = variances[:FEATURE_NUM]\n\n top_F = np.array(F[variances[0][0]]) # get the data of selected features\n for i in range(1, len(variances)):\n item = np.array(F[variances[i][0]])\n top_F = np.vstack((top_F, item))\n\n normed_F = (top_F[0] - top_F[0].mean(axis=0)) / top_F[0].std(axis=0) # normalize each of the features to have mean zero and variance one\n for i in range(1, len(top_F)):\n item = (top_F[i] - top_F[i].mean(axis=0)) / top_F[i].std(axis=0)\n normed_F = np.vstack((normed_F, item))\n\n return normed_F.T # shape: (data size) * FEATURE_NUM\n\n\ndef data_split_index(labelset,label): # split original data with 10 labels, recording the indexes of data in each label\n data_given_label = []\n count = 0\n for i in range(len(labelset)):\n if labelset[i]==label:\n data_given_label.append(i)\n count = count +1\n return data_given_label, count\n\n\nclass NbClassifier(object):\n\n def __init__(self, training_filename, split_size):\n data = sio.loadmat(training_filename)\n self.datas = np.array(data['X']) # combine datas[X] and labels[Y]\n self.labels = np.array(data['Y'])\n self.dataset = np.concatenate((self.datas, self.labels), axis=1)\n np.random.shuffle(self.dataset) # shuffle input data and label together\n self.datas = self.dataset[:,:784] # split datas[X] and labels[Y]\n self.labels = self.dataset[:,784]\n\n self.features = pre_process(self.datas) # shape: 10000 * FEATURE_NUM\n self.train_images = self.features[:split_size,:] # shape: SPLIT_SIZE * FEATURE_NUM\n self.test_images = self.features[split_size:,:] # shape: (10000-SPLIT_SIZE) * FEATURE_NUM\n self.train_labels = self.labels[:split_size,]\n self.test_labels = self.labels[split_size:,]\n\n self.class_priors = {} # record the ten classes and their class prior\n self.multi_mean = {} # record the means of ten gaussian distributions\n self.multi_cov = {} # record the covariances of ten gaussian distributions\n self.part_1 = {} # record the ten first-parts of computing multivariate gaussian probability density\n self.inv = {} # record the inverse of the ten covariances\n\n self.train(split_size)\n\n\n def train(self,split_size):\n class_split = {}\n multi_mean = {}\n multi_cov = {}\n part_1 = {}\n inv = {}\n\n I = np.matrix(np.identity(FEATURE_NUM), copy=False) # identity matrix which will be added to covariance\n\n for k in range(10): # for each of the ten classes\n data_index, count = data_split_index(self.train_labels,k)\n class_split[(k,count)] = data_index\n self.class_priors[k] = count/split_size # compute class prior\n\n data = self.train_images[data_index[0]] # combine all training data from same class\n for i in range(1,len(data_index)):\n data = np.vstack((data,self.train_images[data_index[i]]))\n\n multi_mean[k] = np.mean(data, axis=0) # compute mean for each 
class\n\n cov = np.zeros(shape=(FEATURE_NUM,FEATURE_NUM)) # compute covariance for each class\n for j in range(len(data)):\n item_1 = (data[j]-multi_mean[k]).reshape(FEATURE_NUM,1)\n item_2 = (data[j]-multi_mean[k]).reshape(1,FEATURE_NUM)\n cov = cov + np.dot(item_1,item_2)\n multi_cov[k] = cov/count + 0.8 * I # add an identity matrix to covariance\n part_1[k] = 1/math.sqrt(np.linalg.det(multi_cov[k])) # compute the first part of computing probability density\n inv[k] = np.linalg.inv(multi_cov[k]) # compute the inverse of covariance\n\n\n self.multi_mean = multi_mean\n self.multi_cov = multi_cov\n self.part_1 = part_1\n self.inv = inv\n\n\n def predict(self):\n result = [] # record the prediction labels\n same = 0 # record the number of true predictions\n\n for k in range(len(self.test_images)): # for each of the test data, compute ten probabilities P[X|Y]*P[Y], record the maximum of them through result[]\n predictions = {}\n for i in range(10):\n mid_res = np.dot((self.test_images[k]-self.multi_mean[i]).reshape(1,FEATURE_NUM),self.inv[i])\n mid_res = np.dot(mid_res,(self.test_images[k]-self.multi_mean[i]).reshape(FEATURE_NUM,1))\n part_2 = math.exp(-0.5 * mid_res)\n p = self.part_1[i] * part_2\n p = p * self.class_priors[i]\n predictions[i] = p\n\n result.append(max(predictions.keys(), key=(lambda k: predictions[k])))\n\n for i in range(len(self.test_labels)): # compute accuracy\n if result[i] == int(self.test_labels[i]):\n same = same + 1\n return same/len(self.test_labels)\n\n\n\nif __name__ == \"__main__\":\n classifier = NbClassifier('hw1data.mat',8000)\n classifier.predict()\n\n\n\n","sub_path":"p5_1.py","file_name":"p5_1.py","file_ext":"py","file_size_in_byte":5705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"581053547","text":"\nfrom sexp import SExp, SAtom\nfrom builtin import cons\nfrom errors import VividSyntaxError\n\n\nclass Parser:\n def __init__(self, lexer):\n \"\"\"tokens are fetched through a lexer\"\"\"\n self.lexer = lexer\n\n def form_sexp(self):\n tok = self.lexer.tokenize()\n if tok.literal == '(':\n stack = []\n while self.lexer.peek().literal != ')':\n stack.append(self.form_sexp())\n self.lexer.tokenize() # reap the ')' off\n s = SExp()\n while len(stack) > 0:\n s = cons(stack.pop(), s)\n return s\n elif tok.literal == ')':\n raise VividSyntaxError('Unexpected \")\" at line %d, col %d' % (tok.lineno, tok.colno))\n else:\n return SAtom(tok.literal)\n\n\n","sub_path":"lib/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"275663797","text":"import datetime\nimport time\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport itertools\nimport urllib\nimport urllib.robotparser as robotparser\nimport urllib.parse as urlparse\n#from multiprocessing import Queue\n\n\n\n\n# # check what technologies a site is built with\n# pip install builtwith\n# import builtwith\n# builtwith.parse('...')\n# find the owner of a website\n#pip install python-whois\n#import whois\n#print(whois.whois('appspot.com'))\n\n#proxies={\"http\":\"http://10.10.1.10:3128\",\n # \"https\":\"http://10.10.1.10:1080\"}\n\ndef download(url,num_retries,proxies=None,headers=None): # download function\n\n print('Downloading:',url)\n try:\n r=requests.get(url,proxies=proxies,headers=headers)\n if r.status_code!=200:\n r.raise_for_status()\n except requests.HTTPError as e:\n print('Download error code:',r.status_code)\n if num_retries>0:\n if 500 <=r.status_code<600: # 5xx, server error\n 
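# retry only on 5xx server errors; 4xx client errors will not succeed on a retry\n 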
return download(url,num_retries-1)\n r=None\n return r\n\n# r=download('http://example.webscraping.com/sitemap.xml')\n# print(r.text)\n\ndef crawl_sitemap(url): # 1. sitemap crawler\n sitemap=download(url).text\n soup=BeautifulSoup(sitemap,'xml')\n # print(soup)\n links=soup.findAll('loc')\n\n\n for link in links:\n\n html=download(link.string) # get the string inside the tag\n return\n# crawl_sitemap('http://example.webscraping.com/sitemap.xml')\ndef crawl_iditer(): # 2. ID iteration crawler\n max_errors=5 # maximum number of errors\n num_errors=0 # current error count\n for page in itertools.count(1): # itertools.count iterates forever\n url='http://example.webscraping.com/view/-%d' % page # string formatting\n html=download(url)\n if html is None:\n num_errors+=1\n if num_errors==max_errors:\n break\n else:\n # success-can scrape the result\n num_errors=0\n\n#crawl_iditer()\n\ndef link_crawler(seed_url,link_regex=None,delay=5,max_depth=-1,max_urls=-1,user_agent='wswp',headers=None,\n proxy=None,num_retries=1,scrape_callback=None): # 3. link crawler\n \"\"\"Crawl from the given seed URL following links matched by link_regex\"\"\"\n\n # the queue of URL's that still need to be crawled\n ######## crawl_queue=Queue.deque([seed_url]) # deque: double-ended queue\n\n # track crawled URLs and their depth\n seen={seed_url:0}\n rp=get_robots(seed_url)\n # rp=robotparser.RobotFileParser(seed_url)\n throttle=Throttle(delay)\n headers=headers or {}# use the given headers, or {} if none were passed\n crawl_queue=[seed_url]\n num_urls=0\n #seen=set(crawl_queue)# avoid repeated downloads\n # depth=seen[url]\n\n if user_agent:\n headers['User-agent']=user_agent\n while crawl_queue:\n url=crawl_queue.pop() # pop and return the last element\n if rp.can_fetch(user_agent,url):\n throttle.wait(url)\n # html is a response object\n html=download(url,num_retries=num_retries,proxies=proxy,headers=headers) # download the page\n # print('htmlresponse',html)\n links=[]\n if scrape_callback:\n links.extend(scrape_callback(url,html) or [])\n depth=seen[url]\n if depth!=max_depth:\n if link_regex:\n print('linkregex',link_regex)\n links.extend(link for link in get_links(html.text) if re.match(link_regex,link))\n print('linksmatched',links)\n # for link in get_links(html): # extract links from the page and add them to the queue\n # if re.match(link_regex,link):\n # link=urllib.urlparse.urljoin(seed_url,link ) # join the URLs\n\n for link in links:\n link=normalize(seed_url,link)\n if link not in seen:\n #seen.add(link)\n seen[link]=depth+1\n if same_domain(seed_url,link):\n crawl_queue.append(link)\n print('queueappend',link)\n num_urls+=1\n # print('numurls',num_urls)\n if num_urls==max_urls:\n break\n else:\n print('Blocked by robots.txt',url)\ndef get_links(html):\n strhtml=str(html)\n print('strhtml',strhtml)\n # regex that extracts all links from a page\n webpage_regex=re.compile('<a[^>]+href=[\"\\'](.*?)[\"\\']',re.IGNORECASE)\n # list of all links in the page\n print('links',webpage_regex.findall(strhtml))\n return webpage_regex.findall(strhtml)\n\n# import urllib.robotparser as robotparser\n# rp=robotparser.RobotFileParser()\n# rp.set_url('http://example.webscraping.com/robots.txt')\n# rp.read()\n# url='http://example.webscraping.com'\n# user_agent='BadCrawler'\n# rp.can_fetch(user_agent,url)\n# user_agent='GoodCrawler'\n# rp.can_fetch(user_agent,url)\n\nclass Throttle:\n # add a delay between downloads\n def __init__(self,delay):\n self.delay=delay\n #timestamp of when a domain was last accessed\n self.domains={} # dict of {domain: last access time}\n def wait(self,url):\n domain=urlparse.urlparse(url).netloc # host or domain name\n last_accessed=self.domains.get(domain)# get the time of the last access\n\n if self.delay>0 and last_accessed is not None:\n sleep_secs=self.delay-(datetime.datetime.now()-last_accessed).seconds# sleep if the interval is too short\n if sleep_secs>0:\n time.sleep(sleep_secs) # sleep to delay execution\n 
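# update the timestamp even on the first visit to the domain, when no sleep was needed\n 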
self.domains[domain]=datetime.datetime.now()#记录域名访问的时间\n#\n# throttle=Throttle(delay)\n# throttle.wait(url)\n# result=download(...)\n\ndef normalize(seed_url,link):\n #urldefrag:# 将url分解成去掉fragment的新url和去掉的fragment的二元组\n link,_=urlparse.urldefrag(link) #remove hash to avoid duplicates\n return urlparse.urljoin(seed_url,link)\n\ndef get_robots(url):\n rp=robotparser.RobotFileParser()\n rp.set_url(urlparse.urljoin(url,'/robots.txt'))\n rp.read()\n return rp\n\ndef same_domain(url1,url2):\n return urlparse.urlparse(url1).netloc==urlparse.urlparse(url2).netloc\n\n# __name__ 是当前模块名,当模块被直接运行时模块名为 __main__ 。这句话的意思就是,\n# 当模块被直接运行时,以下代码块将被运行,当模块是被导入时,代码块不被运行。\nif __name__ == '__main__':\n link_crawler('http://example.webscraping.com', '/(index|view)', delay=0, num_retries=1, user_agent='BadCrawler')\n link_crawler('http://example.webscraping.com', '/(index|view)', delay=0, num_retries=1, max_depth=1, user_agent='GoodCrawler')\n # delay = 5, max_depth = -1, max_urls = -1, user_agent = 'wswp', headers = None, proxy = None, num_retries = 1)\n\n\n\n\n","sub_path":"1/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":6697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"105985089","text":"import sys\r\n\r\ninput = list(map(int, input().strip().split(' ')))\r\nresult = []\r\nflag = True\r\n\r\nwhile(flag == True):\r\n if(len(input)):\r\n result.append(input.pop())\r\n # print(input.pop())\r\n input.pop()\r\n else:\r\n flag = False\r\n\r\n \r\nfor i in result:\r\n print(i, sep=' ', end=' ', flush=True)","sub_path":"stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"641767864","text":"from messages_pb2 import *\r\nimport constants as C\r\n\r\nclass arduino:\r\n\r\n\tdef __init__(self, port_num):\r\n\t\tself.ser1 = serial.Serial('COM'+port_num, C.BAUDRATE) \r\n\r\n\r\n\tdef send(self, data):\r\n\t\tself.ser1.write(str(data).encode())\r\n\r\n\tdef read(self):\r\n\t\tmessage = self.ser1.readline()\r\n\r\n\t\tpbm = BaseToBoat.parseFromString(message)\r\n\r\n\t\tcase = pbm.WhichOneOf('command')\r\n\t\tif case == 'rudder' :\r\n\t\t\tresult = [\"rudder \" + pbm.RudderCommand.position]\r\n\t\telif case == 'sail' :\r\n\t\t\tresult = [\"sail \" + pbm.SailCommand.position]\r\n\t\telif case == 'skipper' :\r\n\t\t\tresult = [\"sail \" + pbm.SkipperCommand.sailPosition, \"rudder \" + pbm._SkipperCommand.rudderPosition]\r\n\t\telif case == 'mode' :\r\n\t\t\tresult = [\"mode \" + pbm.Mode.mode]\r\n\r\n\t\treturn result\r\n\r\n","sub_path":"Old Stuff/transceiver.py","file_name":"transceiver.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"106741842","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\nclass PDA(object):\n\n # Product Variables\n IDs = []\n types = []\n products = []\n quantities = []\n volumes = []\n cost_benefit_factors = []\n prices = []\n\n # Class Variables\n store_id = \"501\"\n option = \"0\"\n\n def __init__(self):\n # It doesn't initialize anything LOL\n pass\n\n def set_store(self):\n # Will set the Class Variable \"store_id\" to your desired store\n import os\n import time\n\n stores = {\n 501: \"Loja Online\",\n 81: \"Rua Clodomiro Amazonas 955 - (Itaim Bibi)\",\n 141: \"Av. Washington Luís 3919 - (Vila Mascote)\",\n 10: \"Rua Teodoro Sampaio 1933 - (Pinheiros)\",\n 122: \"Av. 
Magalhães de Castro 6118 - (Real Parque)\",\n 361: \"Rua Joaquim Floriano 24 - (Joaquim Floriano)\",\n 261: \"Rua Bairi 435 - (Cerro Cora)\",\n 461: \"Rua Prof. Serafim Orlandi 299 - (Ricardo Jafet)\",\n 641: \"Av. Eng Armando 2022 - (Jabaquara)\",\n 5: \"Av. Professor Francisco Morato 2385 - (Morumbi)\",\n 941: \"Al. Ministro Rocha Azevedo 1136 - (Oscar Freire)\",\n 841: \"Praça Panamericana 217 - (Panamericana)\",\n 101: \"Al. Gabriel Monteiro Da Silva 1351 - (Gabriel Monteiro)\",\n 1061: \"Av. Reg Feijo 1425 - (Analia Franco)\",\n 1161: \"Rua Domingos De Morais 486 - (Ana Rosa)\",\n }\n os.system(\"cls\")\n print(\"These are the stores in SP!\\n\")\n for key in stores:\n print(\"ID: {} - Address: {}\".format(key, stores[key]))\n print(\"\")\n x = input(\"Type you requeried store ID: \")\n if int(x) in stores:\n self.store_id = x\n else:\n os.system(\"cls\")\n print(\"Invalid ID!\\nTry it again\")\n time.sleep(2)\n self.set_store()\n\n def set_option(self):\n import time\n import os\n\n print(\"What kind of liquor do you want?\\n\")\n print(\"1 - Standard Beers\")\n print(\"2 - Special Beers\")\n print(\"3 - Vines\")\n print(\"4 - Vodkas, Cachaças And Sakes\")\n print(\"A - All kinds :)\")\n x = input(\"\\nPlease Input your option: \")\n if x in [\"1\", \"2\", \"3\", \"4\", \"A\", \"a\"]:\n self.option = x.upper()\n else:\n os.system(\"cls\")\n print(\"Invalid Option!\\nTry it again\")\n time.sleep(2)\n self.set_option()\n\n def get_infos(self, option):\n import requests\n import json\n\n urls = {\n \"1\": r\"https://api.gpa.digital/pa/products/list/secoes/C4215/cervejas?storeId=1161&qt=12&s=&ftr=facetSubShelf_ss%3A4215_Cervejas&p=1&rm=>=list&isClienteMais=true\",\n \"2\": r\"https://api.gpa.digital/pa/products/list/secoes/C4215/cervejas-especiais?storeId=1161&qt=12&s=&ftr=facetSubShelf_ss%3A4215_Cervejas%20Especiais&p=1&rm=>=list&isClienteMais=true\",\n \"3\": r\"https://api.gpa.digital/pa/products/list/secoes/C4215/?storeId=1161&qt=12&s=&ftr=facetSubShelf_ss:4215_Vinhos%20e%20Espumantes&p=1&rm=>=grid&isClienteMais=true\",\n \"4\": r\"https://api.gpa.digital/pa/products/list/secoes/C4215/vodka-cachacas-e-saques?storeId=1161&qt=12&s=&ftr=facetSubShelf_ss%3A4215_Vodka%2C%20Cacha%C3%A7as%20e%20Saqu%C3%AAs&p=1&rm=>=list&isClienteMais=true\",\n }\n\n x = urls[option].replace(\"storeId=1161\", \"storeId=\" + self.store_id)\n new_urls = []\n for i in range(1, 501):\n new_urls.append(x.replace(\"&p=1\", \"&p=\" + str(i)))\n for i in new_urls:\n HTML_page = requests.get(i).text\n number_elements = json.loads(\n HTML_page)[\"content\"][\"numberOfElements\"]\n if number_elements == 0:\n break\n else:\n for x in range(0, number_elements):\n self.IDs.append(json.loads(HTML_page)[\n \"content\"][\"products\"][x][\"id\"])\n product_type = json.loads(\n HTML_page)[\"content\"][\"products\"][x][\"shelfList\"][0][\"name\"]\n if product_type == \"Bebidas\":\n self.types.append(\n json.loads(HTML_page)[\"content\"][\"products\"][x][\"shelfList\"][1][\"name\"])\n else:\n self.types.append(product_type)\n self.products.append(\n str(json.loads(HTML_page)[\"content\"][\"products\"][x][\"name\"]).strip())\n quantity = json.loads(HTML_page)[\n \"content\"][\"products\"][x][\"totalQuantity\"]\n if quantity == 0:\n self.quantities.append(1)\n else:\n self.quantities.append(quantity)\n self.prices.append(\n round(\n json.loads(HTML_page)[\"content\"][\"products\"][x][\"currentPrice\"],\n 2))\n\n def get_volume(self, Product):\n import re\n\n volumeRegex = re.compile(\n \"(\\d+)([ ]*)(ml|litro|litros)+\", 
re.IGNORECASE)\n result = volumeRegex.findall(Product)\n if len(result) > 0:\n volume = result[0][0]\n volume = int(volume)\n if \"litro\" in (item.lower() for item in result[0]):\n volume *= 1000\n else:\n volume = \"Unavailable\"\n return volume\n\n def export_xlsx(self):\n # import Workbook object from Openpyxl\n from openpyxl import Workbook\n\n wb = Workbook()\n ws = wb.active\n self.spreed_sheet_name = input(\"Export Excel file name: \")\n\n ws.append([\"IDs\", \"Types\", \"Products\",\n \"Quantities\", \"Volumes\", \"prices\"])\n\n for i in range(0, len(self.IDs)):\n ws.append([self.IDs[i],\n self.types[i],\n self.products[i],\n self.quantities[i],\n self.get_volume(self.products[i]),\n self.prices[i]])\n wb.save(self.spreed_sheet_name + \".xlsx\")\n\n def make_cost_benefit(self):\n # makes an array of quantity * volume / price\n for i in range(0, len(self.IDs)):\n try:\n self.cost_benefit_factors.append(\n self.quantities[i] * self.volumes[i] / self.prices[i])\n except BaseException:\n self.cost_benefit_factors(\"Unavailable\")\n\n def run(self):\n import os\n import threading\n\n if self.option == \"1\":\n os.system(\"cls\")\n print(\"Getting the information.....\")\n print(\"Please wait for it!\")\n self.get_infos(self.option)\n elif self.option == \"2\":\n os.system(\"cls\")\n print(\"Getting the information.....\")\n print(\"Please wait for it!\")\n self.get_infos(self.option)\n elif self.option == \"3\":\n os.system(\"cls\")\n print(\"Getting the information.....\")\n print(\"Please wait for it!\")\n self.get_infos(self.option)\n elif self.option == \"4\":\n os.system(\"cls\")\n print(\"Getting the information.....\")\n print(\"Please wait for it!\")\n self.get_infos(self.option)\n elif self.option == \"a\" or self.option == \"A\":\n os.system(\"cls\")\n print(\"Getting the information.....\")\n print(\"Please wait for it!\")\n self.get_infos(\"1\")\n self.get_infos(\"2\")\n self.get_infos(\"3\")\n self.get_infos(\"4\")\n # It actually doesn't work because it append out of order :(\n\n #t1 = threading.Thread(target=self.get_infos, args=(\"1\"))\n #t2 = threading.Thread(target=self.get_infos, args=(\"2\"))\n #t3 = threading.Thread(target=self.get_infos, args=(\"3\"))\n #t4 = threading.Thread(target=self.get_infos, args=(\"4\"))\n # t1.start()\n # t2.start()\n # t3.start()\n # t4.start()\n # t1.join()\n # t2.join()\n # t3.join()\n # t4.join()\n","sub_path":"OPP.py","file_name":"OPP.py","file_ext":"py","file_size_in_byte":8192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"650416877","text":"\"\"\"Tests checks module.\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport unittest\n\nimport checks\n\n\nclass TestChecks(unittest.TestCase):\n\n def test_verify_and_extract_time_not_success(self):\n \"\"\"Tests extract the cpu model name.\"\"\"\n smi_test = 'unittest_files/10_mixed_results/result_7.txt'\n smi_test = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), smi_test)\n sub_check = checks.SubmissionChecks()\n dt, start_time = sub_check.verify_and_extract_time(smi_test, 'closed',\n 'ncf')\n self.assertEqual(dt, checks.INFINITE_TIME)\n self.assertEqual(start_time, 1541638706.6702664)\n\n def test_verify_and_extract_time_success(self):\n \"\"\"Tests extract the cpu model name.\"\"\"\n smi_test = 'unittest_files/10_mixed_results/result_2.txt'\n smi_test = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), smi_test)\n sub_check = checks.SubmissionChecks()\n dt, start_time = 
sub_check.verify_and_extract_time(smi_test, 'closed',\n 'ncf')\n self.assertEqual(dt, 210.3895456790924)\n self.assertEqual(start_time, 1541635651.95072)\n\n def test_add_result(self):\n \"\"\"Tests adding result to metadata dict.\"\"\"\n sub_check = checks.SubmissionChecks()\n meta = {}\n meta['entry_name'] = {}\n meta['entry_name']['result_name'] = {}\n sub_check._add_result(meta['entry_name']['result_name'],\n 1,\n 10.11,\n 343434343.91)\n result = meta['entry_name']['result_name'][1]\n self.assertEqual(result['dt'], 10.11)\n self.assertEqual(result['start_time'], 343434343.91)\n\n def test_sort_results(self):\n \"\"\"Tests sorting results.\"\"\"\n results_dict = []\n results_dict.append(self._create_result_dict(23, 1.993))\n results_dict.append(self._create_result_dict(55, 19.993))\n results_dict.append(self._create_result_dict(1, 0.993))\n results_dict.append(self._create_result_dict(99, 999991.993))\n\n sub_check = checks.SubmissionChecks()\n sorted_results = sub_check._sorted_results(results_dict)\n self.assertEqual(sorted_results, [1, 23, 55, 99])\n\n def _create_result_dict(self, dt, start_time):\n result = {}\n result['dt'] = dt\n result['start_time'] = start_time\n return result\n","sub_path":"compliance/verify_submission/mlperf_submission_helper/checks_test.py","file_name":"checks_test.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"286900069","text":"\nfrom SR02 import SR02_Ultrasonic as Ultrasonic_Sensor\n\nimport front_wheels\nimport rear_wheels\nimport time\n\nif __name__ == '__main__':\n\n try:\n\n # Example Of Front Servo Motor Control\n\n direction_controller = front_wheels.Front_Wheels(db='config')\n\n direction_controller.turn_straight()\n time.sleep(1)\n\n # Example Of Rear Motor Control\n\n driving_controller = rear_wheels.Rear_Wheels(db='config')\n\n driving_controller.ready()\n driving_controller.forward_with_speed(50)\n time.sleep(1)\n driving_controller.stop()\n time.sleep(1)\n driving_controller.backward_with_speed(50)\n time.sleep(1)\n\n driving_controller.stop()\n driving_controller.power_down()\n\n # Example of Ultrasonic Sensor\n\n distance_detector = Ultrasonic_Sensor.Ultrasonic_Avoidance(35)\n\n for i in range(10):\n distance = distance_detector.get_distance()\n print(\"Distance is \", distance)\n time.sleep(1)\n\n except KeyboardInterrupt:\n # shut the rear wheels down cleanly on Ctrl+C\n driving_controller.stop()\n driving_controller.power_down()\n","sub_path":"car_control_example.py","file_name":"car_control_example.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"235257803","text":"\"\"\"\nMy shop calculator\nA shop requires a small program that would allow them to quickly work out \nthe total price for a number of items, each with different prices.\n\nThe program allows the user to enter the number of items and the price of \neach different item. Then the program computes and displays the total price \nof those items.\n\nIf the total price is over $100, then a 10% discount is applied to that total \nbefore the amount is displayed on the screen.\n\nThe output should look something like (bold text represents user input)\n\"\"\"\n\n# My variables\nnum_items = 0\ntot = 0\n\n# While loop that runs until a valid (positive) number of items is entered\nwhile num_items <= 0:\n print(\"Please enter a valid number of items\")\n num_items = int(input(\"Number of items: \"))\n# Loop for asking the price of every item\nfor i in range(num_items):\n price = float(input(\"Please insert price for item \" + str(i+1) + \" --> \"))\n tot = tot + price\n# Condition to apply a discount\nif tot > 100:\n tot = tot * 0.9\n\n# Print total amount\n#print(\"Total amount for \", num_items, \" items is $\", tot, sep='')\n# Formatting total amount\nprint(\"Total price for {} items is ${:.2f}\".format(num_items, tot))\n","sub_path":"prac_1/shop_calculator.py","file_name":"shop_calculator.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"67485890","text":"\"\"\"\r\nContains the functionality to run the NPL Full EIV harmonisation method.\r\n\"\"\"\r\n\r\n'''___Python Modules___'''\r\n\r\n'''___Third Party Modules___'''\r\n\r\n'''___Harmonisation Modules___'''\r\nimport convert_data\r\nfrom pc_algo import PCAlgo\r\nfrom GN_algo import GNAlgo\r\n\r\n'''___Authorship___'''\r\n__author__ = [\"Sam Hunt\", \"Peter Harris\"]\r\n__created__ = \"16/04/2017\"\r\n__credits__ = [\"Arta Dillo\", \"Jon Mittaz\"]\r\n__version__ = \"0.0\"\r\n__maintainer__ = \"Sam Hunt\"\r\n__email__ = \"sam.hunt@npl.co.uk\"\r\n__status__ = \"Development\"\r\n\r\n\r\nclass HarmAlgo:\r\n \"\"\"\r\n Class to run the NPL Full EIV harmonisation method.\r\n\r\n Sample code:\r\n\r\n .. code-block:: python\r\n\r\n H = HarmAlgo(HData)\r\n H.run()\r\n\r\n where ``HData`` is a ``harm_data_reader.HarmData`` object, containing the match-up data to be harmonised\r\n\r\n :Attributes:\r\n .. py:attribute:: HData:\r\n\r\n *harm_data_reader.HarmData*\r\n\r\n Input harmonisation data object containing match-up data to be harmonised\r\n\r\n .. py:attribute:: convert_data:\r\n\r\n *obj*\r\n\r\n Object containing functionality to convert *harm_data_reader.HarmData* objects\r\n\r\n :Methods:\r\n .. 
py:method:: run(...):\r\n\r\n Return harmonised parameters and diagnostic data for input harmonisaton match-up data\r\n \"\"\"\r\n\r\n def __init__(self, HData):\r\n \"\"\"\r\n Initialise HarmAlgo class\r\n\r\n :type HData: harm_data_reader.HarmData\r\n :param HData: Input harmonisation data object containing match-up data to be harmonised\r\n \"\"\"\r\n\r\n # Initialise class\r\n self.convert_data = convert_data.ConvertData()\r\n self.HData = HData\r\n\r\n def run(self):\r\n \"\"\"\r\n Return harmonised parameters and diagnostic data for input harmonisaton match-up data\r\n\r\n :type HData: harm_data_writer.HarmData\r\n :param HData: Input harmonisation match-up data object\r\n\r\n :return:\r\n :a: *numpy.ndarray*\r\n\r\n Harmonised parameters\r\n\r\n :Ia: *numpy.ndarray*\r\n\r\n Harmonised parameter sensor names\r\n\r\n :V: *numpy.ndarray*\r\n\r\n Harmonised parameters convariance matrix\r\n\r\n :F: *float*\r\n\r\n Objective function final value\r\n\r\n :v: *float*\r\n\r\n Objective function degrees of freedom\r\n\r\n :p: *float*\r\n\r\n Chi-squared probability\r\n \"\"\"\r\n\r\n ################################################################################################################\r\n # 1.\tPrepare Data\r\n ################################################################################################################\r\n\r\n HData = self.HData\r\n\r\n # Flatten values into required 1d form\r\n HData.values = HData.flatten_values(HData.values, HData.idx)\r\n\r\n ################################################################################################################\r\n # 2.\tCompute Approximate Solution to find Pre-conditioner to Full Problem\r\n ################################################################################################################\r\n\r\n print(\"Determine approximate solution to find pre-conditioner to full problem...\")\r\n\r\n # a. sample data for preconditioning\r\n HData_sample = self.convert_data.sample4PC(HData, sf=1)\r\n\r\n # b. determine preconditioner solution\r\n PC = PCAlgo(HData_sample)\r\n a_PC, S = PC.runPC(tol=1e-6)\r\n HData.a = a_PC # set PC output parameters as current parameter estimates\r\n\r\n ################################################################################################################\r\n # 3.\tCompute Full Solution using Gauss-Newton Algorithm\r\n ################################################################################################################\r\n\r\n print(\"Computing full solution...\")\r\n\r\n # a. reparameterise input data such that output data are independent quantities\r\n HData = self.convert_data.convert2ind(HData)\r\n\r\n # b. 
run GN algorithm on modified data\r\n GN = GNAlgo(HData, S)\r\n a, V, F, v, p, H_res, K_res = GN.runGN(show=True)\r\n\r\n return a, V, F, v, p, H_res, K_res\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n\r\n","sub_path":"src/main/full_EIV/src/main/harm_algo_EIV.py","file_name":"harm_algo_EIV.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"278011911","text":"def incrementer():\n i = 1\n while True:\n yield i\n i += 1\n\n\ninc = incrementer()\nprint(next(inc))\nprint(next(inc))\nprint(next(inc))\nprint(next(inc))\n\ndef fibonacci_generator():\n a = b = 1\n while True:\n yield a\n a, b = b, a + b\n\n\nfib = fibonacci_generator()\n\nfor i in fib:\n if i > 15:\n break\n else:\n print(\"Generated: \", i)\n\nchar = ['zero', 'one', 'two', 'three']\n\n\ndef display(elem):\n print(type(elem))\n assert type(elem) is int, 'elem must be integer'\n print('List element ', elem, '= ', char[elem])\n\n#Check if work\n\n\nelem = 6\nelem //= 3\ndisplay(elem)\n","sub_path":"Free_1.py","file_name":"Free_1.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"362746852","text":"import requests\nfrom slack import RTMClient\nfrom slack import WebClient\nfrom slack.errors import SlackApiError\n\n\ndef get_channel_info(channel):\n channel_info = {\n 'id': channel['id'],\n 'name': channel['name']\n }\n return channel_info\n\n\nclass SlackService:\n def __init__(self, slack_token: str):\n self.slack_token = slack_token\n self.slack_client = WebClient(token=slack_token)\n self.rtm_client = RTMClient(token=slack_token)\n\n def post_message(self, channel: str, text: str) -> int:\n try:\n response = self.slack_client.chat_postMessage(\n channel=channel,\n text=text\n )\n return response.status_code\n except SlackApiError as e:\n raise e\n\n def list_channels(self, types : str) -> list:\n response = self.slack_client.conversations_list(types=types)\n list_channels = response.data['channels']\n list_ids = list(map(get_channel_info, list_channels))\n return list_ids\n\n\nif __name__ == '__main__':\n slack_service = SlackService('xoxb-2425917630453-2436304297300-mqWaBb7VNAToVQY0FhyhbQnq')\n try:\n print(\"empezando uwu\")\n slack_service.rtm_client.start()\n print(\"Terminado 7u7\")\n except Exception as err:\n print(err)\n\n\n# @RTMClient.run_on(event=\"message\")\n# def amusebot(**payload):\n# print(payload)\n# \"\"\"\n# This function triggers when someone sends\n# a message on the slack\n# \"\"\"\n# data = payload[\"data\"]\n# web_client = payload[\"web_client\"]\n# bot_id = data.get(\"bot_id\", \"\")\n#\n# # If a message is not send by the bot\n# if bot_id == \"\":\n# channel_id = data[\"channel\"]\n#\n# # Extracting message send by the user on the slack\n# text = data.get(\"text\", \"\")\n# text = text.split(\">\")[-1].strip()\n#\n# response = \"\"\n# if \"help\" in text.lower():\n# user = data.get(\"user\", \"\")\n# response = f\"Hi <@{user}>! 
I am AmuseBot :)\"\n# else:\n# activity_json_response = requests.get(\"http://www.boredapi.com/api/activity/\").json()\n# activity = activity_json_response['activity']\n# response = str(activity)\n#\n# # Sending message back to slack\n# web_client.chat_postMessage(channel=channel_id, text=response)\n#\n#\n# try:\n# rtm_client = RTMClient(token=\"xoxb-2425917630453-2436304297300-mqWaBb7VNAToVQY0FhyhbQnq\")\n# print(\"Bot is up and running!\")\n# rtm_client.start()\n# except Exception as err:\n# print(err)\n","sub_path":"libs/slack_service.py","file_name":"slack_service.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"493657375","text":"\"\"\"\nNombre: Alejandro Tejada\nCurso: Diseño Compiladores\nFecha: noviembre 2021\nPrograma: mainSemantic.py\nPropósito: Programa de nueva version del main anterior\nV 2.0\n\"\"\"\n\n# ZONA DE IMPORTS\nfrom decafAlejandroLexer import decafAlejandroLexer\nfrom decafAlejandroParser import decafAlejandroParser\nfrom decafAlejandroListener import decafAlejandroListener\nfrom antlr4.error.ErrorListener import ErrorListener\nfrom antlr4 import *\nfrom antlr4.tree.Trees import TerminalNode\nfrom funciones import *\nfrom ErrorClass import *\nfrom symbolTable import *\nimport emoji\nimport sys\nfrom pprint import pprint\nfrom itertools import groupby\nfrom symbolTable import *\n# we import Node\nfrom NodoCodigo import *\n\n\nclass MyErrorListener(ErrorListener):\n def __init__(self):\n self.hasErrors = False\n self.lexicalErrors = []\n super(MyErrorListener, self).__init__()\n pass\n\n def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):\n self.hasErrors = True\n errorMsg = str(line) + \":\" + str(column) + \\\n \": sintaxis ERROR encontrado \" + str(msg)\n self.lexicalErrors.append(errorMsg)\n\n def getHasError(self):\n return self.hasErrors\n\n\nclass DecafAlejandroPrinter(decafAlejandroListener):\n def __init__(self):\n self.root = None\n # data types primitivos\n self.BOOLEAN = 'boolean'\n self.VOID = 'void'\n self.STRING = 'char'\n self.INT = 'int'\n self.ERROR = 'error'\n # un diccionario con primitivos\n self.data_type = {\n 'char': self.STRING,\n 'int': self.INT,\n 'boolean': self.BOOLEAN,\n 'void': self.VOID,\n 'error': self.ERROR\n }\n # variables distintas\n self.ambitos = []\n self.scope_Actual = None\n self.tablaVariables = dictTableVars()\n self.errores = SemanticError()\n self.tabla_metodos = dictTableMetods()\n self.tabla_estructuras = dictTableStruct()\n self.tabla_parametros = tableDictParameters()\n\n self.dictNodosCodigoIntermedio = {}\n self.contadorNodos = 0\n\n self.tipoNodo = {} # el tipo de nodo de cada valor que iteraremos\n\n super().__init__()\n\n def popScope(self):\n self.scope_Actual.valueToTable()\n self.scope_Actual = self.ambitos.pop()\n\n def addScope(self):\n self.ambitos.append(self.scope_Actual)\n self.scope_Actual = generalSymbolTable()\n\n def findVar(self, variable):\n \"\"\"\n *@param variable: busca la variable en el scope actual\n \"\"\"\n innerArray = []\n innerVar = self.scope_Actual.getSymbolFromTable(variable)\n if innerVar == 0:\n innerArray = self.ambitos.copy()\n innerArray.reverse()\n for scope in innerArray:\n innerVar2 = scope.getSymbolFromTable(variable)\n if innerVar2 != 0:\n return innerVar2\n return 0\n else:\n return innerVar\n\n def Intersection(self, a, b):\n \"\"\"\n Realiza la interseccion de dos valores\n \"\"\"\n return [v for v in a if v in b]\n\n def all_equal(self, iterable):\n 
\"\"\"\n Iterable es la variable que busca el valor\n \"\"\"\n g = groupby(iterable)\n return next(g, True) and not next(g, False)\n\n def ChildrenHasError(self, ctx):\n \"\"\"\n REvisa que el hijo tenga errores. Retorna TRUE si hay o FALSE si no\n *@param ctx: el contexto\n \"\"\"\n non_terminals = [self.tipoNodo[i] for i in ctx.children if type(\n i) in [decafAlejandroParser.LocationContext,\n decafAlejandroParser.ExprContext,\n decafAlejandroParser.BlockContext,\n decafAlejandroParser.DeclarationContext]]\n if self.ERROR in non_terminals:\n return True\n return False\n\n def enterProgram(self, ctx: decafAlejandroParser.ProgramContext):\n print('----------> INICIO COMPILACION <--------------')\n self.root = ctx\n self.scope_Actual = generalSymbolTable()\n\n def enterMethod_declr(self, ctx: decafAlejandroParser.Method_declrContext):\n metodo = ctx.method_name().getText()\n parameters = []\n\n if self.tabla_metodos.getSymbolFromTable(metodo) == 0:\n if ctx.return_type().var_type() is not None:\n tipo = ctx.return_type().var_type().getText()\n else:\n tipo = ctx.return_type().getText()\n hijos = ctx.getChildCount()\n\n for i in range(hijos):\n if isinstance(ctx.getChild(i), decafAlejandroParser.Var_typeContext):\n typeParameter = self.data_type[ctx.getChild(i).getText()]\n idParameter = ctx.getChild(i + 1).getText()\n if idParameter in [i['Id'] for i in parameters]:\n line = ctx.getChild(i + 1).start.line\n col = ctx.getChild(i + 1).start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_VARDUPLICADA)\n\n parameters.append(\n {'Tipo': typeParameter, 'Id': idParameter})\n self.tabla_parametros.AddEntryToTable(\n typeParameter, idParameter)\n\n self.tabla_metodos.AddEntryToTable(\n tipo, metodo, parameters, None, 0)\n else:\n # self.tipoNodo\n line = ctx.method_name().start.line\n col = ctx.method_name().start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_VARDUPLICADA)\n\n self.addScope()\n\n for parameter in parameters:\n type_symbol = self.tablaVariables.getSymbolFromTable(\n parameter['Tipo'])\n size = type_symbol['Size']\n offset = self.scope_Actual.offsetVariables\n self.scope_Actual.AddEntryToTable(\n parameter['Tipo'], parameter['Id'], size, offset, True)\n\n def exitMethod_declr(self, ctx: decafAlejandroParser.Method_declrContext):\n metodo = ctx.method_name().getText()\n self.tabla_parametros.cleanTable()\n self.popScope()\n\n return_type = ctx.return_type().getText()\n block_type = self.tipoNodo[ctx.block()]\n\n if return_type == self.VOID and block_type != self.VOID and block_type != self.ERROR:\n self.tipoNodo[ctx] = self.ERROR\n line = ctx.return_type().start.line\n col = ctx.return_type().start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_TIPOVOID)\n return\n\n if return_type != block_type:\n if block_type == self.ERROR:\n self.tipoNodo[ctx] = self.ERROR\n return\n\n self.tipoNodo[ctx] = self.ERROR\n line = ctx.block().start.line\n col = ctx.block().start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_TIPO_RETORNO)\n\n self.tipoNodo[ctx] = self.VOID\n\n def enterVardeclr(self, ctx: decafAlejandroParser.VardeclrContext):\n tipo = ctx.var_type().getText()\n\n # TOMAR EN CUENTA DECLARACION DE ARRAY'S\n if ctx.field_var().var_id() is not None:\n id = ctx.field_var().var_id().getText()\n\n # Si no encuentra una variable, la guarda en la tabla de simbolos\n # En caso contrario, ya está declarada, y eso es ERROR.\n\n if self.tabla_parametros.getSymbolFromTable(id) != 0:\n 
self.tipoNodo[ctx] = self.ERROR\n self.tipoNodo[ctx.field_var()] = self.ERROR\n line = ctx.field_var().var_id().start.line\n col = ctx.field_var().var_id().start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_VAR_REPETIDA)\n return\n\n if self.scope_Actual.getSymbolFromTable(id) == 0:\n type_symbol = self.tablaVariables.getSymbolFromTable(tipo)\n if type_symbol == 0:\n line = ctx.var_type().start.line\n col = ctx.var_type().start.column\n self.errores.AddEntryToTable(\n line, col, f'El tipo {tipo} de variable no ha sido declarado previamente..')\n self.tipoNodo[ctx] = self.ERROR\n self.tipoNodo[ctx.field_var()] = self.ERROR\n return\n size = type_symbol['Size']\n offset = self.scope_Actual.offsetVariables\n\n self.scope_Actual.AddEntryToTable(\n tipo, id, size, offset, False)\n else:\n self.tipoNodo[ctx] = self.ERROR\n self.tipoNodo[ctx.field_var()] = self.ERROR\n line = ctx.field_var().var_id().start.line\n col = ctx.field_var().var_id().start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_VARDUPLICADA)\n elif ctx.field_var().array_id() is not None:\n id = ctx.field_var().array_id().getChild(0).getText()\n\n if self.tabla_parametros.getSymbolFromTable(id) != 0:\n self.tipoNodo[ctx] = self.ERROR\n self.tipoNodo[ctx.field_var()] = self.ERROR\n line = ctx.field_var().var_id().start.line\n col = ctx.field_var().var_id().start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_VAR_REPETIDA)\n return\n\n if self.scope_Actual.getSymbolFromTable(id) == 0:\n type_symbol = self.tablaVariables.getSymbolFromTable(tipo)\n if type_symbol == 0:\n line = ctx.var_type().start.line\n col = ctx.var_type().start.column\n self.errores.AddEntryToTable(\n line, col, f'El tipo {tipo} de variable no ha sido declarado previamente.')\n self.tipoNodo[ctx] = self.ERROR\n self.tipoNodo[ctx.field_var()] = self.ERROR\n return\n\n tipo_array = 'array' + tipo\n size = 0\n\n if ctx.field_var().array_id().int_literal() is not None:\n size = int(\n ctx.field_var().array_id().int_literal().getText())\n # agregamos el size del valor\n innerSize = 0\n innerSize = self.tablaVariables.getSymbolFromTable(tipo)[\n \"Size\"]\n if(innerSize != 0):\n size = size * innerSize\n\n if 'struct' in tipo_array:\n self.tablaVariables.AddEntryToTable(\n tipo_array, size, self.tablaVariables.ARRAY + self.tablaVariables.STRUCT)\n else:\n self.tablaVariables.AddEntryToTable(\n tipo_array, size, self.tablaVariables.ARRAY)\n\n type_symbol = self.tablaVariables.getSymbolFromTable(\n tipo_array)\n\n size = type_symbol['Size']\n offset = self.scope_Actual.offsetVariables\n\n self.scope_Actual.AddEntryToTable(\n tipo_array, id, size, offset, False)\n\n else:\n self.tipoNodo[ctx] = self.ERROR\n self.tipoNodo[ctx.field_var()] = self.ERROR\n line = ctx.field_var().var_id().start.line\n col = ctx.field_var().var_id().start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_VARDUPLICADA)\n\n def enterStruct_declr(self, cstx: decafAlejandroParser.Struct_declrContext):\n self.addScope()\n\n def exitStruct_declr(self, ctx: decafAlejandroParser.Struct_declrContext):\n tipo = ctx.getChild(0).getText() + ctx.getChild(1).getText()\n\n if self.tablaVariables.getSymbolFromTable(tipo) == 0:\n size_scope = self.scope_Actual.getSize()\n self.tablaVariables.AddEntryToTable(\n tipo, size_scope, self.tablaVariables.STRUCT)\n self.tabla_estructuras.ExtractInfo(\n tipo, self.scope_Actual, self.tablaVariables)\n self.popScope()\n\n self.tipoNodo[ctx] = self.VOID\n for 
child in ctx.children:\n if not isinstance(child, TerminalNode):\n if self.tipoNodo[child] == self.ERROR:\n self.tipoNodo[ctx] = self.ERROR\n break\n else:\n self.tipoNodo[ctx] = self.ERROR\n line = ctx.start.line\n col = ctx.start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_VARDUPLICADA)\n\n def enterVar_id(self, ctx: decafAlejandroParser.Var_idContext):\n parent = ctx.parentCtx\n if parent in self.tipoNodo.keys():\n self.tipoNodo[ctx] = self.tipoNodo[parent]\n\n def exitVar_id(self, ctx: decafAlejandroParser.Var_idContext):\n parent = ctx.parentCtx\n if parent in self.tipoNodo.keys() or ctx in self.tipoNodo.keys():\n return\n\n # if ctx.getChildCount() == 1:\n id = ctx.getText()\n variable = self.findVar(id)\n if variable == 0:\n line = ctx.start.line\n col = ctx.start.column\n self.errores.AddEntryToTable(\n line, col, f'Variable \"{id}\" no ha sido declarada previamente.')\n self.tipoNodo[ctx] = self.ERROR\n else:\n if variable['Tipo'] in [self.INT, self.STRING, self.BOOLEAN]:\n self.tipoNodo[ctx] = self.data_type[variable['Tipo']]\n else:\n self.tipoNodo[ctx] = self.VOID\n # else:\n\n def enterArray_id(self, ctx: decafAlejandroParser.Array_idContext):\n parent = ctx.parentCtx\n if parent in self.tipoNodo.keys():\n self.tipoNodo[ctx] = self.tipoNodo[parent]\n\n def exitArray_id(self, ctx: decafAlejandroParser.Array_idContext):\n parent = ctx.parentCtx\n if parent in self.tipoNodo.keys() or ctx in self.tipoNodo.keys():\n return\n\n id = ctx.getChild(0).getText()\n variable = self.findVar(id)\n if variable == 0:\n line = ctx.start.line\n col = ctx.start.column\n self.errores.AddEntryToTable(\n line, col, f'Variable \"{id}\" no ha sido declarada previamente.')\n self.tipoNodo[ctx] = self.ERROR\n else:\n tipo = variable['Tipo']\n if ctx.int_literal() is not None:\n if 'array' in tipo:\n if tipo.split('array')[-1] in [self.INT, self.STRING, self.BOOLEAN]:\n self.tipoNodo[ctx] = self.data_type[tipo.split(\n 'array')[-1]]\n else:\n self.tipoNodo[ctx] = self.VOID\n else:\n line = ctx.start.line\n col = ctx.start.column\n self.errores.AddEntryToTable(\n line, col, f'Variable \"{id}\" debe ser del tipo ARRAY.')\n self.tipoNodo[ctx] = self.ERROR\n elif ctx.var_id() is not None:\n tipo = variable['Tipo']\n tipo_var = self.findVar(ctx.var_id().getText())\n self.CheckErrorInArrayId(ctx, tipo, tipo_var)\n\n def exitVar_type(self, ctx: decafAlejandroParser.Var_typeContext):\n self.tipoNodo[ctx] = self.VOID\n\n def exitField_var(self, ctx: decafAlejandroParser.Field_varContext):\n if ctx not in self.tipoNodo.keys():\n if ctx.var_id() is not None:\n self.tipoNodo[ctx] = self.tipoNodo[ctx.getChild(0)]\n elif ctx.array_id() is not None:\n self.tipoNodo[ctx] = self.tipoNodo[ctx.getChild(0)]\n\n def enterField_declr(self, ctx: decafAlejandroParser.Field_declrContext):\n tipo = ctx.var_type().getText()\n\n for child in ctx.children:\n if not isinstance(child, TerminalNode) and isinstance(child, decafAlejandroParser.Field_varContext):\n id = child.var_id().getText()\n\n if self.scope_Actual.getSymbolFromTable(id) == 0:\n type_symbol = self.tablaVariables.getSymbolFromTable(tipo)\n size = type_symbol['Size']\n offset = self.scope_Actual.offsetVariables\n\n self.scope_Actual.AddEntryToTable(\n tipo, id, size, offset, False)\n else:\n self.tipoNodo[child] = self.ERROR\n line = child.var_id().start.line\n col = child.var_id().start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_VARDUPLICADA)\n\n def exitField_declr(self, ctx: 
decafAlejandroParser.Field_declrContext):\n self.tipoNodo[ctx] = self.VOID\n for child in ctx.children:\n if not isinstance(child, TerminalNode):\n if self.tipoNodo[child] == self.ERROR:\n self.tipoNodo[ctx] = self.ERROR\n break\n\n def exitVardeclr(self, ctx: decafAlejandroParser.VardeclrContext):\n self.tipoNodo[ctx] = self.VOID\n for child in ctx.children:\n if not isinstance(child, TerminalNode):\n if self.tipoNodo[child] == self.ERROR:\n self.tipoNodo[ctx] = self.ERROR\n break\n\n def exitString_literal(self, ctx: decafAlejandroParser.String_literalContext):\n self.tipoNodo[ctx] = self.STRING\n\n def exitInt_literal(self, ctx: decafAlejandroParser.Int_literalContext):\n self.tipoNodo[ctx] = self.INT\n\n def exitBool_literal(self, ctx: decafAlejandroParser.Bool_literalContext):\n self.tipoNodo[ctx] = self.BOOLEAN\n\n def exitLiteral(self, ctx: decafAlejandroParser.LiteralContext):\n self.tipoNodo[ctx] = self.tipoNodo[ctx.getChild(0)]\n\n def enterBlock(self, ctx: decafAlejandroParser.BlockContext):\n parent = ctx.parentCtx\n\n if not isinstance(parent, decafAlejandroParser.Method_declrContext):\n self.addScope()\n\n def exitBlock(self, ctx: decafAlejandroParser.BlockContext):\n parent = ctx.parentCtx\n\n if not isinstance(parent, decafAlejandroParser.Method_declrContext):\n self.popScope()\n\n for child in ctx.children:\n if not isinstance(child, TerminalNode):\n if self.tipoNodo[child] == self.ERROR:\n self.tipoNodo[ctx] = self.ERROR\n return\n\n hijos_tipo = [self.tipoNodo[i] for i in ctx.children if isinstance(\n i, decafAlejandroParser.StatementContext)]\n filtered = list(filter(lambda tipo: tipo != self.VOID, hijos_tipo))\n if len(filtered) == 0:\n self.tipoNodo[ctx] = self.VOID\n return\n\n if len(filtered) == 1:\n self.tipoNodo[ctx] = filtered.pop()\n return\n\n if self.all_equal(filtered):\n self.tipoNodo[ctx] = filtered.pop()\n else:\n self.tipoNodo[ctx] = self.ERROR\n\n def exitMethod_call(self, ctx: decafAlejandroParser.Method_callContext):\n name = ctx.method_name().getText()\n parameters = []\n\n for child in ctx.children:\n if isinstance(child, decafAlejandroParser.ExprContext):\n parameters.append(child)\n\n method_info = self.tabla_metodos.getSymbolFromTable(name)\n if method_info == 0:\n self.tipoNodo[ctx] = self.ERROR\n line = ctx.method_name().start.line\n col = ctx.method_name().start.column\n self.errores.AddEntryToTable(\n line, col, f'El método \"{name}\" no existe o no ha sido declarado antes del scope actual.')\n return\n\n if len(parameters) != len(method_info['Parameters']):\n self.tipoNodo[ctx] = self.ERROR\n line = ctx.method_name().start.line\n col = ctx.method_name().start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_CANTIDAD_PARAMETROS)\n return\n\n if len(parameters) == 0:\n self.tipoNodo[ctx] = method_info['Tipo']\n return\n\n hasError = False\n for i in range(len(parameters)):\n tipo_parametro = self.tipoNodo[parameters[i]]\n if tipo_parametro == self.ERROR:\n self.tipoNodo[ctx] = self.ERROR\n return\n\n tipo_metodo = method_info['Parameters'][i]['Tipo']\n\n if tipo_parametro != tipo_metodo:\n hasError = True\n\n line = parameters[i].start.line\n col = parameters[i].start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_TIPOMETODOS)\n\n if hasError:\n self.tipoNodo[ctx] = self.ERROR\n else:\n self.tipoNodo[ctx] = method_info['Tipo']\n\n def GetMethodType(self, ctx):\n nodo = ctx.parentCtx\n hijos = [str(type(i))\n for i in nodo.children if not isinstance(i, TerminalNode)]\n while 
str(decafAlejandroParser.Return_typeContext) not in hijos:\n nodo = nodo.parentCtx\n hijos = [str(type(i))\n for i in nodo.children if not isinstance(i, TerminalNode)]\n\n if nodo.return_type().var_type() is not None:\n return nodo.return_type().var_type().getText()\n else:\n return nodo.return_type().getText()\n\n def exitStatement_if(self, ctx: decafAlejandroParser.Statement_ifContext):\n error = self.ChildrenHasError(ctx)\n if error:\n self.tipoNodo[ctx] = self.ERROR\n return\n\n tipo_if = self.tipoNodo[ctx.expr()]\n\n if tipo_if != self.BOOLEAN:\n self.tipoNodo[ctx] = self.ERROR\n line = ctx.expr().start.line\n col = ctx.expr().start.column\n self.errores.AddEntryToTable(line, col, self.errores.errrorText_IF)\n return\n\n hijos_tipo = [i for i in ctx.children if isinstance(\n i, decafAlejandroParser.BlockContext)]\n tipo_return = self.GetMethodType(ctx)\n if len(hijos_tipo) == 1:\n hijo_1 = hijos_tipo.pop()\n if tipo_return == self.tipoNodo[hijo_1]:\n self.tipoNodo[ctx] = self.tipoNodo[hijo_1]\n else:\n self.tipoNodo[ctx] = self.ERROR\n line = hijo_1.start.line\n col = hijo_1.start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_TIPO_RETORNO)\n else:\n if self.tipoNodo[hijos_tipo[0]] != tipo_return and self.tipoNodo[hijos_tipo[1]] != tipo_return:\n self.tipoNodo[ctx] = self.ERROR\n line = hijos_tipo[0].start.line\n col = hijos_tipo[0].start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_TIPO_RETORNO)\n\n line = hijos_tipo[1].start.line\n col = hijos_tipo[1].start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_TIPO_RETORNO)\n return\n elif self.tipoNodo[hijos_tipo[0]] != tipo_return:\n self.tipoNodo[ctx] = self.ERROR\n line = hijos_tipo[0].start.line\n col = hijos_tipo[0].start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_TIPO_RETORNO)\n return\n elif self.tipoNodo[hijos_tipo[1]] != tipo_return:\n self.tipoNodo[ctx] = self.ERROR\n line = hijos_tipo[1].start.line\n col = hijos_tipo[1].start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_TIPO_RETORNO)\n return\n\n if self.tipoNodo[hijos_tipo[0]] == self.tipoNodo[hijos_tipo[1]]:\n self.tipoNodo[ctx] = self.tipoNodo[hijos_tipo.pop()]\n else:\n self.tipoNodo[ctx] = self.ERROR\n\n def exitStatement_while(self, ctx: decafAlejandroParser.Statement_whileContext):\n error = self.ChildrenHasError(ctx)\n if error:\n self.tipoNodo[ctx] = self.ERROR\n return\n\n tipo_while = self.tipoNodo[ctx.expr()]\n\n if tipo_while != self.BOOLEAN:\n self.tipoNodo[ctx] = self.ERROR\n line = ctx.expr().start.line\n col = ctx.expr().start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_WHILE)\n return\n\n hijos_tipo = [self.tipoNodo[i] for i in ctx.children if isinstance(\n i, decafAlejandroParser.BlockContext)]\n if len(hijos_tipo) == 1:\n self.tipoNodo[ctx] = hijos_tipo.pop()\n\n def exitStatement_return(self, ctx: decafAlejandroParser.Statement_returnContext):\n error = self.ChildrenHasError(ctx)\n if error:\n self.tipoNodo[ctx] = self.ERROR\n return\n\n self.tipoNodo[ctx] = self.tipoNodo[ctx.expr()]\n\n def exitStatement_methodcall(self, ctx: decafAlejandroParser.Statement_methodcallContext):\n error = self.ChildrenHasError(ctx)\n if error:\n self.tipoNodo[ctx] = self.ERROR\n return\n\n self.tipoNodo[ctx] = self.tipoNodo[ctx.method_call()]\n\n def exitStatement_break(self, ctx: decafAlejandroParser.Statement_breakContext):\n error = self.ChildrenHasError(ctx)\n if error:\n self.tipoNodo[ctx] 
= self.ERROR\n return\n\n self.tipoNodo[ctx] = self.VOID\n\n def exitStatement_assign(self, ctx: decafAlejandroParser.Statement_assignContext):\n error = self.ChildrenHasError(ctx)\n if error:\n self.tipoNodo[ctx] = self.ERROR\n return\n\n left = self.tipoNodo[ctx.location()]\n right = self.tipoNodo[ctx.expr()]\n result_type = self.VOID\n\n if left != right:\n result_type = self.ERROR\n line = ctx.assign_op().start.line\n col = ctx.assign_op().start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_EQUALS)\n self.tipoNodo[ctx] = result_type\n\n def exitExpr(self, ctx: decafAlejandroParser.ExprContext):\n hasError = self.ChildrenHasError(ctx)\n # if hasError:\n # self.tipoNodo[ctx] = self.ERROR\n # return\n\n nodes_nonterminals = []\n for child in ctx.children:\n if not isinstance(child, TerminalNode):\n nodes_nonterminals.append(child)\n\n if len(nodes_nonterminals) == 1:\n non_terminal = nodes_nonterminals.pop()\n\n self.tipoNodo[ctx] = self.tipoNodo[non_terminal]\n # elif len(nodes_nonterminals) == 0:\n # self.tipoNodo[ctx] = self.VOID\n else:\n tipo1 = self.tipoNodo[ctx.getChild(0)]\n tipo2 = self.tipoNodo[ctx.getChild(2)]\n\n if self.ERROR in [tipo1, tipo2]:\n self.tipoNodo[ctx] = self.ERROR\n return\n\n result_type = self.ERROR\n error = ''\n hasError = False\n\n if ctx.eq_op() is not None:\n if len(self.Intersection([tipo1, tipo2], [self.STRING, self.INT, self.BOOLEAN])) > 0 and tipo1 == tipo2:\n result_type = self.BOOLEAN\n else:\n hasError = True\n line = ctx.getChild(0).start.line\n col = ctx.getChild(0).start.column\n error = self.errores.errrorText_EQ_OPS\n elif ctx.arith_op() is not None or ctx.rel_op() is not None:\n if tipo1 == self.INT and tipo2 == self.INT:\n result_type = self.INT\n if ctx.rel_op() is not None:\n result_type = self.BOOLEAN\n \"\"\" elif tipo1 == self.FLOAT and tipo2 == self.INT:\n result_type = self.FLOAT\n if ctx.rel_op() is not None:\n result_type = self.BOOLEAN\n\n elif tipo1 == self.INT and tipo2 == self.FLOAT:\n result_type = self.FLOAT\n if ctx.rel_op() is not None:\n result_type = self.BOOLEAN\n \"\"\"\n else:\n hasError = True\n if tipo1 != self.INT:\n line = ctx.getChild(0).start.line\n col = ctx.getChild(0).start.column\n else:\n line = ctx.getChild(2).start.line\n col = ctx.getChild(2).start.column\n\n if ctx.arith_op() is not None:\n error = self.errores.errrorText_ARITMETICA\n else:\n error = self.errores.errrorText_REL_OP\n elif ctx.cond_op() is not None:\n if tipo1 == self.BOOLEAN and tipo2 == self.BOOLEAN:\n result_type = self.BOOLEAN\n else:\n hasError = True\n if tipo1 != self.BOOLEAN:\n line = ctx.getChild(0).start.line\n col = ctx.getChild(0).start.column\n else:\n line = ctx.getChild(2).start.line\n col = ctx.getChild(2).start.column\n\n error = self.errores.errrorText_CONDICIONALES_GENERAL\n else:\n result_type = self.VOID\n\n if hasError:\n self.errores.AddEntryToTable(line, col, error)\n self.tipoNodo[ctx] = result_type\n\n def CheckErrorInArrayId(self, ctx, tipo, tipo_var):\n id = ctx.getChild(0).getText()\n # variable = self.findVar(id)\n # tipo = variable['Tipo']\n\n if ctx.int_literal() is not None:\n if 'array' in tipo:\n if tipo.split('array')[-1] in [self.INT, self.STRING, self.BOOLEAN]:\n self.tipoNodo[ctx] = self.data_type[tipo.split(\n 'array')[-1]]\n else:\n self.tipoNodo[ctx] = self.VOID\n else:\n line = ctx.start.line\n col = ctx.start.column\n self.errores.AddEntryToTable(\n line, col, f'Variable \"{id}\" debe ser del tipo ARRAY.')\n self.tipoNodo[ctx] = self.ERROR\n elif ctx.var_id() is not 
None:\n # tipo_var = self.findVar(ctx.var_id().getText())\n if tipo_var == 0:\n line = ctx.start.line\n col = ctx.start.column\n self.errores.AddEntryToTable(\n line, col, f'Variable \"{ctx.var_id().getText()}\" no ha sido declarada previamente.')\n self.tipoNodo[ctx] = self.ERROR\n return\n\n if 'array' in tipo and tipo_var['Tipo'] == self.INT:\n if tipo.split('array')[-1] in [self.INT, self.STRING, self.BOOLEAN]:\n self.tipoNodo[ctx] = self.data_type[tipo.split(\n 'array')[-1]]\n else:\n self.tipoNodo[ctx] = self.VOID\n elif 'array' in tipo and tipo_var['Tipo'] != self.INT:\n line = ctx.start.line\n col = ctx.start.column\n self.errores.AddEntryToTable(\n line, col, f'Variable \"{ctx.var_id().getText()}\" debe ser INT para intetar acceder a un ARRAY.')\n self.tipoNodo[ctx] = self.ERROR\n elif 'array' not in tipo:\n line = ctx.start.line\n col = ctx.start.column\n self.errores.AddEntryToTable(\n line, col, f'Variable \"{id}\" debe ser del tipo ARRAY.')\n self.tipoNodo[ctx] = self.ERROR\n elif tipo_var['Tipo'] != self.INT:\n line = ctx.start.line\n col = ctx.start.column\n self.errores.AddEntryToTable(\n line, col, f'Variable \"{ctx.var_id().getText()}\" debe ser INT para intetar acceder a un ARRAY.')\n self.tipoNodo[ctx] = self.ERROR\n\n def IterateChildren(self, location, parent_type, description):\n if location.var_id() is not None:\n # CASO BASE\n if location.var_id().location() is None:\n tipo_retorno = self.ERROR\n id = location.var_id().getChild(0).getText()\n if description is None:\n self.tipoNodo[location] = self.ERROR\n # line = location.start.line\n # col = location.start.column\n # self.errores.AddEntryToTable(line, col, f'Variable \"{id}\" no ha sido declarada previamente.')\n else:\n if 'struct' in description:\n child = self.tabla_estructuras.getChild(\n parent_type, id)\n if child == 0:\n self.tipoNodo[location] = self.ERROR\n line = location.start.line\n col = location.start.column\n self.errores.AddEntryToTable(\n line, col, f'Variable \"{id}\" no ha sido declarada previamente.')\n else:\n tipo_nodo = self.tablaVariables.getSymbolFromTable(\n child['Tipo'])\n tipo_retorno = tipo_nodo['Tipo']\n self.tipoNodo[location] = tipo_nodo['Tipo']\n else:\n line = location.start.line\n col = location.start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_ESTRUCTURAGENERAL)\n self.tipoNodo[location] = self.ERROR\n\n return tipo_retorno\n\n id = location.var_id().getChild(0).getText()\n tipo_nodo = None\n child_type = None\n child_desc = None\n\n if description is None:\n line = location.start.line\n col = location.start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_ESTRUCTURAGENERAL)\n else:\n if 'struct' in description:\n child = self.tabla_estructuras.getChild(parent_type, id)\n if child == 0:\n line = location.start.line\n col = location.start.column\n self.errores.AddEntryToTable(\n line, col, f'Variable \"{id}\" no ha sido declarada previamente.')\n else:\n child_type = child['Tipo']\n child_desc = child['Description']\n tipo_nodo = self.tablaVariables.getSymbolFromTable(\n child['Tipo'])\n else:\n line = location.start.line\n col = location.start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_ESTRUCTURAGENERAL)\n\n result_type = self.IterateChildren(\n location.var_id().location(), child_type, child_desc)\n self.tipoNodo[location] = result_type\n return result_type\n\n elif location.array_id() is not None:\n # CASO BASE\n\n if location.array_id().location() is None:\n tipo_retorno = 
self.ERROR\n id = location.array_id().getChild(0).getText()\n if description is None:\n self.tipoNodo[location] = self.ERROR\n # line = location.start.line\n # col = location.start.column\n # self.errores.AddEntryToTable(line, col, f'Variable \"{id}\" no ha sido declarada previamente.')\n else:\n if 'struct' in description:\n child = self.tabla_estructuras.getChild(\n parent_type, id)\n if child == 0:\n self.tipoNodo[location] = self.ERROR\n line = location.start.line\n col = location.start.column\n self.errores.AddEntryToTable(\n line, col, f'Variable \"{id}\" no ha sido declarada previamente.')\n else:\n # HIJO IZQUIERDO\n tipo_nodo = self.tablaVariables.getSymbolFromTable(\n child['Tipo'])\n tipo_retorno = tipo_nodo['Tipo'].split('array')[-1]\n\n # HIJO DERECHO\n if location.array_id().int_literal() is not None:\n if 'array' not in child['Tipo']:\n line = location.array_id().start.line\n col = location.array_id().start.column\n self.errores.AddEntryToTable(\n line, col, f'Variable \"{id}\" debe ser del tipo ARRAY.') # ATENCION\n self.tipoNodo[location] = self.ERROR\n else:\n self.tipoNodo[location] = child['Tipo'].split(\n 'array')[-1]\n elif location.array_id().var_id() is not None:\n tipo = child['Tipo']\n tipo_var = self.findVar(\n location.array_id().var_id().getText())\n self.CheckErrorInArrayId(\n location.array_id(), tipo, tipo_var)\n\n if self.tipoNodo[location.array_id()] != self.ERROR:\n self.tipoNodo[location] = tipo_nodo['Tipo'].split(\n 'array')[-1]\n else:\n tipo_retorno = self.ERROR\n self.tipoNodo[location] = self.ERROR\n else:\n line = location.start.line\n col = location.start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_ESTRUCTURAGENERAL)\n self.tipoNodo[location] = self.ERROR\n return tipo_retorno\n\n id = location.array_id().getChild(0).getText()\n tipo_nodo = None\n child_type = None\n child_desc = None\n\n tipo_retorno = self.VOID\n if 'struct' in description:\n child = self.tabla_estructuras.getChild(parent_type, id)\n if child == 0:\n line = location.start.line\n col = location.start.column\n self.errores.AddEntryToTable(\n line, col, f'Variable \"{id}\" no ha sido declarada previamente.')\n else:\n child_type = child['Tipo']\n child_desc = child['Description']\n # tipo_nodo = self.tablaVariables.getSymbolFromTable(child['Tipo'])\n\n # HIJO IZQUIERDO\n tipo_nodo = self.tablaVariables.getSymbolFromTable(\n child['Tipo'])\n\n # HIJO DERECHO\n if location.array_id().int_literal() is not None:\n if 'array' not in child['Tipo']:\n line = location.array_id().start.line\n col = location.array_id().start.column\n self.errores.AddEntryToTable(\n line, col, f'Variable \"{id}\" debe ser un array.')\n self.tipoNodo[location] = self.ERROR\n elif location.array_id().var_id() is not None:\n tipo = child['Tipo']\n tipo_var = self.findVar(\n location.array_id().var_id().getText())\n self.CheckErrorInArrayId(\n location.array_id(), tipo, tipo_var)\n\n if location.array_id() in self.tipoNodo.keys():\n if self.tipoNodo[location.array_id()] == self.ERROR:\n tipo_retorno = self.ERROR\n # self.tipoNodo[location] = self.ERROR\n else:\n line = location.start.line\n col = location.start.column\n self.errores.AddEntryToTable(\n line, col, self.errores.errrorText_ESTRUCTURAGENERAL)\n\n result_type = self.IterateChildren(\n location.array_id().location(), child_type, child_desc)\n self.tipoNodo[location] = result_type\n if tipo_retorno == self.ERROR:\n self.tipoNodo[location] = tipo_retorno\n result_type = tipo_retorno\n return result_type\n\n def 
enterLocation(self, ctx: decafAlejandroParser.LocationContext):\n parent = ctx.parentCtx\n if parent in self.tipoNodo.keys():\n if self.tipoNodo[parent] == self.ERROR:\n self.tipoNodo[ctx] = self.ERROR\n\n if ctx in self.tipoNodo.keys():\n return\n if ctx.var_id() is not None:\n if ctx.var_id().location() is None:\n return\n elif ctx.array_id() is not None:\n if ctx.array_id().location() is None:\n return\n\n if ctx.var_id() is not None:\n if ctx.var_id().location() is not None:\n id = ctx.var_id().getChild(0).getText()\n self.scope_Actual.valueToTable()\n\n symbol = self.findVar(id)\n if symbol == 0:\n line = ctx.start.line\n col = ctx.start.column\n self.errores.AddEntryToTable(\n line, col, f'Variable \"{ctx.var_id().getChild(0).getText()}\" no ha sido declarada previamente.')\n self.tipoNodo[ctx] = self.ERROR\n else:\n tipo_id = self.tablaVariables.getSymbolFromTable(\n symbol['Tipo'])\n print('Tipo de variable', tipo_id)\n if 'array' in tipo_id['Tipo']:\n line = ctx.start.line\n col = ctx.start.column\n self.errores.AddEntryToTable(\n line, col, f'Variable \"{ctx.var_id().getChild(0).getText()}\" debe ser un del tipo ARRAY.')\n self.tipoNodo[ctx] = self.ERROR\n return\n result_type = self.IterateChildren(\n ctx.var_id().location(), tipo_id['Tipo'], tipo_id['Description'])\n self.tipoNodo[ctx] = result_type\n\n if ctx.array_id() is not None:\n if ctx.array_id().location() is not None:\n id = ctx.array_id().getChild(0).getText()\n symbol = self.findVar(id)\n if symbol == 0:\n line = ctx.start.line\n col = ctx.start.column\n self.errores.AddEntryToTable(\n line, col, f'Variable \"{ctx.array_id().getChild(0).getText()}\" no ha sido declarada previamente.')\n self.tipoNodo[ctx] = self.ERROR\n else:\n tipo_id = self.tablaVariables.getSymbolFromTable(\n symbol['Tipo'])\n result_type = self.IterateChildren(\n ctx.array_id().location(), tipo_id['Tipo'], tipo_id['Description'])\n self.tipoNodo[ctx] = result_type\n\n # Hijo derecho\n if ctx.array_id().int_literal() is not None:\n if 'array' not in tipo_id['Tipo']:\n line = ctx.array_id().start.line\n col = ctx.array_id().start.column\n self.errores.AddEntryToTable(\n line, col, f'Variable \"{id}\" debe ser un array.')\n self.tipoNodo[ctx] = self.ERROR\n elif ctx.array_id().var_id() is not None:\n tipo = tipo_id['Tipo']\n tipo_var = self.findVar(\n ctx.array_id().var_id().getText())\n self.CheckErrorInArrayId(\n ctx.array_id(), tipo, tipo_var)\n\n if ctx.array_id() in self.tipoNodo.keys():\n if self.tipoNodo[ctx.array_id()] == self.ERROR:\n self.tipoNodo[ctx] = self.ERROR\n\n def exitLocation(self, ctx: decafAlejandroParser.LocationContext):\n if ctx not in self.tipoNodo.keys():\n self.tipoNodo[ctx] = self.tipoNodo[ctx.getChild(0)]\n\n def exitDeclaration(self, ctx: decafAlejandroParser.DeclarationContext):\n self.tipoNodo[ctx] = self.tipoNodo[ctx.getChild(0)]\n\n def exitProgram(self, ctx: decafAlejandroParser.ProgramContext):\n main_method = self.tabla_metodos.getSymbolFromTable('main')\n if main_method != 0:\n if len(main_method['Parameters']) > 0:\n self.tipoNodo[ctx] = self.ERROR\n self.errores.AddEntryToTable(\n 0, 0, self.errores.errrorText_MAIN_NOT_EXHISTS)\n else:\n hasError = self.ChildrenHasError(ctx)\n if hasError:\n self.tipoNodo[ctx] = self.ERROR\n else:\n self.tipoNodo[ctx] = self.VOID\n else:\n self.tipoNodo[ctx] = self.ERROR\n self.errores.AddEntryToTable(\n 0, 0, self.errores.errrorText_MAIN_NOT_EXHISTS)\n\n print('----------> FIN PROGRAMA <--------------')\n self.scope_Actual.valueToTable()\n self.tabla_metodos.valueToTable()\n 
self.tabla_estructuras.valueToTable()\n\n\nclass Compilar():\n def __init__(self, url):\n self.printer = None\n input = FileStream(url)\n lexer = decafAlejandroLexer(input)\n stream = CommonTokenStream(lexer)\n parser = decafAlejandroParser(stream)\n self.errorFromAntlr = MyErrorListener()\n parser.removeErrorListeners()\n parser.addErrorListener(self.errorFromAntlr)\n tree = parser.program()\n\n if not self.errorFromAntlr.getHasError():\n self.printer = DecafAlejandroPrinter()\n walker = ParseTreeWalker()\n walker.walk(self.printer, tree)\n\n def HasLexicalError(self):\n return self.errorFromAntlr.getHasError()\n\n\ncomp = Compilar('Python3/programs/multiple_tests.decaf')\n","sub_path":"mainSemantic.py","file_name":"mainSemantic.py","file_ext":"py","file_size_in_byte":47613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"446962008","text":"n=int(input(\"Enter the target concentration value .. \\n\"))\nval=[float(i) for i in range(0,1025)]\n#Populating the array with values (note: this array is never used below)\ncount=0\nlow=float(input(\"Enter the buffer concentration value .. \\n\"))\nhigh=float(input(\"Enter the sample concentration value .. \\n\"))\n#Setting the buffer and solute sample conc and counter\n\nprev_mid=None\nwhile(low<=high):\n\n\n mid=float((low+high)/2.0)\n mid=round(mid,1)\n\n if mid==prev_mid:\n # rounding to 0.1 means no further progress is possible; avoid looping forever\n print(\"Target\",n,\"not reachable exactly; stopped at\",mid,\"after\",count,\"steps\")\n break\n prev_mid=mid\n\n count=count+1\n # print the current search state once per bisection step\n print(\"High:\\t\",high,\"Low:\\t\",low,\"Target:\\t\",mid,\"Steps:\\t\",count)\n \n \n if n==mid:\n print(\"Found at position\",mid,\"in\",count,\"steps\")\n break\n elif mid>n:\n high=mid\n else:\n low=mid\n \n\n\n#Finding the number of steps in which we can attain the desired target concentration\n\n","sub_path":"Codes/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"594330547","text":"import socket\nimport json\nimport sys\n\n\ndef padd_to_specific_size(bytes_data, size):\n if len(bytes_data) > size:\n raise ValueError(\"Final size should be larger than data size to padd.\")\n return bytes(\"0\" * (size - len(bytes_data)) + bytes_data, encoding='utf-8')\n\n\ndef main():\n example = {\n \"type\": \"unregister\",\n \"node\": sys.argv[1],\n \"path\": sys.argv[2],\n }\n\n BYTES_AMOUNT_REQUEST_SIZE_INDICATION = 20\n\n data = json.dumps(example)\n connection = socket.create_connection((\"backup_server\", 12345))\n data_bytes = bytes(data, encoding='utf-8')\n padded_data_bytes_size = padd_to_specific_size(str(len(data_bytes)), BYTES_AMOUNT_REQUEST_SIZE_INDICATION)\n connection.sendall(padded_data_bytes_size)\n print(\"Going to send {} bytes\".format(len(data_bytes)))\n connection.sendall(data_bytes)\n connection.recv(100).rstrip()\n response = connection.recv(2048).rstrip().decode('utf-8')\n print(response)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"backup_requests_scripts/unregister_node.py","file_name":"unregister_node.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
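The unregister script above sends a 20-byte, zero-padded length header before the JSON payload. For clarity, a minimal sketch of the matching receive side (hypothetical server loop; the helper name and its robustness level are assumptions, not part of the original repo):

import socket

HEADER_SIZE = 20  # must match BYTES_AMOUNT_REQUEST_SIZE_INDICATION on the client

def recv_request(conn):
    # read the zero-padded length header, e.g. b"00000000000000000042" -> 42
    # (a robust version would also loop on this read)
    size = int(conn.recv(HEADER_SIZE).decode('utf-8'))
    chunks = b""
    while len(chunks) < size:  # keep reading until the full payload has arrived
        chunks += conn.recv(min(2048, size - len(chunks)))
    return chunks.decode('utf-8')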
+{"seq_id":"164532541","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 24 16:32:39 2020\n\n@author: wulinli\n\"\"\"\n#import sys\nimport ipdb\nimport pandas as pd\nimport geoip2.database\n\n#sys.path.append(\"..\")\nfrom package.conver import conver\n\n\nclass baiduGps():\n def __init__(self ):\n pass\n\n\n def getCityNameByIp(self, ip_address):\n db = ipdb.City(\"./package/bdgps/ipipfree.ipdb\")\n region_name = db.find_info(ip_address, \"CN\").region_name\n city_name = db.find_info(ip_address, \"CN\").city_name\n return {\n \"region_name\": region_name,\n \"city_name\": city_name\n }\n\n def ip2gps(self, ip_address):\n # fixed: take the IP as a parameter instead of reading self.ip_address, which was never set\n reader = geoip2.database.Reader('./package/bdgps/GeoLite2-City.mmdb')\n response = reader.city(ip_address)\n reader.close()\n if response.location.latitude:\n bd = conver.gcj02tobd09(response.location.longitude, response.location.latitude)\n return {\n \"lat\": bd[1],\n \"lng\": bd[0]\n }\n return False\n\n def getBaiDuGpsByPosition(self, ip_address): # Baidu GPS\n position = self.getCityNameByIp(ip_address)\n region_name = position[\"region_name\"]\n city_name = position[\"city_name\"]\n return self.getBaiduGpsByProvinceAndCity(region_name, city_name, ip_address)\n\n def getBaiduGpsByProvinceAndCity(self, region_name, city_name, ip_address=None):\n sFileName = \"./package/bdgps/ipToAddress2 (1).csv\" # path to the city CSV file\n data = pd.read_csv(sFileName)\n for index in range(len(data['city'])):\n if region_name in data['city'][index] and city_name in data['city'][index]:\n # print(data['city'][index])\n return {\n \"province\":region_name,\n \"city\":city_name,\n \"lat\": data['lat'][index],\n \"lng\": data['lng'][index]\n }\n # fall back to a GeoIP lookup when the city is not in the CSV and an IP is available\n lat = self.ip2gps(ip_address) if ip_address else False\n if lat:\n return {\n \"province\":region_name,\n \"city\":city_name,\n \"lat\": lat['lat'],\n \"lng\": lat['lng']\n }\n return False\n\n\n","sub_path":"ipToAddress/ipToAddress/package/bdgps/baiduGps.py","file_name":"baiduGps.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"461530207","text":"import numpy as np\nfrom whales.modules.features_extractors.feature_extraction import FeatureExtraction\n\n\nclass ZeroCrossingRate(FeatureExtraction):\n def __init__(self, logger=None):\n super().__init__(logger)\n self.description = \"Zero crossing rate\"\n self.needs_fitting = False\n self.parameters = {}\n\n def method_transform(self):\n data = self.parameters[\"data\"]\n signs = np.sign(data)\n sign_change = np.array(signs[:, 1:] != signs[:, :-1]).astype(int)\n res = np.nansum(sign_change, axis=1) / data.shape[1]\n return res.reshape(1, -1)\n\n\nPipelineMethod = ZeroCrossingRate\n","sub_path":"src/whales/modules/features_extractors/zero_crossing_rate.py","file_name":"zero_crossing_rate.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
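A quick numeric check of the zero-crossing-rate formula above (added sketch; standalone numpy, not wired into the FeatureExtraction pipeline):

import numpy as np

data = np.array([[1.0, -1.0, 2.0, 2.0]])           # one signal, 4 samples
signs = np.sign(data)
changes = (signs[:, 1:] != signs[:, :-1]).astype(int)
print(np.nansum(changes, axis=1) / data.shape[1])  # [0.5]: 2 sign changes / 4 samples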
+{"seq_id":"233470028","text":"import os\n\n# pylint: disable=unused-wildcard-import, wildcard-import\nfrom .common import *\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'THIS_IS_VERY_SECRET'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = ['*']\n\n# Database\n# https://docs.djangoproject.com/en/2.2/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, '..', 'db.sqlite3'),\n }\n}\n\nSESSION_COOKIE_SECURE = False\n\nCSRF_COOKIE_SECURE = False\n\nSECURE_SSL_REDIRECT = False\n","sub_path":"app/app/settings/development.py","file_name":"development.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"557297214","text":"def minmax(data):\n '''\n Write a short Python function, minmax(data), that takes a sequence of\n one or more numbers, and returns the smallest and largest numbers, in the\n form of a tuple of length two. Do not use the built-in functions min or\n max in implementing your solution.\n >>> minmax([1,3,5,3,2,1,0,-1,2,6,3])\n (-1, 6)\n >>> minmax([1,393,45,303,574,5,5,9])\n (1, 574)\n >>> minmax([10,True, 500, 28, False, 12])\n (False, 500)\n '''\n # single pass over the data; sorting first would defeat the point of the exercise\n smallest = data[0]\n largest = data[0]\n for n in data:\n smallest = n if n < smallest else smallest\n largest = n if n > largest else largest\n return (smallest, largest)\n","sub_path":"data-structures-algorithms-python/python-primer/reinforcement/R1_3.py","file_name":"R1_3.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"51592913","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport numpy as np\nfrom osgeo import gdal\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(color_codes=True)\nget_ipython().magic(u'matplotlib inline')\n\n\n# In[2]:\n\nr2_0701 = r'D:\\tmp\\ndai_out\\R2\\window7horizon1//NDAI_R2_.tif'\nds0701 = gdal.Open(r2_0701).ReadAsArray()\nds0701 = ds0701[~np.isnan(ds0701)]\n\n\n# In[3]:\n\nr2_1407 = r'D:\\tmp\\ndai_out\\R2\\window14horizon7//NDAI_R2_.tif'\nds1407 = gdal.Open(r2_1407).ReadAsArray()\nds1407 = ds1407[~np.isnan(ds1407)]\n\n\n# In[4]:\n\nr2_2814 = r'D:\\tmp\\ndai_out\\R2\\window28horizon14//NDAI_R2_.tif'\nds2814 = gdal.Open(r2_2814).ReadAsArray()\nds2814 = ds2814[~np.isnan(ds2814)]\n\n\n# In[22]:\n\nplt.figure(figsize=(8,4))\nfig = sns.kdeplot(ds0701, bw = 0.01, shade=True, label='forecast horizon 01 days (forecast window 07 days)')\nfig = sns.kdeplot(ds1407, bw = 0.01, shade=True, label='forecast horizon 07 days (forecast window 14 days)')\nfig = sns.kdeplot(ds2814, bw = 0.01, shade=True, label='forecast horizon 14 days (forecast window 28 days)')\nplt.xlim(0,1)\nplt.legend(loc=2)\nplt.title('Univariate distribution of $R^2$ values over Inner-Mongolia region')\nplt.xlabel('$R^2$ values')\nplt.ylabel('Normalised frequency')\nplt.savefig(r'D:\\tmp\\ndai_out\\R2//R2_distribution_graph.png', dpi=200)\nplt.show()\n\n\n# In[ ]:\n\n\n\n","sub_path":"2015/Untitled.py","file_name":"Untitled.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"204688371","text":"\"\"\"\ndnpdata object for storing N-dimensional data with coordinates\n\"\"\"\nfrom collections.abc import MutableMapping\n\nimport numpy as np\n\nfrom .core import nddata\nfrom .version import __version__\n\nversion = __version__\n\ncore_attrs_list = [\"nmr_frequency\"]\n\nnp.set_printoptions(threshold=15)\n\n\nclass dnpdata(nddata.nddata_core):\n \"\"\"\n dnpdata Class for handling dnp data\n\n The dnpdata class is inspired by the pyspecdata nddata object, which handles n-dimensional data, axes, and other relevant information together.\n\n This class is designed to handle data and axes together so that NMR processing can be performed easily.\n\n Attributes:\n values (numpy.ndarray): Numpy Array containing data\n coords (list): List of numpy arrays containing axes of data\n dims (list): List of axes labels for data\n attrs (dict): Dictionary of parameters for data\n\n \"\"\"\n\n def __init__(self, values=np.r_[[]], coords=[], dims=[], attrs={}, procList=[]):\n \"\"\"\n dnpdata Class __init__ method\n\n Args:\n values (numpy.ndarray): array of data values\n coords (list): list of axes\n dims (list): list of strings which are names of axes\n attrs (dict): dictionary 
of parameters\n \"\"\"\n super().__init__(values, dims, coords, attrs)\n self.version = version\n self.proc_attrs = []\n self.max_print_attrs = 5\n self.print_values = False\n\n @property\n def _constructor(self):\n return dnpdata\n\n def __repr__(self):\n \"\"\"\n Representation of dnpdata object\n \"\"\"\n return \"nddata(values = {}, coords = {}, dims = {}, attrs = {})\".format(\n repr(self.values), repr(self.coords), repr(self.dims), repr(self.attrs)\n )\n\n def __str__(self):\n \"\"\"\n String representation of dnpdata object\n \"\"\"\n\n string = \"values:\\n\\t\"\n string += \" x \".join(map(str, self.shape))\n\n string += \" {} ({})\\n\".format(type(self.values).__name__, self.values.dtype)\n\n if self.print_values is True:\n string += str(self.values) + \"\\n\"\n\n string += \"dims:\\n\\t\"\n\n string += \"{}\\n\".format(self.dims)\n\n string += \"coords:\\n\\t\"\n string += \"\\n\\t\".join(map(repr, self.coords))\n\n string += \"\\n\"\n\n string += \"attrs:\\n\"\n\n for ix, key in enumerate(self.attrs.keys()):\n if ix == self.max_print_attrs:\n string += \"\\t+%i attrs\" % (len(self.attrs) - self.max_print_attrs)\n break\n string += \"\\t{!r}: {!r}\\n\".format(key, self.attrs[key])\n\n return string\n\n def add_proc_attrs(self, proc_attr_name, proc_dict):\n \"\"\"\n Stamp processing step to dnpdata object\n\n Args:\n proc_attr_name (str): Name of processing step (e.g. \"fourier_transform\"\n proc_dict (dict): Dictionary of processing parameters for this processing step.\n \"\"\"\n if not isinstance(proc_attr_name, str):\n raise ValueError(\"Processing step name must be string\")\n if not isinstance(proc_dict, dict):\n raise ValueError(\"Processing dictionary must be dictionary\")\n\n self.proc_attrs.append((proc_attr_name, proc_dict))\n\n def phase(self):\n \"\"\"\n Return phase of dnpdata object\n\n Returns:\n phase (float,int): phase of data calculated from sum of imaginary\n divided by sum of real components\n \"\"\"\n return np.arctan(np.sum(np.imag(self.values)) / np.sum(np.real(self.values)))\n\n def squeeze(self):\n \"\"\"\n Remove all length 1 dimensions from data\n\n .. warning::\n Axes information is lost\n\n Example:\n data.squeeze()\n \"\"\"\n remove_axes = []\n for axes_ix, axes_value in enumerate(self.coords):\n if len(axes_value) == 1:\n remove_axes.append(axes_ix)\n\n reverse_remove_axes = remove_axes[::-1]\n for index_ix, index_value in enumerate(reverse_remove_axes):\n self.coords.pop(index_value)\n self.dims.pop(index_value)\n self.values = np.squeeze(self.values)\n\n def window(self, dim=\"t2\", linewidth=10, inplace=False) -> \"dnpdata\":\n \"\"\"Apply Apodization to data down given dimension\n\n See dnplab.dnpNMR.window for full documentation\n\n See Also:\n dnplab.dnpNMR.window\n\n Example:\n\n .. 
code-block:: python\n\n dnpdata = dnpdata.window(dim=\"t2\", linewidth=10)\n\n # For inplace operation to save memory\n dnpdata.window(dim=\"t2\", linewidth=10, inplace=True)\n\n \"\"\"\n reshape_size = [1 for k in self.dims]\n reshape_size[self.index(dim)] = len(self.coords[dim])\n # Must include factor of 2 in exponential to get correct linewidth\n window_array = np.exp(-1.0 * self.coords[dim] * 2.0 * linewidth).reshape(\n reshape_size\n )\n window_array = np.ones_like(self.values) * window_array\n # multiply out of place so self.values is not mutated when inplace=False\n # (the original `values = self.values; values *= window_array` aliased the array)\n values = self.values * window_array\n\n if inplace:\n self.values = values\n return self\n else:\n return self._constructor(\n values=values,\n coords=self.coords._coords,\n dims=self.dims,\n attrs=self.attrs,\n procList=self.proc_attrs,\n )\n\n\nclass dnpdata_collection(MutableMapping):\n \"\"\"\n Dictionary-like workspace object for storing dnpdata objects\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n\n Args:\n *args:\n **kwargs:\n\n Examples:\n >>> raw = dnpdata()\n >>> dnpdata_collection(raw)\n dnpdata_collection({'raw': nddata(values = array([], dtype=float64), coords = nddata_coord_collection([]), dims = [], attrs = {})})\n >>> dnpdata_collection({\"raw\": raw, \"attrs\": {}})\n dnpdata_collection({'raw': nddata(values = array([], dtype=float64), coords = nddata_coord_collection([]), dims = [], attrs = {}), 'attrs': {}})\n\n \"\"\"\n self._processing_buffer = \"proc\"\n\n self.__data_dict = {}\n\n if len(args) == 0:\n return\n\n elif len(args) == 1:\n if isinstance(args[0], dnpdata):\n self.__data_dict.__setitem__(\"raw\", args[0])\n elif isinstance(args[0], dict):\n data_dict = args[0]\n for key in data_dict:\n if isinstance(data_dict[key], (dnpdata, dict)):\n self.__data_dict[key] = data_dict[key]\n else:\n raise TypeError(\"Each type in dict must be dnpdata or dict\")\n else:\n raise TypeError(\"Argument must be type dnpdata\")\n\n elif len(args) == 2:\n if isinstance(args[0], str) and isinstance(args[1], (dnpdata, dict)):\n self.__data_dict[args[0]] = args[1]\n else:\n raise TypeError(\n \"If two arguments, first argument must be str and 2nd argument must be dnpdata or dict\"\n )\n\n else:\n raise TypeError(\"Arguments not understood\")\n\n def __getitem__(self, key):\n return self.__data_dict[key]\n\n def __setitem__(self, key, value):\n if (not isinstance(key, str)) or (not isinstance(value, (dict, dnpdata))):\n raise TypeError(\"Key must be string and value must be dnpdata or dict\")\n self.__data_dict[key] = value\n\n def __delitem__(self, key):\n del self.__data_dict[key]\n\n def __iter__(self):\n return iter(self.__data_dict)\n\n def __len__(self):\n return len(self.__data_dict)\n\n @property\n def _constructor(self):\n \"\"\"Used when methods return a dnpdata_collection instance\"\"\"\n return dnpdata_collection\n\n @property\n def processing_buffer(self):\n return self._processing_buffer\n\n @processing_buffer.setter\n def processing_buffer(self, new_processing_buffer):\n \"\"\"\"\"\"\n if isinstance(new_processing_buffer, str):\n self._processing_buffer = new_processing_buffer\n else:\n raise TypeError(\n \"Processing buffer must be type str, not %s\"\n % str(type(new_processing_buffer))\n )\n\n def copy(self, key, new_key=None):\n \"\"\"\n Copy data from key to new_key. 
If new_key is not given, by default\n key will be copied to processing buffer\n\n Args:\n key (str): Key to be copied\n new_key (str, None): New key for copied data\n \"\"\"\n\n if new_key is None:\n new_key = self.processing_buffer\n\n self[new_key] = self[key].copy()\n\n def move(self, key, new_key):\n \"\"\"\n Move data from key to new_key\n\n Args:\n key (str): Name of data to move\n new_key (str): Name of new key to move data\n \"\"\"\n self[new_key] = self.pop(key)\n\n def pop(self, key):\n \"\"\"Pop key. Removes data corresponding to key.\"\"\"\n return self.__data_dict.pop(key)\n\n def dict(self):\n \"\"\"Return dictionary for storing data in dnpdata_collection\"\"\"\n return self.__data_dict\n\n def clear(self):\n \"\"\"Removes all items\"\"\"\n self.__data_dict.clear()\n\n get = __getitem__\n\n def items(self):\n \"\"\"Return items\"\"\"\n return self.__data_dict.items()\n\n def keys(self):\n \"\"\"Return keys.\"\"\"\n return self.__data_dict.keys()\n\n def popitem(self):\n \"\"\"\n Pops item from end of dnpdata_collection\n\n Returns:\n tuple: key, item pair that was removed\n \"\"\"\n return self.__data_dict.popitem()\n\n def values(self):\n \"\"\"Return Values\"\"\"\n return self.__data_dict.values()\n\n def add(self, key, data):\n \"\"\"\n Adds new data\n\n Args:\n key (str): key corresponding to new data\n data (dnpdata): data object corresponding to key\n \"\"\"\n if (not isinstance(key, str)) or (not isinstance(data, (dnpdata, dict))):\n raise TypeError(\"add takes two arguments, a string and dnplab.dnpdata type\")\n self.__data_dict[key] = data\n\n def __repr__(self):\n return \"dnpdata_collection({})\".format(self.__data_dict)\n\n def __str__(self):\n string = \"\"\n\n for key in self.keys():\n string += \"-\" * (2 + len(repr(key))) + \"\\n\"\n string += \"|\" + repr(key) + \"|\" + \"\\n\"\n string += \"-\" * (2 + len(repr(key))) + \"\\n\"\n string += str(self.__data_dict[key]) + \"\\n\"\n string += \"\\n\\n\"\n\n return string\n\n def window(self, processing_buffer=\"proc\", inplace=False, **kwargs):\n \"\"\"\n\n Args:\n processing_buffer:\n inplace:\n **kwargs:\n\n Returns:\n\n Examples:\n >>> ws_original = dnpdata_collection(\n ... {\n ... \"raw\": dnpdata(\n ... np.array([3.0, 2.0, 1.0]),\n ... dims=[\"t2\"],\n ... coords=[np.r_[1, 2, 3]],\n ... )\n ... }\n ... )\n >>> ws_original.copy(\"raw\", \"proc\")\n\n >>> # default processing_buffer = 'proc'\n ... ws_windowed = ws_original.window(dim=\"t2\", linewidth=1.0)\n >>> ws_windowed[\"raw\"] == ws_original[\"raw\"]\n True\n >>> ws_windowed[\"proc\"] == ws_original[\"proc\"]\n False\n\n >>> # default inplace = False, a new instance is generated\n ... ws_windowed is ws_original\n False\n >>> # To save memory when handling large dataset, set inplace to True\n ... 
ws_windowed = ws_original.window(dim=\"t2\", linewidth=1.0, inplace=True)\n >>> ws_windowed is ws_original\n True\n\n \"\"\"\n values = self[processing_buffer].window(inplace=inplace, **kwargs)\n if inplace:\n self[processing_buffer] = values\n return self\n else:\n kw = {k: v for k, v in self.__data_dict.items() if k != processing_buffer}\n kw.update({processing_buffer: values})\n return self._constructor(kw)\n\n\ndef create_workspace(*args):\n \"\"\"\n Create a workspace (dnpdata_collection)\n\n Args:\n args: Arguments to send to __init__ method in dnpdata_collection\n\n Returns:\n dnpdata_collection: workspace object\n \"\"\"\n return dnpdata_collection(*args)\n","sub_path":"dnplab/dnpData.py","file_name":"dnpData.py","file_ext":"py","file_size_in_byte":12559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"35271193","text":"import numpy as np\nimport cv2 as cv\nimport os\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.utils.np_utils import to_categorical\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.optimizers import Adam\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nimport pickle\n\n\n\n\npath = 'TrainingData'\ntestRatio = 0.15\nvalidationRatio = 0.15\nimageDimensions = (50,25,3)\n\nimages = []\ncharacters = []\nmyList = os.listdir(path)\nprint(len(myList))\nnumberOfClasses = len(myList)\n\nbatchSize = 50\nepochs = 10\nstepsPerEpoch = 1000\n\n\n###################################### load images and their labels\nfor x in range(0,numberOfClasses):\n PicList = os.listdir(path+\"/\"+ myList[x])\n for y in PicList:\n currentImg = cv.imread(path+\"/\"+ myList[x] +\"/\"+y)\n #cv.imshow('erg', currentImg)\n currentImg = cv.resize(currentImg,(imageDimensions[1],imageDimensions[0]))\n #cv.imshow('erg', currentImg)\n images.append(currentImg)\n characters.append(x)\n print(myList[x], end = \" \")\nprint(\"number of loaded images:\", len(images))\n\nimages = np.array(images)\ncharacters = np.array(characters)\n\nprint(images.shape)\nprint(characters.shape)\n\n################################################# Splitting the data\nX_train,X_test,y_train,y_test = train_test_split(images, characters, test_size=testRatio)\nX_train,X_validation, y_train,y_validation = train_test_split(X_train,y_train,test_size=validationRatio)\n\nprint(X_train.shape)\nprint(X_test.shape)\nprint(X_validation.shape)\n
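# Added note on the effective split sizes: test_size=0.15 takes 15% of all images,
# and the second split takes 15% of the remaining 85%, so validation is roughly
# 12.75% of the full set and training roughly 72.25%.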
\nnumberOfSamples = []\nfor x in range(0,numberOfClasses):\n #print(\"class {}:\".format(x), len(np.where(y_train==x)[0]))\n numberOfSamples.append(len(np.where(y_train==x)[0]))\nprint(numberOfSamples)\n\nplt.figure(figsize=(10,5))\nplt.bar(range(0,numberOfClasses),numberOfSamples)\nplt.title('Number of images in each class')\nplt.xlabel(\"Class ID\")\nplt.ylabel(\"Number of images\")\nplt.show()\n\ndef preprocess(img):\n img = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\n img = cv.equalizeHist(img)\n img = img/255\n return img\n\nX_train = np.array(list(map(preprocess, X_train)))\nX_test = np.array(list(map(preprocess, X_test)))\nX_validation = np.array(list(map(preprocess, X_validation)))\n\nX_train = X_train.reshape(X_train.shape[0],X_train.shape[1],X_train.shape[2], 1)\nX_test = X_test.reshape(X_test.shape[0],X_test.shape[1],X_test.shape[2], 1)\nX_validation = X_validation.reshape(X_validation.shape[0],X_validation.shape[1],X_validation.shape[2], 1)\n\ndataGen = ImageDataGenerator(width_shift_range=0,\n height_shift_range=0,\n zoom_range=0.1,\n shear_range=0.1,\n rotation_range=10)\ndataGen.fit(X_train)\n\ny_train = to_categorical(y_train, numberOfClasses)\ny_test = to_categorical(y_test, numberOfClasses)\ny_validation = to_categorical(y_validation, numberOfClasses)\n\ndef myModel():\n noOfFilters = 30\n sizeOfFilters1 = (5,5)\n sizeOfFilters2 = (3,3)\n sizeOfPool = (2,2)\n noOfNodes = 300\n\n model = Sequential()\n model.add((Conv2D(noOfFilters, sizeOfFilters1, input_shape=(imageDimensions[0], imageDimensions[1], 1), padding='same', activation='relu')))\n #model.add((Conv2D(noOfFilters, sizeOfFilters1, padding='same', activation='relu')))\n model.add(MaxPooling2D(pool_size=sizeOfPool, padding='same'))\n model.add((Conv2D(noOfFilters//2, sizeOfFilters2, padding='same', activation='relu')))\n #model.add((Conv2D(noOfFilters//2, sizeOfFilters2, padding='same', activation='relu')))\n model.add(MaxPooling2D(pool_size=sizeOfPool, padding='same'))\n model.add(Dropout(0.5))\n\n model.add(Flatten())\n model.add(Dense(noOfNodes, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(numberOfClasses, activation='softmax'))\n model.compile(Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])\n return model\n\nmodel = myModel()\nprint(model.summary())\n\nhistory = model.fit_generator(dataGen.flow(X_train, y_train, batch_size=batchSize),\n steps_per_epoch=stepsPerEpoch,\n epochs=epochs,\n validation_data=(X_validation, y_validation),\n shuffle=1)\n\nplt.figure(1)\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.legend(['Training','Validation'])\nplt.title('Loss')\nplt.xlabel('epochs')\n\nplt.figure(2)\nplt.plot(history.history['accuracy'])\nplt.plot(history.history['val_accuracy'])\nplt.legend(['Training','Validation'])\nplt.title('Accuracy')\nplt.xlabel('epochs')\nplt.show()\n\nscore = model.evaluate(X_test, y_test, verbose=0)\nprint(\"Test loss= \", score[0])\nprint(\"Test Accuracy = \", score[1])\n\nwith open (\"trained_model_2konvoluce.plk\",\"wb\") as modelPickle:\n pickle.dump(model, modelPickle)\n modelPickle.close()\n\n","sub_path":"CNN_Training.py","file_name":"CNN_Training.py","file_ext":"py","file_size_in_byte":5029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"357502711","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 8 15:49:44 2020\n\n@author: Peng\n\"\"\"\n\n#%%\nimport os\nimport pandas as pd\n\nfrom random import shuffle\nfrom keras.models import Model\nfrom sklearn.manifold import TSNE\nfrom numpy import zeros\nfrom keras.models import Sequential\nfrom keras.layers import LSTM, Masking, Dense, Bidirectional, Dropout, MaxPooling1D, Conv1D, Activation\nfrom keras.optimizers import Adam#, Nadam\n\ndef seq_one_hot(seqs,seq_type='aa',max_len=None,seq_resize=True):\n# =============================================================================\n# one-hot encodes sequences for use in nn modeling.\n# seqs -- a list where each element is a biological sequence as a string \n# seq_type -- specifies type of biological sequence. Supported options are ['aa', 'dna', 'rna', 'dna_iupac']. \n# default: 'aa'\n# max_len -- specifies the length of sequences. Default is None. 
This takes the maximum length sequence as the max.\n# seq_resize -- This option resizes sequences using tensorflow.image resize \n# =============================================================================\n\n #create dictionary matching sequences positions to feature index for one-hot encoded matrix\n if seq_type == 'aa':\n seq_dict= {\"A\": 0,\n \"C\": 1,\n \"D\": 2,\n \"E\": 3,\n \"F\": 4,\n \"G\": 5,\n \"H\": 6,\n \"I\": 7,\n \"K\": 8,\n \"L\": 9,\n \"M\": 10,\n \"N\": 11,\n \"P\": 12,\n \"Q\": 13,\n \"R\": 14,\n \"S\": 15,\n \"T\": 16,\n \"V\": 17,\n \"W\": 18,\n \"Y\": 19,\n \"X\": 20,\n \"B\": 21,\n \"Z\": 22,\n \"J\": 23,\n \"U\": 24,\n \"O\": 25}\n n_letter=26\n elif seq_type == 'dna':\n seq_dict= {\"A\": 0,\n \"T\": 1,\n \"C\": 2,\n \"G\": 3}\n n_letter=4\n elif seq_type == 'dna_iupac':\n seq_dict= {\"A\": 0,\n \"T\": 1,\n \"C\": 2,\n \"G\": 3,\n \"Y\": 4,\n \"S\": 5,\n \"W\": 6,\n \"K\": 7,\n \"M\": 8,\n \"B\": 9,\n \"D\": 10,\n \"H\": 11,\n \"V\": 12,\n \"N\": 13,\n \"-\": 14}\n n_letter=15\n \n else:\n return \"Supported seq_type options include: ['aa', 'dna', 'rna', 'dna_iupac']\"\n \n #find maximum length sequence\n if max_len == None:\n n=[len(seq) for seq in seqs]\n max_len = max(n)\n \n #pre-define numpy matrix based on length and number of sequences\n one_hot_matrix=zeros(shape=(len(seqs),max_len,n_letter),dtype='float') \n \n #indexing matching bases/aa's to dictionary and populating one_hot_matrix\n #feature index is retrieved with dictionary\n if seq_resize == True:\n from tensorflow.image import resize\n \n i=0\n for seq in seqs: #loop through each sequence in list seqs\n j=0\n tmp_vector=zeros(shape=(1,len(seq),n_letter,1)) #define 4-D tensor with 1 sample and 1 channel\n for letter in seq: #loop through each base/aa in sequence\n indx=seq_dict[letter] #match letter to dictionary\n tmp_vector[0,j,indx,0]=1 \n j+=1\n one_hot_matrix[i,:,:]=resize(tmp_vector,size=(max_len,n_letter))[0,:,:,0].numpy() #reshape 4-D tensor to 2D\n i+=1\n else:\n i=0\n for seq in seqs:\n j=0\n for letter in seq:\n indx=seq_dict[letter] #match letter to dictionary\n one_hot_matrix[i,j,indx]=1\n j+=1\n if j == max_len: break\n i+=1\n \n return one_hot_matrix\n\n#%%\n#load data from directory \ndef load_seq_dataframe(dir_path):\n \n seq_df=pd.DataFrame()\n for filename in os.listdir(dir_path):\n new_csv=dir_path+filename\n seq_df=seq_df.append(pd.read_csv(new_csv))\n \n return seq_df\n#%%\n#model architecture for amino acids\ndef original_blstm(num_classes, num_letters, sequence_length, embed_size=50):\n \n model = Sequential()\n model.add(Conv1D(input_shape=(sequence_length, num_letters), filters=100, kernel_size=26, padding=\"valid\", activation=\"relu\"))\n model.add(MaxPooling1D(pool_size=13, strides=13))\n model.add(Masking(mask_value=0))\n model.add(Dropout(0.2))\n model.add(Bidirectional(LSTM(320, activation=\"tanh\", return_sequences=True)))\n model.add(Dropout(0.5))\n model.add(LSTM(embed_size, activation=\"tanh\"))\n model.add(Dense(num_classes, activation=None, name=\"AV\"))\n model.add(Activation(\"softmax\"))\n model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.001), metrics=['accuracy'])\n return model\n#%%\ndef regression_blstm(num_letters, sequence_length, embed_size=50):\n \n model = Sequential()\n model.add(Conv1D(input_shape=(sequence_length, num_letters), filters=100, kernel_size=26, padding=\"valid\", activation=\"relu\"))\n model.add(MaxPooling1D(pool_size=13, strides=13))\n model.add(Masking(mask_value=0))\n model.add(Dropout(0.2))\n 
model.add(Bidirectional(LSTM(320, activation=\"tanh\", return_sequences=True)))\n model.add(Dropout(0.5))\n model.add(LSTM(embed_size, activation=\"tanh\"))\n model.add(Dense(1, activation=None))\n model.compile(loss='mse', optimizer='rmsprop', metrics=['mae'])\n return model\n\ndef dna_blstm(num_classes, num_letters, sequence_length, embed_size=256):\n \n model = Sequential()\n model.add(Conv1D(input_shape=(sequence_length, num_letters), filters=26, kernel_size=3, strides=3, padding=\"valid\", activation=\"relu\"))\n model.add(Conv1D(filters=320, kernel_size=26, padding=\"valid\", activation=\"relu\"))\n model.add(MaxPooling1D(pool_size=13, strides=13))\n model.add(Dropout(0.2))\n model.add(Bidirectional(LSTM(320, activation=\"tanh\", return_sequences=True)))\n model.add(Dropout(0.5))\n model.add(LSTM(embed_size, activation=\"tanh\"))\n model.add(Dense(num_classes, activation=None, name=\"AV\"))\n model.add(Activation(\"softmax\"))\n model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.001), metrics=['accuracy'])\n return model\n\n#%%\ndef aa_blstm(num_classes, num_letters, sequence_length, embed_size=5000):\n \n model = Sequential()\n # model.add(Conv1D(input_shape=(sequence_length, num_letters), filters=100, kernel_size=26, padding=\"valid\", activation=\"relu\"))\n # model.add(MaxPooling1D(pool_size=13, strides=13))\n # model.add(Masking(mask_value=0))\n # model.add(Dropout(0.2))\n # model.add(Embedding(num_letters,10000))\n # model.add(SpatialDropout1D(0.2))\n model.add(Bidirectional(LSTM(5000, dropout=0.2, recurrent_dropout=0.2, activation=\"tanh\", return_sequences=True)))\n model.add(Dropout(0.2))\n model.add(LSTM(embed_size, activation=\"tanh\"))\n model.add(Dense(num_classes, activation=None, name=\"AV\"))\n model.add(Activation(\"softmax\"))\n model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.001), metrics=['accuracy'])\n return model\n\n#%%\n\n\ndef tsne_non_trained_classes(model,data,write_path,layer,max_len,seq_type='aa',seq_resize=True):\n \n embed_model = Model(inputs=model.input, outputs=model.get_layer(layer).output)\n embed_model.summary()\n \n new_seq=seq_one_hot(data['sequence'],seq_type=seq_type,max_len=max_len,seq_resize=seq_resize)\n embed = embed_model.predict(new_seq, batch_size=100, verbose=1)\n tsne = TSNE(n_components=2, random_state=0)\n xx = tsne.fit_transform(embed)\n \n data['comp1']=xx[:,0]\n data['comp2']=xx[:,1]\n \n data.to_csv(write_path,sep='\\t')\n\ndef randomize_groups(df,x,f=1):\n# =============================================================================\n# shuffles dependent variables (columns) with respect to a dataframe\n# df -- a dataframe\n# x -- list containing columns which are not shuffled--i.e., independent columns (string)\n# f -- fraction of rows whose dependent columns are shuffled (float from 0 to 1) \n# =============================================================================\n \n if f>1:\n print(\"f ranges 0 to 1--f was set to 1\")\n f=1\n elif f<0:\n print(\"f ranges 0 to 1--f was set to 0\")\n f=0\n \n # fixed: sample the fraction f that WILL be shuffled (the original kept frac=f fixed,\n # which inverted the meaning of f documented above)\n index_shuffle=df.sample(frac=f).index\n df_tmp=df.loc[index_shuffle].reset_index(drop=True)\n df=df.drop(index_shuffle)\n \n for col in df_tmp.columns:\n if col in x: continue \n df_tmp[col]=df_tmp[col].sample(frac=1).reset_index(drop=True)\n \n return(df.append(df_tmp))\n \n ","sub_path":"scripts/python/tmr/cnn_functions.py","file_name":"cnn_functions.py","file_ext":"py","file_size_in_byte":8851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
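A short usage sketch for the two helpers above (hypothetical sequences and dataframe; assumes seq_resize=False so TensorFlow is not needed):

import pandas as pd

# one-hot encode two DNA sequences, zero-padded to the longest one
mat = seq_one_hot(["ACGT", "AC"], seq_type='dna', seq_resize=False)
print(mat.shape)  # (2, 4, 4): 2 sequences x 4 positions x 4 letters

# shuffle the label column for half of the rows, keeping 'sequence' fixed
df = pd.DataFrame({'sequence': ['ACGT', 'AC', 'GG'], 'label': [0, 1, 1]})
shuffled = randomize_groups(df, x=['sequence'], f=0.5)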
+{"seq_id":"283123918","text":"from evennia import DefaultRoom\nfrom commands.ship import CmdSetQuarters\nfrom world.map import Map\n\nclass QuartersObject(DefaultRoom):\n\n def at_object_creation(self):\n self.db.parentShip = \"\"\n self.cmdset.add_default(CmdSetQuarters)\n \n def return_appearance(self, looker):\n map = \"%s\\n\" % Map(looker).show_map()\n text = super(QuartersObject, self).return_appearance(looker)\n text = map + text\n #used to insert base description into an object\n desc = \"\\n|nThe crew quarters of a small transport vessel.\\n\"\n if \"\\n\" in text:\n first_line, rest = text.split(\"\\n\", 1)\n text = first_line + desc + rest\n else:\n text = text + \"\\n\" + desc\n return text\n ","sub_path":"typeclasses/shipQuarters.py","file_name":"shipQuarters.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"13033421","text":"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Routine for decoding the CIFAR-10 binary file format.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\nimport numpy as np\n\n# Process images of this size. Note that this differs from the original CIFAR\n# image size of 32 x 32. 
If one alters this number, then the entire model\n# architecture will change and any model would need to be retrained.\nIMAGE_SIZE = 224\n\n# Global constants describing the CIFAR-10 data set.\nNUM_CLASSES = 10\nNUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000\nNUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000\nclassCount = 2\nclustCount = 6\nfileappend = '_3perc.bin'\n#fileappend = '.bin'\n#fileappend = '_blueness_super.bin'\n#fileappend = 'if it uses this is will crash so I will notice'\n#PERC = 0.1\n\n#t = np.zeros((11,2,6))\n#t[1,0,0] = 1\n#t[2,0,1] = 1\n#t[3,1,0] = 1\n#t[4,1,1] = 1\n#t[5,1,2] = 1\n#t[6,1,3] = 1\n#t[7,1,4] = 1\n#t[8,1,5] = 1\n#t[9,0,2] = 1\n#t[10,0,3] = 1\n\n#lbl_lookup = {\n# tf.constant(0): tf.constant(t[0]),\n# tf.constant(1): tf.constant(t[1]),\n# tf.constant(2): tf.constant(t[2]),\n# tf.constant(3): tf.constant(t[3]),\n# tf.constant(4): tf.constant(t[4]),\n# tf.constant(5): tf.constant(t[5]),\n# tf.constant(6): tf.constant(t[6]),\n# tf.constant(7): tf.constant(t[7]),\n# tf.constant(8): tf.constant(t[8]),\n# tf.constant(9): tf.constant(t[9]),\n# tf.constant(10): tf.constant(t[10])\n#}\n\nVGG_MEAN = [103.939, 116.779, 123.68]\n\n\"\"\" function read_cifar10\nfilename_queue: files to read\nswitchbytes: 1 when reading which images should be labelled in the file, 0 otherwise\nsuperclassbytes: 1 when reading superclass for each image in the file, 0 otherwise\n\nIf you get an error from the gather op complaining that the labels are too large, this mapping should probably be changed.\n\"\"\"\ndef read_cifar10(filename_queue,switchbytes=1,superclassbytes=0): #NOTE\n \"\"\"Reads and parses examples from CIFAR10 data files.\n\n Recommendation: if you want N-way read parallelism, call this function\n N times. This will give you N independent Readers reading different\n files & positions within those files, which will give better mixing of\n examples.\n\n Args:\n filename_queue: A queue of strings with the filenames to read from.\n\n Returns:\n An object representing a single example, with the following fields:\n height: number of rows in the result (32)\n width: number of columns in the result (32)\n depth: number of color channels in the result (3)\n key: a scalar string Tensor describing the filename & record number\n for this example.\n label: an int32 Tensor with the label in the range 0..9.\n uint8image: a [height, width, depth] uint8 Tensor with the image data\n \"\"\"\n\n class CIFAR10Record(object):\n pass\n result = CIFAR10Record() \n \n # Dimensions of the images in the CIFAR-10 dataset.\n # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the\n # input format.\n label_bytes = 1 # 2 for CIFAR-100\n labelled_bytes = switchbytes #byte to switch the labels of with partially labelled learning\n superclasslabel_bytes = superclassbytes\n result.height = 32\n result.width = 32\n result.depth = 3\n #['airplane','automobile','bird','cat','deer','dog','frog','horse', 'ship','truck']\n\n# [ 0 1 2 3 4 5 ] superclass1\n# [ 6 7 8 9 10 11] superclass2\n\n supLab = tf.constant([0,0,1,1,1,1,1,1,0,0]) #Man made vs animals\n #subclassLab = tf.constant([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20])\n subclassLab = tf.constant([1,2,7,8,9,10,11,12,3,4])\n #supLab = tf.constant([0,0,1,1,1,1,1,0,0,0]) #Man made+horse vs animals-horse\n image_bytes = result.height * result.width * result.depth\n # Every record consists of a label followed by the image, with a\n # fixed number of bytes for each.\n record_bytes = label_bytes + image_bytes + labelled_bytes + superclasslabel_bytes\n\n 
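  # Worked size check (added note): with the defaults above (switchbytes=1,
  # superclassbytes=0), record_bytes = 1 + 1 + 32*32*3 = 3074 bytes per example.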
# Read a record, getting filenames from the filename_queue. No\n # header or footer in the CIFAR-10 format, so we leave header_bytes\n # and footer_bytes at their default of 0.\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)\n result.key, value = reader.read(filename_queue)\n\n # Convert from a string to a vector of uint8 that is record_bytes long.\n record_bytes = tf.decode_raw(value, tf.uint8)\n #result.key = tf.decode_raw(result.key, tf.int32)\n \n # The first bytes represent the label, which we convert from uint8->int32.\n result.label = tf.cast(\n tf.strided_slice(record_bytes, [superclasslabel_bytes+labelled_bytes], [superclasslabel_bytes+labelled_bytes+label_bytes]), tf.int32)\n result.switch = tf.cast(\n tf.strided_slice(record_bytes, [superclasslabel_bytes], [superclasslabel_bytes+labelled_bytes]), tf.int32) \n \n if superclasslabel_bytes == 0:\n result.superLabel = tf.cast(tf.gather(supLab,result.label), tf.int32)\n else:\n result.superLabel = tf.cast(\n tf.strided_slice(record_bytes, [0], [superclasslabel_bytes]), tf.int32) \n \n result.subclasslab = tf.cast(tf.gather(subclassLab,result.label), tf.int32)\n #result.superLabel = result.label\n \n # The remaining bytes after the label represent the image, which we reshape\n # from [depth * height * width] to [depth, height, width].\n depth_major = tf.reshape(\n tf.strided_slice(record_bytes, [superclasslabel_bytes+label_bytes+labelled_bytes],\n [superclasslabel_bytes+label_bytes + labelled_bytes + image_bytes]),\n [result.depth, result.height, result.width])\n # Convert from [depth, height, width] to [height, width, depth].\n result.uint8image = tf.transpose(depth_major, [1, 2, 0])\n\n return result\n\n\"\"\" function _generate_image_and_label_batch\nimage: 3-D Tensor of [height, width, 3] of type.float32.\nlabel: 1-D Tensor of type.int32\nsuperLabel: 1-D Tensor of type.int32\nmin_queue_examples: int32, minimum number of samples to retain in the queue that provides of batches of examples.\nbatch_size: Number of images per batch.\nshuffle: boolean indicating whether to use a shuffling queue.\nraw: boolean whether or not to also return the raw image (without scaled colours and such)\nraw_image: the raw image tensor\n\nConstruct a queued batch of images and labels.\n\"\"\"\ndef _generate_image_and_label_batch(image, label,superLabel, min_queue_examples,\n batch_size, shuffle, raw = False, raw_image = None):\n # Create a queue that shuffles the examples, and then\n # read 'batch_size' images + labels from the example queue.\n num_preprocess_threads = 16\n if shuffle:\n images, label_batch, superLabel_batch = tf.train.shuffle_batch(\n [image, label, superLabel],\n batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size,\n min_after_dequeue=min_queue_examples)\n else:\n if not raw:\n images, label_batch, superLabel_batch = tf.train.batch(\n [image, label, superLabel],\n batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size)\n if raw:\n images, raw_images, label_batch, superLabel_batch = tf.train.batch(\n [image, raw_image, label, superLabel],\n batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size)\n return images, raw_images, label_batch, tf.reshape(superLabel_batch, [batch_size])\n\n # Display the training images in the visualizer.\n tf.summary.image('images', images)\n \n print(label_batch)\n return images, label_batch, tf.reshape(superLabel_batch, 
[batch_size])\n\n\"\"\" function distorted_inputs\ndata_dir: Path to the CIFAR-10 data directory.\nbatch_size: Number of images per batch.\npartially_labelled: Boolean indicating use of partial labels\nmatrix_lab: Boolean indicating if the labels should be 2D (for subclasses in ACOL)\nf_append: String appended to all files (for selecting binaries with additional information)\n\nConstruct distorted input for CIFAR training using the Reader ops.\n\"\"\"\ndef distorted_inputs(data_dir, batch_size,partially_labelled=False,matrix_lab=True,f_append=fileappend):\n if not data_dir[-4:] == '.bin':\n print(data_dir[-4:])\n filenames = [os.path.join(data_dir, 'data_batch_%d'%(i))\n for i in xrange(1, 6)]\n filenames = [fname + f_append for fname in filenames]\n else:\n filenames = [data_dir]\n f_append = filenames\n \n print('----- fileappend -------')\n print(f_append)\n print('------------------------')\n\n for f in filenames:\n if not tf.gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n\n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer(filenames)\n\n # Read examples from files in the filename queue.\n read_input = read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n size = tf.constant([IMAGE_SIZE,IMAGE_SIZE])\n height = IMAGE_SIZE\n width = IMAGE_SIZE\n\n # Image processing for training the network. Note the many random\n # distortions applied to the image.\n\n # Randomly flip the image horizontally.\n distorted_image = tf.image.random_flip_left_right(reshaped_image)\n \n # Randomly crop a [height, width] section of the image.\n #distorted_image = tf.random_crop(reshaped_image, [height, width, 3])\n distorted_image = tf.image.resize_images(reshaped_image, size)\n\n # Because these operations are not commutative, consider randomizing\n # the order of their operations.\n #distorted_image = tf.image.random_brightness(distorted_image,\n # max_delta=63)\n #distorted_image = tf.image.random_contrast(distorted_image,\n # lower=0.2, upper=1.8)\n\n # Subtract off the mean and divide by the variance of the pixels.\n #float_image = tf.image.per_image_standardization(distorted_image) #NOTE!!!\n\n # Convert RGB to BGR\n red, green, blue = tf.split(axis=2, num_or_size_splits=3, value=distorted_image)\n assert red.get_shape().as_list()[:] == [IMAGE_SIZE, IMAGE_SIZE, 1]\n assert green.get_shape().as_list()[:] == [IMAGE_SIZE, IMAGE_SIZE, 1]\n assert blue.get_shape().as_list()[:] == [IMAGE_SIZE, IMAGE_SIZE, 1]\n float_image = tf.concat(axis=2, values=[\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ])\n\n #clear labels\n if partially_labelled:\n if matrix_lab:\n # print(read_input.key)\n #read_input.label = lbl_lookup[tf.add(read_input.label,tf.constant(1))*read_input.switch]\n #Makes lbl positive when switch=1 and negative when switch=0, negative values are all zeros after one_hot\n subclasslab_enc = tf.one_hot(read_input.subclasslab * (tf.constant(-1)+read_input.switch*tf.constant(2)) - tf.constant(1), classCount*clustCount)\n read_input.label = tf.reshape(subclasslab_enc,(classCount,clustCount))\n else:\n #read_input.label = tf.one_hot(read_input.label * (tf.constant(-1)+read_input.switch*tf.constant(2)), NUM_CLASSES)\n read_input.label.set_shape([1])\n
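  # Added note on the partial-label trick above: when switch == 0 the index becomes
  # -subclasslab - 1, i.e. negative, and tf.one_hot maps any index outside [0, depth)
  # to an all-zero vector -- so unlabelled examples get an all-zero
  # (classCount, clustCount) target instead of a real label.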
\n\n # Set the shapes of tensors.\n float_image.set_shape([height, width, 3])\n #read_input.label.set_shape([1])\n read_input.superLabel.set_shape([1])\n\n # Ensure that the random shuffling has good mixing properties.
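  # Added note: min_queue_examples = int(0.4 * 50000) = 20000 here, so training
  # waits for 20000 decoded examples before emitting the first shuffled batch.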
\n min_fraction_of_examples_in_queue = 0.4\n min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *\n min_fraction_of_examples_in_queue)\n print ('Filling queue with %d CIFAR images before starting to train. '\n 'This will take a few minutes.' % min_queue_examples)\n \n img, lbl, suplbl = _generate_image_and_label_batch(float_image, read_input.label, read_input.superLabel,\n min_queue_examples, batch_size,\n shuffle=True)\n # Generate a batch of images and labels by building up a queue of examples.\n return img, lbl, suplbl\n\n\"\"\" function inputs\neval_data: bool, indicating if one should use the train or eval data set.\ndata_dir: Path to the CIFAR-10 data directory.\nbatch_size: Number of images per batch.\npartially_labelled: Boolean indicating use of partial labels\nmatrix_lab: Boolean indicating if the labels should be 2D (for subclasses in ACOL)\n\nConstruct input for CIFAR evaluation using the Reader ops.\n\"\"\"\ndef inputs(eval_data, data_dir, batch_size,partially_labelled=False,matrix_lab=False):\n if not eval_data:\n filenames = [os.path.join(data_dir, 'data_batch_%d'%(i))\n for i in xrange(1, 6)]\n filenames = [fname + fileappend for fname in filenames]\n num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN\n switch = 1\n else:\n filenames = [os.path.join(data_dir, 'test_batch' + fileappend)]\n num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\n switch = 0\n\n for f in filenames:\n if not tf.gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n\n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer(filenames)\n\n # Read examples from files in the filename queue.\n read_input = read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n size = tf.constant([IMAGE_SIZE,IMAGE_SIZE])\n height = IMAGE_SIZE\n width = IMAGE_SIZE\n\n # Image processing for evaluation.\n # Crop the central [height, width] of the image.\n resized_image = tf.image.resize_images(reshaped_image, size)\n\n # Subtract off the mean and divide by the variance of the pixels.\n #float_image = tf.image.per_image_standardization(resized_image) #NOTE!!!\n #rgb_scaled = resized_image * 255.0\n rgb_scaled = resized_image\n\n # Convert RGB to BGR\n red, green, blue = tf.split(axis=2, num_or_size_splits=3, value=rgb_scaled)\n assert red.get_shape().as_list()[:] == [IMAGE_SIZE, IMAGE_SIZE, 1]\n assert green.get_shape().as_list()[:] == [IMAGE_SIZE, IMAGE_SIZE, 1]\n assert blue.get_shape().as_list()[:] == [IMAGE_SIZE, IMAGE_SIZE, 1]\n float_image = tf.concat(axis=2, values=[\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ])\n \n #clear labels\n if partially_labelled:\n if matrix_lab:\n # print(read_input.key)\n #read_input.label = lbl_lookup[tf.add(read_input.label,tf.constant(1))*read_input.switch]\n #Makes lbl positive when switch=1 and negative when switch=0, negative values are all zeros after one_hot\n subclasslab_enc = tf.one_hot(read_input.subclasslab * (tf.constant(-1)+read_input.switch*tf.constant(2)) - tf.constant(1), classCount*clustCount)\n read_input.label = tf.reshape(subclasslab_enc,(classCount,clustCount))\n else:\n #read_input.label = tf.one_hot(read_input.label * (tf.constant(-1)+read_input.switch*tf.constant(2)), NUM_CLASSES)\n read_input.label.set_shape([1])\n\n\n # Set the shapes of tensors.\n float_image.set_shape([height, width, 3])\n read_input.label.set_shape([1])\n read_input.superLabel.set_shape([1])\n\n print(read_input.label)\n \n # Ensure that the random shuffling has good mixing properties.\n min_fraction_of_examples_in_queue = 0.4\n min_queue_examples = int(num_examples_per_epoch *\n min_fraction_of_examples_in_queue)\n\n # Generate a batch of images and labels by building up a queue of examples.\n return _generate_image_and_label_batch(float_image, read_input.label, read_input.superLabel,\n min_queue_examples, batch_size,\n shuffle=False)\n\n\"\"\" function inputs_raw\neval_data: bool, indicating if one should use the train or eval data set.\ndata_dir: Path to the CIFAR-10 data directory.\nbatch_size: Number of images per batch.\npartially_labelled: Boolean indicating use of partial labels\nmatrix_lab: Boolean indicating if the labels should be 2D (for subclasses in ACOL)\n\nConstruct input for CIFAR evaluation using the Reader ops, includes the raw images\n\"\"\"\ndef inputs_raw(eval_data, data_dir, batch_size,partially_labelled=False, matrix_lab=False):\n if not eval_data:\n # fixed: the original `'data_batch_%d'+fileappend % i` applied %-formatting to\n # fileappend (which has no %d) instead of to the batch-name template\n filenames = [os.path.join(data_dir, 'data_batch_%d' % i + fileappend)\n for i in xrange(1, 6)]\n num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN\n else:\n filenames = [os.path.join(data_dir, 'test_batch' + fileappend)]\n num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\n\n for f in filenames:\n if not tf.gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n\n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer(filenames)\n\n # Read examples from files in the filename queue.\n read_input = read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n size = tf.constant([IMAGE_SIZE,IMAGE_SIZE])\n height = IMAGE_SIZE\n width = IMAGE_SIZE\n\n # Image processing for evaluation.\n # Crop the central [height, width] of the image.\n resized_image = tf.image.resize_images(reshaped_image, size)\n\n # Subtract off the mean and divide by the variance of the pixels.\n #float_image = tf.image.per_image_standardization(resized_image) #NOTE!!!\n #rgb_scaled = resized_image * 255.0\n rgb_scaled = resized_image\n \n # Convert RGB to BGR\n red, green, blue = tf.split(axis=2, num_or_size_splits=3, value=rgb_scaled)\n assert red.get_shape().as_list()[:] == [IMAGE_SIZE, IMAGE_SIZE, 1]\n assert green.get_shape().as_list()[:] == [IMAGE_SIZE, IMAGE_SIZE, 1]\n assert blue.get_shape().as_list()[:] == [IMAGE_SIZE, IMAGE_SIZE, 1]\n bgr = tf.concat(axis=2, values=[\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ])\n \n #clear labels\n if partially_labelled:\n if matrix_lab:\n # print(read_input.key)\n #read_input.label = lbl_lookup[tf.add(read_input.label,tf.constant(1))*read_input.switch]\n #Makes lbl positive when switch=1 and negative when switch=0, negative values are all zeros after one_hot\n subclasslab_enc = tf.one_hot(read_input.subclasslab * (tf.constant(-1)+read_input.switch*tf.constant(2)) - tf.constant(1), classCount*clustCount)\n read_input.label = tf.reshape(subclasslab_enc,(classCount,clustCount))\n else:\n #read_input.label = tf.one_hot(read_input.label * (tf.constant(-1)+read_input.switch*tf.constant(2)), NUM_CLASSES)\n read_input.label.set_shape([1])\n\n # Set the shapes of tensors.\n bgr.set_shape([height, width, 3])\n resized_image.set_shape([height, width, 3])\n read_input.label.set_shape([1])\n read_input.superLabel.set_shape([1])\n\n # Ensure that the random shuffling has good mixing properties.\n min_fraction_of_examples_in_queue = 0.4\n min_queue_examples = int(num_examples_per_epoch *\n min_fraction_of_examples_in_queue)\n\n # Generate a batch of images and labels by building 
up a queue of examples.\n img,img_raw,lbls, suplbls = _generate_image_and_label_batch(bgr, read_input.label, read_input.superLabel,\n min_queue_examples, batch_size,\n shuffle=False, raw=True, raw_image=reshaped_image)\n return img, img_raw, lbls, suplbls","sub_path":"cifar/cifar10_input_VGG16_PL.py","file_name":"cifar10_input_VGG16_PL.py","file_ext":"py","file_size_in_byte":19817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"151937723","text":"\"\"\"Digit DP\nhttp://drken1215.hatenablog.com/entry/2019/02/04/013700\n\nEach bit position can be considered independently.\nCounting, over all A_i, how many have a 1 in each position gives the\nscore obtained when X picks 0 / picks 1 in that position.\n\nThe DP fixes X one bit at a time from the top; depending on the prefix\nchosen so far, the only transitions are:\n1. below K -> below K\n2. exactly K -> below K\n3. exactly K -> exactly K\nA \"below K -> exactly K\" transition is impossible.\ne.g.)\nK = 5 (0b101), going from the 2nd bit to the 3rd:\n1. 01 -> 011\n2. 10 -> 100\n3. 10 -> 101\n\ndp[i][j=0,1]\nafter fixing the top i bits:\nj=0: the prefix is confirmed to be below K\nj=1: the prefix still matches K exactly\n\"\"\"\nMAX_DIGIT = 40\nSMALL = 0\nEQUAL = 1\n\nN, K = map(int, input().split())\nA = list(map(int, input().split()))\n\ndp = [[-1 for _ in range(2)] for _ in range(100)]\ndp[0][EQUAL] = 0\n\nfor d in range(MAX_DIGIT):\n shift = MAX_DIGIT - d - 1\n # number of A_i whose bit at position d (from the top) is set\n bit_count = 0\n for a in A:\n if a >> shift & 1:\n bit_count += 1\n\n # score when bit d of X is set to 0 / to 1\n cost0 = (1 << shift) * bit_count # the original 1s contribute\n cost1 = (1 << shift) * (N - bit_count) # the original 0s flip to 1\n\n # \"below K -> below K\" transition\n if dp[d][SMALL] != -1:\n # free to pick 0 or 1; take whichever scores higher\n dp[d+1][SMALL] = max(dp[d+1][SMALL], dp[d][SMALL] + max(cost0, cost1))\n\n # \"exactly K -> below K\" transition\n if dp[d][EQUAL] != -1:\n if K >> shift & 1:\n # if K's bit d is 1, setting X's bit d to 0 drops the prefix below K\n dp[d+1][SMALL] = max(dp[d+1][SMALL], dp[d][EQUAL] + cost0)\n\n # \"exactly K -> exactly K\" transition\n if dp[d][EQUAL] != -1:\n if K >> shift & 1:\n dp[d+1][EQUAL] = max(dp[d+1][EQUAL], dp[d][EQUAL] + cost1)\n else:\n dp[d+1][EQUAL] = max(dp[d+1][EQUAL], dp[d][EQUAL] + cost0)\n\n\nans = max(dp[MAX_DIGIT][EQUAL], dp[MAX_DIGIT][SMALL])\nprint(ans)\n","sub_path":"abc/117/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
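A tiny brute-force cross-check for the digit DP above (added sketch; only feasible for small K, same objective of maximizing sum(X ^ A_i) over 0 <= X <= K):

def brute_force(N, K, A):
    # O(K*N): try every admissible X and keep the best score
    return max(sum(x ^ a for a in A) for x in range(K + 1))

# e.g. brute_force(3, 7, [1, 6, 3]) should match the DP's output for the same input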
+{"seq_id":"428172209","text":"import requests\nimport logging\nimport os\n\nbase_url = os.environ.get('URL',\n \"https://internal-apigw.central.arubanetworks.com\")\naccess_token = os.environ.get('TOKEN', None)\n\n\ndef get_thresholds():\n THRESHOLD_URL = '/presence/v2/config/thresholds'\n\n if access_token is None:\n return None\n\n fullurl = base_url + THRESHOLD_URL\n parameters = {\"access_token\": access_token}\n\n try:\n resp = requests.get(fullurl, params=parameters, verify=False)\n except Exception as e:\n logging.exception(\"Error trying to get data from server {}\"\n .format(e))\n return None\n\n logging.info(\"presence status: {}\".format(resp.status_code))\n return resp.json()\n\n\ndef get_presence_analytics():\n AGGR_URL = '/presence/v2/analytics/aggregates'\n\n if access_token is None:\n return None\n\n fullurl = base_url + AGGR_URL\n parameters = {'access_token': access_token,\n 'category': 'all',\n 'start_time': 0,\n 'end_time': 15315525270}\n\n try:\n resp = requests.get(fullurl, params=parameters, verify=False)\n except Exception as e:\n logging.exception(\"Error trying to get data from server:{}\".format(e))\n return None\n\n logging.info(\"presence aggr: {}\".format(resp.status_code))\n return resp.json()\n","sub_path":"data_clients/get_presence_data.py","file_name":"get_presence_data.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"386157344","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport pymysql\nimport logging\nimport traceback\n\n\nclass HudongbaikePipeline(object):\n\tdef __init__(self):\n\t\tself.conn = pymysql.connect(host = 'localhost',\n\t\t\tport = 3306,\n\t\t\tuser = 'root',\n\t\t\tpassword = '',\n\t\t\tdb = '',\n\t\t\tcharset = 'utf8mb4')\n\t\tself.cursor = self.conn.cursor()\n\n\tdef process_item(self, item, spider):\n\t\t# placeholders are left unquoted so pymysql can escape and quote the values itself\n\t\tsql = \"\"\"insert into record\n\t\t(url, place, title, prefix)\n\t\tvalues\n\t\t(%s, %s, %s, %s);\n\t\t\"\"\"\n\t\tURL = item['URL']\n\t\tPlace = item['Place']\n\t\tTitle = item['Title']\n\t\tPrefix = item['Prefix']\n\t\ttry:\n\t\t\tself.conn.ping(reconnect = True) # reconnect if the connection has dropped\n\t\t\tself.cursor.execute(sql, [URL, Place, Title, Prefix])\n\t\t\tself.conn.commit()\n\t\texcept:\n\t\t\ttraceback.print_exc()\n\t\treturn item\n\n\tdef close_spider(self, spider):\n\t\tself.conn.close()","sub_path":"hudongBaike/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"646576330","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport cryopython as cp\r\nimport matplotlib.cm as cm\r\nfrom scipy import optimize\r\nfrom scipy.optimize import curve_fit\r\n\r\n#open image\r\npixel_size = 1.7 #Angstrom\r\n\r\n#do_powerspectrum(image, gamma, p1, p2, gaus_denoise)\r\nimage = cp.do_powerspectrum(cp.open_image('170715_140002.mrc'),1,0,100,1.2)\r\n#image = image[5:,5:]\r\n#print(image.shape)\r\n\r\n# k = np.linspace(1/100,1/(2*pixel_size),image.shape[0])\r\nCs = 2\r\ndefocus = -40000\r\n\r\nht = 200 #kV\r\nA = 0.15\r\nsf = 1 #scaling factor\r\nB = 150\r\n\r\n#calculated constants\r\nwavelength = 12.398 / np.sqrt(ht * (1022 + ht)) #for kV\r\n#defocus_scherzer = -1.2*np.sqrt(Cs*wavelength)\r\n\r\n\r\n#function description with envelope function\r\ndef ctf_function(k,sf,defocus,offset):\r\n B=250\r\n ctf = -2 * np.pi * (Cs * wavelength ** 3 * (k ** 4) / 4 - np.pi * defocus * wavelength * (k ** 2) / 2)\r\n CTF = -sf * ((1 - A ** 2) ** 0.5 * np.sin(ctf) + A * np.cos(ctf))\r\n env = np.exp(-B * k ** 2)\r\n return env*CTF+offset\r\n\r\n#spatial frequency\r\nk = np.linspace(1/100,1/(2*pixel_size),image.shape[0]//2) # // so np.linspace gets an integer sample count\r\nX,Y = np.meshgrid(np.linspace(-1/(2*pixel_size),1/(2*pixel_size),image.shape[0]),np.linspace(-1/(2*pixel_size),1/(2*pixel_size),image.shape[0]))\r\n\r\n#create array of radii\r\nx,y = np.meshgrid(np.arange(image.shape[1]),np.arange(image.shape[0]))\r\nprint(x.shape,y.shape)\r\nx0, y0 = xy = np.unravel_index(image.argmax(), image.shape)\r\nprint('Center of the image: '+str(x0)+' '+str(y0))\r\nR = np.sqrt((x-x0)**2+(y-y0)**2)\r\n\r\n#prepare image to fit\r\nspace = 0.5\r\nf = lambda r : np.mean(image[(R >= r-space) & (R < r+space)])\r\nr = np.linspace(0,image.shape[0],num=image.shape[0])\r\nmean = np.vectorize(f)(r)\r\nmean = mean[:int(image.shape[0]/2)]\r\n\r\nwith open('mean.txt', 'a') as fh:\r\n for i in mean:\r\n print(i,file=fh)\r\n\r\nprint('done creating 
mean')\r\nend = 200\r\n#mean = mean[0:end]\r\nk_red = k\r\n\r\n#fitting #sf,B,defocus,offset\r\np0 = [1,-30000,0.5]\r\nparams, pcov = curve_fit(ctf_function, k_red,mean,p0=p0,maxfev = 2000)\r\nprint(params)\r\n\r\n#changing to radial coordinates\r\nrad = lambda x,y: np.sqrt(x**2+y**2)\r\nimage = ctf_function(rad(X,Y),*params)\r\n\r\n#plot setup\r\nfig, (ax,ax2) = plt.subplots(ncols=2)\r\nax.plot(k_red, ctf_function(k_red,*params))\r\nax.plot(k_red[:300],mean[:300])\r\nax2.imshow(image, extent=[X.min(),X.max(),Y.min(),Y.max()], cmap=plt.cm.gray)\r\nplt.show()\r\n\r\n\r\n\r\n\r\n#proper functions\r\n# ctf = -2*np.pi*(Cs * wavelength**3 * (k**4)/4 - np.pi * defocus * wavelength * (k**2)/2)\r\n# CTF = -sf * ((1-A**2)**0.5*np.sin(ctf) + A*np.cos(ctf))\r\n\r\n#guassian envelope funtion\r\n#env = np.exp(-B*k**2)","sub_path":"CTF_fit.py","file_name":"CTF_fit.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"30587619","text":"from flask_restful import Resource\nfrom flask import Response\nfrom Phone import Control\n\nclass v1_00_Config_Sample_Control(object):\n controller = None\n\n def __init__(self):\n self.controller = Control.global_controller\n\n\n def sample_request(self):\n try:\n self.controller.log('Config sample request received.')\n\n success = 'success'\n status = '200'\n message = 'Sample'\n data = {\"config-sample\":\"config-value\"}\n\n except Exception as e:\n success = 'error'\n status = '500'\n message = 'An error occurred.'\n error_text = 'v1_00_Config_Sample_Control.sample_request'+\\\n ': Exception {0}'.format(repr(e))\n data = {\"exception\":error_text}\n print(error_text)\n\n return_value = self.controller.do_response(message=message,\n data=data,\n status=status,\n response=success)\n\n self.controller.log('Sample request returned {0}'.format(data))\n\n return return_value\n\n\n","sub_path":"v4_00/old/OldPhone/Phone_Config_Control/v1_00_Config_Sample_Control.py","file_name":"v1_00_Config_Sample_Control.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"449134123","text":"import os\n\nimport numpy\nimport paddle.inference as paddle_infer\nimport cv2\n\n\ndef predict(image):\n # 创建 config,并设置预测模型路径\n config = paddle_infer.Config(\"static/model/faster_rcnn_r50_vd_fpn_ssld_2x_coco/model.pdmodel\",\"static/model/faster_rcnn_r50_vd_fpn_ssld_2x_coco/model.pdiparams\")\n\n # 根据 config 创建 predictor\n predictor = paddle_infer.create_predictor(config)\n # 获取输入 Tensor\n input_names = predictor.get_input_names()\n input_tensor = predictor.get_input_handle(input_names[0])\n\n # 从 CPU 获取数据,设置到 Tensor 内部\n input_tensor.copy_from_cpu(image)\n\n # 执行预测\n predictor.run()\n\n # 获取输出 Tensor\n output_names = predictor.get_output_names()\n output_tensor = predictor.get_output_handle(output_names[0])\n output_data = output_tensor.copy_to_cpu() # numpy.ndarray类型\n\n # 释放中间Tensor\n predictor.clear_intermediate_tensor()\n\n # 释放内存池中的所有临时 Tensor\n predictor.try_shrink_memory()\n\n\nif __name__ == '__main__':\n model_cfg = \"./model/faster_rcnn_r50_vd_fpn_ssld_2x_coco\"\n image = \"./static/img/people/00002.jpg\"\n use_gpu = False\n os.system(\"python infer.py --model_dir={} --image_file={} --use_gpu={} --threshold=0\".format(model_cfg, image, 
use_gpu))\n","sub_path":"flaskr/detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"448849648","text":"\"\"\"\nauthor: audreyc\n\"\"\"\n\nimport os\nimport pickle\nimport time_extractor\nfrom collections import Counter\nimport time\nimport re\n\ncompany_dir = 'sampledata2/'\npause_points = False\n\n\ndef cross_check(ct, history_counter, allowed_list, max_pts=5):\n # print(history_counter.most_common(1)[0])\n if history_counter.most_common(1) and len(history_counter.most_common(1)[0][0]) > 5:\n ordered_names_all = [x[0].split('|')[0] for x in history_counter.most_common()]\n else:\n ordered_names_all = [x[0] for x in history_counter.most_common()]\n ordered_names = [y for y in ordered_names_all if y in allowed_list.keys()]\n if max_pts >= 10:\n multiplier = 2\n max_pts = int(max_pts / 2)\n else:\n multiplier = 1\n if pause_points:\n print(ordered_names)\n for e_num, et_key in enumerate(ordered_names):\n if e_num >= max_pts:\n break\n ct[et_key] += (max_pts - e_num) * multiplier\n # for a_num, a_key in enumerate(ordered_names_all):\n # if a_num >= max_pts:\n # continue\n # ct[a_key] += max_pts - a_num\n return ct\n\n\ndef remap():\n test_info = pickle.load(open(company_dir + \"sampled_info_test.pkl\", \"rb\"))\n user_hist = pickle.load(open('new_ds_model/all_userhist.pkl', 'rb'))\n comp_hist = pickle.load(open('new_ds_model/all_companyhist.pkl', 'rb'))\n all_entity_hist = pickle.load(open('new_ds_model/allexpense_entityhistory.pkl', 'rb'))\n vend_hist = pickle.load(open('new_ds_model/all_vendorhist.pkl', 'rb'))\n uvend_hist = pickle.load(open('new_ds_model/all_uservendorhist.pkl', 'rb'))\n amt_hist = pickle.load(open('inputdata/all_amounthist.pkl', 'rb'))\n allowed_types = pickle.load(open(company_dir + 'sampled_allowedtypes.pkl', 'rb'))\n real_answer = pickle.load(open(company_dir + 'sampled_Y_testET.pkl', 'rb'))\n x_test = pickle.load(open(company_dir + \"sampled_X_test.pkl\", \"rb\"))\n\n total_counter = 0\n correct = 0\n real_key_na = 0\n user_history_exists = 0\n in_user_history = 0\n entity_history_exists = 0\n in_entity_history = 0\n vendor_history_exists = 0\n in_vendor_history = 0\n uvendor_history_exists = 0\n in_uvendor_history = 0\n company_skipped = 0\n undef = 0\n for pred_index, ocr_text in enumerate(x_test):\n # if pause_points and pred != 'AIRFR':\n # continue\n company = test_info[pred_index]['entity']\n userid = company + '-' + (test_info[pred_index]['userid'])\n vendor_raw = test_info[pred_index]['vendor']\n if vendor_raw:\n vendor_txt = re.sub(\"([^\\w]|[ 0-9_])\", '', vendor_raw.lower())\n # vendor = str(userid) + '-' + vendor_txt\n vendor = company + '-' + vendor_txt\n uvendor = userid + '-' + vendor_txt\n else:\n vendor = ''\n uvendor = ''\n amount = test_info[pred_index]['amount']\n real_key = real_answer[pred_index]\n\n if company not in allowed_types.keys():\n company_skipped += 1\n continue\n else:\n allowed_list = allowed_types[company]\n if real_key not in allowed_list.keys():\n real_key_na += 1\n if pause_points:\n print(\"real key not in allowed list\")\n print(real_key, allowed_list)\n continue\n\n et_guess = Counter()\n for et_key in allowed_list.keys():\n et_guess[et_key] += 1\n\n # This is the section that deals with time in Meals.\n # te = time_extractor.TimeExtractor()\n # minutes_from_midnight = te.extract_time(ocr_text)\n # if minutes_from_midnight['time'] and minutes_from_midnight['time'][0]['value'] != -1:\n # mfm = 
minutes_from_midnight['time'][0]['value']\n # if 240 < mfm < 630: # 4:00am - 10:30am\n # searchstr = ['brk', 'bfast', 'break']\n # elif 630 < mfm < 840: # 2:00pm\n # searchstr = ['lun']\n # elif mfm > 1020: # 5:00pm\n # searchstr = ['din']\n # else:\n # searchstr = []\n # if pause_points:\n # print(\"Hours from Midnight: \", str(float(mfm) / 60), searchstr)\n # for k, v in allowed_list.items():\n # for s in searchstr:\n # if s in v.lower():\n # et_guess[k] += 8\n\n # This is the section where you give points (or subtract points) based on amount\n if amount and company in amt_hist.keys():\n amount = float(amount)\n if pause_points:\n print(\"Amount: \", str(amount), end=' ')\n if amount == 0:\n et_guess = cross_check(et_guess, amt_hist[company]['Zero'], allowed_list, 5)\n elif amount < 10:\n et_guess = cross_check(et_guess, amt_hist[company]['Under Ten'], allowed_list, 10)\n elif amount < 50:\n et_guess = cross_check(et_guess, amt_hist[company]['Under Fifty'], allowed_list, 10)\n elif amount < 100:\n et_guess = cross_check(et_guess, amt_hist[company]['Under Hundred'], allowed_list, 10)\n elif amount < 1000:\n et_guess = cross_check(et_guess, amt_hist[company]['Under Thousand'], allowed_list, 12)\n else:\n et_guess = cross_check(et_guess, amt_hist[company]['Other'], allowed_list, 14)\n\n total_counter += 1\n if vendor and vendor in vend_hist.keys():\n vendor_history_exists += 1\n if pause_points:\n print(\"Vendor Hist: \", vendor, end=' ')\n et_guess = cross_check(et_guess, vend_hist[vendor], allowed_list, 7)\n if real_key in vend_hist[vendor]:\n in_vendor_history += 1\n if uvendor and uvendor in uvend_hist.keys():\n uvendor_history_exists += 1\n if pause_points:\n print(\"User Vendor History: \", uvendor, end=' ')\n et_guess = cross_check(et_guess, uvend_hist[uvendor], allowed_list, 7)\n if real_key in uvend_hist[uvendor]:\n in_uvendor_history += 1\n if userid in user_hist.keys():\n user_history_exists += 1\n if pause_points:\n print(\"User Hist: \", userid, end=' ')\n et_guess = cross_check(et_guess, user_hist[userid], allowed_list, 6)\n if real_key in user_hist[userid]:\n in_user_history += 1\n if company in comp_hist.keys():\n entity_history_exists += 1\n if pause_points:\n print(\"Entity Hist: \", company, end=' ')\n et_guess = cross_check(et_guess, comp_hist[company], allowed_list, 5)\n if real_key in comp_hist[company]:\n in_entity_history += 1\n if not len(et_guess):\n if company in all_entity_hist.keys():\n et_guess = cross_check(et_guess, all_entity_hist[company], allowed_list, 5)\n travel_words = ['trav', 't&', '&e', 'trans']\n for expkey, expname in allowed_list.items():\n if any([t in expname.lower() for t in travel_words]):\n et_guess[expkey] += 1\n if not len(et_guess):\n undef += 1\n et_guess['UNDEF'] += 1\n print('allowed list: ', allowed_list)\n print(x_test[pred_index])\n print('real key: ', real_key)\n print('----------------------------')\n\n if pause_points:\n print(\"Final Guess rankings: \", et_guess)\n if et_guess.most_common(1)[0][0] == real_key:\n correct += 1\n\n if pause_points:\n print('Real ET:', real_key, allowed_list[real_key])\n total_counts = sum(et_guess.values())\n output = []\n for k, v in et_guess.most_common(5):\n output.append({'Expense Type': k, 'Score': float(v) / total_counts})\n print(total_counts, output)\n input('pause')\n\n print(\"ouput_name skipped: \", company_skipped)\n print(\"Real key not in allowed list: \", real_key_na, \"(probably the entity changed their allowed types recently)\")\n print(\"Total counter: \", total_counter)\n 
print(\"CORRECT ET GUESS: \", correct)\n print(\"# correct key somewhere in user hist / user history exists:\", in_user_history, user_history_exists)\n print(\"# correct key somewhere in entity hist / entity history exists:\", in_entity_history, entity_history_exists)\n print(\"# correct key somewhere in vendor hist / vendor history exists:\", in_vendor_history, vendor_history_exists)\n print(\"# correct key somewhere in user-vendor hist / user-vendor hsitory exists:\", in_uvendor_history, uvendor_history_exists)\n print(\"# undefs:\", undef)\n\nif __name__ == '__main__':\n remap()\n\n","sub_path":"remapper_noDS.py","file_name":"remapper_noDS.py","file_ext":"py","file_size_in_byte":8780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"458776510","text":"import random\nfrom ByrdMichaelbst import *\n\n\n\nfor n in range(1000, 100001, 1000):\n heightList = []\n for j in range(15):\n mytree = BinarySearchTree()\n # print(\"J\", j)\n for i in range(n):\n rand = random.randint(0, 2*n)\n while mytree.__contains__(rand):\n rand = random.randint(0, 2 * n)\n mytree[rand] = str(rand)\n heightList.append(mytree.heightTree())\n avgHeight = sum(heightList) / len(heightList)\n print(mytree.length(), \"\\t\", avgHeight)","sub_path":"Matrix-Algebra-master/ByrdMichael_BST_Testing.py","file_name":"ByrdMichael_BST_Testing.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"498557823","text":"\"\"\"\nCopyright 2017 Neural Networks and Deep Learning lab, MIPT\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib.layers import xavier_initializer\n\nfrom deeppavlov.core.common.registry import register\nfrom deeppavlov.core.models.tf_model import TFModel\n\n\n@register('go_bot_rnn')\nclass GoalOrientedBotNetwork(TFModel):\n\n def __init__(self, **params):\n self.opt = params\n\n save_path = self.opt.get('save_path')\n load_path = self.opt.get('load_path', None)\n train_now = self.opt.get('train_now', False)\n\n super().__init__(save_path=save_path,\n load_path=load_path,\n train_now=train_now,\n mode=self.opt['mode'])\n\n # initialize parameters\n self._init_params()\n # build computational graph\n self._build_graph()\n # initialize session\n self.sess = tf.Session()\n\n if self.get_checkpoint_state():\n #TODO: save/load params to json, here check compatability\n print(\"\\n:: initializing `{}` from saved\".format(self.__class__.__name__))\n self.load()\n else:\n print(\"\\n:: initializing `{}` from scratch\\n\".format(self.__class__.__name__))\n self.sess.run(tf.global_variables_initializer())\n\n self.reset_state()\n\n def run_sess(self):\n pass\n\n def _init_params(self, params=None):\n params = params or self.opt\n self.learning_rate = params['learning_rate']\n self.n_hidden = params['hidden_dim']\n self.n_actions = params['action_size']\n self.obs_size = params['obs_size']\n\n def _build_graph(self):\n\n 
self._add_placeholders()\n\n # build body\n _logits = self._build_body()\n\n # loss, train and predict operations\n self._prediction = tf.argmax(self._probs, axis=0, name='prediction')\n self._loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=_logits, labels=self._action, name='loss'\n )\n self._step = tf.Variable(0, trainable=False, name='global_step')\n self._train_op = tf.train.AdadeltaOptimizer(self.learning_rate)\\\n .minimize(self._loss, global_step=self._step, name='train_op')\n\n def _add_placeholders(self):\n self._features = tf.placeholder(tf.float32, [1, self.obs_size],\n name='features')\n self._state_c = tf.placeholder(tf.float32, [1, self.n_hidden],\n name='state_c')\n self._state_h = tf.placeholder(tf.float32, [1, self.n_hidden],\n name='state_h')\n self._action = tf.placeholder(tf.int32,\n name='ground_truth_action')\n self._action_mask = tf.placeholder(tf.float32, [self.n_actions],\n name='action_mask')\n\n def _build_body(self):\n # input projection\n _Wi = tf.get_variable('Wi', [self.obs_size, self.n_hidden],\n initializer=xavier_initializer())\n _bi = tf.get_variable('bi', [self.n_hidden],\n initializer=tf.constant_initializer(0.))\n\n # add relu/tanh here if necessary\n _projected_features = tf.matmul(self._features, _Wi) + _bi\n\n _lstm_f = tf.contrib.rnn.LSTMCell(self.n_hidden, state_is_tuple=True)\n _lstm_op, self._next_state = _lstm_f(inputs=_projected_features,\n state=(self._state_c,\n self._state_h))\n\n # reshape LSTM's state tuple (2,n_hidden) -> (1,n_hidden*2)\n _state_reshaped = tf.concat(axis=1,\n values=(self._next_state.c,\n self._next_state.h))\n\n # output projection\n _Wo = tf.get_variable('Wo', [self.n_hidden*2, self.n_actions],\n initializer=xavier_initializer())\n _bo = tf.get_variable('bo', [self.n_actions],\n initializer=tf.constant_initializer(0.))\n # get logits\n _logits = tf.matmul(_state_reshaped, _Wo) + _bo\n # probabilities normalization : elemwise multiply with action mask\n self._probs = tf.multiply(tf.squeeze(tf.nn.softmax(_logits)),\n self._action_mask,\n name='probs')\n return _logits\n\n def reset_state(self):\n # set zero state\n self.state_c = np.zeros([1, self.n_hidden], dtype=np.float32)\n self.state_h = np.zeros([1, self.n_hidden], dtype=np.float32)\n\n def _train_step(self, features, action, action_mask):\n _, loss_value, self.state_c, self.state_h, prediction = \\\n self.sess.run(\n [\n self._train_op, self._loss, self._next_state.c,\n self._next_state.h, self._prediction\n ],\n feed_dict={\n self._features: features.reshape([1, self.obs_size]),\n self._action: [action],\n self._state_c: self.state_c,\n self._state_h: self.state_h,\n self._action_mask: action_mask\n }\n )\n return loss_value[0], prediction\n\n def _forward(self, features, action_mask):\n probs, prediction, self.state_c, self.state_h = \\\n self.sess.run(\n [\n self._probs, self._prediction, self._next_state.c,\n self._next_state.h\n ],\n feed_dict={\n self._features: features.reshape([1, self.obs_size]),\n self._state_c: self.state_c,\n self._state_h: self.state_h,\n self._action_mask: action_mask\n }\n )\n return probs, prediction\n\n def shutdown(self):\n self.sess.close()\n","sub_path":"deeppavlov/skills/go_bot/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":6544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"96100349","text":"# import os\r\n# import csv\r\n# import numpy as np\r\nimport tensorflow as tf\r\nfrom datetime import datetime\r\n# from datagenerator 
import ImageDataGenerator\r\nfrom datagenerator_tfrecord import ImageDataGenerator\r\nfrom resnet_model import *\r\nIterator = tf.data.Iterator\r\n\r\n\r\ndef optimistic_restore(session, save_file):\r\n '''\r\n restore weights from checkpoint as many as possible\r\n '''\r\n reader = tf.train.NewCheckpointReader(save_file)\r\n saved_shapes = reader.get_variable_to_shape_map()\r\n var_names = sorted([(var.name, var.name.split(':')[0]) for var in tf.global_variables()\r\n if var.name.split(':')[0] in saved_shapes])\r\n restore_vars = []\r\n with tf.variable_scope('', reuse=True):\r\n for var_name, saved_var_name in var_names:\r\n curr_var = tf.get_variable(saved_var_name)\r\n var_shape = curr_var.get_shape().as_list()\r\n if var_shape == saved_shapes[saved_var_name]:\r\n restore_vars.append(curr_var)\r\n # print(restore_vars)\r\n saver = tf.train.Saver(restore_vars)\r\n saver.restore(session, save_file)\r\n\r\n\r\n# Paths\r\n# path = '/media/aldin/2EA2E320A2E2EAF3/ML/CDiscount Image Classification/'\r\n\r\nfilewriter_path = 'summary/'\r\ncheckpoint_path = 'checkpoints/'\r\n# example_path = path + 'example/'\r\nexample_path = '/home/aldin/Desktop/output_file.tfrecords'\r\n\r\n# learning_rate = 0.01\r\nnum_epochs = 20\r\nbatch_size = 128\r\npercent_for_train = 0.98\r\n_WEIGHT_DECAY = 1e-4\r\n_MOMENTUM = 0.9\r\n\r\n\r\nnum_classes = 5270\r\n# Place data loading and preprocessing on the cpu\r\nwith tf.device('/cpu:0'):\r\n tr_data = ImageDataGenerator(example_path,\r\n mode='training',\r\n batch_size=batch_size,\r\n num_classes=num_classes,\r\n shuffle=True)\r\n val_data = ImageDataGenerator(example_path,\r\n mode='validation',\r\n batch_size=batch_size,\r\n num_classes=num_classes,\r\n shuffle=False)\r\n # create an reinitializable iterator given the dataset structure\r\n iterator = Iterator.from_structure(tr_data.data.output_types,\r\n tr_data.data.output_shapes)\r\n x, y = iterator.get_next()\r\n\r\n\r\n# Ops for initializing the two different iterators\r\ntraining_init_op = iterator.make_initializer(tr_data.data)\r\nvalidation_init_op = iterator.make_initializer(val_data.data)\r\n\r\n# Get the number of training/validation steps per epoch\r\n# train_batches_per_epoch = int(np.floor(tr_data.data_size / batch_size))\r\n# val_batches_per_epoch = int(np.floor(val_data.data_size / batch_size))\r\ntrain_batches_per_epoch = 189435 // 2\r\n\r\n# Op for training\r\n# Link variable to model output\r\nlogits = inference(x, True, num_classes=num_classes)\r\n\r\n# Added 2017/12/15 confirm prob\r\nop_probs = tf.nn.softmax(logits)\r\n\r\n# Op for calculating the loss\r\nwith tf.name_scope(\"cross_ent\"):\r\n loss = loss(logits, y)\r\n\r\n# Train op\r\nwith tf.name_scope(\"train\"):\r\n # Scale the learning rate linearly with the batch size. 
When the batch size\r\n # is 256, the learning rate should be 0.1.\r\n initial_learning_rate = 0.001 * batch_size / 256\r\n global_step = tf.train.get_or_create_global_step()\r\n\r\n # Multiply the learning rate by 0.1 at 5, 6, 7, and 60 epochs.\r\n boundaries = [\r\n int(train_batches_per_epoch * epoch) for epoch in [5, 6, 7, 60]]\r\n values = [\r\n initial_learning_rate * decay for decay in [1, 0.1, 0.01, 1e-3, 1e-4]]\r\n learning_rate = tf.train.piecewise_constant(\r\n tf.cast(global_step, tf.int32), boundaries, values)\r\n\r\n # Create optimizer and apply gradient descent to the trainable variables\r\n # optimizer = tf.train.AdamOptimizer(learning_rate)\r\n optimizer = tf.train.MomentumOptimizer(\r\n learning_rate=learning_rate,\r\n momentum=_MOMENTUM)\r\n grads = optimizer.compute_gradients(loss)\r\n train_op = optimizer.apply_gradients(grads_and_vars=grads)\r\n\r\n # Set variables to train\r\n # train_op = optimizer.minimize(loss, var_list=tf.get_collection(\r\n # tf.GraphKeys.TRAINABLE_VARIABLES, \"fc\"))\r\n # train_op = optimizer.minimize(\r\n # loss, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)\r\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n with tf.control_dependencies(update_ops):\r\n train_op = optimizer.minimize(\r\n loss, global_step, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)\r\n\r\n# Evaluation op: Accuracy of the model\r\nwith tf.name_scope(\"accuracy\"):\r\n correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\r\n\r\n# Add summary\r\ntf.summary.scalar('loss', loss)\r\ntf.summary.scalar('accuracy', accuracy)\r\nmerged_summary_op = tf.summary.merge_all()\r\n# Initialize the FileWriter\r\nwriter = tf.summary.FileWriter(filewriter_path)\r\n\r\n\r\n# Initialize a saver to store model checkpoints\r\nsaver = tf.train.Saver()\r\n\r\nwith tf.Session() as sess:\r\n # Initialize all variables\r\n sess.run(tf.global_variables_initializer())\r\n print('Initializing finished')\r\n\r\n # Add the model graph to TensorBoard\r\n writer.add_graph(sess.graph)\r\n\r\n # Load the pretrained weights into the non-trainable layer\r\n # When execute first time\r\n optimistic_restore(sess, checkpoint_path + 'ResNet-L50.ckpt')\r\n # Else resume from the latest checkpoint if one exists\r\n latest = tf.train.latest_checkpoint(checkpoint_path)\r\n if latest is not None:\r\n print(\"resume\", latest)\r\n saver.restore(sess, latest)\r\n\r\n print(\"{} Start training...\".format(datetime.now()))\r\n print('train_batches_per_epoch:', train_batches_per_epoch)\r\n for epoch in range(10, num_epochs):\r\n # Initialize training iterator each epoch\r\n sess.run(training_init_op)\r\n\r\n for step in range(train_batches_per_epoch):\r\n # Run one training step\r\n sess.run(train_op)\r\n probs = sess.run(op_probs)\r\n print(probs.max(axis=-1)) # ndarray method; the numpy import above is commented out\r\n\r\n if step % 100 == 0:\r\n # Create summary every step_size steps\r\n o = sess.run([loss, accuracy])\r\n writer.add_summary(\r\n sess.run(merged_summary_op), epoch * train_batches_per_epoch + step)\r\n format_str = ('step %d, loss = %.2f, accuracy = %.2f')\r\n print(datetime.now(), format_str % (step, o[0], o[1]))\r\n\r\n # if step % 1000 == 1:\r\n # print(\"{} Saving checkpoint of model...\".format(datetime.now()))\r\n # # save checkpoint of the model\r\n # checkpoint_name = checkpoint_path + \\\r\n # 'model_epoch' + str(epoch) + '.ckpt'\r\n # save_path = saver.save(sess, checkpoint_name)\r\n # print(\"{} Model checkpoint saved at {}\".format(datetime.now(),\r\n # 
checkpoint_name))\r\n\r\n # Validate the model on the entire validation set\r\n # print(\"{} Start validation\".format(datetime.now()))\r\n # sess.run(validation_init_op)\r\n # test_acc = 0.\r\n # test_count = 0\r\n # for _ in range(val_batches_per_epoch):\r\n # acc = sess.run(accuracy)\r\n # test_acc += acc\r\n # test_count += 1\r\n # test_acc /= test_count\r\n # print(\"{} Validation Accuracy = {:.4f}\".format(datetime.now(),\r\n # test_acc))\r\n","sub_path":"fine_tune_resnet.py","file_name":"fine_tune_resnet.py","file_ext":"py","file_size_in_byte":7589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"511909149","text":"#Write to a file\n\n#texts ='1. New healine to write.\\n 2. Second headline for testing.\\n 3. Third headline for checking inline text '\nwith open('test.txt', 'r') as fr:\n with open('test2.txt', 'w') as fw:\n for line in fr:\n fw.write(line)\n \n \n \n","sub_path":"writeText.py","file_name":"writeText.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"564384178","text":"\n#hello openCV\n\nimport cv2\n\nimg = cv2.imread('1/lena512color.tiff') #opencv路径不能是中文路径\ncv2.imshow('sourceImg', img)\n#对原始图像进行高斯平滑处理(高斯核选取的是(5,5),也就是5*5大小的卷积模版),并且得到img2\nimg2 = cv2.GaussianBlur(img,(5,5),0)\ncv2.imshow('GaussianBlur1',img2)\n\nimg3 = cv2.GaussianBlur(img,(15,15),0)\n#对原始图像进行高斯平滑处理(高斯核选取的是(15,15)),并且得到img3\ncv2.imshow('GaussianBlur2',img3)\n\ncv2.waitKey()\ncv2.destroyAllWindows()\n# 高斯平滑:\n# python dst=cv.GaussianBlur(src,ksize,sigmaX[,dst[,sigmaY[,borderType]]])\n# 参数:\n# src 原始图像 \n# ksize 高斯核大小,ksize.width和ksize.height可以不同,但是都必须为正的奇数(或者为0,此时它们的值会自动由sigma进行计算))\n# sigmaX 高斯核在x方向上的标准差\n# dst 目标图像\n# sigmaY 高斯核在y上的标准差\n# borderType 像素外插策略","sub_path":"1/hello_openCV_GuassianBlur.py","file_name":"hello_openCV_GuassianBlur.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"273611971","text":"import vlc\nimport time\nimport os\n\nfrom apscheduler.job import Job\n\nimport globals\n\n\n# vlc libvlc.dll fix: https://stackoverflow.com/a/58851931/10375372\n\n\nclass JobLocalAudio:\n @staticmethod\n def run(task_id, d_args):\n \"\"\"play local audio file\"\"\"\n\n file = d_args.get(\"file\", None)\n try:\n if not os.path.isfile(file):\n raise FileNotFoundError\n\n # play mp3\n player = vlc.MediaPlayer(file)\n\n # add player to global cache -> needed to stop later\n globals.global_current_task[task_id] = {\"task_id\": task_id, \"method\": JobLocalAudio.stop, \"player\": player}\n player.play()\n except:\n raise Exception(\"something went wrong while playing local music..\")\n\n @staticmethod\n def stop(job_id):\n player = globals.global_current_task.get(job_id).get(\"player\")\n if player:\n player.stop()\n return \"player stopped\"\n else:\n raise Exception(\"no valid player found\")\n","sub_path":"jobs/job_local_audio.py","file_name":"job_local_audio.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"511551676","text":"import os\r\nfrom discord.ext import commands\r\nimport asyncpg\r\n\r\nfrom urllib.parse import urlparse\r\nimport requests_async as requests\r\nfrom bs4 import BeautifulSoup\r\nimport json\r\nfrom base64 import b64encode\r\n\r\nfrom random import randint\r\n\r\nbot = commands.Bot(command_prefix=\"!\")\r\nTOKEN = 
os.getenv(\"DISCORD_TOKEN\")\r\n\r\nDATABASE_URL = os.environ['DATABASE_URL']\r\nROWS_PER_PAGE = 10\r\n\r\nSPOTIFY_CLIENT_ID = os.environ['SPOTIFY_CLIENT_ID']\r\nSPOTIFY_CLIENT_SECRET = os.environ['SPOTIFY_CLIENT_SECRET']\r\n\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print(f\"Logged in as {bot.user.name}({bot.user.id})\")\r\n\r\n\r\n@bot.command()\r\nasync def ping(ctx):\r\n await ctx.send(\"pong\")\r\n\r\n\r\nasync def get_or_create_user(ctx, conn, discord_id):\r\n username = await conn.fetchval(f\"SELECT id FROM users WHERE discord_id = {discord_id}\")\r\n if not username is None:\r\n return username\r\n msg = await ctx.send(\"You do not have an account. Create an account?\")\r\n await msg.add_reaction('✅')\r\n await msg.add_reaction('❌')\r\n\r\n def check(reaction, user):\r\n return reaction.message.id == msg.id and user == ctx.message.author and str(reaction.emoji) in ['✅', '❌', ]\r\n\r\n try:\r\n reaction, _user = await bot.wait_for('reaction_add', timeout=60.0, check=check)\r\n except:\r\n # timeout\r\n await msg.delete()\r\n return\r\n if str(reaction.emoji) == '❌':\r\n await msg.delete()\r\n return\r\n await ctx.send(\"Enter your username\")\r\n\r\n def check_info(m):\r\n return m.author == ctx.message.author and m.content[0] != '!'\r\n\r\n username = await bot.wait_for('message', check=check_info)\r\n await conn.execute(f\"INSERT INTO users(id, discord_id) VALUES ({username.content}, {discord_id})\")\r\n await ctx.send(f\"Registered new user {username.content}\")\r\n return username.content\r\n\r\n\r\n@bot.command()\r\nasync def stats(ctx):\r\n db_conn = await asyncpg.connect(DATABASE_URL)\r\n user = ctx.message.author\r\n user_row = await db_conn.fetchrow(f\"SELECT * FROM users WHERE discord_id = {user.id}\")\r\n if user_row is None:\r\n await ctx.send(f\"user with discord id {user.id} does not exist\")\r\n else:\r\n stats_string = \"Here is your data:\\n\" + f\"username: {user_row[0]}\\n\"\r\n if user_row[2]:\r\n stats_string += \":star: is admin\\n\"\r\n await ctx.send(stats_string)\r\n\r\n\r\n@bot.command()\r\nasync def echo(ctx, *args):\r\n await ctx.send(args)\r\n\r\n\r\nasync def get_new_id(conn, table):\r\n while True:\r\n new_id = randint(1, (2 ** 31) - 1)\r\n existing = await conn.fetchval(\r\n f\"SELECT id FROM {table} WHERE id = {new_id}\")\r\n if existing is None:\r\n return new_id\r\n\r\n\r\n@bot.command()\r\nasync def test_wait(ctx, *args):\r\n state = randint(0, 2 ** 32)\r\n await ctx.send(f\"{state:x} {args}\")\r\n\r\n def check(m):\r\n return m.author == ctx.message.author and m.content[0] != '!'\r\n\r\n msg = await bot.wait_for('message', check=check)\r\n await ctx.send(f\"{state:x} {msg}\")\r\n\r\n\r\n@bot.command()\r\nasync def emoji(ctx):\r\n msg = await ctx.send(\"react to this message\")\r\n\r\n def check(reaction, user):\r\n return reaction.message.id == msg.id and user == ctx.message.author\r\n\r\n try:\r\n reaction, _user = await bot.wait_for('reaction_add', timeout=60.0, check=check)\r\n except:\r\n # timeout\r\n await msg.delete()\r\n else:\r\n await msg.delete()\r\n await ctx.send(\"`\" + str(reaction.emoji) + \"`\\n\" + f\"{reaction.emoji.encode('ascii', 'namereplace')}\")\r\n\r\n# =========================\r\n# song\r\n# =========================\r\n\r\n\r\n@bot.command(aliases=['addsong'])\r\nasync def add_song(ctx, link):\r\n conn = await asyncpg.connect(DATABASE_URL)\r\n song_id = await conn.fetchval(f\"SELECT song_id FROM links WHERE link = '{link}'\")\r\n if not song_id is None:\r\n await ctx.send(f\"entry already exists:\")\r\n await 
view_song(ctx, song_id)\r\n return\r\n\r\n parsed_url = urlparse(link)\r\n # try to fix if user forgot 'https://'\r\n if parsed_url.scheme == '':\r\n link = 'https://' + link\r\n parsed_url = urlparse(link)\r\n # detect link type\r\n if parsed_url.netloc in ['www.youtube.com', 'youtube.com', 'youtu.be', 'm.youtube.com']:\r\n info = await add_youtube(ctx, link)\r\n if info is None:\r\n return\r\n (song_name, artist_name) = info\r\n link_type = 'youtube'\r\n elif parsed_url.netloc in ['open.spotify.com']:\r\n path = parsed_url.path.split('/')\r\n if path[0] != '':\r\n await ctx.send(\"wtf?\")\r\n return\r\n if path[1] == 'album':\r\n await ctx.send(\"You are trying to add an entire spotify album.\\n\" + \"This action is not supported yet, please add songs one by one.\")\r\n return\r\n elif path[1] == 'track':\r\n if len(path) > 3:\r\n await ctx.send(\"why is there some data after track id?\\n\" + \"ping my developer please\")\r\n return\r\n new_link = parsed_url._replace(\r\n netloc='api.spotify.com', path='/v1/tracks/' + path[2]).geturl()\r\n info = await add_spotify(ctx, new_link)\r\n if info is None:\r\n return\r\n (song_name, artist_name) = info\r\n link_type = 'spotify'\r\n else:\r\n await ctx.send(\"I don't recognize this kind of spotify link... :/\" + \"ping my dev please\")\r\n return\r\n else:\r\n await ctx.send(\r\n \"I don't recognize this link, can't parse it dude :/\")\r\n return\r\n\r\n # ask user to fix the song_name and artist_name if needed\r\n wait_change_msg = await ctx.send(\"adding song...\\n\" + f\"name: `{song_name}`\\n\" + f\"artist: `{artist_name}`\\n\" + \"confirm song info ✅, edit song info ❔, or cancel ❌\")\r\n await wait_change_msg.add_reaction('✅')\r\n await wait_change_msg.add_reaction('❔')\r\n await wait_change_msg.add_reaction('❌')\r\n\r\n def check(reaction, user):\r\n return reaction.message.id == wait_change_msg.id and user == ctx.message.author and str(reaction.emoji) in ['✅', '❔', '❌', ]\r\n\r\n try:\r\n reaction, _user = await bot.wait_for('reaction_add', timeout=60.0, check=check)\r\n except:\r\n # timeout\r\n await wait_change_msg.delete()\r\n return\r\n if str(reaction.emoji) == '✅':\r\n pass\r\n elif str(reaction.emoji) == '❔':\r\n # edit\r\n def check_info(m):\r\n return m.author == ctx.message.author and m.content[0] != '!'\r\n await ctx.send('Please enter the correct song name:')\r\n song_name_msg = await bot.wait_for('message', check=check_info)\r\n song_name = song_name_msg.content\r\n await ctx.send('Please enter the correct artist name:')\r\n artist_name_msg = await bot.wait_for('message', check=check_info)\r\n artist_name = artist_name_msg.content\r\n elif str(reaction.emoji) == '❌':\r\n await wait_change_msg.delete()\r\n return\r\n\r\n matching_song = await conn.fetchrow(\r\n f\"SELECT id, name FROM songs WHERE name = ($1)\", song_name)\r\n if matching_song is None:\r\n # add new song\r\n # ...but first find or create new artist\r\n matching_artist = await conn.fetchrow(\r\n f\"SELECT id, name FROM artists WHERE name = ($1)\", artist_name)\r\n if matching_artist is None:\r\n artist_id = await get_new_id(conn, 'artists')\r\n await conn.execute(\r\n f\"INSERT INTO artists VALUES ({artist_id}, '{artist_name}', NULL)\")\r\n await ctx.send(f\"created new artist entry `{artist_id:x}`\")\r\n else:\r\n artist_id = matching_artist[0]\r\n await ctx.send(f\"found existing artist entry `{artist_id:x}`\")\r\n\r\n # now actually add new song\r\n new_id = await get_new_id(conn, 'songs')\r\n await conn.execute(\r\n f\"INSERT INTO songs(id, name, 
artist_ids) VALUES (($1), ($2), ($3))\", new_id, song_name, [artist_id])\r\n msg = await ctx.send(f\"created new song entry `{new_id:x}`\")\r\n song_id = new_id\r\n await msg.add_reaction('🔢')\r\n else:\r\n song_id = matching_song['id']\r\n msg = await ctx.send(f\"found song with same title: `{song_id:x}`, linking to that song\")\r\n await msg.add_reaction('🔢')\r\n\r\n # TODO: move this somewhere else, this is a bit scuffed\r\n await conn.execute(f\"INSERT INTO links VALUES (($1), ($2), ($3))\", link, link_type, song_id)\r\n\r\n def check2(reaction, user):\r\n return reaction.message.id == msg.id and user == ctx.message.author and str(reaction.emoji) == '🔢'\r\n\r\n try:\r\n reaction, _user = await bot.wait_for('reaction_add', timeout=60.0, check=check2)\r\n except:\r\n await msg.remove_reaction('🔢', msg.author)\r\n return\r\n await rate_react(ctx, song_id)\r\n\r\n\r\nasync def add_youtube(ctx, link):\r\n print('adding youtube link', link)\r\n try:\r\n r = await requests.get(link)\r\n print(r.status_code)\r\n if r is None:\r\n await ctx.send(\"couldn't reach your link\")\r\n soup = BeautifulSoup(r.text, 'html.parser')\r\n song_name = soup.find('meta', {'property': 'og:title'})['content']\r\n author_span = soup.find('span', {'itemprop': 'author'})\r\n author_name = author_span.find('link', {'itemprop': 'name'})['content']\r\n return (song_name, author_name)\r\n except Exception as e:\r\n await ctx.send(\"failed to parse your youtube link :/\")\r\n await ctx.send(f\"{e}\")\r\n\r\n\r\nasync def add_spotify(ctx, link):\r\n print('adding spotify link', link)\r\n try:\r\n r = await requests.post('https://accounts.spotify.com/api/token',\r\n data={'grant_type': 'client_credentials'},\r\n headers={\r\n 'Authorization': 'Basic ' + b64encode((SPOTIFY_CLIENT_ID + ':' + SPOTIFY_CLIENT_SECRET).encode()).decode()}\r\n )\r\n print('spotify authorization', r.status_code, r.text)\r\n access_token = json.loads(r.text)['access_token']\r\n r = await requests.get(link, headers={'Authorization': 'Bearer ' + access_token})\r\n print('spotify data', r.status_code)\r\n j = json.loads(r.text)\r\n # TODO: handle multiple artists\r\n return (j['name'], j['artists'][0]['name'])\r\n except Exception as e:\r\n await ctx.send(\"failed to parse your spotify link :/\")\r\n await ctx.send(f\"{e}\")\r\n\r\n\r\n@bot.command()\r\nasync def song(ctx, *args):\r\n \"\"\"\r\n just throw all the song related commands in here\r\n \"\"\"\r\n if len(args) == 0:\r\n await song_help(ctx)\r\n elif args[0] == 'help':\r\n await song_help(ctx)\r\n elif len(args) == 1:\r\n try:\r\n song_id = int(args[0], 16)\r\n except ValueError:\r\n await ctx.send(\"invalid song id, please use: !song \\n\" + \"for other usages of !song, please do !song help\")\r\n else:\r\n await view_song(ctx, song_id)\r\n else:\r\n await ctx.send(\"unknown usage for !song. 
:/\")\r\n await song_help(ctx)\r\n\r\n\r\nasync def song_help(ctx):\r\n await ctx.send(\"usage:\")\r\n await ctx.send(\"I'm too lazy to write this, this is constatly changign anyway :p\")\r\n\r\n\r\nasync def artist_string(conn, song_id):\r\n artists = await conn.fetch(f\"SELECT id, name, english_name FROM artists WHERE id IN (SELECT unnest(artist_ids) FROM songs WHERE id = {song_id})\")\r\n if len(artists) == 0:\r\n return \"???\"\r\n\r\n def artist_string_single(artist):\r\n s = f\"{artist[1]} \"\r\n if not artist[2] is None:\r\n s += f\"({artist[2]}) \"\r\n s += f\"`{artist[0]:x}`\"\r\n return s\r\n\r\n return ', '.join([artist_string_single(artist) for artist in artists])\r\n\r\n\r\nasync def view_song(ctx, song_id):\r\n conn = await asyncpg.connect(DATABASE_URL)\r\n message = await song_info_string(conn, song_id)\r\n if message is None:\r\n await ctx.send(f\"cannot find song with id {song_id}\")\r\n await ctx.send(message)\r\n\r\n\r\nasync def song_info_string(conn, song_id):\r\n song = await conn.fetchrow(f\"SELECT name, english_name, rating, rating_num FROM songs WHERE id = {song_id}\")\r\n if song is None:\r\n return\r\n message = f\"{song[0]}\"\r\n if not song[1] == '':\r\n message += f\" ({song[1]})\"\r\n message += \" by \"\r\n message += await artist_string(conn, song_id)\r\n message += \"\\n\"\r\n if song[3] != 0:\r\n message += f\"rating: {song[2]:.2f} / 10\\n\"\r\n message += f\"song id: `{song_id:x}`\\n\"\r\n tags = await conn.fetch(f\"SELECT names FROM tags WHERE id IN (SELECT unnest(tag_ids) FROM songs WHERE id = {song_id})\")\r\n print(tags)\r\n if len(tags) > 0:\r\n message += f\"tags: {', '.join([tag['names'][0] for tag in tags])}\\n\"\r\n links = await conn.fetch(f\"SELECT type, link FROM links WHERE song_id = {song_id}\")\r\n for link in links:\r\n message += f\"{link[0]} link: {link[1]}\\n\"\r\n return message\r\n\r\n\r\n@bot.command(aliases=['editsong'])\r\nasync def edit_song(ctx, *args):\r\n if len(args) < 3:\r\n await edit_song_help(ctx)\r\n return\r\n try:\r\n song_id = int(args[0], 16)\r\n except ValueError:\r\n await ctx.send(\"song id should be a hex number\")\r\n # await edit_song_help(ctx)\r\n return\r\n # TODO: security!!\r\n conn = await asyncpg.connect(DATABASE_URL)\r\n if args[1] == 'name':\r\n await conn.execute(f\"UPDATE songs SET name = '{args[2]}' WHERE id = {song_id}\")\r\n await ctx.send(\"song name has been chagned\")\r\n elif args[1] == 'english_name':\r\n await conn.execute(f\"UPDATE songs SET english_name = '{args[2]}' WHERE id = {song_id}\")\r\n await ctx.send(\"song english name has been chagned\")\r\n elif args[1] == 'id':\r\n try:\r\n new_id = int(args[2], 16)\r\n except ValueError:\r\n await ctx.send(\"new song id should be a hex number\")\r\n return\r\n matching_song = await conn.fetchval(f\"SELECT name FROM songs WHERE id = {new_id}\")\r\n if not matching_song is None:\r\n await ctx.send(f\"there is another song entry with id {new_id:x}: {matching_song}\")\r\n return\r\n await conn.execute(f\"UPDATE songs SET id = {new_id} WHERE id = {song_id}\")\r\n await conn.execute(f\"UPDATE links SET song_id = {new_id} WHERE id = {song_id}\")\r\n song_name = await conn.fetchval(f\"SELECT name FROM songs WHERE id = {new_id}\")\r\n await ctx.send(f\"song id of {song_name} has been chagned\")\r\n else:\r\n await edit_song_help(ctx)\r\n\r\n\r\n@bot.command()\r\nasync def songs(ctx, page=1):\r\n try:\r\n page = int(page)\r\n except ValueError:\r\n await ctx.send(\"page should be an integer\")\r\n return\r\n page -= 1\r\n if page < 0:\r\n await 
ctx.send(\"page number should be a positive integer\")\r\n return\r\n conn = await asyncpg.connect(DATABASE_URL)\r\n songs = await conn.fetch(f\"SELECT * FROM songs ORDER BY time_added DESC LIMIT {ROWS_PER_PAGE} OFFSET {page * ROWS_PER_PAGE}\")\r\n if len(songs) == 0:\r\n await ctx.send(f\"There is no page {page + 1}\")\r\n return\r\n display_text = \"\"\r\n for i, row in enumerate(songs):\r\n idx = i + 1 + page * ROWS_PER_PAGE\r\n display_text += f\"{idx}. {row['name']} `{row['id']:x}`\"\r\n display_text += \" by \"\r\n display_text += await artist_string(conn, row['id'])\r\n display_text += \"\\n\"\r\n await ctx.send(display_text)\r\n\r\n\r\nasync def edit_song_help(ctx):\r\n help_text = \"usage:\\n\"\r\n help_text += \"!edit_song name \\n\"\r\n help_text += \"!edit_song english_name \\n\"\r\n help_text += \"!edit_song id \\n\"\r\n await ctx.send(help_text)\r\n\r\n# =========================\r\n# artist\r\n# =========================\r\n\r\n\r\n@bot.command()\r\nasync def artist(ctx, *args):\r\n \"\"\"\r\n just throw all the artist related commands in here\r\n \"\"\"\r\n if len(args) == 0:\r\n await artist_help(ctx)\r\n elif args[0] == 'help':\r\n await artist_help(ctx)\r\n elif args[0] == 'add':\r\n if len(args) != 2:\r\n await ctx.send(\"usage: !artist add \")\r\n return\r\n await add_artist(ctx, args[1])\r\n elif len(args) == 1:\r\n try:\r\n artist_id = int(args[0], 16)\r\n except ValueError:\r\n await ctx.send(\"invalid artist id, please use: !artist \\n\" + \"for other usages of !artist, please do !artist help\")\r\n else:\r\n await view_artist(ctx, artist_id)\r\n else:\r\n await ctx.send(\"unknown usage for !artist. :/\")\r\n await artist_help(ctx)\r\n\r\n\r\nasync def view_artist(ctx, artist_id):\r\n conn = await asyncpg.connect(DATABASE_URL)\r\n artist = await conn.fetchrow(f\"SELECT name, english_name FROM artists WHERE id = {artist_id}\")\r\n if artist is None:\r\n await ctx.send(f\"cannot find artist with id {artist_id:x}\")\r\n else:\r\n artist_message = f\"{artist[0]} \"\r\n if not artist[1] is None:\r\n artist_message += f\"({artist[1]}) \"\r\n artist_message += f\"`{artist_id:x}`\"\r\n artist_message += \"\\n\"\r\n songs = await conn.fetch(f\"SELECT id, name FROM songs WHERE {artist_id} = ANY(artist_ids)\")\r\n if len(songs) > 0:\r\n artist_message += f\"songs ({len(songs)}):\\n\"\r\n for song in songs[:10]:\r\n artist_message += f\"{song[1]} `{song[0]}`\\n\"\r\n await ctx.send(artist_message)\r\n\r\n\r\n@bot.command()\r\nasync def edit_artist(ctx, *args):\r\n if len(args) < 3:\r\n await edit_artist_help(ctx)\r\n return\r\n try:\r\n artist_id = int(args[0], 16)\r\n except ValueError:\r\n ctx.send(\"arist id should be a hex number\")\r\n return\r\n conn = await asyncpg.connect(DATABASE_URL)\r\n # TODO: security!!\r\n if args[1] == 'name':\r\n await conn.execute(f\"UPDATE artists SET name = '{args[2]}' WHERE id = {artist_id}\")\r\n await ctx.send(\"artist name has been chagned\")\r\n elif args[1] == 'english_name':\r\n await conn.execute(f\"UPDATE artists SET english_name = '{args[2]}' WHERE id = {artist_id}\")\r\n await ctx.send(\"artist english name has been chagned\")\r\n elif args[1] == 'id':\r\n try:\r\n new_id = int(args[2], 16)\r\n except ValueError:\r\n await ctx.send(\"new artist id should be a hex number\")\r\n return\r\n artist_name = await conn.fetchval(f\"SELECT name FROM artist WHERE id = {new_id}\")\r\n if not artist_name is None:\r\n await ctx.send(f\"there is another artist entry with id {new_id:x}: {artist_name}\")\r\n return\r\n await 
conn.execute(f\"UPDATE artists SET id = {new_id} WHERE id = {artist_id}\")\r\n await conn.execute(f\"UPDATE songs SET artist_ids = array_replace(artist_ids, {artist_id}, {new_id})\")\r\n artist_name = await conn.fetchval(f\"SELECT name FROM artists WHERE id = {new_id}\")\r\n await ctx.send(f\"artist id of {artist_name} has been chagned\")\r\n else:\r\n await edit_artist_help(ctx)\r\n\r\n\r\nasync def edit_artist_help(ctx):\r\n help_text = \"usage:\\n\"\r\n help_text += \"I'm too lazy\"\r\n await ctx.send(help_text)\r\n\r\n\r\nasync def artist_help(ctx):\r\n help_text = \"usage:\\n\"\r\n help_text += \"I'm too lazy to do this atm\"\r\n await ctx.send(help_text)\r\n\r\n\r\n@bot.command(aliases=['addartist'])\r\nasync def add_artist(ctx, artist_name):\r\n conn = await asyncpg.connect(DATABASE_URL)\r\n existing = await conn.fetchval(\"SELECT id FROM artists WHERE name = ($1)\", artist_name)\r\n if not existing is None:\r\n await ctx.send(f\"There is already an database entry for {artist_name}: `{existing}`\")\r\n return\r\n artist_id = await get_new_id(conn, 'artists')\r\n await conn.execute(\"INSERT INTO artists(id, name) VALUES (($1), ($2))\", artist_id, artist_name)\r\n await ctx.send(f\"created artist entry {artist_name} `{artist_id:x}`\")\r\n\r\n# =========================\r\n# tags\r\n# =========================\r\n\r\n\r\n@bot.command()\r\nasync def tag(ctx, *args):\r\n if len(args) == 0:\r\n await tag_help(ctx)\r\n elif len(args) == 1:\r\n if args[0] == 'help':\r\n await tag_help(ctx)\r\n else:\r\n await view_tag(ctx, args[0])\r\n elif args[0] == 'song':\r\n if len(args[1:]) != 2:\r\n await ctx.send(\"usage: !tag song \")\r\n return\r\n await tag_song(ctx, args[1], args[2])\r\n elif len(args) == 2:\r\n await tag_song(ctx, args[0], args[1])\r\n\r\n\r\nasync def tag_help(ctx):\r\n help_text = \"usage:\\n\"\r\n help_text += \"!tag help\\n\"\r\n help_text += \"!tag \\n\"\r\n help_text += \"!tag \\n\"\r\n help_text += \"!tag song \\n\"\r\n await ctx.send(help_text + \"too lazy to write down all other !tag help\")\r\n\r\n\r\n@bot.command(aliases=['viewtag'])\r\nasync def view_tag(ctx, tag_info):\r\n conn = await asyncpg.connect(DATABASE_URL)\r\n try:\r\n tag_id = int(tag_info, 16)\r\n tag_names = await conn.fetchval(f\"SELECT names FROM tags WHERE id = {tag_id}\")\r\n except ValueError:\r\n tag_id, tag_names = await conn.fetchrow(f\"SELECT id, names FROM tags WHERE '{tag_info}' = ANY(tags.names)\")\r\n display_text = f\"{tag_names[0]} \"\r\n if len(tag_names) > 1:\r\n display_text += f\"({', '.join(tag_names[1:])}) \"\r\n display_text += f\"`{tag_id}`\\n\"\r\n songs = await conn.fetch(f\"SELECT id, name FROM songs WHERE {tag_id} = ANY(songs.tag_ids)\")\r\n if len(songs) > 0:\r\n display_text += f\"songs with tag ({min(10, len(songs))} / {len(songs)}):\\n\"\r\n for song in songs[:10]:\r\n display_text += f\"{song['name']} `{song['id']}`\\n\"\r\n await ctx.send(display_text)\r\n\r\n\r\n@bot.command(aliases=['tagsong'])\r\nasync def tag_song(ctx, song_id, tag_info):\r\n try:\r\n song_id = int(song_id, 16)\r\n except ValueError:\r\n await ctx.send(\"song id should be a hex number\")\r\n conn = await asyncpg.connect(DATABASE_URL)\r\n song_tags = await conn.fetchrow(\r\n f\"SELECT tag_ids FROM songs WHERE id = {song_id}\")\r\n if song is None:\r\n await ctx.send(f\"cannot find song with id {song_id}\")\r\n return\r\n\r\n try:\r\n tag_id = int(tag_info)\r\n tag_names = await conn.fetchval(f\"SELECT names FROM tags WHERE id = {tag_id}\")\r\n # TODO: repeated code\r\n if tag_names is None:\r\n await 
ctx.send(f\"cannot find tag with id {tag_info}\")\r\n return\r\n except ValueError:\r\n row = await conn.fetchrow(f\"SELECT id, names FROM tags WHERE '{tag_info}' = ANY(names)\")\r\n if row is None:\r\n await ctx.send(f\"cannot find any tag with name {tag_info}\")\r\n return\r\n tag_id, tag_names = row\r\n\r\n if tag_id in song_tags:\r\n await ctx.send(f\"song {song_id} already has tag {tag_names[0]} ({tag_id})\")\r\n return\r\n await conn.execute(f\"UPDATE songs SET tag_ids = tag_ids || {tag_id} WHERE id = {song_id}\")\r\n await ctx.send(f\"song {song_id} has been tagged {tag_names[0]} `{tag_id}`\")\r\n\r\n\r\n@bot.command(aliases=['untagsong', 'untag_song'])\r\nasync def untag(ctx, *args):\r\n \"\"\"\r\n remove a tag from a song\r\n usage: !untag \r\n for other usages, I'm too lazy to document\r\n \"\"\"\r\n if len(args) == 0:\r\n await untag_help(ctx)\r\n return\r\n if args[0] == 'song':\r\n await untag(ctx, args[1:])\r\n return\r\n if len(args) != 2:\r\n await untag_help(ctx)\r\n return\r\n await untag_song(ctx, args[0], args[1])\r\n\r\n\r\nasync def untag_help(ctx):\r\n await ctx.send()\r\n\r\n\r\nasync def untag_song(ctx, song_id, tag_info):\r\n try:\r\n song_id = int(song_id, 16)\r\n except ValueError:\r\n await ctx.send(\"song id should be a hex number\")\r\n conn = await asyncpg.connect(DATABASE_URL)\r\n song_tags = await conn.fetchval(\r\n f\"SELECT tag_ids FROM songs WHERE id = {song_id}\")\r\n if song is None:\r\n await ctx.send(f\"cannot find song with id {song_id}\")\r\n return\r\n\r\n try:\r\n tag_id = int(tag_info)\r\n tag_names = await conn.fetchval(f\"SELECT names FROM tags WHERE id = {tag_id}\")\r\n # TODO: repeated code\r\n if tag_names is None:\r\n await ctx.send(f\"cannot find tag with id {tag_info}\")\r\n return\r\n except ValueError:\r\n tag_id, tag_names = await conn.fetchval(f\"SELECT id, names FROM tags WHERE '{tag_info}' = ANY(names)\")\r\n if tag_id is None:\r\n await ctx.send(f\"cannot find any tag with name {tag_info}\")\r\n return\r\n\r\n if not tag_id in song_tags:\r\n await ctx.send(f\"song {song_id} does not have tag {tag_names[0]} ({tag_id})\")\r\n return\r\n await conn.execute(f\"UPDATE songs SET array_remove(tag_ids, {tag_id}) WHERE id = {song_id}\")\r\n await ctx.send(f\"tag {tag_names[0]} `{tag_id}` has been removed from song {song_id}\")\r\n\r\n\r\n@bot.command()\r\nasync def tags(ctx, page=1):\r\n \"\"\"\r\n do something tag related\r\n \"\"\"\r\n try:\r\n page = int(page)\r\n except ValueError:\r\n await ctx.send(\"page should be an integer\")\r\n return\r\n page -= 1\r\n if page < 0:\r\n await ctx.send(\"page number should be a positive integer\")\r\n return\r\n conn = await asyncpg.connect(DATABASE_URL)\r\n songs = await conn.fetch(f\"SELECT * FROM tags ORDER BY id LIMIT {ROWS_PER_PAGE} OFFSET {page * ROWS_PER_PAGE}\")\r\n if len(songs) == 0:\r\n await ctx.send(f\"There is no page {page + 1}\")\r\n return\r\n display_text = \"\"\r\n for i, row in enumerate(songs):\r\n idx = i + 1 + page * ROWS_PER_PAGE\r\n display_text += f\"{idx}. 
{row['names'][0]} `{row['id']:x}` \"\r\n if len(row['names']) > 1:\r\n display_text += f\"({', '.join(row['names'][1:])})\"\r\n display_text += \"\\n\"\r\n await ctx.send(display_text)\r\n\r\n# =========================\r\n# rate\r\n# =========================\r\n\r\n\r\n@bot.command()\r\nasync def rate(ctx, *args):\r\n if len(args) == 0:\r\n await ctx.send(\"blah\")\r\n elif args[0] == 'help':\r\n await ctx.send(\"blah\")\r\n elif args[0] == \"view\" or args[0] == 'check':\r\n if len(args) < 3:\r\n await ctx.send(\"usage: !rate view <song id> <username>\")\r\n return\r\n if args[1] == 'song':\r\n await ctx.send(\"not implemented yet\")\r\n elif args[1] == 'user':\r\n await ctx.send(\"not implemented yet\")\r\n else:\r\n await view_rating(ctx, args[1], args[2])\r\n elif len(args) == 1:\r\n await rate_react(ctx, args[0])\r\n elif len(args) == 2:\r\n await rate_with_rating(ctx, args[0], args[1])\r\n\r\n\r\nasync def view_rating(ctx, song_id, username):\r\n try:\r\n song_id = int(song_id, 16)\r\n except:\r\n await ctx.send(\"usage: !rate view <song id> <username>\")\r\n return\r\n conn = await asyncpg.connect(DATABASE_URL)\r\n song_name = await conn.fetchval(f\"SELECT name FROM songs WHERE id = {song_id}\")\r\n if song_name is None:\r\n await ctx.send(f\"There is no song with id {song_id:x}\")\r\n return\r\n rating = await conn.fetchval(f\"SELECT value FROM ratings WHERE username = '{username}' AND song_id = {song_id}\")\r\n if rating is None:\r\n await ctx.send(f\"{username} has not rated the song {song_name}\")\r\n else:\r\n await ctx.send(f\"{username} rated the song {song_name} a {rating} / 10\")\r\n\r\n\r\nasync def rate_react(ctx, song_id):\r\n try:\r\n song_id = int(song_id, 16)\r\n except:\r\n await ctx.send(\"usage: !rate <song id>\")\r\n return\r\n conn = await asyncpg.connect(DATABASE_URL)\r\n song_name = await conn.fetchval(f\"SELECT name FROM songs WHERE id = {song_id}\")\r\n if song_name is None:\r\n await ctx.send(f\"There is no song with id {song_id:x}\")\r\n return\r\n username = await get_or_create_user(ctx, conn, ctx.message.author.id)\r\n old_rating = await conn.fetchval(f\"SELECT value FROM ratings WHERE username = '{username}' AND song_id = {song_id}\")\r\n\r\n prompt_text = f\"{song_name} `{song_id:x}` by {await artist_string(conn, song_id)}\\n\"\r\n if not old_rating is None:\r\n prompt_text += f\"you previously rated the song {old_rating} / 10\\n\"\r\n prompt_text += \"to stop rating this song react with ❌\"\r\n prompt_msg = await ctx.send(prompt_text)\r\n rating_emojis = [\"0️⃣\",\r\n \"1️⃣\",\r\n \"2️⃣\",\r\n \"3️⃣\",\r\n \"4️⃣\",\r\n \"5️⃣\",\r\n \"6️⃣\",\r\n \"7️⃣\",\r\n \"8️⃣\",\r\n \"9️⃣\",\r\n \"🔟\",\r\n '❌']\r\n for emoji in rating_emojis:\r\n await prompt_msg.add_reaction(emoji)\r\n\r\n def check(reaction, user):\r\n return reaction.message.id == prompt_msg.id and user == ctx.message.author and str(reaction.emoji) in rating_emojis\r\n\r\n try:\r\n reaction, _user = await bot.wait_for('reaction_add', timeout=60.0, check=check)\r\n except:\r\n await prompt_msg.delete()\r\n return\r\n if str(reaction.emoji) == '❌':\r\n await prompt_msg.delete()\r\n return\r\n rating = rating_emojis.index(str(reaction.emoji))\r\n if old_rating is None:\r\n await conn.execute(f\"INSERT INTO ratings(username, song_id, value) VALUES ('{username}', {song_id}, {rating})\")\r\n row = await conn.fetchrow(f\"SELECT rating_num, rating_total FROM songs WHERE id = {song_id}\")\r\n await conn.execute(f\"UPDATE songs SET rating_num = {row[0] + 1}, rating_total = {row[1] + rating}, rating = {(row[1] + rating) / (row[0] + 1)} WHERE id = 
{song_id}\")\r\n await ctx.send(f\"You've rated {song_name} `{song_id:x}` a {rating}\")\r\n else:\r\n await conn.execute(f\"UPDATE ratings SET value = {rating} WHERE username = '{username}' AND song_id = {song_id}\")\r\n row = await conn.fetchrow(f\"SELECT rating_num, rating_total FROM songs WHERE id = {song_id}\")\r\n await conn.execute(f\"UPDATE songs SET rating_total = {row[1] + rating - old_rating}, rating = {(row[1] + rating - old_rating) / row[0]} WHERE id = {song_id}\")\r\n await ctx.send(f\"You've changed your rating of {song_name} `{song_id:x}` from {old_rating} to {rating}\")\r\n await prompt_msg.delete()\r\n\r\n\r\nasync def rate_with_rating(ctx, song_id, rating):\r\n try:\r\n song_id = int(song_id, 16)\r\n rating = int(rating, 10)\r\n except:\r\n await ctx.send(\"usage: !rate \")\r\n return\r\n if rating < 0 or rating > 10:\r\n await ctx.send(f\"rating should be an integer between 0 and 10 (inclusive), not {rating}\")\r\n return\r\n\r\n conn = await asyncpg.connect(DATABASE_URL)\r\n song_name = await conn.fetchval(f\"SELECT name FROM songs WHERE id = {song_id}\")\r\n if song_name is None:\r\n await ctx.send(f\"There is no song with id {song_id:x}\")\r\n return\r\n\r\n username = await get_or_create_user(ctx, conn, ctx.message.author.id)\r\n old_rating = await conn.fetchval(f\"SELECT value FROM ratings WHERE username = '{username}' AND song_id = {song_id}\")\r\n\r\n if old_rating is None:\r\n await conn.execute(f\"INSERT INTO ratings(username, song_id, value) VALUES ('{username}', {song_id}, {rating})\")\r\n row = await conn.fetchrow(f\"SELECT rating_num, rating_total FROM songs WHERE id = {song_id}\")\r\n await conn.execute(f\"UPDATE songs SET rating_num = {row[0] + 1}, rating_total = {row[1] + rating}, rating = {(row[1] + rating) / (row[0] + 1)} WHERE id = {song_id}\")\r\n await ctx.send(f\"You've rated {song_name} `{song_id:x}` a {rating}\")\r\n else:\r\n await conn.execute(f\"UPDATE ratings SET value = {rating} WHERE username = '{username}' AND song_id = {song_id}\")\r\n row = await conn.fetchrow(f\"SELECT rating_num, rating_total FROM songs WHERE id = {song_id}\")\r\n await conn.execute(f\"UPDATE songs SET rating_total = {row[1] + rating - old_rating}, rating = {(row[1] + rating - old_rating) / row[0]} WHERE id = {song_id}\")\r\n await ctx.send(f\"You've changed your rating of {song_name} `{song_id:x}` from {old_rating} to {rating}\")\r\n\r\n\r\n@bot.command()\r\nasync def review(ctx, song_id):\r\n try:\r\n song_id = int(song_id, 16)\r\n except:\r\n await ctx.send(\"usage: !review \")\r\n return\r\n conn = await asyncpg.connect(DATABASE_URL)\r\n song_name = await conn.fetchval(f\"SELECT name FROM songs WHERE id = {song_id}\")\r\n if song_name is None:\r\n await ctx.send(f\"There is no song with id {song_id:x}\")\r\n return\r\n username = await get_or_create_user(ctx, conn, ctx.message.author.id)\r\n row = await conn.fetchrow(f\"SELECT review FROM ratings WHERE username = '{username}' AND song_id = {song_id}\")\r\n if row is None:\r\n await ctx.send(f\"You have not rated the song {song_name} `{song_id:x}`. Please rate the song before submitting a review.\")\r\n return\r\n old_review = row[0]\r\n if not (old_review == '' or old_review is None):\r\n msg = await ctx.send(f\"You have an old review of this song. 
Do you want to write a new review?\" + \"delete old review and write a new one ✅, see old review instead 👀, cancel action and don't do anything ❌\")\r\n await msg.add_reaction('✅')\r\n await msg.add_reaction('👀')\r\n await msg.add_reaction('❌')\r\n\r\n def check(reaction, user):\r\n return reaction.message.id == msg.id and user == ctx.message.author and str(reaction.emoji) in ['✅', '👀', '❌']\r\n\r\n try:\r\n reaction, _user = await bot.wait_for('reaction_add', timeout=60.0, check=check)\r\n except:\r\n pass\r\n await msg.delete()\r\n if str(reaction.emoji) == '✅':\r\n await ctx.send(f\"Please write a new review for {song_name} `{song_id:x}`\")\r\n # TODO: cancel writing review at this stage?\r\n # await msg\r\n\r\n def check_info(m):\r\n return m.author == ctx.message.author and m.content[0] != '!'\r\n\r\n new_review = await bot.wait_for('message', check=check_info)\r\n await conn.execute(f\"UPDATE ratings SET review = ($1) WHERE username = '{username}' AND song_id = {song_id}\", new_review.content)\r\n await ctx.send(\"Thank you for your review\")\r\n elif str(reaction.emoji) == '👀':\r\n await ctx.send(f\"Here is your previous review of {song_name} `{song_id:x}`:\")\r\n await ctx.send(old_review)\r\n else:\r\n pass\r\n else:\r\n await ctx.send(f\"Please write a new review for {song_name} `{song_id:x}`\")\r\n # TODO: cancel writing review at this stage?\r\n # await msg\r\n\r\n def check_info(m):\r\n return m.author == ctx.message.author and m.content[0] != '!'\r\n\r\n new_review = await bot.wait_for('message', check=check_info)\r\n await conn.execute(f\"UPDATE ratings SET review = ($1) WHERE username = '{username}' AND song_id = {song_id}\", new_review.content)\r\n await ctx.send(\"Thank you for your review\")\r\n\r\n\r\n@bot.command(aliases=['rec'])\r\nasync def recommend(ctx, song_id, discord_id):\r\n try:\r\n song_id = int(song_id, 16)\r\n discord_id = int(discord_id, 10)\r\n except:\r\n await ctx.send(\"usage: !recommend \")\r\n return\r\n conn = await asyncpg.connect(DATABASE_URL)\r\n row = await conn.fetchrow(f\"SELECT name, english_name, rating, rating_num FROM songs WHERE id = {song_id}\")\r\n if row is None:\r\n await ctx.send(f\"Couldn't find song with id {song_id:x}\")\r\n return\r\n sender_name = await get_or_create_user(ctx, conn, ctx.message.author.id)\r\n rec_message = f\"{sender_name} has recommended you the song:\\n\"\r\n rec_message += await song_info_string(conn, song_id)\r\n\r\n sender_rating_row = await conn.fetchrow(\"SELECT value, review FROM ratings WHERE username = ($1) AND song_id = ($2)\", sender_name, song_id)\r\n if not sender_rating_row is None:\r\n rec_message += f\"{sender_name} has rated this song {sender_rating_row[0]} / 10 \"\r\n if not sender_rating_row[1] is None:\r\n rec_message += \"with the following review:\\n\"\r\n rec_message += f\"```\\n\"\r\n rec_message += sender_rating_row[1]\r\n rec_message += f\"```\\n\"\r\n\r\n await bot.get_user(discord_id).send(rec_message)\r\n print(sender_name, 'recommended', song_id, discord_id)\r\n\r\nif __name__ == \"__main__\":\r\n bot.run(TOKEN)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":36428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"53886451","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtCore import pyqtSlot\n#from main_ui import *\nimport cv2\nimport time\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.misc import imresize\n#from ek import 
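The rating handlers above interpolate `username` and `song_id` into SQL with f-strings, and `view_rating` drops the quotes around `username` entirely, so any name containing a quote breaks the query and the pattern is open to injection. A minimal sketch of the same insert-or-update path using asyncpg's `$n` placeholders, with the running-average bookkeeping folded into single UPDATEs so concurrent raters cannot race the read-modify-write; the `ratings`/`songs` schema is assumed from the bot code above:

# Parameterized variant of the rate handlers' database path; the schema
# (ratings.username/song_id/value, songs.rating_num/rating_total/rating)
# is taken from the queries above. In Postgres every SET expression sees
# the pre-update row, so the mean is computed from the old totals.
async def upsert_rating(conn, username, song_id, rating):
    old = await conn.fetchval(
        "SELECT value FROM ratings WHERE username = $1 AND song_id = $2",
        username, song_id)
    if old is None:
        await conn.execute(
            "INSERT INTO ratings(username, song_id, value) VALUES ($1, $2, $3)",
            username, song_id, rating)
        # first rating by this user: grow count and total, recompute the mean
        await conn.execute(
            "UPDATE songs SET rating_num = rating_num + 1,"
            " rating_total = rating_total + $2,"
            " rating = (rating_total + $2)::float / (rating_num + 1)"
            " WHERE id = $1", song_id, rating)
    else:
        await conn.execute(
            "UPDATE ratings SET value = $3 WHERE username = $1 AND song_id = $2",
            username, song_id, rating)
        # changed rating: the total shifts by the delta, the count is unchanged
        await conn.execute(
            "UPDATE songs SET rating_total = rating_total + $2 - $3,"
            " rating = (rating_total + $2 - $3)::float / rating_num"
            " WHERE id = $1", song_id, rating, old)
    return old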
Graph\nfrom sklearn.mixture import GaussianMixture\n#---------ek------\nclass Graph:\n\n def __init__(self):\n self.node = []\n self.edge = {}\n self.neighbors = {}\n self.graph = []\n self.residual = [] # residual graph\n self.row = None\n self.source = None\n self.sink = None\n self.sset = None\n self.tset = None\n\n # add nodes\n def add_node(self, node=[]):\n self.node = node\n\n # add edges\n def add_edge(self, node=(), capacity=None):\n self.edge.setdefault(node, capacity)\n\n # build the network flow\n def build_flow(self, source=None, sink=None):\n for i in range(len(self.node)):\n self.graph.append([])\n self.graph[i] = [0 for j in range(len(self.node))]\n self.neighbors.setdefault(i, [])\n for i, j in self.edge.keys():\n self.graph[i][j] = self.edge[(i, j)]\n self.neighbors[i].append(j)\n if i not in self.neighbors[j]:\n self.neighbors[j].append(i)\n self.residual = [i[:] for i in self.graph]\n self.row = len(self.graph)\n self.source = source\n self.sink = sink\n\n def edmonds_karp(self):\n flow = 0\n length = len(self.graph)\n flows = [[0 for i in range(length)] for j in range(length)]\n while True:\n max, parent = self.bfs(flows)\n print(max)\n if max == 0:\n self.sset = [self.source] + \\\n [i for i, v in enumerate(parent) if v >= 0]\n self.tset = [x for x in self.node if x not in self.sset]\n print(self.sset, self.tset)\n break\n flow = flow + max\n v = self.sink\n while v != self.source:\n u = parent[v]\n flows[u][v] = flows[u][v] + max\n self.residual[u][v] -= max\n flows[v][u] = flows[v][u] - max\n self.residual[v][u] += max\n v = u\n return flow, flows\n\n def bfs(self, flows):\n length = self.row\n parents = [-1 for i in range(length)] # parent table\n parents[self.source] = -2 # make sure source is not rediscovered\n M = [0 for i in range(length)] # Capacity of path to vertex i\n M[self.source] = float('Inf') # this is necessary!\n\n queue = []\n queue.append(self.source)\n while queue:\n u = queue.pop(0)\n for v in self.neighbors[u]:\n # if there is available capacity and v is is not seen before in\n # search\n if self.graph[u][v] - flows[u][v] > 0 and parents[v] == -1:\n parents[v] = u\n # it will work because at the beginning M[u] is Infinity\n M[v] = min(M[u], self.graph[u][v] - flows[u]\n [v]) # try to get smallest\n if v != self.sink:\n queue.append(v)\n else:\n return M[self.sink], parents\n return 0, parents\n\n def find_cut(self):\n cut = {}\n for i in self.sset:\n cut[i] = 0\n for i in self.tset:\n cut[i] = 1\n return cut\n#-------end ek-------\n\n#-------gmm--------\ndef build_bayes_graph(img, prob_fg, prob_bg, sigma=1e2, kappa=2):\n \"\"\" Build a graph from 4-neighborhood of pixels.\n Foreground and background is determined from\n labels (1 for foreground, 0 for background)\n and is modeled with naive Bayes classifiers.\"\"\"\n m, n = img.shape[:2]\n # RGB vector version (one pixel per row)\n vim = img.reshape((-1, 3))\n # create graph with m*n+2 nodes\n gr = Graph()\n gr.add_node(range(m * n + 2))\n source = m * n # second to last is source\n sink = m * n + 1 # last node is sink\n # normalize\n for i in range(vim.shape[0]):\n vim[i] = vim[i] / np.linalg.norm(vim[i])\n # go through all nodes and add edges\n for i in range(m * n):\n print(i)\n # add edge from source\n gr.add_edge((source, i), (prob_fg[i] / (prob_fg[i] + prob_bg[i])))\n # add edge to sink\n gr.add_edge((i, sink), (prob_bg[i] / (prob_fg[i] + prob_bg[i])))\n # add edges to neighbors\n if i % n != 0: # left exists\n edge_wt = kappa * \\\n np.exp(-1.0 * sum((vim[i] - vim[i - 1])**2) / sigma)\n 
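A quick sanity-check sketch for the `Graph` class defined above: a four-node network with source 0 and sink 3, where the two augmenting paths carry capacities 2 and 1, so `edmonds_karp` should report a max flow of 3 and `find_cut` should place only the source on the 0-side:

# Tiny max-flow exercise for the Graph class above (node ids are indices).
g = Graph()
g.add_node(list(range(4)))
g.add_edge((0, 1), 2)
g.add_edge((0, 2), 1)
g.add_edge((1, 3), 2)
g.add_edge((2, 3), 1)
g.build_flow(source=0, sink=3)
flow, flows = g.edmonds_karp()   # flow == 3
cut = g.find_cut()               # {0: 0, 1: 1, 2: 1, 3: 1}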
gr.add_edge((i, i - 1), edge_wt)\n if (i + 1) % n != 0: # right exists\n edge_wt = kappa * \\\n np.exp(-1.0 * sum((vim[i] - vim[i + 1])**2) / sigma)\n gr.add_edge((i, i + 1), edge_wt)\n if i // n != 0: # up exists\n edge_wt = kappa * \\\n np.exp(-1.0 * sum((vim[i] - vim[i - n])**2) / sigma)\n gr.add_edge((i, i - n), edge_wt)\n if i // n != m - 1: # down exists\n edge_wt = kappa * \\\n np.exp(-1.0 * sum((vim[i] - vim[i + n])**2) / sigma)\n gr.add_edge((i, i + n), edge_wt)\n gr.build_flow(source, sink)\n return gr\n\n\ndef cut_graph(gr, imsize):\n h, w = imsize\n flows = gr.edmonds_karp()\n cuts = gr.find_cut()\n # convert graph to image with labels\n res = np.zeros(h * w)\n for pos in list(cuts.keys())[0:-2]: # don't add source/sink\n res[pos - 1] = cuts[pos]\n return res.reshape((h, w))\n\n\ndef graph_cuts(img, scale, x, y):\n img_down = imresize(img, scale, interp='bilinear') # downsample\n size = img_down.shape[:2]\n img_flat = np.concatenate((img_down[:, :, 0].flatten().reshape(-1, 1),\n img_down[:, :, 1].flatten().reshape(-1, 1),\n img_down[:, :, 2].flatten().reshape(-1, 1)), axis=1)\n gmm = GaussianMixture(\n n_components=2,\n covariance_type='full',\n max_iter=500,\n n_init=5).fit(img_flat)\n prob = gmm.predict_proba(img_flat)\n labels = np.argmax(gmm.predict_proba(img_flat), axis=1)\n prob_fg, prob_bg = np.array([i[0] for i in prob.tolist()]), np.array([\n i[1] for i in prob.tolist()])\n # create graph\n g = build_bayes_graph(img_down, prob_fg, prob_bg, sigma=1e20, kappa=1)\n # cut the graph\n mask = cut_graph(g, size)\n mask = cv2.resize(mask, (img.shape[1], img.shape[0])) # upsample\n mask = np.array(mask, dtype=np.uint8)\n cuts = cv2.bitwise_and(img, img, mask=mask)\n sy = 300 / img.shape[0]\n sx = 450 / img.shape[1]\n print(size, int(y / sy), int(x / sx), sy, sx)\n if np.all(cuts[int(y / sy), int(x / sx)] == 0):\n cuts = img - cuts\n plt.figure()\n plt.imshow(cv2.cvtColor(cuts, cv2.COLOR_BGR2RGB))\n return mask, cuts\n\n\ndef main_gmm(path, scale, x, y):\n img = cv2.imread(path)\n start_time = time.time()\n mask, cuts = graph_cuts(img, scale, x, y)\n end_time = time.time()\n total = end_time - start_time\n print('Running time: {:2}s'.format(total))\n plt.show()\n return mask, cuts\n\n#-------end gmm------\n\n\n\nclass UI(QWidget):\n # global imagepath\n\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Image Segmentation\")\n self.setGeometry(100, 100, 500, 550)\n\n self.inputImagePath = None\n self.x = 0\n self.y = 0\n\n self.scalevalue = 1\n\n self.imageselected = False\n\n # self.coordinateIsButton = False\n\n self.openButton = QPushButton('Select Image', self)\n self.openButton.setToolTip('format = jpg/png')\n self.openButton.clicked.connect(self.openButtonOnClick)\n self.openButton.setShortcut(\"Ctrl+O\")\n\n # self.saveButton = QPushButton('Save Image', self)\n # self.saveButton.clicked.connect(self.saveImage)\n # self.saveButton.setShortcut(\"Ctrl+S\")\n\n self.closeButton = QPushButton('Close', self)\n self.closeButton.clicked.connect(self.closeApp)\n self.closeButton.setShortcut(\"Ctrl+E\")\n\n self.inputImageView = QLabel(\"input image\")\n\n\n # self.outputImageView = QLabel(\"output image\")\n # self.foreground = QLabel(\"foreground\")\n # self.background = QLabel(\"background\")\n\n self.startButton = QPushButton('Start', self)\n self.startButton.setShortcut(\"Ctrl+S\")\n self.startButton.clicked.connect(self.startButtonOnClick)\n\n self.scaleLabel = QLabel(self)\n self.scaleLabel.setText('Scale value(0-1):')\n self.line = QLineEdit(self)\n\n 
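`scipy.misc.imresize` was deprecated in SciPy 1.0 and removed in 1.3, so the downsampling step at the top of `graph_cuts` fails on current installs. An equivalent call with cv2, which is already imported; `scale` is the fractional factor the UI passes in:

# Drop-in replacement for the imresize() call in graph_cuts; cv2.resize
# takes (width, height) order, and INTER_LINEAR matches interp='bilinear'.
def downsample(img, scale):
    h, w = img.shape[:2]
    return cv2.resize(img, (max(1, int(w * scale)), max(1, int(h * scale))),
                      interpolation=cv2.INTER_LINEAR)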
self.okbutton = QPushButton('OK')\n self.okbutton.clicked.connect(self.okButtonOnClick)\n\n\n self. coordinatesLabel = QLabel(self)\n self.coordinatesLabel.setText(\"coordinates = (\" + str(self.x) + \" , \" + str(self.y) + \")\")\n self.coordinatesLabel.setFixedHeight(15)\n\n # self.startCoorBtn = QPushButton('Start from Selected Coordinates')\n # self.startCoorBtn.setShortcut(\"Ctrl+C\")\n # self.startCoorBtn.clicked.connect(self.startCoorOnClick)\n\n self.horizontal = QHBoxLayout()\n self.horizontal.addWidget(self.scaleLabel)\n self.horizontal.addWidget(self.line)\n self.horizontal.addWidget(self.okbutton)\n\n # self.horizontalImages2 = QHBoxLayout()\n # self.horizontalImages2.addWidget(self.foreground)\n # self.horizontalImages2.addWidget(self.background)\n\n self.GroupBox1 = QGroupBox()\n self.GroupBox1.setLayout(self.horizontal)\n self.GroupBox1.setFixedHeight(50)\n\n # self.imageGroupBox2 = QGroupBox(\"Segmentation\")\n # self.imageGroupBox2.setLayout(self.horizontalImages2)\n\n self.vlayout = QVBoxLayout()\n # self.vlayout.addWidget(self.imageGroupBox1)\n # self.vlayout.addWidget(self.imageGroupBox2)\n self.vlayout.addWidget(self.inputImageView)\n self.vlayout.addWidget(self.coordinatesLabel)\n self.vlayout.addWidget(self.GroupBox1)\n self.vlayout.addWidget(self.openButton)\n self.vlayout.addWidget(self.startButton)\n # self.vlayout.addWidget(self.line)\n # self.vlayout.addWidget(self.startCoorBtn)\n # self.vlayout.addWidget(self.saveButton)\n self.vlayout.addWidget(self.closeButton)\n # self.vlayout.addWidget(self.horizontal)\n self.setLayout(self.vlayout)\n\n self.show()\n\n def okButtonOnClick(self):\n try:\n tmp = float(self.line.text())\n if tmp > 1 or tmp < 0:\n notInRangeWarning = QMessageBox.information(self, \"Warning\", \"Please input number in range(0, 1)!\")\n # print(\"input is not in range 0-1\")\n else:\n self.scalevalue = float(self.line.text())\n self.coordinatesLabel.setText(\"coordinates = (\" + str(self.x) + \" , \" + str(self.y) + \")\")\n # print(self.scalevalue)\n except BaseException:\n notNumberWarning = QMessageBox.information(self, \"Warning\", \"Please input valid number!\")\n # print(\"input is not number\")\n\n def closeApp(self):\n reply = QMessageBox.question(\n self,\n \"Close Message\",\n \"Are you sure to exit?\",\n QMessageBox.Yes | QMessageBox.No)\n if reply == QMessageBox.Yes:\n self.close()\n\n def openFileNameDialog(self):\n self.inputImagePath, _ = QFileDialog.getOpenFileName(\n self, \"Open Image\", \"\", \"Images (*.png *.jpeg *.jpg *.bmp)\")\n if self.inputImagePath:\n # print(self.inputImagePath)\n self.showInputImageView()\n\n def showInputImageView(self):\n pixmap = QPixmap(self.inputImagePath).scaled(450, 300)\n self.inputImageView.setPixmap(pixmap)\n self.imageselected = True\n\n def mousePressEvent(self, QMouseEvent):\n tmpx = QMouseEvent.x() - 20\n tmpy = QMouseEvent.y() - 20\n if tmpx < 450 and tmpx > 0 and tmpx > 0 and tmpy < 300:\n self.x = QMouseEvent.x() - 20\n self.y = QMouseEvent.y() - 20\n self.coordinatesLabel.setText(\"coordinates = (\" + str(self.x) + \" , \" + str(self.y) + \")\")\n # print(\"self.x = \", self.x, \"self.y = \", self.y)\n # print(\"click x = \", QMouseEvent.x(), \"click y = \", QMouseEvent.y())\n\n # def checkInputFormatMsgBox(self):\n # checkbox = QMessageBox.about(self, \"Image Format Incorrect\", \"Please select jpg/png format only\")\n\n # @pyqtSlot()\n def openButtonOnClick(self):\n self.openFileNameDialog()\n\n def startButtonOnClick(self):\n if self.imageselected:\n imagepath = 
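The hit test in `mousePressEvent` above checks `tmpx > 0` twice and never bounds `tmpy` from below, so clicks above the preview still move the target point. The guard presumably intended for the 450×300 preview area, as a sketch:

# Bounds check with both axes constrained (the original repeats tmpx > 0
# and leaves tmpy unbounded below).
if 0 < tmpx < 450 and 0 < tmpy < 300:
    self.x, self.y = tmpx, tmpy
    self.coordinatesLabel.setText(
        "coordinates = (" + str(self.x) + " , " + str(self.y) + ")")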
self.inputImagePath\n # print(imagepath)\n # print(\"self.scalevalue = \", self.scalevalue)\n mask, cuts = main_gmm(imagepath, self.scalevalue, self.x, self.y)\n else:\n msg = QMessageBox.information(self, \"Warning\", \"No image selected\")\n # print(\"no image selected\")\n\n # def startCoorOnClick(self):\n # print(self.x - 20, self.y - 20)\n\n\n#if __name__ == '__main__':\ndef main():\n app = QApplication(sys.argv)\n firstUI = UI()\n sys.exit(app.exec_())\n\n","sub_path":"cython/ui_comb.py","file_name":"ui_comb.py","file_ext":"py","file_size_in_byte":13221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"193644392","text":"import pandas as pd \nimport numpy as np \nimport matplotlib.pyplot as plt \nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.linear_model import Ridge\n\n\n\n\"\"\"\nFrom https://towardsdatascience.com/how-to-perform-lasso-and-ridge-regression-in-python-3b3b75541ad8\n\"\"\"\n\nDATAPATH = 'data/Advertising.csv'\n\ndata = pd.read_csv(DATAPATH)\n\nprint(data.head)\n\ndata.drop(['Unnamed: 0'], axis=1, inplace=True)\n\nprint(data.head)\n\ndef scatter_plot(feature, target):\n\tplt.figure(figsize=(16,8))\n\tplt.scatter(\n\t\t\tdata[feature],\n\t\t\tdata[target],\n\t\t\tc='black'\n\t\t)\n\n\tplt.xlabel(\"Money spent on {} ads($)\".format(feature))\n\tplt.ylabel(\"Sales\")\n\tplt.savefig('scatter.png')\n\n\nscatter_plot('TV', 'sales')\n\n\nridge = Ridge()\n\ny = data['sales'].values.reshape(-1,1)\n\nprint(y)\n\nridge.fit(data, y)\n\nprint(ridge.coef_)\n\nprint(data.values[0])\n\nprint(ridge.predict([data.values[-1]]))","sub_path":"ridge_regression.py","file_name":"ridge_regression.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"461793722","text":"# this is basically to classfiy positive or neg\n#use this corpora to train the model\n#then using the model to find the positive and negative words\n#which will be used with twitter data for sentiment analysis\n\n#so what is sentiment analysis\n# use the tweets to find positive or negative views\n\n\n\nimport nltk\nimport random\nfrom nltk.corpus import movie_reviews\n\ndocuments = [(list(movie_reviews.words(fileid)), category)\n for category in movie_reviews.categories()\n for fileid in movie_reviews.fileids(category)]\n\nrandom.shuffle(documents)\n\nprint(documents[1])\n#documents = []\n#categories = movie_reviews.categories()\n#for category in categories:\n #print(category)#pos,neg\n #for fileid in movie_reviews.fileids(category):\n # print(fileid)\n # print(movie_reviews.words(fileid))\n\n\n#moving on to the frequency distribution\n\nall_words = []\nfor w in movie_reviews.words():\n all_words.append(w.lower())\n \nall_words = nltk.FreqDist(all_words)\n#print(all_words)\n#top 15 common words\nprint(all_words.most_common(15))\n#how many times the word and appears out of total\nprint(all_words[\"and\"])\n \n \n","sub_path":"text_classification.py","file_name":"text_classification.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"466613245","text":"import json\nfrom nose.tools import (\n eq_,\n set_trace,\n)\n\nfrom oauth2client import client as GoogleClient\n\nfrom .. 
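In the ridge script above, `print(data.head)` prints the bound method rather than the frame (`data.head()` is the intended call), and `ridge.fit(data, y)` fits on the full frame, so the `sales` target leaks into its own predictors. A sketch with the target held out, using the `cross_val_score` import the script never touches; the column names assume the usual Advertising.csv layout:

# Fit on the ad-spend columns only; cross_val_score gives an honest R^2.
X = data[['TV', 'radio', 'newspaper']]   # assumed Advertising.csv columns
y = data['sales']

ridge = Ridge(alpha=1.0)
print(cross_val_score(ridge, X, y, cv=5).mean())  # mean R^2 over 5 folds
ridge.fit(X, y)
print(ridge.coef_)
print(ridge.predict(X.iloc[[-1]]))  # prediction for the last row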
import DatabaseTest\nfrom core.util.problem_detail import ProblemDetail\n\nfrom api.admin.google_oauth_admin_authentication_provider import (\n GoogleOAuthAdminAuthenticationProvider,\n DummyGoogleClient,\n)\nfrom core.model import (\n ExternalIntegration,\n create,\n)\n\nclass TestGoogleOAuthAdminAuthenticationProvider(DatabaseTest):\n\n def test_callback(self):\n super(TestGoogleOAuthAdminAuthenticationProvider, self).setup()\n auth_integration, ignore = create(\n self._db, ExternalIntegration,\n protocol=ExternalIntegration.GOOGLE_OAUTH,\n goal=ExternalIntegration.ADMIN_AUTH_GOAL\n )\n self.google = GoogleOAuthAdminAuthenticationProvider(auth_integration, \"\", test_mode=True)\n\n # Returns a problem detail when Google returns an error.\n error_response, redirect = self.google.callback({'error' : 'access_denied'})\n eq_(True, isinstance(error_response, ProblemDetail))\n eq_(400, error_response.status_code)\n eq_(True, error_response.detail.endswith('access_denied'))\n eq_(None, redirect)\n\n # Successful case creates a dict of admin details\n success, redirect = self.google.callback({'code' : 'abc'})\n eq_('example@nypl.org', success['email'])\n default_credentials = json.dumps({\"id_token\": {\"email\": \"example@nypl.org\", \"hd\": \"nypl.org\"}})\n eq_(default_credentials, success['credentials'])\n\n # Returns a problem detail when the oauth client library\n # raises an exception.\n class ExceptionRaisingClient(DummyGoogleClient):\n def step2_exchange(self, auth_code):\n raise GoogleClient.FlowExchangeError(\"mock error\")\n self.google.dummy_client = ExceptionRaisingClient()\n error_response, redirect = self.google.callback({'code' : 'abc'})\n eq_(True, isinstance(error_response, ProblemDetail))\n eq_(400, error_response.status_code)\n eq_(True, error_response.detail.endswith('mock error'))\n eq_(None, redirect)\n\n def test_domains(self):\n super(TestGoogleOAuthAdminAuthenticationProvider, self).setup()\n auth_integration, ignore = create(\n self._db, ExternalIntegration,\n protocol=ExternalIntegration.GOOGLE_OAUTH,\n goal=ExternalIntegration.ADMIN_AUTH_GOAL\n )\n auth_integration.set_setting(\"domains\", json.dumps([\"nypl.org\"]))\n \n google = GoogleOAuthAdminAuthenticationProvider(auth_integration, \"\", test_mode=True)\n\n eq_([\"nypl.org\"], google.domains)\n","sub_path":"tests/admin/test_google_oauth_admin_authentication_provider.py","file_name":"test_google_oauth_admin_authentication_provider.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"216472785","text":"import csv\n#import MySQLdb\n\nf = open(\"SIASG_ITENS_LIC.CSV\", \"r\")\n#f = open(\"teste.csv\", \"r\")\n#abre arquivo de teste com 100 registros\n\n#dados que seram passados para o bd:\n\n\n# Modalidade_Licitacao->(data[1:2])\n# Data_Referencia_Compra->(data[5:6])\n# Identificacao_ItemCompra->(data[8:9])\n# Cpf_Cnpj_Fornecedor->(data[23:24])\n# Valor_Total_Homologado->(data[31:32])\n# Poder_Unidade->(data[41:42])\n\nline = []\ni=0\nwhile f.readline() != '':\n\trow = f.readline()\n\tdata = row.split(\"\\xac\")\n\tline.append((data[8:9], data[5:6], data[23:24], data[41:42], data[1:2], data[31:32]))\n\ti=i+1\n\ntam = len(line) - 1\nline = line[1:tam]\nprint(line[0:1])\nprint(len(line))\n\n#db = MySQLdb.connect(host=\"localhost\", \n# user=\"root\", \n# passwd=\"root\", \n# db=\"mydb\") \n\n#cur = db.cursor()\n#\n#query = (\"INSERT INTO licitacoes (iditemcompra, datareferencia, cnpj, poderunidade, modalidade, valorpreco) 
VALUES (%s, %s, %s, %s, %s, %s)\")\n\n\n#cur.executemany(query, line)\n\n#db.commit()\n\n#db.close()\n","sub_path":"selecionandocampos5G.py","file_name":"selecionandocampos5G.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"466949007","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\n\nimport requests\nimport datetime\nimport pandas as pd\n\nfrom apps.cso.models import (Observation, ObservationList, BBOX)\n\n\nOBSERVATION_TYPE = 'snow_depth'\nBASE_URL = 'https://api.mountainhub.com/timeline'\nHEADER = {\n 'Accept-version': '1'\n}\nSOURCE = 'mtnhub'\n\n\ndef parse_date(value):\n return datetime.datetime.fromtimestamp(value/1000.0)\n\n\ndef parse_record(item):\n return Observation(id=item.id,\n source_id=item.obs_id,\n name=item.author_name,\n reported_at=parse_date(item.timestamp),\n coords=[item.lat, item.lng],\n snow_depth=item.snow_depth,\n source=SOURCE)\n\n\ndef parse_records(results):\n observations = []\n\n for idx, res in enumerate(results):\n observation = res['observation']\n actor = res['actor']\n obs_data = {}\n if 'full_name' in actor.keys():\n obs_data['author_name'] = actor['full_name']\n elif 'fullName' in actor.keys():\n obs_data['author_name'] = actor['fullName']\n obs_data['id'] = idx\n obs_data['obs_id'] = observation['_id']\n obs_data['timestamp'] = int(observation['reported_at'])\n obs_data['lat'] = observation['location'][1]\n obs_data['lng'] = observation['location'][0]\n obs_data['obs_type'] = observation['type']\n if len(observation['details']) > 0:\n if observation['details'][0]:\n if 'snowpack_depth' in observation['details'][0].keys():\n obs_data['snow_depth'] = observation['details'][0]['snowpack_depth']\n\n observations.append(obs_data)\n\n df = pd.DataFrame.from_records(observations).dropna()\n\n return df[df['snow_depth'] != 'undefined']\n\n\ndef prepare_bbox(north_east_lat, north_east_lng, south_west_lat, south_west_lng):\n box = BBOX(north_east_lat, north_east_lng, south_west_lat, south_west_lng)\n return {\n 'north_east_lat': box.north_east_lat,\n 'north_east_lng': box.north_east_lng,\n 'south_west_lat': box.south_west_lat,\n 'south_west_lng': box.south_west_lng,\n }\n\n\ndef get_records(**kwargs):\n from_date = kwargs.get('from_date')\n north_east_lat = kwargs.get('north_east_lat')\n north_east_lng = kwargs.get('north_east_lng')\n south_west_lat = kwargs.get('south_west_lat')\n south_west_lng = kwargs.get('south_west_lng')\n\n params = {\n 'publisher': 'all',\n 'obs_type': 'snow_conditions',\n 'limit': 1000\n }\n\n if north_east_lat and north_east_lng \\\n and south_west_lat and south_west_lng:\n params.update(prepare_bbox(north_east_lat, north_east_lng, south_west_lat, south_west_lng))\n\n if from_date:\n params.update({\n 'since': from_date\n })\n\n response = requests.get(BASE_URL, params=params, headers=HEADER)\n\n data = response.json()\n\n if 'results' not in data:\n raise ValueError(data)\n\n results = data['results']\n count = len(data['results'])\n\n obsdf = parse_records(results)\n\n return ObservationList(\n obs_type=OBSERVATION_TYPE,\n date_start=parse_date(data['pagination']['before']),\n date_end=parse_date(data['pagination']['after']),\n results=[parse_record(item) for i, item in obsdf.iterrows()],\n 
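The reader loop above calls `f.readline()` both in the `while` test and in the body, so every other record is silently dropped, and the file handle is never closed. A sketch of the same six-column extraction with `csv.reader` (the module is already imported) and the `'\xac'` separator the script splits on:

# Read each record exactly once; column indices follow the comments above
# (8 = Identificacao_ItemCompra, 5 = Data_Referencia_Compra,
#  23 = Cpf_Cnpj_Fornecedor, 41 = Poder_Unidade, 1 = Modalidade_Licitacao,
#  31 = Valor_Total_Homologado).
with open("SIASG_ITENS_LIC.CSV", "r") as f:
    reader = csv.reader(f, delimiter="\xac")
    next(reader)  # skip the header row
    line = [(row[8], row[5], row[23], row[41], row[1], row[31])
            for row in reader if len(row) > 41]
print(len(line), line[:1])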
count=count)\n","sub_path":"src/csoapi/apps/cso/types/snowobs.py","file_name":"snowobs.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"24883133","text":"import sys\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport math\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D\n\n#Print python version\nprint('Python', sys.version)\n# https://stackoverflow.com/questions/52351255/keras-numpy-input-typeerror-error-converting-shape-to-a-tensorshape-int-ar\n\n# Import the data\ntraining_data = pd.read_csv('training_data.csv')\ntraining_data.fillna(0)\n\n# Shuffle the data\ntraining_data = training_data.sample(frac=1).reset_index(drop=True)\n\n# Switch for multiple feautre sets\ndef getFeatureSet(number):\n return {\n 1 : ['followers_count', 'friends_count', 'statuses_count', 'favourites_count', 'listed_count', 'verified', 'bot'],\n 2 : ['followers_count', 'friends_count', 'statuses_count', 'favourites_count', 'listed_count', 'verified', 'nameBi','descriptionBi','bot'],\n 3 : ['followers_count', 'friends_count', 'statuses_count', 'favourites_count', 'listed_count_binary', 'verified', 'name_binary'\n ,'description_binary', 'bot'],\n }[number]\n\n# Configure data and features\nmykeywords = r'bot|b0t|papers|#botally|follow|every|made|//|random|day|daily|tweets|fuck|XXX|sex|truthe|fake|updates'\ntraining_data['name_binary'] = training_data.name.str.contains(mykeywords, case=False, na=False)\ntraining_data['description_binary'] = training_data.description.str.contains(mykeywords, case=False, na=False)\ntraining_data['status_binary'] = training_data.status.str.contains(mykeywords, case=False, na=False)\n\ntraining_data['listed_count_binary'] = (training_data.listed_count>20000)==False\n\n# Create input\nfeatureSet = getFeatureSet(3)\ntest_size = 0.2\n\nfeatures = np.array(training_data[featureSet]) # Np array of all data SHAPE (2797, 9)\ntesting_size = int(test_size*len(features)) # Number of row in testing\n\n# Training data 90% \ntrain_x = list(features[:-testing_size,:-1]) # Input \ntrain_y = list(features[:-testing_size,-1:]) # Answers\n# Testing data 10%\ntest_x = list(features[:testing_size,:-1])\ntest_y = list(features[:testing_size,-1:])\n\ntrain_x = np.array(train_x)\ntrain_y = np.array(train_y)\ntest_x = np.array(test_x)\ntest_y = np.array(test_y)\n\nprint(train_x.shape)\nprint(train_x.shape[1])\n# print(train_y.shape)\n# print(test_x.shape)\n# print(test_y.shape)\n\n#train_x = tf.keras.utils.normalize(train_x, axis=1)\n#test_y = tf.keras.utils.normalize(train_y, axis=1)\n\n# Define Model\nmodel = tf.keras.models.Sequential()\nmodel.add(tf.keras.layers.ZeroPadding2D((1,1), input_shape=(8, 1, 1)))\nmodel.add(tf.keras.layers.Convolution2D(32, kernel_size=(3,3)))\nmodel.add(tf.keras.layers.ZeroPadding2D((1,1)))\nmodel.add(tf.keras.layers.Convolution2D(32, activation=tf.nn.relu, kernel_size=(3,3))) # Hidden Layer 1 (neurons in layer, actvication function)\n#model.add(tf.keras.layers.Dense(64, activation=tf.nn.relu)) # Hidden Layer 2\nmodel.add(tf.keras.layers.Flatten())\nmodel.add(tf.keras.layers.Dense(1, activation=tf.nn.softmax)) # Output layer\n\n# Compile the model\nmodel.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n# Train the model\nmodel.fit(train_x, train_y, epochs=4)\n\nval_loss, val_acc = model.evaluate(test_x, test_y)\nprint(val_loss, val_acc)","sub_path":"PythonDeepLearningKeras 
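`parse_date` above interprets MountainHub's epoch-millisecond timestamps with `datetime.fromtimestamp`, which applies the machine's local timezone; a timezone-aware variant, assuming the timestamps are plain Unix epochs:

# UTC-aware version of parse_date; timestamps arrive as epoch milliseconds.
from datetime import datetime, timezone

def parse_date_utc(ms):
    return datetime.fromtimestamp(ms / 1000.0, tz=timezone.utc)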
CNN.py","file_name":"PythonDeepLearningKeras CNN.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"233634153","text":"'''\nCreated on Aug 29, 2014\n\n@author: Heiko Strathmann\n'''\n\nfrom scipy.spatial.distance import squareform, pdist\n\nfrom theano import function\nimport theano\nfrom theano.configparser import TheanoConfigParser\n\nimport numpy as np\nfrom pymc.model import modelcontext\nfrom pymc.step_methods.arraystep import ArrayStep, metrop_select\nfrom pymc.step_methods.metropolis import tune\nimport scipy as sp\nimport theano.tensor as T\n\nfrom ..core import *\n\n\n# To avoid Theano complaining about missing test values\nTheanoConfigParser().compute_test_value = 'off'\n\n\n__all__ = ['KameleonOracle']\n\n\nclass KameleonOracle(ArrayStep):\n \"\"\"\n Kernel Adaptive Metropolis-Hastings sampling step with a fixed set of orcale\n samples. Automatic tuning of the scaling is possible via a simple schedule\n for a given number of iterations.\n \n Based on \"Kernel Adaptive Metopolis Hastings\" by D. Sejdinovic, H. Strathmann,\n M. Lomeli, C. Andrieu, A. Gretton\n http://jmlr.org/proceedings/papers/v32/sejdinovic14.html\n \n See also https://github.com/karlnapf/kameleon-mcmc for experimental code.\n\n Parameters\n ----------\n vars : list\n List of variables for sampler\n Z : 2d numpy array\n Oracle sample to represent target covariance structure\n proposal_dist : function\n Function that returns zero-mean deviates when parameterized with\n S (and n). Defaults to quad_potential.\n gamma2 : scalar\n Exploration term in proposal\n nu2 : scalar\n Scaling of the covariance part of the proposal\n kernel : Kernel instance\n Kernel to use for representing covariance structure in feature space.\n Must implement interface for kernel and gradient, see for example GaussianKernel\n model : PyMC Model\n Optional model for sampling step. 
Defaults to None (taken from context).\n\n \"\"\"\n def __init__(self, vars=None, Z=None, gamma2=0.1, nu2=1., kernel=None,\n tune=True, tune_interval=100, model=None, dist=None):\n model = modelcontext(model)\n if vars is None:\n vars = model.vars\n \n self.Z = Z\n self.kernel = kernel\n self.gamma2 = gamma2\n self.nu2 = nu2\n self.tune = tune\n \n # empty proposal distribution and last likelihood\n self.q_dist = None\n self.log_target = -np.inf\n \n # statistics for tuning scaling\n self.tune = tune\n self.tune_interval = tune_interval\n self.steps_until_tune = tune_interval\n self.accepted = 0\n \n super(KameleonOracle, self).__init__(vars, [model.fastlogp])\n\n def astep(self, q0, logp):\n # sample from kernel based Gaussian proposal\n q_dist = self.construct_proposal(q0)\n q = np.ravel(q_dist.sample())\n \n # evaluate target log probability\n logp_q = logp(q)\n \n # MH accept/reject step\n if self.q_dist is None:\n q_new = q\n else:\n q_new = metrop_select(logp_q + q_dist.log_pdf(q0) \\\n - self.log_pdf_target - self.q_dist.log_pdf(q), q, q0)\n \n # adapt\n if self.tune and not self.steps_until_tune:\n # tune scaling parameter using metropolis method\n self.nu2 = tune(self.nu2, self.accepted / float(self.tune_interval))\n # Reset counter\n self.steps_until_tune = self.tune_interval\n self.accepted = 0\n \n # update log-pdf and proposal distribution object on accept\n if any(q_new != q0):\n self.accepted += 1\n self.q_dist = q_dist\n self.log_pdf_target = logp_q\n\n self.steps_until_tune -= 1\n\n return q_new\n\n def compute_constants(self, y):\n \"\"\"\n Pre-computes constants of the log density of the proposal distribution,\n which is Gaussian as p(x|y) ~ N(mu, R)\n where\n mu = y-a\n a = 0\n R = gamma^2 I + M M^T\n M = 2 [\\nabla_x k(x,z_i]|_x=y\n \n Returns (mu,L_R), where L_R is lower Cholesky factor of R\n \"\"\"\n assert(len(np.shape(y)) == 1)\n \n # M = 2 [\\nabla_x k(x,z_i]|_x=y\n R = self.gamma2 * np.eye(len(y))\n if self.Z is not None:\n M = 2 * self.kernel.gradient(y, self.Z)\n # R = gamma^2 I + \\nu^2 * M H M^T\n H = np.eye(len(self.Z)) - 1.0 / len(self.Z)\n R += self.nu2 * M.T.dot(H.dot(M))\n \n L_R = np.linalg.cholesky(R)\n \n return y.copy(), L_R\n \n def construct_proposal(self, y):\n \"\"\"\n Constructs the Kameleon MCMC proposal centred at y, using history Z\n \n The proposal is a Gaussian based on the kernel values between y and all\n points in the chain history.\n \"\"\"\n mu, L = self.compute_constants(y)\n \n return Gaussian(mu, L, is_cholesky=True)\n\nclass Gaussian():\n \"\"\"\n Helper class to sample from and evaluate log-pdf of a multivariate Gaussian,\n using efficient Cholesky based representation (Cholesky only computed once)\n \"\"\"\n def __init__(self, mu, Sigma, is_cholesky=False):\n self.mu = mu\n self.is_cholesky = is_cholesky\n \n if self.is_cholesky:\n self.L = Sigma\n else:\n self.L = np.linalg.cholesky(Sigma)\n \n self.dimension = len(mu)\n \n def log_pdf(self, X):\n # duck typing for shape\n if len(np.shape(X)) == 1:\n X = X.reshape(1, len(X))\n \n log_determinant_part = -sum(np.log(np.diag(self.L)))\n \n quadratic_parts = np.zeros(len(X))\n for i in range(len(X)):\n x = X[i] - self.mu\n \n # solve y=K^(-1)x = L^(-T)L^(-1)x\n y = sp.linalg.solve_triangular(self.L, x.T, lower=True)\n y = sp.linalg.solve_triangular(self.L.T, y, lower=False)\n quadratic_parts[i] = -0.5 * x.dot(y)\n \n const_part = -0.5 * len(self.L) * np.log(2 * np.pi)\n \n return const_part + log_determinant_part + quadratic_parts\n \n def sample(self, n=1):\n V = 
np.random.randn(self.dimension, n)\n\n # map to our desired Gaussian and transpose to have row-wise vectors\n return self.L.dot(V).T + self.mu\n\ndef theano_sq_dists_mat_expr(X, Y):\n return (-2 * X.dot(Y.T).T + T.sum(X ** 2, 1).T).T + T.sum(Y ** 2, 1)\n\ndef theano_gaussian_kernel_expr(sq_dists, sigma):\n return T.exp(-sq_dists / (2.*sigma ** 2))\n\ndef theano_sq_dists_vec_expr(x, Y):\n # element wise vector norm\n Y_norm = T.sum(Y ** 2, 1)\n xY_terms = x.T.dot(Y.T)\n \n # expanded sq euclidean distance\n return T.sum(x ** 2) + Y_norm - 2 * xY_terms\n\nclass GaussianKernel():\n \"\"\"\n Helper class to represent a Gaussian kernel, with methods to compute kernel\n function and its gradient wrt the left argument.\n Uses Theano's autodiff for computing kernel gradients.\n \"\"\"\n \n # compile theano functions\n X = T.dmatrix('X')\n x = T.dvector('x')\n Y = T.dmatrix('Y')\n sigma = T.dscalar('sigma')\n \n # kernel expressions as for left input being matrix or vector\n sq_dist_mat_expr = theano_sq_dists_mat_expr(X, Y)\n sq_dist_vec_expr = theano_sq_dists_vec_expr(x, Y)\n K_expr = theano_gaussian_kernel_expr(sq_dist_mat_expr, sigma)\n k_expr = theano_gaussian_kernel_expr(sq_dist_vec_expr, sigma)\n \n # compile\n theano_kernel_mat = function(inputs=[X, Y, sigma], outputs=K_expr)\n theano_kernel_vec = function(inputs=[x, Y, sigma], outputs=k_expr)\n theano_kernel_vec_grad_x = function(inputs=[x, Y, sigma],\n outputs=theano.gradient.jacobian(k_expr, x))\n \n @staticmethod\n def gaussian_median_heuristic(X):\n dists = squareform(pdist(X, 'sqeuclidean'))\n median_dist = np.median(dists[dists > 0])\n sigma = np.sqrt(0.5 * median_dist)\n return sigma\n \n def __init__(self, width):\n self.width = width\n \n def kernel(self, X, Y=None):\n \"\"\"\n Computes the standard Gaussian kernel k(x,y)=exp(-0.5* ||x-y||**2 / sigma**2)\n \n X - 2d array, samples on right hand side\n Y - 2d array, samples on left hand side, can be None in which case its replaced by X\n \"\"\"\n return self.theano_kernel(X, Y, self.width)\n \n def gradient(self, x, Y):\n \"\"\"\n Computes the gradient of the Gaussian kernel wrt. 
to the left argument, i.e.\n k(x,y)=exp(-0.5* ||x-y||**2 / sigma**2), which is\n \\nabla_x k(x,y)=1.0/sigma**2 k(x,y)(y-x)\n Given a set of row vectors Y, this computes the\n gradient for every pair (x,y) for y in Y.\n \n x - single sample on right hand side (1d array)\n Y - samples on left hand side (2d array)\n \"\"\"\n return self.theano_kernel_vec_grad_x(x, Y, self.width)","sub_path":"pymc/step_methods/kernel_metropolis.py","file_name":"kernel_metropolis.py","file_ext":"py","file_size_in_byte":8937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"93034566","text":"from classifier import * \nfrom scipy.io import loadmat\n\nd = DecisionTree({\n 'depth' : 9,\n})\n\nr = RandomForest(parameters={\n 'trees' : 100,\n 'samples' : 800,\n 'depth' : 5,\n 'features' : 10 \n})\n\na = AdaBoost(parameters={\n 'iterations' : 20,\n 'depth' : 2,\n})\n\nprint(\"Loading data\")\ndata = loadmat('spamData.mat')\nx = data['Xtrain']\ny = data['ytrain']\nc = Collection(x, y)\n\nxtest = data['Xtest']\nytest = data['ytest']\n\nt = Collection(xtest, ytest)\n\ndef benchmark(classifier):\n classifier.train(c)\n print(\"Training Error rate: %f\" % classifier.test(c))\n print(\"Testing Error rate: %f\" % classifier.test(t))\n\nprint(\"Training Decision Tree\")\nbenchmark(d)\nprint(\"Training Random Forest\")\nbenchmark(r)\nprint(\"Training AdaBoost\")\nbenchmark(a)\n","sub_path":"code/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"390760450","text":"'''\r\nIf you add some pictures to this game, make a folder in /home/the77lab/RehabGame/picture/squeegee/image/\r\nand add pictures you want.\r\nThey are automatically loaded.\r\n'''\r\n\r\nimport NanotecLibrary as NT # import python library to control nanotec motors\r\n\r\nimport sys, cv2, os, subprocess, re, select\r\nos.chdir(\"../rehab-games\")\r\n\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtWidgets import *\r\n\r\nwinHeight = 900\r\nwinWidth = 1200\r\nscene = image = view = None\r\npicHeight = 0\r\npicWidth = 0\r\npicPosX = picPosY = 0\r\nscale = 1.0\r\nfilepath = \"./picture/squeegee/image/scenery/cherry_blossoms.jpg\"\r\ntitle = \"cherry_blossoms\"\r\n\r\nnc_cmd = 'netcat 192.168.1.12 3333'\r\nprocess = subprocess.Popen(nc_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)\r\n\r\n\r\n\r\n\r\n##-------------------------------------------\r\n# character setting\r\n\r\n\r\nclass Calc_Pic(QGraphicsItem):\r\n\tdef __init__(self):\r\n\t\tsuper(Calc_Pic, self).__init__()\r\n\t\tglobal SqegHeight, SqegWidth, HalfWidth\r\n\t\tSqegHeight = 20\r\n\t\tSqegWidth = 70\r\n\t\tHalfWidth = int( SqegWidth / 2)\r\n\t\tself.setup()\r\n\r\n\tdef setup(self):\r\n\t\tglobal filepath, path, imgCalc, imgOutput, show_Pic, picWidth, picHeight\r\n\t\tpath = cv2.imread(filepath)\r\n\t\tself.imgOrigin = cv2.cvtColor(path, cv2.COLOR_BGR2HSV) # load original image\r\n\t\th, s, v = cv2.split(self.imgOrigin) # split into h,s,v data\r\n\t\ts.fill(0) # grayscale\r\n\t\timgCalc = cv2.merge([h, s, v]) # reconstruct to image data\r\n\t\timgOutput = cv2.cvtColor( imgCalc, cv2.COLOR_HSV2RGB) # output grayscale image\r\n\t\tpicWidth = self.imgOrigin.shape[1] # width of image\r\n\t\tpicHeight = self.imgOrigin.shape[0] # height of image\r\n\t\tshow_Pic = QImage( imgOutput.data, picWidth, picHeight, QImage.Format_RGB888) # create image 
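The compiled Theano jacobian above reproduces the closed form quoted in the `gradient` docstring; for reference, the same quantity in plain NumPy, assuming `x` is a single sample and `Y` holds one sample per row:

# Closed-form Gaussian-kernel gradient wrt the left argument, matching the
# docstring: grad_x k(x, y) = k(x, y) * (y - x) / sigma**2, one row per y.
import numpy as np

def gaussian_kernel_grad(x, Y, sigma):
    diffs = Y - x                                        # (n, d)
    k = np.exp(-0.5 * np.sum(diffs ** 2, axis=1) / sigma ** 2)
    return (k[:, None] * diffs) / sigma ** 2             # (n, d)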
data\r\n\r\n\tdef boundingRect(self):\r\n\t\treturn QRectF( 0, 0, self.imgOrigin.shape[1], self.imgOrigin.shape[0])\r\n\r\n\tdef paint(self, painter, option, widget):\r\n\t\tglobal show_Pic\r\n\t\tpainter.drawPixmap(0, 0, QPixmap.fromImage( show_Pic, Qt.AutoColor)) # display image\r\n\r\n\tdef Wiper(self): # coloring function\r\n\t\tglobal picPosX, picPosY, imgCalc, imgOutput, picWidth, picHeight, show_Pic, SqegWidth, HalfWidth, SqegHeight\r\n\t\tfor i in range(SqegHeight): # squeegee's thickness\r\n\t\t\tdrawY = picPosY + i\r\n\t\t\tif self.imgOrigin.shape[0] <= picPosY + i: # when the mouse + thickness exceeds the height of the image\r\n\t\t\t\tdrawY = self.imgOrigin.shape[0] - 1\r\n\t\t\tfor j in range(SqegWidth): # squeegee's width\r\n\t\t\t\tdrawX = picPosX - HalfWidth + j\r\n\t\t\t\tif drawX - HalfWidth + j <= 0: # when the mouse + thickness exceeds the width of the image (left boundary)\r\n\t\t\t\t\tdrawX = 0\r\n\t\t\t\telif self.imgOrigin.shape[1] <= drawX - HalfWidth + j: # right boundary\r\n\t\t\t\t\tdrawX = self.imgOrigin.shape[1] - 1\r\n\t\t\t\tif imgCalc[drawY][drawX][1] == 0: # coloring when the mouse position image is gray\r\n\t\t\t\t\timgCalc[drawY][drawX][1] = self.imgOrigin[drawY][drawX][1]\r\n\t\timgOutput = cv2.cvtColor( imgCalc, cv2.COLOR_HSV2RGB)\r\n\t\tshow_Pic = QImage( imgOutput.data, picWidth, picHeight, QImage.Format_RGB888) # display image\r\n\t\tscene.update()\r\n\r\n\r\n\r\n\r\nclass Frame(QGraphicsItem):\r\n\tdef __init__(self):\r\n\t\tsuper(Frame, self).__init__()\r\n\t\tself.setAcceptedMouseButtons(Qt.LeftButton)\r\n\t\tself.setup()\r\n\r\n\r\nclass PicTitle(Frame):\r\n\tdef setup(self):\r\n\t\tpass\r\n\r\n\tdef boundingRect(self):\r\n\t\treturn QRectF( 0, 0, 400, 200)\r\n\r\n\tdef paint(self, painter, option, widget):\r\n\t\tglobal title\r\n\t\tpainter.setFont(QFont('Norasi',30))\r\n\t\tpainter.drawText(0, 0, str(title))\r\n\r\n\r\nclass Start(Frame):\r\n\tdef setup(self):\r\n\t\tself.image = QImage(\"./picture/squeegee/layout/start.png\")\r\n\r\n\tdef boundingRect(self):\r\n\t\treturn QRectF( 0, 0, self.image.width(), self.image.height())\r\n\r\n\tdef paint(self, painter, option, widget):\r\n\t\tpainter.drawImage( 0, 0, self.image)\r\n\r\n\tdef mousePressEvent(self, event):\r\n\t\tglobal picWidth, picHeight, imgCalc, imgOutput, show_Pic\r\n\t\th, s, v = cv2.split(imgCalc)\r\n\t\ts.fill(0)\r\n\t\timgCalc = cv2.merge([h, s, v])\r\n\t\timgOutput = cv2.cvtColor( imgCalc, cv2.COLOR_HSV2RGB)\r\n\t\tshow_Pic = QImage( imgOutput.data, picWidth, picHeight, QImage.Format_RGB888)\r\n\t\tscene.update()\r\n\r\n\r\nclass Quit(Frame):\r\n\tdef setup(self):\r\n\t\tself.image = QImage(\"./picture/squeegee/layout/quit.png\")\r\n\r\n\tdef boundingRect(self):\r\n\t\treturn QRectF( 0, 0, self.image.width(), self.image.height())\r\n\r\n\tdef paint(self, painter, option, widget):\r\n\t\tpainter.drawImage( 0, 0, self.image)\r\n\r\n\tdef mousePressEvent(self, event):\r\n\t\tmsg = QMessageBox()\r\n\t\tmsg.setIcon(QMessageBox.Warning)\r\n\t\tmsg.setWindowTitle(\"Attention\")\r\n\t\tmsg.setText(\"Do you really want to quit this game ?\")\r\n\t\tmsg.setStandardButtons(QMessageBox.Yes | QMessageBox.No)\r\n\t\tret = msg.exec_()\r\n\t\tif ret == QMessageBox.Yes:\r\n\t\t\tQApplication.quit()\r\n\t\telif ret == QMessageBox.No:\r\n\t\t\tevent.ignore()\r\n\r\n\r\nclass Settings(Frame):\r\n\tdef setup(self):\r\n\t\tself.image = QImage(\"./picture/squeegee/layout/settings.png\")\r\n\r\n\tdef boundingRect(self):\r\n\t\treturn QRectF( 0, 0, self.image.width(), self.image.height())\r\n\r\n\tdef 
paint(self, painter, option, widget):\r\n\t\tpainter.drawImage( 0, 0, self.image)\r\n\r\n\tdef mousePressEvent(self, event):\r\n\t\tsubWindow = SubWindow()\r\n\t\tsubWindow.show()\r\n\t\tsubWindow.exec_()\r\n\r\n\r\nclass SubWindow(QDialog):\r\n\tdef __init__(self, parent=None):\r\n\t\tsuper(SubWindow, self).__init__(parent)\r\n\t\tsubwinWidth = 600\r\n\t\tsubwinHeight = 350\r\n\t\tself.setGeometry(500, 300, subwinWidth, subwinHeight)\r\n\t\tself.setMaximumSize( subwinWidth, subwinHeight)\r\n\t\tself.setMinimumSize( subwinWidth, subwinHeight)\r\n\t\tself.setWindowTitle(\"Settings\")\r\n\r\n\t\ttext1 = QLabel(self)\r\n\t\ttext1.setFont(QFont('Norasi', 15))\r\n\t\ttext1.setText(\"SELECT PICTURE\")\r\n\t\ttext1.move( 170, 30)\r\n\r\n\t\tlist1 = os.listdir(\"./picture/squeegee/image\")\r\n\t\ttext2 = QLabel(self)\r\n\t\ttext2.setFont(QFont('Norasi', 12))\r\n\t\ttext2.setText(\"group\")\r\n\t\ttext2.move( 80, 85)\r\n\t\tself.combo1 = QComboBox(self)\r\n\t\tself.combo1.setFont(QFont('Norasi',12))\r\n\t\tfor num in range(len(list1)):\r\n\t\t\tself.combo1.addItem(list1[num])\r\n\t\tself.combo1.move( 80, 120)\r\n\t\tself.select = self.combo1.currentText()\r\n\t\tself.combo1.currentIndexChanged.connect(self.indexChanged)\r\n\r\n\t\tpath = \"./picture/squeegee/image/\" + str(self.select) # load images from this directory automatically\r\n\t\tself.list2 = os.listdir(path)\r\n\t\ttext3 = QLabel(self)\r\n\t\ttext3.setFont(QFont('Norasi', 12))\r\n\t\ttext3.setText(\"picture\")\r\n\t\ttext3.move( 300, 85)\r\n\t\tself.combo2 = QComboBox(self)\r\n\t\tself.combo2.setFont(QFont('Norasi',12))\r\n\t\tfor num in range(len(self.list2)):\r\n\t\t\tself.combo2.addItem(self.list2[num])\r\n\t\tself.combo2.move( 300, 120)\r\n\r\n\tdef indexChanged(self):\r\n\t\tself.combo2.clear()\r\n\t\tself.select = self.combo1.currentText()\r\n\t\tpath = \"./picture/squeegee/image/\" + str(self.select) # load images from this directory automatically\r\n\t\tself.list2 = os.listdir(path)\r\n\t\tfor num in range(len(self.list2)):\r\n\t\t\tself.combo2.addItem(self.list2[num])\r\n\t\tself.combo2.move( 300, 120)\r\n\r\n\tdef closeEvent(self,event):\r\n\t\tglobal filepath, title, view\r\n\t\tfilepath = \"./picture/squeegee/image/\" + self.combo1.currentText() + \"/\" + self.combo2.currentText() # load images from this directory automatically\r\n\t\ttitle = self.combo2.currentText()\r\n\t\ttitle = title.rstrip('.jpg')\r\n\t\tscene.clear()\r\n\t\tlay = Layout()\r\n\t\tlay.SceneSet()\r\n\t\tview.Set()\r\n\r\n\r\n\r\n\r\n##-------------------------------------------\r\n# window setting\r\n\r\n\r\nclass gamewindow(QGraphicsView, QWidget):\r\n\tdef __init__( self, parent = None ):\r\n\t\tsuper(gamewindow, self).__init__(parent)\r\n\t\tglobal winWidth, winHeight, Points\r\n\t\tself.wx = winWidth\r\n\t\tself.wy = winHeight\r\n\t\tself.direction = 1\r\n\t\tself.initUI()\r\n\t\tself.ManusSetting()\r\n\t\t\r\n\t\t# initialize motors\r\n\t\tserialPort1 = \"/dev/ttyACM0\"\r\n\t\tID1 = 1\r\n\t\tself.motor1 = NT.NanotecMotor(serialPort1,ID1)\r\n\t\t\r\n\t\tserialPort2 = \"/dev/ttyACM1\"\r\n\t\tID2 = 2\r\n\t\tself.motor2 = NT.NanotecMotor(serialPort2,ID2)\r\n\r\n\tdef ManusSetting(self):\r\n\t\tself.cursorX = 0 # cordinate of MIT - Manus X\r\n\t\tself.cursorY = 0 # cordinate of MIT - Manus Y\r\n\t\tself.R = 30 # cursor radius\r\n\t\tself.switch = 1 # load MIT - Manus values (aa and fe)\r\n\r\n\t\tpen = QPen(Qt.red)\r\n\t\tbrush = QBrush(pen.color())\r\n\t\tself.player = scene.addEllipse(self.cursorX - int(self.R/2), self.cursorY - int(self.R/2), self.R, 
self.R, pen, brush) # MIT - Manus cursor\r\n\r\n\t\ttimer1 = QTimer(self)\r\n\t\ttimer1.timeout.connect(self.reading) # load values from MIT - Manus\r\n\t\ttimer1.start(10)\r\n\r\n\t\ttimer2 = QTimer(self)\r\n\t\ttimer2.timeout.connect(self.moving) # game action\r\n\t\ttimer2.start(10)\r\n\r\n\tdef reading(self):\r\n\t\tglobal process, winWidth, winHeight\r\n\r\n\t\tif(self.switch == 1):\r\n\t\t\taa_pos_360 = self.motor1.getAbsoluteAngularPosition() # command for load wrist aa_pos value\r\n\t\t\taa_pos = (aa_pos_360 - 180.0)/ 360.0 # convert degrees [0,360.0) to [-1,1)\r\n\t\t\tif(aa_pos < 0):\r\n\t\t\t\tvalue_aa = float(aa_pos) * 10.0\r\n\t\t\telse:\r\n\t\t\t\tvalue_aa = float(aa_pos) * 4.0\r\n\t\t\tself.cursorY = int( (winHeight / 2) - ((winHeight / 2) * value_aa))\r\n\t\t\tself.switch = - self.switch\r\n\t\telse:\r\n\t\t\tfe_pos_360 = self.motor2.getAbsoluteAngularPosition() # command for load wrist fe_pos value\r\n\t\t\tfe_pos = (fe_pos_360 - 180.0)/ 360.0 # convert degrees [0,360.0) to [-1,1)\r\n\t\t\tvalue_fe = float(fe_pos) * 2.0\r\n\t\t\tself.cursorX = int( (winWidth / 2) + ((winWidth / 2) * value_fe))\r\n\t\t\tself.switch = - self.switch\r\n\t\tself.player.setRect(self.cursorX - int(self.R/2), self.cursorY - int(self.R/2), self.R, self.R)\r\n\r\n\tdef initUI(self):\r\n\t\tself.setMouseTracking(True)\r\n\t\tself.setGeometry(250, 100, self.wx, self.wy)\r\n\t\tself.setMaximumSize( self.wx, self.wy)\r\n\t\tself.setMinimumSize( self.wx, self.wy)\r\n\t\tself.setWindowTitle(\"Squeegee\")\r\n\t\tself.combo = QComboBox(self)\r\n\t\tself.combo.setFont(QFont('Norasi',20))\r\n\t\tself.combo.addItem(\"Vertical\")\r\n\t\tself.combo.addItem(\"Horizontal\")\r\n\t\tself.combo.move( 780, 20)\r\n\t\tself.combo.activated.connect(self.ComboChange)\r\n\t\tself.sp = QSpinBox(self)\r\n\t\tself.sp.setFont(QFont('Norasi',20))\r\n\t\tself.sp.setRange( 0, 10)\r\n\t\tself.sp.setValue(5)\r\n\t\tself.sp.move( 980, 20)\r\n\t\tself.sp.valueChanged.connect(self.ValueChange)\r\n\r\n\tdef ComboChange(self): # change the pad length and width\r\n\t\tglobal SqegHeight, SqegWidth, HalfWidth\r\n\t\tif ( self.combo.currentText() == 'Vertical'):\r\n\t\t\tself.direction = 1\r\n\t\t\tSqegHeight = 20\r\n\t\t\tSqegWidth = 20 + (10 * self.sp.value())\r\n\t\t\tHalfWidth = int( SqegWidth / 2)\r\n\t\telse:\r\n\t\t\tself.direction = 0\r\n\t\t\tSqegWidth = 20\r\n\t\t\tSqegHeight = 20 + (10 * self.sp.value())\r\n\t\t\tHalfWidth = int( SqegWidth / 2)\r\n\r\n\tdef ValueChange(self): # \r\n\t\tglobal SqegWidth, SqegHeight, HalfWidth\r\n\t\tif (self.direction == 1):\r\n\t\t\tSqegWidth = 20 + (10 * self.sp.value())\r\n\t\t\tHalfWidth = int( SqegWidth / 2)\r\n\t\telse:\r\n\t\t\tSqegHeight = 20 + (10 * self.sp.value())\r\n\t\t\tHalfWidth = int( SqegWidth / 2)\r\n\r\n\tdef Set(self):\r\n\t\tglobal picWidth, picHeight, view, Item_Pic\r\n\t\twindowX = view.frameGeometry().width()\r\n\t\twindowY = view.frameGeometry().height()\r\n\t\tself.ofsetX = (windowX - picWidth) / 2\r\n\t\tself.ofsetY = (windowY - picHeight) / 2\r\n\t\tItem_Pic.setPos( int(self.ofsetX), int(self.ofsetY))\r\n\r\n\tdef moving(self):\r\n\t\tglobal picPosX, picPosY, Item_Pic, scene, Points\r\n\t\tpicPosX = int(self.cursorX - self.ofsetX)\r\n\t\tpicPosY = int(self.cursorY - self.ofsetY)\r\n\r\n\t\tif ((0 <= picPosX) and (picPosX < picWidth)):\r\n\t\t\t if ((0 <= picPosY) and (picPosY < picHeight)):\r\n\t\t\t\t Item_Pic.Wiper()\r\n\r\n\r\n\r\n\r\nclass Layout:\r\n\tdef SceneSet(self):\r\n\t\tglobal scene, Item_Pic\r\n\r\n\t\tpb = 
QGraphicsPixmapItem(QPixmap('./picture/squeegee/layout/pb.png'))\r\n\t\tpb.setPos( 0, 0)\r\n\t\tscene.addItem(pb)\r\n\r\n\t\tframe = QGraphicsPixmapItem(QPixmap('./picture/squeegee/layout/frame.png'))\r\n\t\tframe.setPos( 30, 20)\r\n\t\tframe.setTransform(QTransform.fromScale(0.8, 0.8), True)\r\n\t\tscene.addItem(frame)\r\n\t\tsettings = Settings()\r\n\t\tsettings.setPos( 300, 28)\r\n\t\tsettings.setTransform(QTransform.fromScale(0.9, 0.9), True)\r\n\t\tscene.addItem(settings)\r\n\t\tstart = Start()\r\n\t\tstart.setPos( 180, 23)\r\n\t\tstart.setTransform(QTransform.fromScale(0.9, 0.9), True)\r\n\t\tscene.addItem(start)\r\n\t\tquit = Quit()\r\n\t\tquit.setPos( 50, 30)\r\n\t\tscene.addItem(quit)\r\n\r\n\t\tItem_Pic = Calc_Pic()\r\n\t\tItem_Pic.setPos(100, 100)\r\n\t\tItem_Pic.setTransform(QTransform.fromScale(1.0, 1.0), True)\r\n\t\tscene.addItem(Item_Pic)\r\n\r\n\t\tpictitle = PicTitle()\r\n\t\tpictitle.setPos(800,110)\r\n\t\tscene.addItem(pictitle)\r\n\r\n\r\n\r\n\r\ndef main():\r\n\tglobal scene, winWidth, winHeight, view\r\n\tapp = QApplication(sys.argv)\r\n\tscene = QGraphicsScene(0, 0, winWidth, winHeight)\r\n\r\n\tlay = Layout()\r\n\tlay.SceneSet()\r\n\r\n\tview = gamewindow(scene)\r\n\tview.show()\r\n\tview.raise_()\r\n\tview.Set()\r\n\tapp.exec_()\r\n\r\n\r\n\r\n\r\n##-------------------------------------------\r\n# game setting\r\n\r\nif __name__== '__main__':\r\n\tmain()\r\n\tsys.exit()\n","sub_path":"manus-home-v2/6-Squeegee.py","file_name":"6-Squeegee.py","file_ext":"py","file_size_in_byte":12821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"345011702","text":"\"\"\"\nAnalysing ITER parameters\n=========================\n\nLet's try to look at ITER plasma conditions using the `physics` subpackage.\n\"\"\"\n\nfrom astropy import units as u\nfrom plasmapy import formulary\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\n\n######################################################\n# The radius of electric field shielding clouds, also known as the Debye length,\n# would be\n\nelectron_temperature = 8.8 * u.keV\nelectron_concentration = 10.1e19 / u.m**3\nprint(formulary.Debye_length(electron_temperature, electron_concentration))\n\n############################################################\n# Note that we can also neglect the unit for the concentration, as\n# 1/m^3 is the a standard unit for this kind of Quantity:\n\nprint(formulary.Debye_length(electron_temperature, 10.1e19))\n\n############################################################\n# Assuming the magnetic field as 5.3 Teslas (which is the value at the major\n# radius):\n\nB = 5.3 * u.T\n\nprint(formulary.gyrofrequency(B, particle='e'))\n\nprint(formulary.gyroradius(B, T_i=electron_temperature, particle='e'))\n\n######################################################################\n# The electron inertial length would be\nprint(formulary.inertial_length(electron_concentration, particle='e'))\n\n######################################################################\n# In these conditions, they should reach thermal velocities of about\nprint(formulary.thermal_speed(T=electron_temperature, particle='e'))\n\n######################################################################\n# And the Langmuir wave plasma frequency should be on the order of\nprint(formulary.plasma_frequency(electron_concentration))\n\n############################################################\n# Let's try to recreate some plots and get a feel for some of 
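`Wiper` above restores the saturation channel one pixel at a time inside a Python double loop, on the hot path of a 10 ms timer. The same reveal as a single NumPy slice assignment, clipped to the image bounds; afterwards `imgOutput`/`show_Pic` would be rebuilt exactly as `Wiper` already does:

# Vectorized squeegee reveal: copy the original S channel (index 1 in HSV)
# back for the pad rectangle in one slice assignment.
def wipe_rect(img_calc, img_origin, x, y, width, height):
    h, w = img_origin.shape[:2]
    x0, x1 = max(0, x - width // 2), min(w, x + width // 2)
    y0, y1 = max(0, y), min(h, y + height)
    img_calc[y0:y1, x0:x1, 1] = img_origin[y0:y1, x0:x1, 1]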
these quantities.\n\nn_e = np.logspace(4, 30, 100) / u.m**3\nplt.plot(n_e, formulary.plasma_frequency(n_e))\nplt.scatter(\n electron_concentration,\n formulary.plasma_frequency(electron_concentration))\nplt.xlabel(\"Electron Concentration (m^-3)\")\nplt.ylabel(\"Langmuir Wave Plasma Frequency (rad/s)\")\nplt.show()\n","sub_path":"plasmapy/examples/plot_physics.py","file_name":"plot_physics.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"268963436","text":"# 给个矩阵,0代表海,1代表陆地,把所有岛替换成海(能连到边上的不算岛)\n\n\ndef replace_sea(matrix):\n if matrix == None or len(matrix) == 0:\n return 0\n\n n = len(matrix)\n m = len(matrix[0])\n\n visited = [[0 for _ in range(m)] for _ in range(n)]\n\n # step 1 :replace the all 1 with '-'\n\n for i in range(n):\n for j in range(m):\n if matrix[i][j] == 1 :\n matrix[i][j] = '-'\n\n\n # call for flood fill for all \"-\" on the edges\n for i in range(n):\n if matrix[i][0] == \"-\":\n flood(matrix,i ,0 )\n\n\n for i in range(n):\n if matrix[i][m-1] == \"-\":\n flood(matrix, i, m-1)\n\n for j in range(m):\n if matrix[0][j] == \"-\":\n flood(matrix, 0, j)\n\n for j in range(n):\n if matrix[n-1][j] == \"-\":\n flood(matrix,n-1,j )\n\n\n # step3 replace all '-' with 'x'\n\n for i in range(n):\n for j in range(m):\n if matrix[i][j] == '-':\n matrix[i][j] = 1\n\n\n\ndef flood(mat,x,y):\n m = len(mat)\n n = len(mat[0])\n if x < 0 or x > m-1 or y < 0 or y > n-1:\n return\n\n if mat[x][y] != '-':\n return\n mat[x][y] = 1\n\n flood (mat, x + 1, y)\n flood(mat, x-1, y)\n flood(mat, x, y+1)\n flood(mat, x, y-1)","sub_path":"grab/island.py","file_name":"island.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"473101001","text":"\"\"\"Finder to extract all dates from multiple lines of text\"\"\"\nfrom typing import List, Any\nfrom copy import deepcopy\nimport yaml\nimport numpy as np\nfrom .. import match\nfrom .. import extract\nfrom ..match import score\n\n\nclass DatesFinder(yaml.YAMLObject):\n \"\"\"Finder to extract multiple dates from multiple text lines output by OCR.\n\n Disclaimer:\n This finder was originally designed to address the issue that different kinds\n of dates may be extracted from the same excerpt. However, THIS IS NOT MEANT\n TO BE A FINAL SOLUTION FOR DATE EXTRACTION due to limited time and testing\n data. If a maintainer finds out some code does not make sense, probably it's\n because the code does not make sense indeed, and such a maintainer is\n encouraged to modify or totally reimplement this finder.\n\n Another problem that can be addressed, but has NOT been addressed is\n inconsistency between extracted dates. e.g. YukoEdYmd is earlier than\n YukoStYmd. 
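The island script above (its header comment, in Chinese: given a matrix where 0 is sea and 1 is land, sink every island into sea; land that reaches the border does not count as an island) floods recursively, one call per connected cell, which overflows Python's default recursion limit of roughly 1000 on large border-connected regions; note also that its final pass writes 1 back into the surviving '-' cells, where the header comment implies interior islands should become 0. An explicit-stack equivalent of `flood`:

# Stack-based flood fill equivalent to flood() above: converts every '-'
# cell reachable from (x, y) back to 1 without deep recursion.
def flood_iterative(mat, x, y):
    m, n = len(mat), len(mat[0])
    stack = [(x, y)]
    while stack:
        i, j = stack.pop()
        if 0 <= i < m and 0 <= j < n and mat[i][j] == '-':
            mat[i][j] = 1
            stack.extend([(i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)])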
You are encouraged to implement a check for this problem\n if you ovserve such inconsistency in tests.\n\n Typical usage example:\n >>> match_methods = {\n \"Birthday\": \"birthday_match\",\n \"YukoEdYmd\": \"valid_until_match\",\n \"YukoStYmd\": \"valid_from_match\",\n \"KofuYmd\": \"kofu_match\",\n }\n >>> finder = DatesFinder(\n match_methods=match_methods,\n extract_method=\"get_date\"\n )\n >>> texts = [\n [\"生年月日平成1年2月3日\"],\n [\"有効開始日令和元年1月2日有効終了日令和2年1月2日\"],\n [\"令和元年1月1日交付\"]\n ]\n >>> dates = finder.extract(texts)\n >>> print(dates[\"Birthday\"])\n 19890203\n >>> print(dates[\"YukoStYmd\"])\n 20190102\n >>> print(dates[\"YukoEdYmd\"])\n 20200102\n >>> print(dates[\"KofuYmd\"])\n 20190101\n\n Args:\n match_methods: Name of the function for pattern matching, which has to be\n defined in `..match`\n extract_method: Name of the function for information extraction, which has\n to be defined in `..extract`\n \"\"\"\n yaml_tag = u'!DatesFinder'\n def __init__(self, match_methods: str, extract_method: str):\n self.match_methods = match_methods\n self.extract_method = extract_method\n self.scores = {}\n self.texts = {}\n self.info = {}\n\n def _score(self, texts: List[List[Any]]):\n \"\"\"Scores each textline for each kind of date to extract.\n\n Args:\n texts: OCR results in a list, each element of each has also to be a\n list, each element of which is the text for each detected line.\n \"\"\"\n for tag, match_func in self.match_methods.items():\n # only look above and below for birth dates\n self.scores[tag], self.texts[tag] = score(\n match_func=getattr(match, match_func),\n texts=texts,\n no_ext=(tag != \"Birthday\")\n )\n\n # if YukoEdYmd found within the first 2 lines, make an exception to look below\n if tag == \"YukoEdYmd\" and sum(self.scores[tag][:2]) == 1:\n self.scores[tag], self.texts[tag] = score(\n match_func=getattr(match, match_func),\n texts=texts,\n no_ext=False\n )\n\n def extract(self, texts: List[List[Any]]) -> dict:\n \"\"\"Extracts all kinds of dates from text lines when possible.\n\n Args:\n texts: OCR results in a list, each element of each has also to be a\n list, each element of which is the text for each detected line.\n\n Returns:\n A dict of extracted dates\n \"\"\"\n self.texts = {}\n self.info = {}\n self._score(texts)\n\n # extract dates from lines with positive score for any key\n extract_f = getattr(extract, self.extract_method)\n dates_all = {}\n for (tag, lines), (_, scores) in zip(\n self.texts.items(),\n self.scores.items()\n ):\n dates_all[tag] = [extract_f(line) if score > 0 else [] for score, line in zip(scores, lines)] #pylint: disable=line-too-long\n\n # date match NMS\n for i in range(len(texts)):\n key_keep, suppress = None, False\n for key, cur_score in self.scores.items():\n if cur_score[i] < 2:\n continue\n if suppress:\n # more than 1 line with score > 1\n suppress = False\n break\n key_keep = key\n suppress = True\n if suppress:\n for key, cur_dates in dates_all.items():\n if key == key_keep or not cur_dates: continue\n for idx, (dates1, dates2) in enumerate(zip(\n cur_dates,\n dates_all[key_keep]\n )):\n dates_all[key][idx] = [d1 for d1 in dates1 if d1 not in dates2]\n\n\n # suppress YukoStYmd when YukoEdYmd and KofuYmd matched on the same line\n for idx, dates in enumerate(dates_all[\"YukoEdYmd\"]):\n if (self.scores[\"YukoStYmd\"][idx] and\n self.scores[\"KofuYmd\"][idx] and\n len(dates) < 3):\n self.scores[\"YukoStYmd\"][idx] = 0\n dates_all[\"YukoStYmd\"][idx].clear()\n\n # handle 2 dates in the same line\n for idx, dates in 
enumerate(dates_all[\"YukoEdYmd\"]):\n            if (len(dates) == 2 and\n                    self.scores[\"YukoStYmd\"][idx] > 0 and\n                    self.scores[\"KofuYmd\"][idx] == 0):\n                self.info[\"YukoStYmd\"], self.info[\"YukoEdYmd\"] = dates\n                if str(self.info[\"YukoStYmd\"]) > str(self.info[\"YukoEdYmd\"]):\n                    self.info[\"YukoStYmd\"], self.info[\"YukoEdYmd\"] = self.info[\"YukoEdYmd\"], self.info[\"YukoStYmd\"] #pylint: disable=line-too-long\n\n        # assign dates recursively\n        for th in np.arange(np.max(list(self.scores.values())), 0, -1):#pylint: disable=too-many-nested-blocks\n            scores_prev = {}\n            while not all([np.all(scores_prev.get(k, None) == v) for k, v in self.scores.items()]):#pylint: disable=line-too-long\n                scores_prev = deepcopy(self.scores)\n                for key in self.scores:\n                    if self.info.get(key, None) is not None: continue\n                    val_max, idx_max = self.scores[key].max(), self.scores[key].argmax()\n                    if (val_max >= th and\n                            len(self.scores[key][self.scores[key] == val_max]) == 1 and\n                            len(dates_all[key][idx_max]) == 1):\n                        self.info[key] = dates_all[key][idx_max][0]\n                        # pop the used date out of every other key's candidates\n                        for other_key in set(self.scores.keys()) - {key}:\n                            other_dates = dates_all[other_key][idx_max]\n                            if other_dates:\n                                new_dates = [d for d in other_dates if str(d) != str(self.info[key])] #pylint: disable=line-too-long\n                                dates_all[other_key][idx_max] = new_dates\n\n        # handle YukoStYmd and YukoEdYmd in the same line\n        if \"YukoStYmd\" not in self.info and \"YukoEdYmd\" not in self.info:\n            idx_from = self.scores[\"YukoStYmd\"].argmax()\n            idx_until = self.scores[\"YukoEdYmd\"].argmax()\n            dates_from = dates_all[\"YukoStYmd\"][idx_from]\n            dates_until = dates_all[\"YukoEdYmd\"][idx_until]\n            if str(dates_from) == str(dates_until) and len(dates_from) == 2:\n                self.info[\"YukoStYmd\"], self.info[\"YukoEdYmd\"] = dates_from\n\n        # handle YukoEdYmd and KofuYmd in the same line\n        if (self.info.get(\"YukoEdYmd\", None) is None and\n                self.info.get(\"KofuYmd\", None) is None):\n            for idx in range(len(self.scores[\"YukoEdYmd\"])):\n                if (self.scores[\"KofuYmd\"][idx] > 0 and\n                        len(dates_all[\"YukoEdYmd\"][idx]) == 2 and\n                        len(dates_all[\"KofuYmd\"][idx]) == 2):\n                    self.info[\"KofuYmd\"], self.info[\"YukoEdYmd\"] = dates_all[\"KofuYmd\"][idx] #pylint: disable=line-too-long\n                    # make sure KofuYmd is earlier than YukoEdYmd\n                    if str(self.info[\"KofuYmd\"]) > str(self.info[\"YukoEdYmd\"]):\n                        self.info[\"KofuYmd\"], self.info[\"YukoEdYmd\"] = self.info[\"YukoEdYmd\"], self.info[\"KofuYmd\"] #pylint: disable=line-too-long\n\n        for key in self.scores:\n            if self.info.get(key, None) is None:\n                for idx in (-self.scores[key]).argsort(kind=\"mergesort\"):\n                    if dates_all[key][idx]:\n                        # use the earliest date for birthday\n                        if key == \"Birthday\":\n                            dates_all[key][idx] = sorted(dates_all[key][idx], key=str)\n                        self.info[key] = dates_all[key][idx].pop(0)\n                        break\n\n        for tag in self.match_methods:\n            if tag not in self.info:\n                self.info[tag] = None\n        return self.info\n","sub_path":"ocr2/info_extractor/finders/date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":8224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"58867766","text":"import requests\n\nimport os\nimport json\n\nfrom django.db import connection\n\n\ndef truncate_table(table):\n    \"\"\"Truncate tables to import new products\"\"\"\n\n    cursor = connection.cursor()\n\n    sql_command = 'BEGIN;'\n    sql_command += 'ALTER TABLE {} DISABLE TRIGGER ALL;'.format(table)\n    sql_command += 'TRUNCATE TABLE {} CASCADE;'.format(table)\n    sql_command += 'ALTER TABLE {} 
ENABLE TRIGGER ALL;'.format(table)\n sql_command += 'COMMIT;'\n\n return cursor.execute(sql_command)\n\n\n\ndef list_products(category):\n \"\"\"Get a JSON list of products from OpenFoodFacts by category\"\"\"\n\n # Search in each category for importable products\n conditions = {\n 'page_size': 100,\n 'format': 'json',\n 'sort_by': 'unique_scans_n',\n 'criteria': [\n {\n 'tagtype': 'categories',\n 'tagcontains': 'contains'\n }, {\n 'tagtype': 'states',\n 'tag_contains': 'does_not_contains',\n 'fixed_tag': 'to-be-completed'\n }, {\n 'tagtype': 'countries',\n 'tag_contains': 'contains'\n }\n ]\n }\n\n request_url = \"https://fr.openfoodfacts.org/cgi/search.pl?action=process\"\n\n conditions_path = os.path.join(\n os.path.dirname(\n os.path.dirname(\n os.path.dirname(__file__)\n )\n ),\n 'static',\n 'json',\n 'category_conditions.json'\n )\n\n with open(conditions_path, 'r') as jsonf:\n conditions = json.load(jsonf)\n\n # Add criterias to request :\n for data in conditions['criteria']:\n i = conditions['criteria'].index(data)\n request_url += \"&tagtype_{index}={tagtype}\".format(\n index=str(i),\n tagtype=data['tagtype']\n )\n request_url += \"&tag_contains_{index}={contains}\".format(\n index=str(i),\n contains=data['tag_contains']\n )\n if data['tagtype'] == 'categories':\n request_url += \"&tag_{index}={category}\".format(\n index=str(i),\n category=category\n )\n else:\n request_url += \"&tag_{index}={tag}\".format(\n index=str(i),\n tag=data['contains']\n )\n\n # Add sorting\n request_url += \"&sort_by={}\".format(\n conditions['sort_by']\n )\n\n # Add page size\n request_url += \"&page_size={}\".format(\n conditions['page_size']\n )\n\n # Add file format\n request_url += \"&{}=1\".format(\n conditions['format']\n )\n\n resp = requests.get(request_url)\n\n return resp.json()['products']\n","sub_path":"products/management/commands/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"138170105","text":"import numpy as np\nimport argparse\nimport cv2\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required = True,\n help = \"Path to the image\")\n\nargs = vars(ap.parse_args())\n\nimage = cv2.imread(args[\"image\"])\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nblurred = cv2.GaussianBlur(gray, (11, 11), 0)\ncv2.imshow(\"Image\", image)\n\nedged = cv2.Canny(blurred, 30, 150)\ncv2.imshow(\"Edges\", edged)\n\n(cnts,_) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,\ncv2.CHAIN_APPROX_SIMPLE)\n\nprint(\"I count {} objects in this image\".format(len(cnts)))\n\nobjs = image.copy()\ncv2.drawContours(objs, cnts, -1, (0, 255, 0), 2)\ncv2.imshow(\"Objects\", objs)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"basics/contours.py","file_name":"contours.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"509049749","text":"import torch\nfrom torch.optim.lr_scheduler import StepLR\n\nclass StepLRScheduler(StepLR):\n KEY = \"StepLR\"\n \n def __init__(self, optimizer, scheduler_config):\n self.config = scheduler_config\n StepLR.__init__(self,\n optimizer,\n step_size=self.config.step_size,\n gamma=self.config.gamma,\n )","sub_path":"eva4/scheduler/scheduler_steplr.py","file_name":"scheduler_steplr.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"280061539","text":"#!/usr/bin/python\n\nimport requests\nimport logging\nimport traceback\nimport sys\nimport argparse\nfrom dateutil import parser as dateParse\n\nappName = 'OctopusEnergy'\n\nif False:\n    logger = logging.getLogger(appName)\n    stdout = logging.StreamHandler(sys.stdout)\n    logger.addHandler(stdout)\n    logger.setLevel(logging.DEBUG)\nelse:\n    try:\n        from systemd.journal import JournalHandler\n        logger = logging.getLogger(appName)\n        logger.addHandler(JournalHandler(SYSLOG_IDENTIFIER=appName))\n    except ImportError:\n        logger = logging.getLogger(appName)\n        stdout = logging.StreamHandler(sys.stdout)\n        logger.addHandler(stdout)\n    finally:\n        logger.setLevel(logging.DEBUG)\n\ndef graphiteHttpPost(graphiteUrl, metric):\n    try:\n        resp = requests.post(\n            graphiteUrl,\n            data=metric.encode())\n    except requests.exceptions.ConnectionError as e:\n        logger.error(\"%s: failed to send energy consumption to graphite with error %s\"%(metric, str(e)))\n    else:\n        if resp.status_code == 202 or resp.status_code == 200:\n            logger.info(\"sent metrics to graphite\")\n        else:\n            logger.error(\"failed to send metrics to graphite with response %s (%d)\"%(resp.text, resp.status_code))\n\ndef getGasConsumptionInKwh(API_KEY, mprn, gasMeter, days=1):\n    pageSize = 24 * 2 * (days + 1)\n    url = \"https://api.octopus.energy/v1/gas-meter-points/%s/meters/%s/consumption/?page_size=%d\"%(mprn, gasMeter, pageSize)\n    response = requests.get(\n        url = url,\n        auth=(API_KEY, \"\"))\n    try:\n        data = response.json()\n    except ValueError:\n        logger.error(\"Cannot read %s. Response status is %d\"%(response.text, response.status_code))\n    else:\n        return data[\"results\"]\n\ndef getElectricityConsumptionInKwh(API_KEY, mpan, electricMeter, days=1):\n    pageSize = 24 * 2 * (days + 1)\n    url = \"https://api.octopus.energy/v1/electricity-meter-points/%s/meters/%s/consumption/?page_size=%d\"%(mpan, electricMeter, pageSize)\n    response = requests.get(\n        url = url,\n        auth=(API_KEY, \"\"))\n    try:\n        data = response.json()\n    except ValueError:\n        logger.error(\"Cannot read %s. Response status is %d\"%(response.text, response.status_code))\n    else:\n        return data[\"results\"]\n    return None\n\n\ndef main():\n    global args\n    parser = argparse.ArgumentParser(description='Gather data from Octopus API and send it to graphite')\n    parser.add_argument('--graphiteKey', metavar='GRAPHITEKEY', required=True,\n                        help='graphite key')\n    parser.add_argument('--graphiteUrl', metavar='GRAPHITEURL', default=\"https://graphite.debroglie.net/graphiteSink.php\",\n                        help='graphite host')\n    parser.add_argument('--apiKey', metavar='APIKEY', required=True,\n                        help='octopus api key')\n    parser.add_argument('--mpan', metavar='MPAN', required=True,\n                        help='MPAN')\n    parser.add_argument('--eSerial', metavar='ESERIAL', required=True,\n                        help='electric meter serial number')\n    parser.add_argument('--mprn', metavar='MPRN', required=True,\n                        help='MPRN')\n    parser.add_argument('--gSerial', metavar='GSERIAL', required=True,\n                        help='gas meter serial number')\n    parser.add_argument('--days', metavar='DAYS', type=int, default=1,\n                        help='number of days to fetch')\n    args = parser.parse_args()\n\n    gasData = getGasConsumptionInKwh(args.apiKey, args.mprn, args.gSerial, args.days)\n    if gasData:\n        metrics = \"\"\n        for measure in gasData:\n            if measure[\"consumption\"] > 0:\n                metric = \"%s.%s %f %d\"%(args.graphiteKey, \"energy.gas.consumption\", measure[\"consumption\"], dateParse.parse(measure[\"interval_end\"]).timestamp())\n                metrics += metric + \"\\n\"\n        if metrics:\n            graphiteHttpPost(args.graphiteUrl, metrics)\n\n    metrics = \"\"\n    elecData = getElectricityConsumptionInKwh(args.apiKey, args.mpan, args.eSerial, args.days)\n    if elecData:\n        for measure in elecData:\n            if measure[\"consumption\"] > 0:\n                metric = \"%s.%s %f %d\"%(args.graphiteKey, \"energy.electricity.consumption\", measure[\"consumption\"], dateParse.parse(measure[\"interval_end\"]).timestamp())\n                metrics += metric + \"\\n\"\n        if metrics:\n            graphiteHttpPost(args.graphiteUrl, metrics)\n\n\nif __name__ == '__main__':\n    try:\n        main()\n    except Exception as e:\n        logger.error('An unexpected error occurred')\n        logger.error(\"\".join(traceback.format_exception(None, e, e.__traceback__)).replace(\"\\n\",\"\"))\n","sub_path":"Automation/octopus.py","file_name":"octopus.py","file_ext":"py","file_size_in_byte":4691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"507501151","text":"def find_row(i, j):\n    row_len = 0\n    while 0 <= i < n and matrix[i][j] > 0:\n        i += 1\n        row_len += 1\n    return row_len\ndef find_col(i, j):\n    col_len = 0\n    while 0 <= j < n and matrix[i][j] > 0:\n        j += 1\n        col_len += 1\n    return col_len\ndef change_zero(i, j, row_len, col_len):\n    for row in range(i, i + row_len):\n        for col in range(j, j + col_len):\n            matrix[row][col] = 0\nt = int(input())\nfor tc in range(1, t + 1):\n    n = int(input())\n    matrix = [list(map(int, input().split())) for _ in range(n)]\n    result = []\n    for i in range(n):\n        for j in range(n):\n            if matrix[i][j] > 0:\n                col_len = find_col(i, j)\n                row_len = find_row(i, j)\n                result.append((row_len, col_len, row_len * col_len))\n                change_zero(i, j, row_len, col_len)\n    result.sort(key=lambda x: (x[2], x[0]))\n    print(\"#{} {}\".format(tc, len(result)), end=' ')\n    for i in result:\n        print(i[0], i[1], end=' ')\n    print()","sub_path":"SWEA/1258-행렬찾기.py","file_name":"1258-행렬찾기.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"548802968","text":"import io,cv2,numpy as np\nfrom 
pyjava.api.mlsql import RayContext\n\nray_context = RayContext.connect(globals(),\"127.0.0.1:10001\")\n\ndef resize_image(row):\n new_row = {}\n image_bin = row[\"content\"] \n oriimg = cv2.imdecode(np.frombuffer(io.BytesIO(image_bin).getbuffer(),np.uint8),1)\n newimage = cv2.resize(oriimg,(28,28))\n is_success, buffer = cv2.imencode(\".png\", newimage)\n io_buf = io.BytesIO(buffer)\n new_row[\"content\"]=io_buf.getvalue() \n return new_row\n\nray_context.foreach(resize_image)","sub_path":"alg/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"573409735","text":"import asyncio\nimport logging\nimport random\nimport textwrap\nfrom datetime import datetime, timedelta\nfrom typing import List\n\nfrom discord import Colour, Embed, Message, TextChannel\nfrom discord.ext.commands import Bot, Cog, Context, group\n\nfrom bot.constants import Channels, ERROR_REPLIES, Reddit as RedditConfig, STAFF_ROLES\nfrom bot.converters import Subreddit\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\n\nclass Reddit(Cog):\n \"\"\"Track subreddit posts and show detailed statistics about them.\"\"\"\n\n HEADERS = {\"User-Agent\": \"Discord Bot: PythonDiscord (https://pythondiscord.com/)\"}\n URL = \"https://www.reddit.com\"\n MAX_FETCH_RETRIES = 3\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n self.reddit_channel = None\n\n self.prev_lengths = {}\n self.last_ids = {}\n\n self.new_posts_task = None\n self.top_weekly_posts_task = None\n\n self.bot.loop.create_task(self.init_reddit_polling())\n\n async def fetch_posts(self, route: str, *, amount: int = 25, params: dict = None) -> List[dict]:\n \"\"\"A helper method to fetch a certain amount of Reddit posts at a given route.\"\"\"\n # Reddit's JSON responses only provide 25 posts at most.\n if not 25 >= amount > 0:\n raise ValueError(\"Invalid amount of subreddit posts requested.\")\n\n if params is None:\n params = {}\n\n url = f\"{self.URL}/{route}.json\"\n for _ in range(self.MAX_FETCH_RETRIES):\n response = await self.bot.http_session.get(\n url=url,\n headers=self.HEADERS,\n params=params\n )\n if response.status == 200 and response.content_type == 'application/json':\n # Got appropriate response - process and return.\n content = await response.json()\n posts = content[\"data\"][\"children\"]\n return posts[:amount]\n\n await asyncio.sleep(3)\n\n log.debug(f\"Invalid response from: {url} - status code {response.status}, mimetype {response.content_type}\")\n return list() # Failed to get appropriate response within allowed number of retries.\n\n async def send_top_posts(\n self, channel: TextChannel, subreddit: Subreddit, content: str = None, time: str = \"all\"\n ) -> Message:\n \"\"\"Create an embed for the top posts, then send it in a given TextChannel.\"\"\"\n # Create the new spicy embed.\n embed = Embed()\n embed.description = \"\"\n\n # Get the posts\n async with channel.typing():\n posts = await self.fetch_posts(\n route=f\"{subreddit}/top\",\n amount=5,\n params={\n \"t\": time\n }\n )\n\n if not posts:\n embed.title = random.choice(ERROR_REPLIES)\n embed.colour = Colour.red()\n embed.description = (\n \"Sorry! We couldn't find any posts from that subreddit. 
\"\n \"If this problem persists, please let us know.\"\n )\n\n return await channel.send(\n embed=embed\n )\n\n for post in posts:\n data = post[\"data\"]\n\n text = data[\"selftext\"]\n if text:\n text = textwrap.shorten(text, width=128, placeholder=\"...\")\n text += \"\\n\" # Add newline to separate embed info\n\n ups = data[\"ups\"]\n comments = data[\"num_comments\"]\n author = data[\"author\"]\n\n title = textwrap.shorten(data[\"title\"], width=64, placeholder=\"...\")\n link = self.URL + data[\"permalink\"]\n\n embed.description += (\n f\"[**{title}**]({link})\\n\"\n f\"{text}\"\n f\"| {ups} upvotes | {comments} comments | u/{author} | {subreddit} |\\n\\n\"\n )\n\n embed.colour = Colour.blurple()\n\n return await channel.send(\n content=content,\n embed=embed\n )\n\n async def poll_new_posts(self) -> None:\n \"\"\"Periodically search for new subreddit posts.\"\"\"\n while True:\n await asyncio.sleep(RedditConfig.request_delay)\n\n for subreddit in RedditConfig.subreddits:\n # Make a HEAD request to the subreddit\n head_response = await self.bot.http_session.head(\n url=f\"{self.URL}/{subreddit}/new.rss\",\n headers=self.HEADERS\n )\n\n content_length = head_response.headers[\"content-length\"]\n\n # If the content is the same size as before, assume there's no new posts.\n if content_length == self.prev_lengths.get(subreddit, None):\n continue\n\n self.prev_lengths[subreddit] = content_length\n\n # Now we can actually fetch the new data\n posts = await self.fetch_posts(f\"{subreddit}/new\")\n new_posts = []\n\n # Only show new posts if we've checked before.\n if subreddit in self.last_ids:\n for post in posts:\n data = post[\"data\"]\n\n # Convert the ID to an integer for easy comparison.\n int_id = int(data[\"id\"], 36)\n\n # If we've already seen this post, finish checking\n if int_id <= self.last_ids[subreddit]:\n break\n\n embed_data = {\n \"title\": textwrap.shorten(data[\"title\"], width=64, placeholder=\"...\"),\n \"text\": textwrap.shorten(data[\"selftext\"], width=128, placeholder=\"...\"),\n \"url\": self.URL + data[\"permalink\"],\n \"author\": data[\"author\"]\n }\n\n new_posts.append(embed_data)\n\n self.last_ids[subreddit] = int(posts[0][\"data\"][\"id\"], 36)\n\n # Send all of the new posts as spicy embeds\n for data in new_posts:\n embed = Embed()\n\n embed.title = data[\"title\"]\n embed.url = data[\"url\"]\n embed.description = data[\"text\"]\n embed.set_footer(text=f\"Posted by u/{data['author']} in {subreddit}\")\n embed.colour = Colour.blurple()\n\n await self.reddit_channel.send(embed=embed)\n\n log.trace(f\"Sent {len(new_posts)} new {subreddit} posts to channel {self.reddit_channel.id}.\")\n\n async def poll_top_weekly_posts(self) -> None:\n \"\"\"Post a summary of the top posts every week.\"\"\"\n while True:\n now = datetime.utcnow()\n\n # Calculate the amount of seconds until midnight next monday.\n monday = now + timedelta(days=7 - now.weekday())\n monday = monday.replace(hour=0, minute=0, second=0)\n until_monday = (monday - now).total_seconds()\n\n await asyncio.sleep(until_monday)\n\n for subreddit in RedditConfig.subreddits:\n # Send and pin the new weekly posts.\n message = await self.send_top_posts(\n channel=self.reddit_channel,\n subreddit=subreddit,\n content=f\"This week's top {subreddit} posts have arrived!\",\n time=\"week\"\n )\n\n if subreddit.lower() == \"r/python\":\n # Remove the oldest pins so that only 5 remain at most.\n pins = await self.reddit_channel.pins()\n\n while len(pins) >= 5:\n await pins[-1].unpin()\n del pins[-1]\n\n await 
message.pin()\n\n @group(name=\"reddit\", invoke_without_command=True)\n async def reddit_group(self, ctx: Context) -> None:\n \"\"\"View the top posts from various subreddits.\"\"\"\n await ctx.invoke(self.bot.get_command(\"help\"), \"reddit\")\n\n @reddit_group.command(name=\"top\")\n async def top_command(self, ctx: Context, subreddit: Subreddit = \"r/Python\") -> None:\n \"\"\"Send the top posts of all time from a given subreddit.\"\"\"\n await self.send_top_posts(\n channel=ctx.channel,\n subreddit=subreddit,\n content=f\"Here are the top {subreddit} posts of all time!\",\n time=\"all\"\n )\n\n @reddit_group.command(name=\"daily\")\n async def daily_command(self, ctx: Context, subreddit: Subreddit = \"r/Python\") -> None:\n \"\"\"Send the top posts of today from a given subreddit.\"\"\"\n await self.send_top_posts(\n channel=ctx.channel,\n subreddit=subreddit,\n content=f\"Here are today's top {subreddit} posts!\",\n time=\"day\"\n )\n\n @reddit_group.command(name=\"weekly\")\n async def weekly_command(self, ctx: Context, subreddit: Subreddit = \"r/Python\") -> None:\n \"\"\"Send the top posts of this week from a given subreddit.\"\"\"\n await self.send_top_posts(\n channel=ctx.channel,\n subreddit=subreddit,\n content=f\"Here are this week's top {subreddit} posts!\",\n time=\"week\"\n )\n\n @with_role(*STAFF_ROLES)\n @reddit_group.command(name=\"subreddits\", aliases=(\"subs\",))\n async def subreddits_command(self, ctx: Context) -> None:\n \"\"\"Send a paginated embed of all the subreddits we're relaying.\"\"\"\n embed = Embed()\n embed.title = \"Relayed subreddits.\"\n embed.colour = Colour.blurple()\n\n await LinePaginator.paginate(\n RedditConfig.subreddits,\n ctx, embed,\n footer_text=\"Use the reddit commands along with these to view their posts.\",\n empty=False,\n max_lines=15\n )\n\n async def init_reddit_polling(self) -> None:\n \"\"\"Initiate reddit post event loop.\"\"\"\n await self.bot.wait_until_ready()\n self.reddit_channel = await self.bot.fetch_channel(Channels.reddit)\n\n if self.reddit_channel is not None:\n if self.new_posts_task is None:\n self.new_posts_task = self.bot.loop.create_task(self.poll_new_posts())\n if self.top_weekly_posts_task is None:\n self.top_weekly_posts_task = self.bot.loop.create_task(self.poll_top_weekly_posts())\n else:\n log.warning(\"Couldn't locate a channel for subreddit relaying.\")\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Reddit cog load.\"\"\"\n bot.add_cog(Reddit(bot))\n log.info(\"Cog loaded: Reddit\")\n","sub_path":"bot/cogs/reddit.py","file_name":"reddit.py","file_ext":"py","file_size_in_byte":10627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"29957157","text":"from titanic2 import *\nimport titanic as t1\nimport numpy as np\n# import matplotlib.pyplot as plt\nfrom sklearn.metrics import accuracy_score\nimport xgboost as xgb\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import GridSearchCV\n\ntitanic = Titanic()\ntitanic.fillna()\ntitanic.data_prep()\n\ntrain, labels, test = titanic.data_model()\n\n# XGBoost Classifier\ngbm = xgb.XGBClassifier(\n learning_rate=0.01,\n n_estimators=500,\n max_depth=3,\n min_child_weight=2,\n # gamma=1,\n gamma=0.9,\n subsample=0.8,\n colsample_bytree=0.8,\n objective='binary:logistic',\n nthread=-1,\n scale_pos_weight=1).fit(train, labels)\n\nxgb_parameters = dict(learning_rate=0.01, n_estimators=500, max_depth=3, min_child_weight=2, gamma=0.9,\n subsample=0.8, colsample_bytree=0.8, 
objective='binary:logistic', nthread=-1,\n scale_pos_weight=1)\n\n\npredictions = gbm.predict(test).astype(int)\n\nprint('Accuracy xgboost: {:.4f}'.format(accuracy_score(predictions, t1.Titanic().solution()['Survived'].values)))\ntitanic.write_predictions(predictions,'xgb_180618_prep2_02.csv') # score 0.80382","sub_path":"titanic2_predict.py","file_name":"titanic2_predict.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"147517479","text":"# -*- coding: utf-8 -*-\nfrom zvt.selector.examples.zvt_selector import MaSelector\nfrom zvt.trader.trader import Trader\n\n\nclass FoolTrader(Trader):\n def init_selectors(self, security_type, exchanges, codes, start_timestamp, end_timestamp):\n self.selectors = []\n\n basic_selector = MaSelector(security_type=security_type, exchanges=exchanges, codes=codes,\n start_timestamp=start_timestamp,\n end_timestamp=end_timestamp)\n basic_selector.run()\n\n self.selectors.append(basic_selector)\n\n\nif __name__ == '__main__':\n FoolTrader(codes=['000020', '000021', '000023', '000025'], start_timestamp='2017-01-01',\n end_timestamp='2019-05-05').run()\n","sub_path":"zvt/trader/examples/zvt_trader.py","file_name":"zvt_trader.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"499155336","text":"import argparse\nimport json\nimport os\nimport re\nimport time\nimport sys\n\n\nimport emoji as emoji\nimport requests\nfrom bs4 import BeautifulSoup\nimport MySQLdb\n\n# 上传视频相关信息至服务器 (即:模拟后台添加视频功能)\nBASIC_PATH = '/Users/mason/Downloads/jsp'\n\ndef getEncode(json_file):\n with open(json_file, 'r+', encoding='utf-8', errors='ignore') as f:\n json_data = json.loads(f.read())\n # 读取信息\n if json_data.get('_抖音数据'):\n return 'utf-8'\n else:\n return 'gbk'\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--inputdir', help='播主文件夹的 父级文件夹')\n args = parser.parse_args()\n\n if args.inputdir is None:\n print('缺少参数')\n exit(0)\n\n BASIC_PATH = args.inputdir\n # FILE_NAME = args.filename\n\n print(f'文件目录{BASIC_PATH}')\n\n # 连接数据库\n conn = MySQLdb.connect(host=\"121.40.52.138\", port=23306, db=\"zhibo\", passwd=\"Ff3*h7E5e8Tr\", user=\"zhibo\", charset='utf8' )\n # 游标 - 操作数据库\n cursor = conn.cursor()\n # 查询标签\n cursor.execute(\"\"\" SELECT s_class, s_id FROM `cmf_video_class` \"\"\")\n # 视频标签\n # 查询出来的是元组格式,直接转成 字典格式\n tags = dict(cursor.fetchall())\n # 获取即将上传的视频标签\n local_tag = BASIC_PATH.split('\\\\')[-1]\n\n # 循环目录\n # 列出博主名字的文件夹\n for v1 in os.listdir(BASIC_PATH):\n # 进入博主文件夹\n for v2 in os.listdir(os.path.join(BASIC_PATH, v1)):\n # 打开视频文件夹下的.JSON文件\n json_file = os.path.join(BASIC_PATH, v1, v2, '数据.json')\n mp4_file = os.path.join(BASIC_PATH, v1, v2, '抖音.mp4')\n if not os.path.exists(json_file) or not os.path.getsize(json_file) or os.path.getsize(mp4_file) < 512000:\n print('没有.json文件 或 mp4文件太小, 跳过')\n continue\n encode_type = getEncode(json_file)\n with open(json_file, 'r+', encoding=encode_type, errors='ignore') as f:\n json_data = json.loads(f.read())\n if json_data['_数据库信息'].get('_cmf_users_video_id'):\n print('已有数据,跳过')\n continue\n\n # 读取信息\n status = '1'\n isdel = '0'\n title = json_data['_抖音数据']['_描述']\n thumb = json_data['_数据库信息']['_封面2_url']\n href = json_data['_数据库信息']['_抖音mp4_url']\n # 时间戳\n addtime = int(time.time())\n # 播主 - uid\n uid = json_data['_数据库信息']['_id']\n thumb_s = json_data['_数据库信息']['_封面2_url']\n show_val = None\n # 
视频 - 标签\n tag_id = tags.get(local_tag)\n if not tag_id:\n print('视频二级标签错误,请检查路径')\n exit(0)\n # 播主 - 头像\n avatar = json_data['_数据库信息']['_头像_url']\n avatar_thumb = json_data['_数据库信息']['_头像_url']\n\n # 存储数据库\n try:\n # 执行sql语句\n # 插入 - 视频数据\n cursor.execute(\"\"\"INSERT INTO `cmf_users_video`\n (`status`, `isdel`, `title`, `thumb`, `href`, `addtime`, `uid`, `thumb_s`, `show_val`)\n VALUES\n ('{}','{}','{}','{}','{}','{}','{}','{}','{}')\"\"\".format(status, isdel, title, thumb, href, addtime, uid, thumb_s, show_val))\n # 此方法要在commit之前使用, 否则值为 0\n id = conn.insert_id()\n print('video_id: ', id)\n # 插入 - 视频标签\n cursor.execute(\"\"\"INSERT INTO `cmf_video_class_details`\n (`sd_id`, `video_id`)\n VALUES('{}','{}')\"\"\".format(tag_id, id))\n # 更新 - 播主头像\n cursor.execute(\"\"\"UPDATE `cmf_users` SET avatar='{}', avatar_thumb='{}' WHERE id = '{}'\"\"\".format(avatar, avatar_thumb, uid))\n # 提交到数据库执行\n conn.commit()\n print('提交成功')\n # 获取主键\n # 此方法要在commit之后使用, 否则值为 空\n # id = cursor.lastrowid\n print('last_id: ', id)\n json_data['_数据库信息']['_cmf_users_video_id'] = f'{id}'\n print('new json_data', id)\n # 移动指针 到开始处\n f.seek(0)\n # 清空文件 - 指针必须指向开始处\n f.truncate()\n f.write(json.dumps(json_data, indent = 4, ensure_ascii = False))\n except:\n # Rollback in case there is any error\n conn.rollback()\n conn.close()\n print('结束')\n","sub_path":"爬虫源码/caught-video/upload-server.py","file_name":"upload-server.py","file_ext":"py","file_size_in_byte":5224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"326026657","text":"import sys\nimport base64\nimport hashlib\nimport json\n\nGENESIS = json.load(open(sys.argv[1]))\n\n\ndef get_voting_power(state):\n cointype, coin = state\n assert cointype == 'Bonded'\n return str(int(int(coin) / (10 ** 8)))\n\n\ndef validator_addr(pubkey_base64):\n return hashlib.sha256(base64.b64decode(pubkey_base64)).hexdigest().upper()[:40]\n\n\nGENESIS['validators'] = [\n {\n 'address': validator_addr(node[2]['consensus_pubkey_b64']),\n 'pub_key': {\n 'type': 'tendermint/PubKeyEd25519',\n 'value': node[2]['consensus_pubkey_b64'],\n },\n 'power': get_voting_power(GENESIS['app_state']['distribution'][addr]),\n }\n for addr, node in GENESIS['app_state']['council_nodes'].items()\n]\n\njson.dump(GENESIS, open(sys.argv[1], 'w'), indent=4)\n","sub_path":"integration-tests/fix_genesis.py","file_name":"fix_genesis.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"438154254","text":"import io\nimport time\nimport numpy as np\nimport cv2\nimport picamera\n\nresolutions = ((600, 400), (1600, 900))\nisos = (0, 100, 200, 400, 800, 1600)\nstream = io.BytesIO()\nwith picamera.PiCamera() as cam:\n for res in resolutions:\n cam.resolution = res\n for iso in isos:\n cam.ISO = iso\n time.sleep(1)\n start = time.time()\n count = 0\n for frame in cam.capture_continuous(stream, format='jpeg', quality=100, use_video_port=True):\n data = np.fromstring(stream.getvalue(), dtype=np.uint8)\n img = cv2.imdecode(data, 1)\n cv2.imshow('image', img)\n stream.seek(0)\n count = count + 1\n if cv2.waitKey(1) >= 0:\n break\n width, height = res\n print('({}x{}, ISO:{}) : {:.2f}fps'.format(width, height, iso, count/(time.time()-start)))\n","sub_path":"Repositories/Python/Raspbian/cam02.py","file_name":"cam02.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"270207741","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import patterns, url\nfrom event.noticegroup import views\n\nurlpatterns = patterns('',\n url(r'^api/edit/$', views.notice_group_edit),\n url(r'^api/list/$', views.show_groups),\n url(r'^api/scope_filter/$', views.scope_filter),\n url(r'^api/user_list/$', views.user_list),\n url(r'^api/manager/save/$', views.notice_group_save),\n url(r'^api/manager/delete/$', views.notice_group_del),\n url(r'^api/add/userdata/$', views.users_select_data),\n url(r'^api/add/groupdata/$', views.groups_select_data),\n )","sub_path":"WiseEyeIAMService/event/noticegroup/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"442471232","text":"from django.urls import path\nfrom . import views\n\nurlpatterns =[\n path(\"login_form/\", views.login_form, name=\"login_form\"),\n path(\"login/\", views.login_view, name=\"login\"),\n path(\"logout/\", views.logout_view, name=\"logout\"),\n path(\"userform/\", views.user_form, name=\"user_form\"),\n path(\"saveuser/\", views.saveuser, name=\"saveuser\"),\n path(\"\", views.recipe_booklet, name=\"recipe_booklet\"),\n path(\"saverecepi/\", views.saverecepi, name=\"saverecepi\"),\n path(\"form/\", views.register_recipe, name=\"register_recipe\"),\n path(\"/editrecipe/\", views.editrecipe, name=\"edit_recipe\"),\n path(\"/deleterecipe/\", views.deleterecipe, name=\"deleterecipe\"),\n path(\"contact/\", views.contact_form, name=\"contact_form\"),\n path(\"detail//\", views.recipe_details, name=\"recipe_details\"),\n]\n","sub_path":"recipe/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"494033521","text":"# coding=utf-8\r\n# by GAsss 17/07/15\r\n\r\nimport os\r\n\r\nnotForPrintList=['=','import']\r\n\r\nwhile True:\r\n\tcommand=input('>>> ')\r\n\ttry:\r\n\t\tif command == 'l' or command == 'ls':\r\n\t\t\t# for i in os.listdir():\r\n\t\t\t#\tprint(i)\r\n\t\t\tprint(os.popen('dir').read().rstrip())\r\n\t\telif command == 'pwd' :\r\n\t\t\tprint(os.getcwd())\r\n\t\telif command.split()[0]=='cd' :\r\n\t\t\tos.chdir(command.split()[1])\r\n\t\telif command[0]==':':\r\n\t\t\toutPut=os.popen(command[1:]).read().rstrip()\r\n\t\t\tif outPut:\r\n\t\t\t\tprint(outPut)\r\n\t\telif ':' in command :\r\n\t\t\tstr=command+\"\\n\"\r\n\t\t\twhile command != '' :\r\n\t\t\t\tcommand=input('... 
')\r\n\t\t\t\tstr+=command+\"\\n\"\r\n\t\t\texec(str)\r\n\t\telse :\r\n\t\t\tif [True for i in notForPrintList if i in command]:\r\n\t\t\t\texec(command)\r\n\t\t\telse:\r\n\t\t\t\texec(r'print('+command+')')\r\n\texcept Exception as e:\r\n\t\tprint(e)","sub_path":"PythonScript/commandline.py","file_name":"commandline.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"114870396","text":"#!/usr/bin/python\n# -*-coding: UTF-8-*-\n# @Author :Wang Hanmo\n\n\n'''This program is to extract data for analysing mobility of vehicles\n\n[description]: For each vehicle, service + start_service timing + round of service + duration\n'''\n\nimport csv\nimport MyFunctions\nimport pprint\nfrom decimal import Decimal\n\n\nheaders = ['node_id', 'vehicle_serial', 'date', 'time', 'speed', 'POI', 'service', 'service_start_stop', 'times of service', 'duration']\nnodeid_times = {} #{node_id: times}\nnodeid_period = {} #{node_id: [start_time, stop_time]}\nnodeid_duration = {} #{node_id: [duration1, duration2, ...]}\n\ndef calculateDuration(filepath):\n '''calculate duration\n \n Arguments:\n filepath {[String]} -- [eg: '../output/2017-09-04/final_2017-09-04.csv']\n '''\n with open(filepath, 'r') as f:\n r_f = csv.reader(f)\n headline = next(r_f)\n for row in r_f:\n # start\n if len(row) == 12 and row[11] != '':\n if row[11] == 'service_start': \n date = row[2]\n time = row[3].split('T')[1].split('.')[0]\n if row[0] not in nodeid_times.keys(): \n nodeid_times[row[0]] = 0\n nodeid_times[row[0]] += 1\n nodeid_period[row[0]] = ['', '']\n nodeid_period[row[0]][0] = time\n nodeid_duration[row[0]] = []\n else:\n nodeid_times[row[0]] += 1\n nodeid_period[row[0]][0] = time\n # stop \n if row[11] == 'service_stop':\n date = row[2]\n time = row[3].split('T')[1].split('.')[0] \n nodeid_period[row[0]][1] = time\n if len(nodeid_period[row[0]]) == 2:\n duration = MyFunctions.calculateDuration(nodeid_period[row[0]][0], nodeid_period[row[0]][1])\n nodeid_duration[row[0]].append(duration)\n nodeid_period[row[0]] = ['', '']\n else:\n continue\n # print(nodeid_duration)\n # print('SUCCESS 1')\n return(nodeid_duration)\n\n\ndef writeMobility(readFilePath, writeFilePath, nodeid_duration):\n '''Write Mobility Info\n \n Arguments:\n readFilePath {[String]} -- [Read the file from this path--eg: path of final_2017-09-04.csv]\n writeFilePath {[String]} -- [Write info into this file]\n nodeid_duration {[type]} -- [node_id -- duration]\n '''\n nodeid_times_w = {}\n with open(readFilePath, 'r') as f:\n r_f = csv.reader(f)\n headline = next(r_f)\n with open(writeFilePath, 'w', newline ='') as fnew: #use 'w' for writing str and newline='' for deleting extra idle row\n w_fnew = csv.writer(fnew)\n w_fnew.writerow(headers)\n for row in r_f:\n # start\n if len(row) == 12 and row[11] != '':\n if row[11] == 'service_start':\n date = row[2]\n time = row[3].split('T')[1].split('.')[0]\n if row[0] not in nodeid_times_w.keys(): \n nodeid_times_w[row[0]] = 0\n nodeid_times_w[row[0]] += 1 \n else:\n nodeid_times_w[row[0]] += 1\n # print('nodeid_time_w', nodeid_times_w)\n\n # print(nodeid_duration[row[0]][nodeid_times_w[row[0]]-1])\n w_fnew.writerow([row[0], row[1], date, time, row[7], row[9], row[10], row[11], nodeid_times_w[row[0]], nodeid_duration[row[0]][nodeid_times_w[row[0]]-1]])\n # except IndexError as e:\n # print(e)\n # w_fnew.writerow([row[0], row[1], date, time, row[6], row[8], row[9], row[10], nodeid_times[row[0]], 
nodeid_duration[row[0]][nodeid_times[row[0]]]])\n print('Generate mobility file successfully')\n\n\ndef normalizeDuration(filepath):\n '''It seems that there is no need to use this function'''\n with open(filepath, 'r+') as f:\n r_f = csv.reader(f)\n header = next(r_f)\n w_f = csv.writer(f)\n for row in r_f:\n # print(row)\n if len(row) == 12 and row[11] != '':\n if len(row[10].split(':')) == 3:\n row[10] = row[10][0:5]\n w_f.writerow([row[0], row[1], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10]])\n print('SUCCESS 3')\n\n\ndef run(folderpath):\n '''Run\n \n Arguments:\n filepath {[String]} -- [eg: '../output/2017-09-04/final_2017-09-04.csv']\n '''\n fileName = MyFunctions.visitAllFile(folderpath)\n fileName[0].pop(fileName[0].index('.DS_Store'))\n print('Modified file list is: ', fileName[0])\n print('------ There are %d files ------' %len(fileName[0]))\n count = 1\n for item in fileName[0]:\n filepath = folderpath + '/' + item\n print('This is %d file out of %d' %(count, len(fileName[0])))\n nodeid_duration = calculateDuration(filepath)\n count += 1\n # filepath_nor = 'Mobility/mobility_2017-09-05.csv' \n # normalizeDuration(filepath_nor)\n \n filename = 'mobility_' + filepath.split('/')[-1].split('_')[1]\n newpath = '../mobility/' + filename\n print(newpath)\n MyFunctions.checkPath(newpath)\n writeMobility(filepath, newpath, nodeid_duration)\n\nif __name__ == '__main__':\n\n folderpath = '../output'\n\n run(folderpath)\n\n\n\n\n","sub_path":"Final_aggregate/extractMobilityInfo.py","file_name":"extractMobilityInfo.py","file_ext":"py","file_size_in_byte":5730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"491564624","text":"# -*- coding:utf-8 -*-\n\"\"\"\n@Author : g1879\n@Contact : g1879@qq.com\n@File : driver_page.py\n\"\"\"\nfrom os import popen\nfrom pathlib import Path\nfrom pprint import pprint\nfrom re import search as RE_SEARCH\nfrom typing import Union\n\nfrom selenium import webdriver\n\nfrom DrissionPage.config import OptionsManager, DriverOptions\nfrom DrissionPage.drission import Drission\nfrom DrissionPage.session_page import SessionPage\nfrom .common import unzip\n\n\ndef show_settings(ini_path: str = None) -> None:\n \"\"\"打印ini文件内容\"\"\"\n om = OptionsManager(ini_path)\n print('paths:')\n pprint(om.get_option('paths'))\n print('\\nchrome options:')\n pprint(om.get_option('chrome_options'))\n print('\\nsession options:')\n pprint(om.get_option('session_options'))\n\n\ndef set_paths(driver_path: str = None,\n chrome_path: str = None,\n debugger_address: str = None,\n tmp_path: str = None,\n download_path: str = None,\n user_data_path: str = None,\n cache_path: str = None,\n ini_path: str = None,\n check_version: bool = True) -> None:\n \"\"\"快捷的路径设置函数 \\n\n :param driver_path: chromedriver.exe路径\n :param chrome_path: chrome.exe路径\n :param debugger_address: 调试浏览器地址,例:127.0.0.1:9222\n :param download_path: 下载文件路径\n :param tmp_path: 临时文件夹路径\n :param user_data_path: 用户数据路径\n :param cache_path: 缓存路径\n :param ini_path: 要修改的ini文件路径\n :param check_version: 是否检查chromedriver和chrome是否匹配\n :return: None\n \"\"\"\n om = OptionsManager(ini_path)\n\n def format_path(path: str) -> str:\n return '' if not path else str(path).replace('/', '\\\\')\n\n if driver_path is not None:\n om.set_item('paths', 'chromedriver_path', format_path(driver_path))\n\n if chrome_path is not None:\n om.set_item('chrome_options', 'binary_location', format_path(chrome_path))\n\n if debugger_address is not None:\n 
om.set_item('chrome_options', 'debugger_address', format_path(debugger_address))\n\n if tmp_path is not None:\n om.set_item('paths', 'tmp_path', format_path(tmp_path))\n\n if download_path is not None:\n experimental_options = om.get_value('chrome_options', 'experimental_options')\n experimental_options['prefs']['download.default_directory'] = format_path(download_path)\n om.set_item('chrome_options', 'experimental_options', experimental_options)\n\n om.save()\n\n if user_data_path is not None:\n set_argument('--user-data-dir', format_path(user_data_path), ini_path)\n\n if cache_path is not None:\n set_argument('--disk-cache-dir', format_path(cache_path), ini_path)\n\n if check_version:\n check_driver_version(format_path(driver_path), format_path(chrome_path))\n\n\ndef set_argument(arg: str, value: Union[bool, str], ini_path: str = None) -> None:\n \"\"\"设置浏览器配置argument属性 \\n\n :param arg: 属性名\n :param value: 属性值,有值的属性传入值,没有的传入bool\n :param ini_path: 要修改的ini文件路径\n :return: None\n \"\"\"\n do = DriverOptions(ini_path=ini_path)\n do.remove_argument(arg)\n\n if value:\n arg_str = arg if isinstance(value, bool) else f'{arg}={value}'\n do.add_argument(arg_str)\n\n do.save()\n\n\ndef set_headless(on_off: bool = True, ini_path: str = None) -> None:\n \"\"\"设置是否隐藏浏览器界面 \\n\n :param on_off: 开或关\n :param ini_path: 要修改的ini文件路径\n :return: None\n \"\"\"\n on_off = True if on_off else False\n set_argument('--headless', on_off, ini_path)\n\n\ndef set_no_imgs(on_off: bool = True, ini_path: str = None) -> None:\n \"\"\"设置是否禁止加载图片 \\n\n :param on_off: 开或关\n :param ini_path: 要修改的ini文件路径\n :return: None\n \"\"\"\n on_off = True if on_off else False\n set_argument('--blink-settings=imagesEnabled=false', on_off, ini_path)\n\n\ndef set_no_js(on_off: bool = True, ini_path: str = None) -> None:\n \"\"\"设置是否禁用js \\n\n :param on_off: 开或关\n :param ini_path: 要修改的ini文件路径\n :return: None\n \"\"\"\n on_off = True if on_off else False\n set_argument('--disable-javascript', on_off, ini_path)\n\n\ndef set_mute(on_off: bool = True, ini_path: str = None) -> None:\n \"\"\"设置是否静音 \\n\n :param on_off: 开或关\n :param ini_path: 要修改的ini文件路径\n :return: None\n \"\"\"\n on_off = True if on_off else False\n set_argument('--mute-audio', on_off, ini_path)\n\n\ndef set_user_agent(user_agent: str, ini_path: str = None) -> None:\n \"\"\"设置user agent \\n\n :param user_agent: user agent文本\n :param ini_path: 要修改的ini文件路径\n :return: None\n \"\"\"\n set_argument('user-agent', user_agent, ini_path)\n\n\ndef set_proxy(proxy: str, ini_path: str = None) -> None:\n \"\"\"设置代理 \\n\n :param proxy: 代理网址和端口\n :param ini_path: 要修改的ini文件路径\n :return: None\n \"\"\"\n set_argument('--proxy-server', proxy, ini_path)\n\n\ndef check_driver_version(driver_path: str = None, chrome_path: str = None) -> bool:\n \"\"\"检查传入的chrome和chromedriver是否匹配 \\n\n :param driver_path: chromedriver.exe路径\n :param chrome_path: chrome.exe路径\n :return: 是否匹配\n \"\"\"\n print('正在检测可用性...')\n om = OptionsManager()\n driver_path = driver_path or om.get_value('paths', 'chromedriver_path') or 'chromedriver'\n chrome_path = str(chrome_path or om.get_value('chrome_options', 'binary_location'))\n do = DriverOptions(read_file=False)\n do.add_argument('--headless')\n\n if chrome_path:\n do.binary_location = chrome_path\n\n try:\n driver = webdriver.Chrome(driver_path, options=do)\n driver.quit()\n print('版本匹配,可正常使用。')\n\n return True\n\n except Exception as e:\n print(f'出现异常:\\n{e}\\n可执行easy_set.get_match_driver()自动下载匹配的版本。\\n'\n f'或自行从以下网址下载:https://chromedriver.chromium.org/downloads')\n\n 
return False\n\n\n# -------------------------自动识别chrome版本号并下载对应driver------------------------\ndef get_match_driver(ini_path: Union[str, None] = 'default',\n save_path: str = None,\n chrome_path: str = None,\n show_msg: bool = True,\n check_version: bool = True) -> Union[str, None]:\n \"\"\"自动识别chrome版本并下载匹配的driver \\n\n :param ini_path: 要读取和修改的ini文件路径\n :param save_path: chromedriver保存路径\n :param chrome_path: 指定chrome.exe位置\n :param show_msg: 是否打印信息\n :param check_version: 是否检查版本匹配\n :return: None\n \"\"\"\n save_path = save_path or str(Path(__file__).parent)\n\n chrome_path = chrome_path or _get_chrome_path(ini_path, show_msg)\n chrome_path = Path(chrome_path).absolute() if chrome_path else None\n if show_msg:\n print('chrome.exe路径', chrome_path)\n\n ver = _get_chrome_version(str(chrome_path))\n if show_msg:\n print('version', ver)\n\n zip_path = _download_driver(ver, save_path, show_msg=show_msg)\n\n if not zip_path and show_msg:\n print('没有找到对应版本的driver。')\n\n try:\n driver_path = unzip(zip_path, save_path)[0]\n except TypeError:\n driver_path = None\n\n if show_msg:\n print('解压路径', driver_path)\n\n if driver_path:\n Path(zip_path).unlink()\n if ini_path:\n set_paths(driver_path=driver_path, chrome_path=str(chrome_path), ini_path=ini_path, check_version=False)\n\n if check_version:\n if not check_driver_version(driver_path, chrome_path) and show_msg:\n print('获取失败,请手动配置。')\n else:\n if show_msg:\n print('获取失败,请手动配置。')\n\n return driver_path\n\n\ndef _get_chrome_path(ini_path: str = None,\n show_msg: bool = True,\n from_ini: bool = True,\n from_regedit: bool = True,\n from_system_path: bool = True, ) -> Union[str, None]:\n \"\"\"从ini文件或系统变量中获取chrome.exe的路径 \\n\n :param ini_path: ini文件路径\n :return: chrome.exe路径\n \"\"\"\n # -----------从ini文件中获取--------------\n if ini_path and from_ini:\n try:\n path = OptionsManager(ini_path).chrome_options['binary_location']\n except KeyError:\n path = None\n else:\n path = None\n\n if path and Path(path).is_file():\n print('ini文件中', end='')\n return str(path)\n\n # -----------从注册表中获取--------------\n if from_regedit:\n import winreg\n try:\n key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,\n r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\chrome.exe',\n reserved=0, access=winreg.KEY_READ)\n k = winreg.EnumValue(key, 0)\n winreg.CloseKey(key)\n\n if show_msg:\n print('注册表中', end='')\n\n return k[1]\n\n except FileNotFoundError:\n pass\n\n # -----------从系统变量中获取--------------\n if from_system_path:\n paths = popen('set path').read().lower()\n r = RE_SEARCH(r'[^;]*chrome[^;]*', paths)\n\n if r:\n path = Path(r.group(0)) if 'chrome.exe' in r.group(0) else Path(r.group(0)) / 'chrome.exe'\n\n if path.exists():\n if show_msg:\n print('系统变量中', end='')\n return str(path)\n\n paths = paths.split(';')\n\n for path in paths:\n path = Path(path) / 'chrome.exe'\n\n try:\n if path.exists():\n if show_msg:\n print('系统变量中', end='')\n return str(path)\n except OSError:\n pass\n\n\ndef _get_chrome_version(path: str) -> Union[str, None]:\n \"\"\"根据文件路径获取版本号 \\n\n :param path: chrome.exe文件路径\n :return: 版本号\n \"\"\"\n if not path:\n return\n\n path = str(path).replace('\\\\', '\\\\\\\\')\n\n try:\n return (popen(f'wmic datafile where \"name=\\'{path}\\'\" get version').read()\n .lower().split('\\n')[2].replace(' ', ''))\n except:\n return None\n\n\ndef _download_driver(version: str, save_path: str = None, show_msg: bool = True) -> Union[str, None]:\n \"\"\"根据传入的版本号到镜像网站查找,下载最相近的 \\n\n :param version: 本地版本号\n :return: 保存地址\n \"\"\"\n if not version:\n return\n\n page 
= SessionPage(Drission().session)\n page.get('http://npm.taobao.org/mirrors/chromedriver')\n\n remote_ver = None\n loc_main = version.split('.')[0]\n\n try:\n loc_num = int(version.replace('.', ''))\n except ValueError:\n return None\n\n for i in page.eles('xpath://pre/a'):\n remote_main = i.text.split('.')[0]\n\n try:\n remote_num = int(i.text.replace('.', '').replace('/', ''))\n except ValueError:\n continue\n\n if remote_main == loc_main and remote_num >= loc_num:\n remote_ver = i.text\n break\n\n if remote_ver:\n url = f'https://cdn.npm.taobao.org/dist/chromedriver/{remote_ver}chromedriver_win32.zip'\n save_path = save_path or Path(__file__).parent\n result = page.download(url, save_path, file_exists='overwrite', show_msg=show_msg)\n\n if result[0]:\n return result[1]\n\n return None\n","sub_path":"DrissionPage/easy_set.py","file_name":"easy_set.py","file_ext":"py","file_size_in_byte":12050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"274472469","text":"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests the graph freezing tool.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.tools import strip_unused\n\n\nclass FreezeGraphTest(test_util.TensorFlowTestCase):\n\n def testFreezeGraph(self):\n input_graph_name = \"input_graph.pb\"\n output_graph_name = \"output_graph.pb\"\n\n # We'll create an input graph that has a single constant containing 1.0,\n # and that then multiplies it by 2.\n with tf.Graph().as_default():\n constant_node = tf.constant(1.0, name=\"constant_node\")\n wanted_input_node = tf.sub(constant_node, 3.0, name=\"wanted_input_node\")\n output_node = tf.mul(wanted_input_node, 2.0, name=\"output_node\")\n tf.add(output_node, 2.0, name=\"later_node\")\n sess = tf.Session()\n output = sess.run(output_node)\n self.assertNear(-4.0, output, 0.00001)\n tf.train.write_graph(sess.graph.as_graph_def(), self.get_temp_dir(),\n input_graph_name)\n\n # We save out the graph to disk, and then call the const conversion\n # routine.\n input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)\n input_binary = False\n input_node_names = \"wanted_input_node\"\n output_node_names = \"output_node\"\n output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)\n\n strip_unused.strip_unused(input_graph_path, input_binary, output_graph_path,\n input_node_names, output_node_names,\n tf.float32.as_datatype_enum)\n\n # Now we make sure the variable is now a constant, and that the graph still\n # produces the expected result.\n with tf.Graph().as_default():\n output_graph_def = tf.GraphDef()\n with open(output_graph_path, \"rb\") as f:\n output_graph_def.ParseFromString(f.read())\n _ 
= tf.import_graph_def(output_graph_def, name=\"\")\n\n self.assertEqual(3, len(output_graph_def.node))\n for node in output_graph_def.node:\n self.assertNotEqual(\"Add\", node.op)\n self.assertNotEqual(\"Sub\", node.op)\n\n with tf.Session() as sess:\n input_node = sess.graph.get_tensor_by_name(\"wanted_input_node:0\")\n output_node = sess.graph.get_tensor_by_name(\"output_node:0\")\n output = sess.run(output_node, feed_dict={input_node: [10.0]})\n self.assertNear(20.0, output, 0.00001)\n\nif __name__ == \"__main__\":\n tf.test.main()\n","sub_path":"jni-build/jni-build/jni/include/tensorflow/python/tools/strip_unused_test.py","file_name":"strip_unused_test.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"535622997","text":"#!/usr/bin/python3\n\"\"\"\nThis module contains a function that starts a Flask web app\n\"\"\"\nimport sys\nimport os\nsys.path.append(os.getcwd())\nfrom flask import Flask, render_template\nfrom models import storage\nfrom models.state import State\n\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\n\n\n@app.route('/states_list')\ndef states_list():\n \"\"\"\n Returns State data from database and insert into template\n \"\"\"\n state_inst = storage.all('States').values()\n state_data = dict([state.name, state.id] for state in state_inst)\n return render_template('7-states_list.html', state_data=state_data)\n\n\n@app.route('/cities_by_states')\ndef display_cities():\n \"\"\"\n Returns State data from database and insert into template\n \"\"\"\n state_inst = storage.all('State').values()\n state_data = dict([state.name, state] for state in state_inst)\n return render_template('8-cities_by_states.html', state_data=state_data)\n\n\n@app.teardown_appcontext\ndef teardown_db(exception):\n \"\"\"\n Handles teardown of app context\n \"\"\"\n storage.close()\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n","sub_path":"web_flask/8-cities_by_states.py","file_name":"8-cities_by_states.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"350075597","text":"from collections import Counter\n\ndef word_count(fname):\n with open(fname) as f:\n return Counter(f.read().split())\n\nprint(\"Frequency of words in the file: \", word_count(\"testfile.txt\"))\n\nfrom guesslang import Guess\n\nname = Guess().language_name(\"testo.py\")\nprint(name)","sub_path":"frequency.py","file_name":"frequency.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"440877505","text":"import re\n\ncount_of_messages = int(input())\ncounter = 0\nencrypted_messages = []\n\nfor i in range(count_of_messages):\n message = \"\"\n to_encrypt = input()\n for el in to_encrypt.lower():\n if el.lower() == \"s\" or el == \"t\" or el == \"a\" or el == \"r\":\n counter += 1\n for el in to_encrypt:\n message += chr(ord(el) - counter)\n encrypted_messages.append(message)\n counter = 0\n\npattern = r\"(?<=@)(?P[A-Z][a-z]+)[^@\\-\\!>]+?(?<=\\:)(?P[0-9]+(?!\\.))[^@\\-\\:>]*\" \\\n r\"(?<=\\!)(?P[AD])(?=\\!)[^@:]*\\-\\>(?P[\\d]+)\"\n\nplanets = {'A': [], 'D': []}\n\nfor el in encrypted_messages:\n match_obj = [obj.groupdict() for obj in re.finditer(pattern, el)]\n if match_obj:\n for obj in match_obj:\n if obj['type'] == \"A\":\n planets['A'].append(obj['planet'])\n elif obj['type'] == \"D\":\n 
planets['D'].append(obj['planet'])\n\nplanets['A'].sort()\nplanets['D'].sort()\n\nprint(f\"Attacked planets: {len(planets['A'])}\")\nfor ap in planets['A']:\n print(f\"-> {ap}\")\nprint(f\"Destroyed planets: {len(planets['D'])}\")\nfor dp in planets['D']:\n print(f\"-> {dp}\")\n","sub_path":"regular_expressions_exercise/star_enigma.py","file_name":"star_enigma.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"410543049","text":"import datetime\n\nfrom flask import request\nfrom flask_wtf import FlaskForm\nfrom wtforms import validators, SubmitField\nfrom wtforms.fields.html5 import DateField\n\nfrom flask_monitoringdashboard.core.timezone import to_local_datetime\n\nDATE_FORMAT = '%Y-%m-%d'\n\n\nclass SelectDateRangeForm(FlaskForm):\n \"\"\" Used for selecting two dates, which together specify a range. \"\"\"\n start_date = DateField('Start date', format=DATE_FORMAT, validators=[validators.data_required()])\n end_date = DateField('End date', format=DATE_FORMAT, validators=[validators.data_required()])\n submit = SubmitField('Update')\n title = 'Select the time interval'\n\n def get_days(self):\n \"\"\"\n :return: A list with datetime.date object from form.start_date to (including) form.end_date\n \"\"\"\n delta = self.end_date.data - self.start_date.data\n return [self.start_date.data + datetime.timedelta(days=i) for i in range(delta.days + 1)]\n\n def content(self):\n return '''\n
\n
{}
\n
{}
\n
\n
\n
{}
\n
{}
\n
{}
\n
'''.format(self.start_date.label, self.end_date.label,\n self.start_date(class_=\"form-control\", required=True),\n self.end_date(class_=\"form-control\", required=True),\n self.submit(class_=\"btn btn-primary btn-block\"))\n\n\ndef get_daterange_form(num_days=20):\n \"\"\"\n Returns a SelectDateRangeForm with two dates:\n - end_date is today\n - start_date is the today - numdays\n :param num_days: the date for the start_date\n :return: A SelectDateRangeForm object with the required logic\n \"\"\"\n form = SelectDateRangeForm(request.form)\n if form.validate():\n if form.start_date.data > form.end_date.data:\n form.start_date.data, form.end_date.data = form.end_date.data, form.start_date.data\n else:\n form.end_date.data = to_local_datetime(datetime.datetime.utcnow()).date()\n form.start_date.data = form.end_date.data - datetime.timedelta(days=num_days)\n return form\n","sub_path":"flask_monitoringdashboard/core/forms/daterange.py","file_name":"daterange.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"520057120","text":"import pymysql\n\n\nclass Bdatos:\n\tdef __init__(self, host, usuario, psword, nombre_bd, tabla): \n\t\tself.host = host\n\t\tself.usuario = usuario\n\t\tself.psw = psword\n\t\tself.nombre_bd = nombre_bd\n\t\tself.conexion = pymysql.connect(self.host, self.usuario, self.psw, self.nombre_bd)\n\t\tself.cursor = self.conexion.cursor()\t\n\t\tself.tabla = tabla\n\t\n\tdef crear_tabla(self, campos, campo_primario, campos_unicos):\n\n\t\t\"\"\"Crea Tabla los campos deben ser pasados en cadena\n\t\t\tejemplplo: id INT AUTO_INCREMENT,control INT(8) NOT NULL,\"\"\"\n\t\t\n\t\torden = \"CREATE TABLE {}.{}({} PRIMARY KEY({}), UNIQUE KEY({}))\".format(self.nombre_bd, self.tabla,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcampos, campo_primario, campos_unicos)\n\n\n\t\tself.cursor.execute(orden)\n\t\tself.conexion.commit()\n\t\t\n\n\tdef insertar_filas(self, campos, datos):\n\t\terrores_guardado = list()\n\t\ttry:\n\t\t\ttabla = \"SELECT 1 FROM {} LIMIT 1\".format(self.tabla)\n\t\t\tself.cursor.execute(tabla)\n\n\t\texcept pymysql.err.ProgrammingError:\n\t\t\tprint(\"No existe la tabla\")\n\t\t\treturn False\t\n\t\t\n\t\t\n\t\ttry:\n\t\t\torden = \"INSERT INTO {}({}) \\\n\t\t\tVALUES({})\".format(self.tabla, campos, datos) \n\t\t\tself.cursor.execute(orden)\n\t\t\tself.conexion.commit()\t\t\t\n\t\t\t\t\n\t\texcept pymysql.err.IntegrityError:\n\t\t\terrores_guardado.append(datos)\n\t\t\tprint(\"WARNING:El Registro {} No se Pudo Guardar\".format(datos.split(',')))\n\t\t\n\n\t\t\n\t\t\t\t\n\t\tself.conexion.close()\n\t\treturn errores_guardado\t\n\t\t\t\n\t\t\t\n\t\n\tdef consultar(self, campos, condiciones):\n\t\ttry:\n\t\t\torden = \"SELECT {} FROM {}.{}\\\n\t\t\tINNER JOIN {}.{} {}\".format(campos, self.nombre_bd, self.tabla, self.nombre_bd,\"recibos\",condiciones)\n\t\t\t\t\n\t\t\tself.cursor.execute(orden)\n\t\t\tregistro = self.cursor.fetchall()\n\t\t\treturn registro\n\t\t\t\n\t\texcept pymysql.err.ProgrammingError:\n\t\t\tprint(\"No existen datos de este año\")\t\t\n\t\t\tpass\n\t\t\n\t\n\t\t\n\t\n\n\t\t\n","sub_path":"backend/bdatos.py","file_name":"bdatos.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"463734259","text":"from fastai.conv_learner import *\nfrom fastai.dataset import *\n\nimport pandas as pd\nimport numpy as np\nimport os\nfrom PIL import Image\nfrom 
sklearn.model_selection import train_test_split\n\nfrom loss import *\n\nPATH = '/home/raznem/proj_kaggle_airbus'\nTRAIN = '../data/train_v2/'\nTEST = '../data/test_v2/'\nSEGMENTATION = os.path.join(PATH, 'data/train_ship_segmentations_v2.csv')\nPRETRAINED = '../models/Resnet34_lable_256_1.h5'\nexclude_list = ['6384c3e78.jpg', '13703f040.jpg', '14715c06d.jpg', '33e0ff2d5.jpg',\n '4d4e09f2a.jpg', '877691df8.jpg', '8b909bb20.jpg', 'a8d99130e.jpg',\n 'ad55c3143.jpg', 'c8260c541.jpg', 'd6c7f17c7.jpg', 'dc3e7c901.jpg',\n 'e44dffe88.jpg', 'ef87bad36.jpg', 'f083256d8.jpg']\n\nnw = 2 # number of workers for data loader\narch = resnet34 # specify target architecture\n\ntrain_names = [f for f in os.listdir(TRAIN)]\ntest_names = [f for f in os.listdir(TEST)]\nfor el in exclude_list:\n if el in train_names:\n train_names.remove(el)\n if el in test_names:\n test_names.remove(el)\n# 5% of data in the validation set is sufficient for model evaluation\ntr_n, val_n = train_test_split(train_names, test_size=0.05, random_state=42)\nsegmentation_df = pd.read_csv(os.path.join(PATH, SEGMENTATION)).set_index('ImageId')\n\n\ndef cut_empty(names):\n return [name for name in names\n if(type(segmentation_df.loc[name]['EncodedPixels']) != float)]\n\n\ntr_n = cut_empty(tr_n)\nval_n = cut_empty(val_n)\n\n\ndef get_mask(img_id, df):\n shape = (768, 768)\n img = np.zeros(shape[0] * shape[1], dtype=np.uint8)\n masks = df.loc[img_id]['EncodedPixels']\n if type(masks) == float:\n return img.reshape(shape)\n if type(masks) == str:\n masks = [masks]\n for mask in masks:\n s = mask.split()\n for i in range(len(s)//2):\n start = int(s[2*i]) - 1\n length = int(s[2*i+1])\n img[start:start+length] = 1\n return img.reshape(shape).T\n\n\nclass PdFilesDataset(FilesDataset):\n def __init__(self, fnames, path, transform):\n self.segmentation_df = pd.read_csv(SEGMENTATION).set_index('ImageId')\n super().__init__(fnames, transform, path)\n\n def get_x(self, i):\n img = open_image(os.path.join(self.path, self.fnames[i]))\n if self.sz == 768:\n return img\n else:\n return cv2.resize(img, (self.sz, self.sz))\n\n def get_y(self, i):\n mask = np.zeros((768, 768), dtype=np.uint8) if (self.path == TEST) \\\n else get_mask(self.fnames[i], self.segmentation_df)\n img = Image.fromarray(mask).resize((self.sz, self.sz)).convert('RGB')\n return np.array(img).astype(np.float32)\n\n def get_c(self):\n return 0\n\n\nclass RandomLighting(Transform):\n def __init__(self, b, c, tfm_y=TfmType.NO):\n super().__init__(tfm_y)\n self.b, self.c = b, c\n\n def set_state(self):\n self.store.b_rand = rand0(self.b)\n self.store.c_rand = rand0(self.c)\n\n def do_transform(self, x, is_y):\n if is_y and self.tfm_y != TfmType.PIXEL: return x # add this line to fix the bug\n b = self.store.b_rand\n c = self.store.c_rand\n c = -1/(c-1) if c < 0 else c+1\n x = lighting(x, b, c)\n return x\n\n\ndef get_data(sz, bs):\n # data augmentation\n aug_tfms = [RandomRotate(20, tfm_y=TfmType.CLASS),\n RandomDihedral(tfm_y=TfmType.CLASS),\n RandomLighting(0.05, 0.05, tfm_y=TfmType.CLASS)]\n tfms = tfms_from_model(arch, sz, crop_type=CropType.NO, tfm_y=TfmType.CLASS, aug_tfms=aug_tfms)\n tr_names = tr_n if (len(tr_n) % bs == 0) else tr_n[:-(len(tr_n) % bs)] # cut incomplete batch\n ds = ImageData.get_ds(PdFilesDataset, (tr_names, TRAIN), (val_n, TRAIN), tfms, test=(test_names, TEST))\n md = ImageData(PATH, ds, bs, num_workers=nw, classes=None)\n return md\n\n\ncut, lr_cut = model_meta[arch]\n\n\ndef get_base(): # load resNet34.py model\n layers = cut_model(arch(True), cut)\n return 
nn.Sequential(*layers)\n\n\ndef load_pretrained(model, path): # load a model pretrained on ship/no-ship classification\n weights = torch.load(PRETRAINED, map_location=lambda storage, loc: storage)\n model.load_state_dict(weights, strict=False)\n\n return model\n\n\nclass UnetBlock(nn.Module):\n def __init__(self, up_in, x_in, n_out):\n super().__init__()\n up_out = x_out = n_out // 2\n self.x_conv = nn.Conv2d(x_in, x_out, 1)\n self.tr_conv = nn.ConvTranspose2d(up_in, up_out, 2, stride=2)\n self.bn = nn.BatchNorm2d(n_out)\n\n def forward(self, up_p, x_p):\n up_p = self.tr_conv(up_p)\n x_p = self.x_conv(x_p)\n cat_p = torch.cat([up_p, x_p], dim=1)\n return self.bn(F.relu(cat_p))\n\n\nclass SaveFeatures():\n features = None\n\n def __init__(self, m): self.hook = m.register_forward_hook(self.hook_fn)\n\n def hook_fn(self, module, input, output): self.features = output\n\n def remove(self): self.hook.remove()\n\n\nclass Unet34(nn.Module):\n def __init__(self, rn):\n super().__init__()\n self.rn = rn\n self.sfs = [SaveFeatures(rn[i]) for i in [2, 4, 5, 6]]\n self.up1 = UnetBlock(512, 256, 256)\n self.up2 = UnetBlock(256, 128, 256)\n self.up3 = UnetBlock(256, 64, 256)\n self.up4 = UnetBlock(256, 64, 256)\n self.up5 = nn.ConvTranspose2d(256, 1, 2, stride=2)\n\n def forward(self, x):\n x = F.relu(self.rn(x))\n x = self.up1(x, self.sfs[3].features)\n x = self.up2(x, self.sfs[2].features)\n x = self.up3(x, self.sfs[1].features)\n x = self.up4(x, self.sfs[0].features)\n x = self.up5(x)\n return x[:, 0]\n\n def close(self):\n for sf in self.sfs: sf.remove()\n\n\nclass UnetModel():\n def __init__(self, model, name='Unet'):\n self.model, self.name = model, name\n\n def get_layer_groups(self, precompute):\n lgs = list(split_by_idxs(children(self.model.rn), [lr_cut]))\n return lgs + [children(self.model)[1:]]\n\n\nclass UnetBlock(nn.Module):\n def __init__(self, up_in, x_in, n_out):\n super().__init__()\n up_out = x_out = n_out // 2\n self.x_conv = nn.Conv2d(x_in, x_out, 1)\n self.tr_conv = nn.ConvTranspose2d(up_in, up_out, 2, stride=2)\n self.bn = nn.BatchNorm2d(n_out)\n\n def forward(self, up_p, x_p):\n up_p = self.tr_conv(up_p)\n x_p = self.x_conv(x_p)\n cat_p = torch.cat([up_p, x_p], dim=1)\n return self.bn(F.relu(cat_p))\n\n\nclass SaveFeatures():\n features = None\n\n def __init__(self, m):\n self.hook = m.register_forward_hook(self.hook_fn)\n\n def hook_fn(self, module, input, output):\n self.features = output\n\n def remove(self): self.hook.remove()\n\n\nclass Unet34(nn.Module):\n def __init__(self, rn):\n super().__init__()\n self.rn = rn\n self.sfs = [SaveFeatures(rn[i]) for i in [2, 4, 5, 6]]\n self.up1 = UnetBlock(512, 256, 256)\n self.up2 = UnetBlock(256, 128, 256)\n self.up3 = UnetBlock(256, 64, 256)\n self.up4 = UnetBlock(256, 64, 256)\n self.up5 = nn.ConvTranspose2d(256, 1, 2, stride=2)\n\n def forward(self, x):\n x = F.relu(self.rn(x))\n x = self.up1(x, self.sfs[3].features)\n x = self.up2(x, self.sfs[2].features)\n x = self.up3(x, self.sfs[1].features)\n x = self.up4(x, self.sfs[0].features)\n x = self.up5(x)\n return x[:, 0]\n\n def close(self):\n for sf in self.sfs: sf.remove()\n\n\nclass UnetModel():\n def __init__(self, model, name='Unet'):\n self.model, self.name = model, name\n\n def get_layer_groups(self, precompute):\n lgs = list(split_by_idxs(children(self.model.rn), [lr_cut]))\n return lgs + [children(self.model)[1:]]\n\n\nm_base = load_pretrained(get_base(), PRETRAINED)\nm = to_gpu(Unet34(m_base))\nmodels = UnetModel(m)\n\nsz = 256 # image size\nbs = 32 # batch size\n\nmd = 
get_data(sz, bs)\n\nlearn = ConvLearner(md, models)\nlearn.opt_fn = optim.Adam\nlearn.crit = MixedLoss(10.0, 2.0)\nlearn.metrics = [accuracy_thresh(0.5), dice, IoU]\nwd = 1e-7\nlr = 1e-2\n\nlearn.freeze_to(1)\n\nlearn.fit(lr, 1, wds=wd, cycle_len=1, use_clr=(5, 8))\n\nlearn.save('Unet34_256_0')\n\nlrs = np.array([lr/100, lr/10, lr])\nlearn.unfreeze() # unfreeze the encoder\nlearn.bn_freeze(True)\n\ntorch.cuda.empty_cache()\n\nlearn.fit(lrs, 2, wds=wd, cycle_len=1, use_clr=(20, 8))\n\nlearn.fit(lrs/3, 2, wds=wd, cycle_len=2, use_clr=(20, 8))\n\nlearn.sched.plot_lr()\n\nlearn.save('Unet34_256_1')\n\nsz = 384 # image size\nbs = 16 # batch size\n\nmd = get_data(sz, bs)\nlearn.set_data(md)\nlearn.unfreeze()\nlearn.bn_freeze(True)\n\ntorch.cuda.empty_cache()\n\nlearn.fit(lrs/5, 1, wds=wd, cycle_len=2, use_clr=(10, 8))\n\nlearn.save('Unet34_384_1')\n\nlearn.model.eval()\nx, y = next(iter(md.val_dl))\nyp = to_np(F.sigmoid(learn.model(V(x))))\n\nshow_images(np.asarray(md.val_ds.denorm(x)), yp, y)\n\nsz = 768 # image size\nbs = 6 # batch size\n\nmd = get_data(sz, bs)\nlearn.set_data(md)\nlearn.unfreeze()\nlearn.bn_freeze(True)\n\ntorch.cuda.empty_cache()\n\nlearn.fit(lrs/10, 1, wds=wd, cycle_len=1, use_clr=(10, 8))\n\nlearn.save('Unet34_768_1')\n\ntorch.cuda.empty_cache()\n\nlearn.fit(lrs/10, 1, wds=wd, cycle_len=3, use_clr=(10, 8))\n\nlearn.save('Unet34_768_1p3')\n\nlrs = np.array([lr/100, lr/10, lr])\n\nsz = 768 # image size\nbs = 6 # batch size\n\nmd = get_data(sz, bs)\nlearn.set_data(md)\nlearn.unfreeze()\nlearn.bn_freeze(True)\ntorch.cuda.empty_cache()\n\n\nlearn.fit(lrs/10, 1, wds=wd, cycle_len=7, use_clr=(10, 8))\n\nlearn.save('Unet34_768_1p10')\n\n","sub_path":"src/unet34.py","file_name":"unet34.py","file_ext":"py","file_size_in_byte":9492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"42851449","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 6 16:13:17 2018\n\n@author: yamini\n\n\"\"\"\nclass Solution:\n def dominantIndex(self, nums):\n if len(nums) == 0: return -1\n\n highest = -1\n secondHighest = -1\n highestIndex = 0\n \n for i,n in enumerate(nums):\n if n >= highest:\n secondHighest = highest\n highest = n\n highestIndex = i\n elif n > secondHighest:\n secondHighest = n\n\n if highest < secondHighest*2:\n highestIndex = -1\n \n return highestIndex\ns=Solution()\nprint(s.dominantIndex([3, 6, 1, 0]))","sub_path":"Largest_No_Twice_others.py","file_name":"Largest_No_Twice_others.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"626657787","text":"import time\nimport numpy as np\nimport pickle\n\nfrom utils import report_params\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.model_selection import KFold\nfrom scipy.stats import randint\n\n\nclass RandomForestModel:\n \"\"\"\n Random Forest Model Wrapper which is intended to be data agnostic.\n\n Keyword Arguments:\n params : initialization params for random forest regressor.\n \"\"\"\n\n def __init__(self, **params):\n if params:\n self.model = RandomForestRegressor(**params, random_state=1)\n else:\n self.model = None\n\n def train(self, X_train, y_train):\n \"\"\" Train RF Model.\n\n Keyword Arguments:\n X_train : samples\n y_train : labels\n \"\"\"\n if self.model is None:\n self.model = RandomForestRegressor(random_state=1)\n\n assert (len(X_train) == len(y_train))\n\n self.model.fit(X=X_train, 
y=np.log1p(y_train))\n\n def test(self, X_test, y_test):\n \"\"\" Test RF Model.\n\n Keyword Arguments:\n X_test : samples\n y_test : labels\n Returns:\n dict : dictionary of results with y_pred, rmsle and mad\n \"\"\"\n if self.model is None:\n print(\"Please load or train a model before!\")\n return\n assert (len(X_test) == len(y_test))\n y_pred = np.exp(self.model.predict(X=X_test)) - 1\n mad = np.mean(np.abs(y_pred - np.mean(y_pred)))\n rmsle = self.compute_loss(y_pred, y_test)\n return {'y_pred': y_pred, 'rmsle': rmsle, 'mad': mad}\n\n @staticmethod\n def compute_loss(y_pred, y):\n return np.sqrt(np.mean((np.log(y_pred + 1) - np.log(y + 1)) ** 2))\n\n def save(self, path):\n \"\"\" Save RF Model.\n\n Keyword Arguments:\n path : pkl file path\n Returns:\n success : True if successful\n \"\"\"\n with open(path, 'wb') as model_out:\n pickle.dump(self.model, model_out)\n return True\n\n def load(self, path):\n \"\"\" Load RF Model.\n\n Keyword Arguments:\n path : pkl file path\n Returns:\n success : True if successful\n \"\"\"\n with open(path, 'rb') as model_in:\n self.model = pickle.load(model_in)\n return True\n\n def reset_model_params(self, **params):\n self.model = RandomForestRegressor(**params)\n\n def set_model(self, model):\n self.model = model\n\n def cross_validate(self, X, y, cv=5):\n kfold = KFold(n_splits=cv, shuffle=True, random_state=1)\n results = []\n\n for train_index, test_index in kfold.split(X):\n print('train: %s, test: %s' % (train_index, test_index))\n X_train, y_train = X.iloc[train_index], y.iloc[train_index]\n X_test, y_test = X.iloc[test_index], y.iloc[test_index]\n\n self.train(X_train, y_train)\n results.append(self.test(X_test, y_test))\n\n print('For parameter set: ')\n print(self.model.get_params())\n print('Mean RMSLE: ', np.mean([r['rmsle'] for r in results]))\n\n @staticmethod\n def hyper_search(X_train, y_train, n_iter_search=10, report=False):\n \"\"\" Randomized hyperparameter search with 5-fold cross validation.\n\n Keyword Arguments:\n X_train : samples\n y_train : labels\n n_iter_search : Number of parameter settings that are sampled.\n Returns:\n best_estimator_ : best model trained on whole data\n \"\"\"\n clf = RandomForestRegressor()\n max_depth = [x*10 for x in range(1, 11)]\n max_depth.append(None)\n param_dist = {\"n_estimators\": [100, 200, 300, 400],\n \"max_features\": randint(1, 15),\n \"min_samples_split\": randint(2, 15),\n \"min_samples_leaf\": randint(1, 15),\n \"max_depth\": max_depth,\n # \"criterion\": ['mae'],\n \"bootstrap\": [True, False]}\n random_search = RandomizedSearchCV(clf, param_distributions=param_dist,\n n_iter=n_iter_search, cv=5, iid=False)\n\n start = time.time()\n random_search.fit(X_train, np.log1p(y_train))\n end = time.time()\n if report:\n print(\"RandomizedSearchCV took %.2f seconds for %d candidates\" % ((end - start), n_iter_search))\n report_params(random_search.cv_results_)\n return random_search.best_estimator_\n\n\nif __name__ == '__main__':\n m = RandomForestModel(n_estimators=50)\n print(m.load('a'))\n\n\n\n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"141585664","text":"import os # operating system\n\n# 確認檔案是否存在\ndef read_file(filename):\n\tproducts = []\n\twith open(filename, 'r', encoding = \"utf-8\") as f:\n\t\tfor line in f:\n\t\t\tif \"商品名稱,商品價格\" in line:\n\t\t\t\tcontinue\n\t\t\tname, price = line.strip().split(\",\")\n\t\t\t# s = 
line.strip().split(\",\")\n\t\t\t# name = s[0]\n\t\t\t# price = s[1]\n\t\t\tproducts.append([name,price])\n\t\tprint(products)\n\treturn products\n\n\n# 使用者輸入\ndef user_input(products):\n\twhile True:\n\t\tname = input(\"輸入商品名稱(若要停止輸入請按q):\")\n\t\tif name == \"q\":\n\t\t\tbreak\n\t\tprice = input(\"輸入商品價格:\") \n\t\tp = [name, price] # 建立二維清單的進階寫法\n\t\tproducts.append(p) \n\tprint(products)\n\treturn products\n\n\n# 印出所有購買紀錄\ndef print_products(products):\n\tfor p in products:\n\t\tprint(p[0], \"的價格是\", p[1])\n\n\n# 寫入檔案\ndef write_file(filename, products):\n\twith open(\"product.csv\", 'w', encoding = \"utf-8\") as f: #寫入中文時編碼,但若用excel開啟時還需從data在選編碼\n\t\tf.write(\"商品名稱\" + \",\" + \"商品價格\" + \"\\n\")\n\t\tfor p in products:\n\t\t\tf.write(p[0] + \",\" + p[1] + \"\\n\")\n\n\n# 主程式\ndef main(filename):\n\tif os.path.isfile(filename):\n\t\tprint(\"找到檔案了!!!\")\n\t\tproducts = read_file(filename)\n\telse:\n\t\tprint(\"找不到檔案~~\")\n\n\tproducts = user_input(products)\n\n\tprint_products(products)\n\n\twrite_file(filename, products)\n\t\n\n\nfilename = \"product.csv\"\n# 主程式進入點\nmain(filename)\n","sub_path":"product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"344551537","text":"#A\ndef gcd(a, b):\n while b > 0:\n a, b = b, a % b\n return a\nprint(gcd(50,20))\n\n#B\ndef reduce_fraction(a,b):\n divisor = gcd(a,b)\n nevner = a / divisor\n teller = b / divisor\n return int(nevner),int(teller)\n\nnevner,teller = reduce_fraction(4,2)\n\nprint(nevner,\"/\",teller)\n","sub_path":"Oving5/ForenklingAvBroker.py","file_name":"ForenklingAvBroker.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"484596530","text":"from django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\nfrom ckeditor.fields import RichTextField\n# Create your models here.\n\nclass Post(models.Model):\n STATUS_CHOICES = (\n ('draft', 'Draft'),\n ('published', 'Published'),\n )\n PAGE_CHOICES = (\n ('null', 'NULL'),\n ('ptb', 'PTB'),\n )\n title = models.CharField(max_length=250)\n slug = models.SlugField(max_length=250)\n content = RichTextField(config_name = 'default', external_plugin_resources=[(\n 'youtube',\n '/static/ckeditor/plugins/youtube/youtube/',\n 'plugin.js',\n )],)\n seo_title = models.CharField(max_length=250)\n seo_description = models.CharField(max_length=250)\n author = models.ForeignKey(User, related_name = 'blog_posts' ,on_delete=models.CASCADE)\n published = models.DateTimeField(default = timezone.now)\n created = models.DateTimeField(auto_now_add = True)\n updated = models.DateTimeField(auto_now = True)\n status = models.CharField(max_length = 9, choices = STATUS_CHOICES, default = 'draft')\n page = models.CharField(max_length = 9, choices = PAGE_CHOICES, default = 'null')\n\n\n def __str__(self):\n return \"%s\" % (self.title)\n","sub_path":"myapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"634285909","text":"import unittest.mock\nfrom os import path\nfrom unittest.mock import patch\n\nfrom homework2.task1 import (count_non_ascii_chars, count_punctuation_chars,\n get_longest_diverse_words,\n get_most_common_non_ascii_char, get_rarest_char)\n\ncurrent_dir = path.dirname(__file__)\nfilename = 
path.join(current_dir, \"test_data.txt\")\n\ndata = \"аабббббб аабббаабаа абвб вбвба бвавбв бвабваб аб аб аб аб аб аб аб аб\"\n\n\ndef test_function_that_opens_file_mock():\n \"\"\"Check longest diverse words\"\"\"\n mock_on = unittest.mock.mock_open(read_data=data)\n with patch(\"builtins.open\", mock_on):\n result = get_longest_diverse_words(path.join(current_dir, \"test_mock.txt\"))\n assert result == [\n \"абвб\",\n \"вбвба\",\n \"бвавбв\",\n \"бвабваб\",\n \"аабббаабаа\",\n \"аабббббб\",\n \"аб\",\n \"аб\",\n \"аб\",\n \"аб\",\n ]\n\n\ndef test_get_rarest_char():\n \"\"\"Checking find rarest symbol for document\"\"\"\n dat = (\n \"Es handelt sich um eine Kernfrage unserer Zeit, das hei\\u00dft, \"\n \"um eine Frage, die auf alle F\\u00e4lle Gef\\u00e4hrdung mit sich bringt.\"\n )\n mock_on = unittest.mock.mock_open(read_data=dat)\n with patch(\"builtins.open\", mock_on):\n result = get_rarest_char(path.join(current_dir, \"test_mock.txt\"))\n assert result == \"EKZßGb.\"\n\n\ndef test_count_punctuation_chars():\n \"\"\"Checking count every punctuation char\"\"\"\n assert count_punctuation_chars(filename) == 19\n\n\ndef test_count_non_ascii_chars():\n \"\"\"Checking count every non ascii char\"\"\"\n assert count_non_ascii_chars(filename) == 42\n\n\ndef test_get_most_common_non_ascii_char():\n \"\"\"Checking find most common non ascii char for document\"\"\"\n assert get_most_common_non_ascii_char(filename) == \"à\"\n","sub_path":"tests/homework2/test_hw2_task1.py","file_name":"test_hw2_task1.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"202191423","text":"'''\nCreated on May 31, 2010\n\n@author: Guan Gui, Will Zhang\n$LastChangedBy$\n'''\nfrom collections import defaultdict\nimport math\n\n__version__ = \"$Rev$\"\n\nclass LanguageModel(object):\n \"\"\"\n This class implements a N-Gram Language Model for Phrase Based Translation \n Systems. 
\n \"\"\"\n\n def __init__(self, n, aligned_sent_collection):\n '''\n Initialize a new C{LanguageModel} using n-gram model\n \n @param n: n-gram size\n @type n: C{int}\n @param aligned_sent_collection: collection of all C{AlignedSent} in a \n given corpus\n @type aligned_sent_collection: C{AlignedSentCollection}\n '''\n self._ngram = NGramModel(n, aligned_sent_collection)\n self._n = n\n if n > 1:\n self._nminus1gram = NGramModel(n - 1, aligned_sent_collection)\n else:\n self._nminus1gram = None\n\n @property\n def n(self):\n '''\n The order of ngram\n '''\n return self._n\n\n def __getitem__(self, key):\n '''\n Query the language model\n \n @param key: a tuple of the form (e1, e2, e3...)\n @type key: C{tuple}\n @return: P(e1 | e2 e3 ...)\n @rtype: float\n '''\n assert len(key) <= self._n, \\\n \"You cannot query LM with higher order than N:%d\" % (self._n)\n if self._n == 1:\n return self._ngram[key]\n elif self._n == 2 and len(key) == 1:\n return self._nminus1gram[key]\n elif len(key) == self._n:\n return self._ngram[key] / self._nminus1gram[key[0:-1]]\n else:\n return 1\n\nclass NGramModel(defaultdict):\n\n def __init__(self, n, aligned_sent_collect):\n '''\n Initialize a new C{NGramModel} with order n\n \n @param n: n-gram size\n @type n: C{int}\n @param aligned_sent_collect: collection of all C{AlignedSent} in a \n given corpus\n @type aligned_sent_collect: C{AlignedSentCollection}\n '''\n base = super(NGramModel, self)\n base.__init__(float)\n self._n = n\n self._default_value = 0\n num_ngram = 0\n for aligned_sent in aligned_sent_collect.sents:\n for i in xrange(0, len(aligned_sent.words) - n):\n key = tuple(aligned_sent.words[i:i + n])\n base.__setitem__(key, base.__getitem__(key) + 1)\n num_ngram += 1\n b = len(aligned_sent_collect.set_words)\n b = math.pow(b, n)\n denominator = num_ngram + b\n self._default_value = 1.0 / denominator\n for k in self:\n base.__setitem__(k, (1.0 + base.__getitem__(k)) / denominator)\n\n def __getitem__(self, key):\n '''\n Query the ngram model\n \n @param key: a tuple of the form (e1, e2, e3...)\n @type key: C{tuple}\n @return: P(e1 e2 e3 ...)\n @rtype: float\n '''\n if key in self:\n return super(NGramModel, self).__getitem__(key)\n else:\n return self._default_value\n\n @property\n def n(self):\n '''\n The order of ngram\n '''\n return self._n\n\nif __name__ == '__main__':\n pass\n","sub_path":"scratch/issue38/language_model.py","file_name":"language_model.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"263512066","text":"from __future__ import unicode_literals\n\nimport json\n\nfrom django.conf import settings\nfrom django.contrib.auth import REDIRECT_FIELD_NAME\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.http import urlquote\nfrom django.views.generic.base import TemplateView\n\nfrom forms_builder.forms.forms import FormForForm\nfrom forms_builder.forms.models import Form\nfrom forms_builder.forms.settings import EMAIL_FAIL_SILENTLY\nfrom forms_builder.forms.signals import form_invalid, form_valid\nfrom forms_builder.forms.utils import split_choices\n\n\nclass FormDetail(TemplateView):\n\n template_name = \"forms_builder/form_detail.html\"\n\n def get_context_data(self, **kwargs):\n context = super(FormDetail, self).get_context_data(**kwargs)\n published = 
Form.objects.published(for_user=self.request.user)\n context[\"form\"] = get_object_or_404(published, slug=kwargs[\"slug\"])\n return context\n\n def get(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n return self.render_to_response(context)\n\n def post(self, request, *args, **kwargs):\n published = Form.objects.published(for_user=request.user)\n form = get_object_or_404(published, slug=kwargs[\"slug\"])\n form_for_form = FormForForm(form, RequestContext(request),\n request.POST or None,\n request.FILES or None)\n if not form_for_form.is_valid():\n form_invalid.send(sender=request, form=form_for_form)\n else:\n # Attachments read must occur before model save,\n # or seek() will fail on large uploads.\n attachments = []\n for f in form_for_form.files.values():\n f.seek(0)\n attachments.append((f.name, f.read()))\n entry = form_for_form.save()\n form_valid.send(sender=request, form=form_for_form, entry=entry)\n if not self.request.is_ajax():\n return redirect(form.redirect_url or\n reverse(\"form_sent\", kwargs={\"slug\": form.slug}))\n context = {\"form\": form, \"form_for_form\": form_for_form}\n return self.render_to_response(context)\n\n def render_to_response(self, context, **kwargs):\n if self.request.method == \"POST\" and self.request.is_ajax():\n json_context = json.dumps({\n \"errors\": context[\"form_for_form\"].errors,\n \"form\": context[\"form_for_form\"].as_p(),\n \"message\": context[\"form\"].response,\n \"redirect_url\": context[\"form\"].redirect_url\n })\n return HttpResponse(json_context, content_type=\"application/json\")\n return super(FormDetail, self).render_to_response(context, **kwargs)\n\n\n\nform_detail = FormDetail.as_view()\n\n\ndef form_sent(request, slug, template=\"forms_builder/form_sent.html\"):\n \"\"\"\n Show the response message.\n \"\"\"\n published = Form.objects.published(for_user=request.user)\n context = {\"form\": get_object_or_404(published, slug=slug)}\n return render_to_response(template, context, RequestContext(request))\n","sub_path":"forms_builder/forms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"511267381","text":"import urllib.request\nimport urllib.parse\nimport json, time\nfrom bs4 import BeautifulSoup\nimport csv\n\n\n# doc:https://www.crummy.com/software/BeautifulSoup/bs4/doc.zh/\ndef timeMilli2Date(timeMillis):\n return time.strftime(\"%Y-%m-%d\", time.localtime(timeMillis / 1000))\n\n\ndef collectStockCode(keyword, pages=200):\n url = 'http://gs.amac.org.cn/amac-infodisc/api/pof/manager?rand=0.22292638394801246&page=0&size=%d' % pages\n req = urllib.request.Request(url)\n fakeHeaders(req)\n params = ''\n if keyword == '':\n params = json.dumps({})\n keyword = '总计'\n else:\n params = json.dumps({'keyword': keyword})\n params = bytes(params, 'utf8')\n response = urllib.request.urlopen(req, params)\n result = response.read().decode('utf8')\n full_json = json.loads(result)\n content_arr = full_json[\"content\"]\n printTitle = False\n print('总长度%d' % len(content_arr))\n cvsArrs = []\n counter = 0\n corpName = ''\n for corp in content_arr:\n corpUrl = 'http://gs.amac.org.cn/amac-infodisc/res/pof/manager/%s' % corp[\"url\"]\n try:\n req = urllib.request.Request(corpUrl)\n fakeHeaders(req)\n response = urllib.request.urlopen(req)\n except:\n print('网络连接出现问题,网址:%s' % corpUrl)\n continue\n soup = BeautifulSoup(response.read().decode())\n titles = soup.find_all(attrs={'class': 
'td-title'})\n contents = soup.find_all(attrs={'class': 'td-content'})\n firstRow = []\n if not printTitle:\n printTitle = True\n for t in titles:\n if '机构诚信' in t.text:\n continue\n titleStr = t.text.strip().replace(':', '')\n firstRow.append(titleStr)\n cvsArrs.append(firstRow)\n i = 0\n row = []\n for ct in contents:\n if i == 0:\n i += 1\n continue\n ct = ct.text.strip().replace('\\n', '').replace(' ', '').replace('\\t', '')\n if i == 1:\n ct = ct.split('\\xa0')[0]\n corpName = ct\n row.append(ct)\n i += 1\n if i > 19:\n break\n counter += 1\n print('解析完成第%d条,对应公司:%s' % (counter, corpName))\n cvsArrs.append(row)\n write2Csv(cvsArrs, keyword)\n return\n\n\ndef write2Csv(cvsArrs, keyword):\n # 这里一定要加上encoding属性 不然会造成乱码\n # csv 文档 : https://docs.python.org/3/library/csv.html\n with open('%s.csv' % keyword, \"w\", newline=\"\", encoding='utf-8') as datacsv:\n # dialect为打开csv文件的方式,默认是excel,delimiter=\"\\t\"参数指写入的时候的分隔符\n csvWriter = csv.writer(datacsv, dialect=\"excel\")\n for arr in cvsArrs:\n csvWriter.writerow(arr)\n\n\ndef fakeHeaders(req):\n req.add_header('Host', 'gs.amac.org.cn')\n req.add_header('Origin', 'http://gs.amac.org.cn')\n req.add_header('Accept', 'application/json, text/javascript, */*; q=0.01')\n req.add_header('Connection', 'keep-alive')\n req.add_header('X-Requested-With', 'XMLHttpRequest')\n req.add_header('Content-Type', 'application/json')\n req.add_header('Referer', 'http://gs.amac.org.cn/amac-infodisc/res/pof/manager/index.html')\n req.add_header('User-Agent',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36')\n # req.add_header('Accept-Encoding', 'gzip, deflate')\n # 上面这句如果加上会导致服务器返回gzip内容\n req.add_header('Accept-Language', 'zh-CN,zh;q=0.8,en;q=0.6')\n\n\n'''\n{\n \"id\": \"101000000739\",\n \"managerName\": \"广州越秀产业投资基金管理股份有限公司\",\n \"artificialPersonName\": \"王恕慧\",\n \"registerNo\": \"P1000696\",\n \"establishDate\": 1312156800000,\n \"managerHasProduct\": null,\n \"url\": \"101000000739.html\",\n \"registerDate\": 1396310400000,\n \"registerAddress\": \"广东省广州市天河区珠江西路5号广州国际金融中心63层\",\n \"registerProvince\": \"广东省\",\n \"registerCity\": \"广州市\",\n \"regAdrAgg\": \"广东省\",\n \"fundCount\": 17,\n \"fundScale\": 1755326.2139,\n \"paidInCapital\": 1220161.2039,\n \"subscribedCapital\": 2163347.2139,\n \"hasSpecialTips\": false,\n \"inBlacklist\": false,\n \"hasCreditTips\": false,\n \"regCoordinate\": \"23.123886896807683,113.32937730581973\",\n \"officeCoordinate\": \"23.123886896807683,113.32937730581973\",\n \"officeAddress\": \"广东省广州市天河区珠江西路5号广州国际金融中心主塔写字楼第63层01-A、E单元\",\n \"officeProvince\": \"广东省\",\n \"officeCity\": \"广州市\",\n \"primaryInvestType\": null\n}\n'''\n\n'''POST HTTP/1.1\nPOST /amac-infodisc/api/pof/manager?rand=0.22282638394801246&page=0&size=20 HTTP/1.1\nHost: gs.amac.org.cn\nConnection: keep-alive\nContent-Length: 26\nAccept: application/json, text/javascript, */*; q=0.01\nOrigin: http://gs.amac.org.cn\nX-Requested-With: XMLHttpRequest\nUser-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36\nContent-Type: application/json\nReferer: http://gs.amac.org.cn/amac-infodisc/res/pof/manager/index.html\nAccept-Encoding: gzip, deflate\nAccept-Language: zh-CN,zh;q=0.8,en;q=0.6\n'''\n\n#collectStockCode('股权投资', 2350)\ncollectStockCode('', 25)\n# collectStockCode('产业投资')\n# with open(\"infos.csv\", \"w\", newline=\"\",encoding='utf-8') as datacsv:\n# # 
dialect为打开csv文件的方式,默认是excel,delimiter=\"\\t\"参数指写入的时候的分隔符\n# csvwriter = csv.writer(datacsv, dialect=(\"excel\"))\n# # csv文件插入一行数据,把下面列表中的每一项放入一个单元格(可以用循环插入多行)\n# csvwriter.writerow(['你好', '再见'])\n","sub_path":"corpInfo.py","file_name":"corpInfo.py","file_ext":"py","file_size_in_byte":5903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"362938890","text":"# -*- coding: utf-8 -*-\n# List 7-16 ケンドールのτをscipy.stats.kendalltauによって計算するプログラム例\n\n# 分割表からデータを生成し、それをscipy.stats.kendalltauによって計算する例\nimport numpy as np\nimport scipy.stats as st\n\nd = np.array([ [ 91 , 284 , 22 ],\n[ 35 , 106 , 6 ],\n[ 52 , 55 , 10 ] ] )\n# dの分割表に従うデータを生成するループ\nz = [[[i,j]]*(d[i,j]) for i in range(3) for j in range(3)]\n# zを2重リストから平らなリストに変換する\ntdata = []\nfor v in z:\n tdata.extend(v)\n# kendalltauの入力はxの値のベクトルとyの値のベクトルなのでそれに合わせる\nx = [u[0] for u in tdata]\ny = [u[1] for u in tdata]\n# kencalltauを呼び出す。結果はtauとp値が返る\ntau, p_value = st.kendalltau(x, y)\nprint('tau', tau.round(4), 'p値', p_value)\n# 出力結果は\n# tau -0.1101 p値 2.29971067849e-05\n","sub_path":"py/山内 テキスト text sample code/ohm/ch7/list7-16.py","file_name":"list7-16.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"360060681","text":"import numpy as np\nimport glob\nfrom scipy.interpolate import interp1d\n\ntrans_data = np.loadtxt('plot_0.5.data')\ndata = np.load('trans_pntg_rms.npy')\nfreqs = np.loadtxt('freq.out')\n\nff = interp1d(data[:,0], data[:,1])\n\nxx = freqs\n\nsig_120 = ff(xx)\n\nsig_x = np.loadtxt('sigma_x.out')\n\nt_x = ((sig_120/sig_x)**2)*120\n\nnp.savetxt('t_x_pntg.out', t_x)","sub_path":"codes/interp_norm_to_archive.py","file_name":"interp_norm_to_archive.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"75179608","text":"\" pyradio -- Console radio player. 
\"\n\nversion_info = (0, 8, 8, 2)\n\n# Application state:\n# New stable version: ''\n# Beta version: 'betax', x=1,2,3...\n# RC version: 'RCx', x=1,23...\napp_state = ''\n\n__version__ = version = '.'.join(map(str, version_info))\n__project__ = __name__\n__author__ = \"Ben Dowling\"\n__license__ = \"MIT\"\n","sub_path":"pyradio/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"80579390","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 8 17:38:36 2018\n\n@author: test\n\"\"\"\nimport os\nimport sys\nimport multiprocessing\nsys.path.append(os.path.abspath(\"/Users/janhavisavla/Desktop/Jio_cloud/\"))\nimport videocode\nfrom multiprocessing import Queue, Process, cpu_count\n\n\ndef myMultiprocessing(root_folder):\n '''\n Splits the source filelist into sublists according to the number of CPU cores and provides multiprocessing of them.\n '''\n files = os.listdir(root_folder)\n #q = Queue()\n procs = []\n for k in range(0,4):\n # Split the source filelist into several sublists.\n lst = [files[j] for j in range(1, len(files)) if j % 4 == k]\n print(lst)\n \n if len(lst)>0:\n p = Process(target=videocode.main, args=(lst, root_folder))\n p.start()\n procs += [p]\n \n #all_results = []\n p.join()\n #for i in range(0, len(procs)):\n # Save all results from the queue.\n #while(q):\n #all_results += q.get()\n\n \nroot_folder = \"/Users/janhavisavla/Desktop/Image_test_data/Parent_dir/\"\n\nif __name__ == \"__main__\":\n myMultiprocessing(root_folder)\n","sub_path":"multiproccess.py","file_name":"multiproccess.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"108117290","text":"import dash_html_components as html\nimport dash_core_components as dcc\n\nside_chart = html.Div(children=[\n dcc.Store(id= 'index-score', data=[0,1,2]),\n dcc.Store(id= 'index-county', data = 0),\n dcc.Store(id='total-ctys', data=50),\n dcc.Store(id='cty-distance', data= 50),\n # html.Pre(id='test'),\n html.Div([\n html.I(id='prev-county', className=\"fa fa-caret-left\", **{'aria-hidden': 'true'}, style = {'display':'inline-block', 'font-size': '15px', 'margin-right': '8px'}),\n html.P(id= 'count-county' ,#children='1 of 50',\n style= {'display':'inline-block', 'textOverflow':'ellipsis', 'fontSize': '13px'}),\n html.I(id='next-county', className= \"fa fa-caret-right\", **{'aria-hidden': 'true'}, style={'display':'inline-block', 'font-size': '15px', 'margin-left': '8px'})\n ], style= {'textAlign': 'center', 'position':'static'}),\n html.Div(id= \"chart_num\", children =[]\n #html.H4(children='County Score: Severe COVID Case Complications'),\n #generate_table(full_datasets25[criteria[0]])\n , style = {'position':'static','display':'grid'}),\n html.Div(id= \"choose_score\", children = [\n html.I(id='prev-score', className=\"fa fa-caret-left\", **{'aria-hidden': 'true'}, style = {'display':'inline-block', 'fontSize': '16px','marginRight': '10px', 'marginTop':'5px'}),\n html.Div(html.P(id= 'count-score', #children='Severe COVID Case Complications'\n ), style = {'width': '100%'}), #style = {'width': 150, 'position':'static', 'textOverflow':'ellipsis'}, \n html.I(id='next-score', className=\"fa fa-caret-right\", **{'aria-hidden': 'true'}, style={'display':'inline-block', 'fontSize': '16px', 'marginLeft': '10px',\n 'marginTop':'5px'})\n ],style={'textAlign': 'center', 
'position':'static', 'display':'flex', 'marginTop':'20px'})#'display':'flex', 'margin-top':55,'margin-left':30, 'position':'static'}) #'position':'static', 'text-align':'center'})\n ],\n style = {\n #'label':'no legend',\n 'width':'20%', 'maxHeight':'300px', #'maxHeight':400, \n #'overflowY': 'scroll', \n #'margin-left': 10,\n 'overflow':'scroll',\n 'border': '5px solid gray', #'margin': 10,\n 'display': 'grid', \n #'position':'static', \n 'padding':'5px'\n })\n\n\n","sub_path":"AllComponents/side_chart.py","file_name":"side_chart.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"350246377","text":"from setuptools import find_packages, setup\nimport pathlib\n\nHERE = pathlib.Path(__file__).parent\n\nREADME = ( HERE / \"README.md\").read_text() \nsetup(\n name='AutoFeedback',\n packages=find_packages(),\n install_requires=[\n 'matplotlib',\n 'numpy'],\n version='0.1.5',\n description='check basic python exercises with pretty feedback',\n long_description=README,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/abrown41/AutoFeedback\",\n author='Andrew Brown',\n author_email=\"andrew.brown@qub.ac.uk\",\n license='MIT',\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"331453326","text":"import argparse\nimport os\nimport inflect\nimport matplotlib.pyplot as plt\nimport IPython.display as ipd\nfrom tacotron2_model import Tacotron2\nimport torch\nimport numpy as np\nimport sys\nimport matplotlib\n\nsys.path.append(\"synthesis/\")\nmatplotlib.use(\"Agg\")\n\nfrom training.clean_text import clean_text\n\n\nSYMBOLS = \"_-!'(),.:;? 
ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"\nSYMBOL_TO_ID = {s: i for i, s in enumerate(SYMBOLS)}\n\n\ndef load_model(model_path):\n if torch.cuda.is_available():\n model = Tacotron2().cuda()\n model.load_state_dict(torch.load(model_path)[\"state_dict\"])\n _ = model.cuda().eval().half()\n else:\n model = Tacotron2()\n model.load_state_dict(torch.load(model_path, map_location=torch.device(\"cpu\"))[\"state_dict\"])\n return model\n\n\ndef load_waveglow(waveglow_path):\n waveglow = torch.load(waveglow_path)[\"model\"]\n if torch.cuda.is_available():\n waveglow.cuda().eval().half()\n\n for k in waveglow.convinv:\n k.float()\n return waveglow\n\n\ndef generate_graph(alignments, filepath):\n data = alignments.float().data.cpu().numpy()[0].T\n plt.imshow(data, aspect=\"auto\", origin=\"lower\", interpolation=\"none\")\n plt.savefig(filepath)\n\n\ndef generate_audio(mel, waveglow, filepath, sample_rate=22050):\n with torch.no_grad():\n audio = waveglow.infer(mel, sigma=0.666)\n\n audio = audio[0].data.cpu().numpy()\n audio = ipd.Audio(audio, rate=sample_rate)\n with open(filepath, \"wb\") as f:\n f.write(audio.data)\n\n\ndef text_to_sequence(text):\n sequence = np.array([[SYMBOL_TO_ID[s] for s in text if s in SYMBOL_TO_ID]])\n if torch.cuda.is_available():\n return torch.autograd.Variable(torch.from_numpy(sequence)).cuda().long()\n else:\n return torch.autograd.Variable(torch.from_numpy(sequence)).cpu().long()\n\n\ndef synthesize(model, waveglow_model, text, inflect_engine, graph=None, audio=None):\n text = clean_text(text, inflect_engine)\n sequence = text_to_sequence(text)\n _, mel_outputs_postnet, _, alignments = model.inference(sequence)\n\n if graph:\n generate_graph(alignments, graph)\n\n if audio:\n generate_audio(mel_outputs_postnet, waveglow_model, audio)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-m\", \"--model_path\", type=str, help=\"tacotron2 model path\", required=True)\n parser.add_argument(\"-w\", \"--waveglow_model_path\", type=str, help=\"waveglow model path\", required=True)\n parser.add_argument(\"-t\", \"--text\", type=str, help=\"text to synthesize\", required=True)\n parser.add_argument(\"-g\", \"--graph_output_path\", type=str, help=\"path to save alignment graph to\", required=False)\n parser.add_argument(\"-a\", \"--audio_output_path\", type=str, help=\"path to save output audio to\", required=False)\n args = parser.parse_args()\n\n assert os.path.isfile(args.model_path), \"Model not found\"\n assert os.path.isfile(args.waveglow_model_path), \"Waveglow model not found\"\n\n model = load_model(args.model_path)\n waveglow_model = load_waveglow(args.waveglow_path)\n inflect_engine = inflect.engine()\n\n synthesize(\n model,\n waveglow_model,\n args.text,\n inflect_engine,\n args.graph_output_path,\n args.audio_output_path,\n )\n","sub_path":"synthesis/synthesize.py","file_name":"synthesize.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"183384376","text":"# import the source{d} engine\nfrom sourced.engine import Engine\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import *\n\n# start a new session\nspark = SparkSession.builder \\\n .master(\"local[*]\").appName(\"Examples\") \\\n .getOrCreate()\n\nengine = Engine(spark, \"/repositories\")\n\n# get all the files of all head commits\nhead_files = engine.repositories.filter(\"is_fork = false\") \\\n .references \\\n 
.head_ref.commits.first_reference_commit \\\n .files \\\n .classify_languages() \\\n .filter(\"is_binary = false\") \\\n .select(\"file_hash\", \"path\", \"content\", \"lang\") \\\n .filter(\"lang is not null\")\n\n# shows top languages per number of files\ntop_ten_langs = head_files.distinct() \\\n .groupBy(\"lang\").agg(count(\"*\").alias(\"count\")) \\\n .orderBy(\"count\").sort(desc(\"count\")).limit(10) \\\n .show()\n","sub_path":"hugo/data/code/engine-2.py","file_name":"engine-2.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"310390001","text":"\nfrom turtle import *\nimport time\nimport random\nfrom ball import Ball\ntracer(0)\nlife_counter=3\n# hideturtle()\nRUNNING=True\nSLEEP=0.0077\nSCREEN_WIDTH=getcanvas().winfo_width()/2\nSCREEN_HEIGHT=getcanvas().winfo_height()/2\nregister_shape(\"ballimage.gif\")\ncolor(\"pink\")\n# dot(1000)\nwrite(\"Agario\", align=\"center\", font=(\"Arial\",150))\n\n\n\n\nmy_ball=Ball(100,0,0,0,20,\"black\")\nNUMBER_OF_BALLS = 5\nMINIMUM_BALL_RADIUS = 10\nMAXIMUM_BALL_RADIUS = 35\nMINIMUM_BALL_DX = -2\nMAXIMUM_BALL_DX = 2\nMINIMUM_BALL_DY = -2\nMAXIMUM_BALL_DY = 2\t\nBALLS=[]\n#life\nshowturtle()\nlife= clone()\nlife.shape(\"ballimage.gif\")\nlife.penup()\nlife.goto(100,200)\nlife2=life.clone()\nlife2.goto(150,200)\nlife3=life.clone()\nlife3.goto(200,200)\nhideturtle()\ngetscreen().update()\n\nfor i in range(NUMBER_OF_BALLS):\n\n\tx=random.randint(int(-SCREEN_WIDTH) + int(MAXIMUM_BALL_RADIUS), int(SCREEN_WIDTH) - int(MAXIMUM_BALL_RADIUS))\n\ty=random.randint(int(-SCREEN_WIDTH) + int(MAXIMUM_BALL_RADIUS), int(SCREEN_WIDTH) - int(MAXIMUM_BALL_RADIUS))\n\tdx=random.randint(int(MINIMUM_BALL_DX ),int (MAXIMUM_BALL_DX))\n\tdy=random.randint(int(MINIMUM_BALL_DY ),int (MAXIMUM_BALL_DY))\n\tr=random.randint(int(MINIMUM_BALL_RADIUS),int(MAXIMUM_BALL_RADIUS))\n\tcolor = (random.randint(0,255),random.randint(0,255), random.randint(0,255))\n\n\twhile dx==0:\n\t\tdx=random.randint(int(MINIMUM_BALL_DX ),int (MAXIMUM_BALL_DX))\n\twhile dy==0:\n\t\tdy=random.randint(int(MINIMUM_BALL_DY ),int (MAXIMUM_BALL_DY))\n\n\n\tNEW_BALL=Ball(x,y,dx,dy,r,color)\n\tBALLS.append(NEW_BALL)\n\n\n\ndef move_all_balls():\n\tfor i in BALLS:\n\t\ti.move(SCREEN_WIDTH,SCREEN_HEIGHT)\n\n\n\ndef collide(ball_a,ball_b):\n\tif ball_a==ball_b:\n\t\treturn False\n\tb1y=ball_a.ycor()\n\tb2y=ball_b.ycor()\n\tb1x=ball_a.xcor()\n\tb2x=ball_b.xcor()\n\tb1r=ball_a.r\n\tb2r=ball_b.r\n\tsr=b1r+b2r\n\td=((b2x-b1x)**2+(b2y-b1y)**2)**0.5\n\tif d<=sr:\n\t\treturn True\n\telse:\n\t\treturn False\ndef check_all_balls_collision():\n\tfor ball_a in BALLS:\n\t\tfor ball_b in BALLS:\n\t\t\tif collide(ball_a,ball_b)== True:\n\n\t\t\t\tradius1=ball_a.r\n\t\t\t\tradius2=ball_b.r\n\n\t\t\t\tif radius1>radius2:\n\t\t\t\t\tball_a.r=radius1+1\n\n\t\t\t\t\tx=random.randint(int(-SCREEN_WIDTH) + int(MAXIMUM_BALL_RADIUS), int(SCREEN_WIDTH) - int(MAXIMUM_BALL_RADIUS))\n\t\t\t\t\ty=random.randint(int(-SCREEN_WIDTH) + int(MAXIMUM_BALL_RADIUS), int(SCREEN_WIDTH) - int(MAXIMUM_BALL_RADIUS))\n\t\t\t\t\tdx=random.randint(int(MINIMUM_BALL_DX ),int (MAXIMUM_BALL_DX))\n\t\t\t\t\tdy=random.randint(int(MINIMUM_BALL_DY ),int (MAXIMUM_BALL_DY))\n\t\t\t\t\tr=random.randint(int(MINIMUM_BALL_RADIUS),int(MAXIMUM_BALL_RADIUS))\n\t\t\t\t\tcolor = (random.randint(0,255),random.randint(0,255), random.randint(0,255))\n\n\n\t\t\t\t\twhile dx==0:\n\t\t\t\t\t\tdx=random.randint(int(MINIMUM_BALL_DX ),int (MAXIMUM_BALL_DX))\n\t\t\t\t\twhile 
dy==0:\n\t\t\t\t\t\tdy=random.randint(int(MINIMUM_BALL_DY ),int (MAXIMUM_BALL_DY))\n\n\n\t\t\t\t\tball_b.goto(x,y)\n\t\t\t\t\tball_b.dx=dx\n\t\t\t\t\tball_b.dy=dy\n\t\t\t\t\tball_b.r=r\n\t\t\t\t\tball_b.color(color)\n\t\t\t\t\tball_b.shapesize(r/10)\n\t\t\t\t\tball_a.shapesize(ball_a.r/10)\n\n\t\t\t\tif radius1\", movearound)\ngetscreen().listen()\n\ndef score():\n\t\n\tscore_turtle=Turtle()\n\tscore_turtle.hideturtle()\n\tscore_turtle.clear()\n\tscore_turtle.pu()\n\tscore_turtle.color(\"black\")\n\tscore_turtle.goto(-200, 200)\n\tscore_turtle.write(\"Score: \" + str((my_ball.r - 20)+1), font=(\"Arial\", 16, \"normal\"))\n\tscore_turtle.clear()\n\t\n\t\n\nwhile RUNNING == True:\n\tif SCREEN_WIDTH != getcanvas().winfo_width()/2 or SCREEN_HEIGHT!=getcanvas().winfo_height()/2:\n\t\tSCREEN_WIDTH = getcanvas().winfo_width()/2 \n\t\tSCREEN_HEIGHT=getcanvas().winfo_height()/2\n\tmove_all_balls()\n\tif check_myball_collision()==False:\n\t\tif life_counter==3:\n\t\t\tlife.hideturtle()\n\t\t\tlife_counter-=1\n\t\tif life_counter==2:\n\t\t\tlife2.hideturtle()\n\t\t\tlife_counter-=1\n\t\tif life_counter==1:\n\t\t\tlife3.hideturtle()\n\t\t\tlife_counter-=1\n\t\t\tRUNNING=False\n\tcheck_all_balls_collision()\n\t# my_ball.move(SCREEN_WIDTH,SCREEN_HEIGHT)\n\tRUNNING = check_myball_collision()\n\tupdate()\n\ttime.sleep(SLEEP)\n\tscore()\n\n\nif RUNNING==False:\n\n\twrite(\"GAME OVER\",align=\"center\",font=(\"Arial\",50))\n\n\n\nmainloop()","sub_path":"ballfinale.py","file_name":"ballfinale.py","file_ext":"py","file_size_in_byte":5993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"495226233","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 12 12:41:19 2021\n\n@author: Henao\n\"\"\"\nimport math \ndef area_triangulo(s1: float, s2: float, s3: float)->float:\n '''Parameters\n ----------\n s1 : float\n Primer Lado del triangulo\n s2 : float\n Segundo Lado del triangulo\n s3 : float\n Tercer Lado del triangulo\n\n Returns\n ----------\n float\n \n El area del triangulo a partir del subperimetro s = (s1+s2+s3)/2 \n '''\n s=(s1+s2+s3)/2\n area = round(math.sqrt(s * (s-s1) * (s-s2) * (s-s3)),1)\n return area\n\ns1 = float(input(\"Escriba el valor para el primer lado: \"))\ns2 = float(input(\"Escriba el valor para el segundo lado: \"))\ns3 = float(input(\"Escriba el valor para el tercer lado: \"))\n\nprint(\"El area del triangulo es: \" , area_triangulo(s1,s2,s3))\n\n","sub_path":"area_triangulo.py","file_name":"area_triangulo.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"26800303","text":"# Copyright DataStax, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom decimal import Decimal\nimport os\nimport random\nimport unittest\n\nfrom cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT\nfrom cassandra.policies import HostFilterPolicy, RoundRobinPolicy, SimpleConvictionPolicy, \\\n WhiteListRoundRobinPolicy, ColDesc, 
AES256ColumnEncryptionPolicy, AES256_KEY_SIZE_BYTES\nfrom cassandra.pool import Host\nfrom cassandra.connection import DefaultEndPoint\n\nfrom tests.integration import local, use_singledc, TestCluster\n\nfrom concurrent.futures import wait as wait_futures\n\n\ndef setup_module():\n use_singledc()\n\n\nclass HostFilterPolicyTests(unittest.TestCase):\n\n def test_predicate_changes(self):\n \"\"\"\n Test to validate host filter reacts correctly when the predicate return\n a different subset of the hosts\n HostFilterPolicy\n @since 3.8\n @jira_ticket PYTHON-961\n @expected_result the excluded hosts are ignored\n\n @test_category policy\n \"\"\"\n external_event = True\n contact_point = DefaultEndPoint(\"127.0.0.1\")\n\n single_host = {Host(contact_point, SimpleConvictionPolicy)}\n all_hosts = {Host(DefaultEndPoint(\"127.0.0.{}\".format(i)), SimpleConvictionPolicy) for i in (1, 2, 3)}\n\n predicate = lambda host: host.endpoint == contact_point if external_event else True\n hfp = ExecutionProfile(\n load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), predicate=predicate)\n )\n cluster = TestCluster(contact_points=(contact_point,), execution_profiles={EXEC_PROFILE_DEFAULT: hfp},\n topology_event_refresh_window=0,\n status_event_refresh_window=0)\n session = cluster.connect(wait_for_all_pools=True)\n\n queried_hosts = set()\n for _ in range(10):\n response = session.execute(\"SELECT * from system.local\")\n queried_hosts.update(response.response_future.attempted_hosts)\n\n self.assertEqual(queried_hosts, single_host)\n\n external_event = False\n futures = session.update_created_pools()\n wait_futures(futures, timeout=cluster.connect_timeout)\n\n queried_hosts = set()\n for _ in range(10):\n response = session.execute(\"SELECT * from system.local\")\n queried_hosts.update(response.response_future.attempted_hosts)\n self.assertEqual(queried_hosts, all_hosts)\n\n\nclass WhiteListRoundRobinPolicyTests(unittest.TestCase):\n\n @local\n def test_only_connects_to_subset(self):\n only_connect_hosts = {\"127.0.0.1\", \"127.0.0.2\"}\n white_list = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(only_connect_hosts))\n cluster = TestCluster(execution_profiles={\"white_list\": white_list})\n #cluster = Cluster(load_balancing_policy=WhiteListRoundRobinPolicy(only_connect_hosts))\n session = cluster.connect(wait_for_all_pools=True)\n queried_hosts = set()\n for _ in range(10):\n response = session.execute('SELECT * from system.local', execution_profile=\"white_list\")\n queried_hosts.update(response.response_future.attempted_hosts)\n queried_hosts = set(host.address for host in queried_hosts)\n self.assertEqual(queried_hosts, only_connect_hosts)\n\nclass ColumnEncryptionPolicyTest(unittest.TestCase):\n\n def _recreate_keyspace(self, session):\n session.execute(\"drop keyspace if exists foo\")\n session.execute(\"CREATE KEYSPACE foo WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}\")\n session.execute(\"CREATE TABLE foo.bar(encrypted blob, unencrypted int, primary key(unencrypted))\")\n\n def test_end_to_end_prepared(self):\n\n # We only currently perform testing on a single type/expected value pair since CLE functionality is essentially\n # independent of the underlying type. 
We intercept data after it's been encoded when it's going out and before it's\n # encoded when coming back; the actual types of the data involved don't impact us.\n expected = 12345\n expected_type = \"int\"\n\n key = os.urandom(AES256_KEY_SIZE_BYTES)\n cl_policy = AES256ColumnEncryptionPolicy()\n col_desc = ColDesc('foo','bar','encrypted')\n cl_policy.add_column(col_desc, key, expected_type)\n\n cluster = TestCluster(column_encryption_policy=cl_policy)\n session = cluster.connect()\n self._recreate_keyspace(session)\n\n prepared = session.prepare(\"insert into foo.bar (encrypted, unencrypted) values (?,?)\")\n session.execute(prepared, (expected,expected))\n\n # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted\n # values here to confirm that we don't interfere with regular processing of unencrypted vals.\n (encrypted,unencrypted) = session.execute(\"select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering\", (expected,)).one()\n self.assertEquals(expected, encrypted)\n self.assertEquals(expected, unencrypted)\n\n # Confirm the same behaviour from a subsequent prepared statement as well\n prepared = session.prepare(\"select encrypted, unencrypted from foo.bar where unencrypted = ? allow filtering\")\n (encrypted,unencrypted) = session.execute(prepared, [expected]).one()\n self.assertEquals(expected, encrypted)\n self.assertEquals(expected, unencrypted)\n\n def test_end_to_end_simple(self):\n\n expected = 67890\n expected_type = \"int\"\n\n key = os.urandom(AES256_KEY_SIZE_BYTES)\n cl_policy = AES256ColumnEncryptionPolicy()\n col_desc = ColDesc('foo','bar','encrypted')\n cl_policy.add_column(col_desc, key, expected_type)\n\n cluster = TestCluster(column_encryption_policy=cl_policy)\n session = cluster.connect()\n self._recreate_keyspace(session)\n\n # Use encode_and_encrypt helper function to populate date\n session.execute(\"insert into foo.bar (encrypted, unencrypted) values (%s,%s)\",(cl_policy.encode_and_encrypt(col_desc, expected), expected))\n\n # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted\n # values here to confirm that we don't interfere with regular processing of unencrypted vals.\n (encrypted,unencrypted) = session.execute(\"select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering\", (expected,)).one()\n self.assertEquals(expected, encrypted)\n self.assertEquals(expected, unencrypted)\n\n # Confirm the same behaviour from a subsequent prepared statement as well\n prepared = session.prepare(\"select encrypted, unencrypted from foo.bar where unencrypted = ? 
allow filtering\")\n (encrypted,unencrypted) = session.execute(prepared, [expected]).one()\n self.assertEquals(expected, encrypted)\n self.assertEquals(expected, unencrypted)\n","sub_path":"tests/integration/standard/test_policies.py","file_name":"test_policies.py","file_ext":"py","file_size_in_byte":7532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"425897956","text":"\nimport http.server\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport logging\nimport socketserver\n\n\nclass SimpleHTTPRequestHandler(BaseHTTPRequestHandler):\n\n def _set_response(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n\n def do_GET(self):\n \"\"\"Respond to a GET request.\"\"\"\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(\"Title goes here.\")\n self.wfile.write(\"

This is a test.

\")\n # If someone went to \"http://something.somewhere.net/foo/bar/\",\n # then s.path equals \"/foo/bar/\".\n self.wfile.write(\"

You accessed path: %s

\" % self.path)\n self.wfile.write(\"\")\n \ndef main():\n try:\n PORT = 8000\n\n Handler = http.server.SimpleHTTPRequestHandler\n\n with socketserver.TCPServer((\"\", PORT), Handler) as httpd:\n print(\"serving at port\", PORT)\n httpd.serve_forever()\n\n except KeyboardInterrupt:\n print (\" ^C entered, stopping web server....\")\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"Comprimir/comprimirOne.py","file_name":"comprimirOne.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"109311426","text":"import time\r\n\r\n\"\"\" Ejercicio 1\r\n La función max() del ejercicio 1 (primera parte) y la función max_de_tres() del ejercicio 2 (primera parte),\r\n solo van a funcionar para 2 o 3 números. Supongamos que tenemos mas de 3 números o no sabemos cuantos números son. \r\n Escribir una función max_in_list() que tome una lista de números y devuelva el mas grande. \"\"\"\r\n\r\ndef max_in_list(lista):\r\n \treturn max(lista)\r\n\r\n\r\nprint(max_in_list([2,6,3,9,4,1,5,11,45,2,55,1,98,3,4,2]))\r\n\r\n\r\n\r\n\"\"\" Ejercicio 2\r\n Escribir una función mas_larga() que tome una lista de palabras y devuelva la mas larga. \"\"\"\r\n\r\ndef mas_larga(palabras):\r\n \tlista=[]\r\n\t\r\n \tfor i in palabras:\r\n \t\t lista.append(len(i))\r\n \tmaxim= max(lista)\r\n \tfor a in palabras:\r\n \t\tif maxim is len(a):\r\n \t\t\treturn a\r\n\t\r\n\t\r\nprint(mas_larga([\"hola\",\"mi\",\"marios\",\"cacacolapituperro\",\"escalada\",\"motocicleta\",\"El rey en el Norte\",\"candela\"]))\r\n\r\n\r\n\"\"\" Ejercicio 3\r\n Escribir una función filtrar_palabras() que tome una lista de palabras y un entero n, y devuelva las palabras que tengan mas de n caracteres. \r\n \"\"\"\r\n\"\"\"\r\ndef filtar_palabras(palabras,n):\r\n\tlista=[]\r\n \t\r\n \tfor i in palabras:\r\n \t\tif len(i) > n:\r\n \t\t\tlista.append(i)\r\n\t\r\n\treturn lista\r\n\t\t\r\nnum=int(input(\"Introduce un número: \"))\r\nprint(filtar_palabras([\"calamar\",\"hola\",\"mi\",\"marios\",\"cacacolapituperro\"],num))\r\n\"\"\"\r\n\"\"\"Ejercicio 4\r\nEscribir un programa que le diga al usuario que ingrese una cadena.\r\nEl programa tiene que evaluar la cadena y decir cuantas letras mayúsculas tiene. \"\"\"\r\n\r\ndef mayus_Cadena(cadena):\r\n \tmayus=['A','B','C','D','E','F','G','H','I','J','K','L','M','N','Ñ','O','P','Q','R','S','T','U','V','W','Y','Z']\r\n \tcount=0\r\n \tfor i in range(len(cadena)):\r\n \t\tfor j in range(len(mayus)):\r\n \t\t\tif cadena[i] is mayus[j]:\r\n \t\t\t\tcount+=1\r\n\r\n \treturn count\r\n\r\n\r\ncade= input(\"Introduzca una cadena de carateres: \")\r\nprint(mayus_Cadena(cade))\r\n\r\n\"\"\" Ejercicio 5\r\nConstruir un pequeño programa que convierta números binarios en enteros. \"\"\"\r\n\r\ndef bin_int(bina):\r\n \tnum=int(bina,2)\r\n \treturn num\r\n\r\nbinario=input(\"Introduzca un numero binario: \")\r\nprint(bin_int(binario))\r\n\r\n\"\"\"Ejercicio 6\r\nEscribir un pequeño programa donde:\r\n- Se ingresa el año en curso.\r\n- Se ingresa el nombre y el año de nacimiento de tres personas.\r\n- Se calcula cuántos años cumplirán durante el año en curso.\r\n- Se imprime en pantalla. 
\"\"\"\r\n\r\n\r\ndef año_cumple():\r\n\ttiempo = time.gmtime()\r\n\tsegundos_dia=tiempo.tm_hour*3600 +tiempo.tm_min*60 + tiempo.tm_sec\r\n\r\n\tfor i in range(3):\r\n\t\tprint(\"Persona {} \\n\".format(i+1))\r\n\t\tnombre=input(\"Cual es tu nombre?: \")\r\n\t\tdata=int(input(\"En que año naciste, {}?: \".format(nombre)))\r\n\t\tmes= int(input(\"En que mes?(en número): \"))\r\n\t\tif mes> tiempo.tm_mon:\r\n\t\t\tprint(\"{}, tienes {} años ahora mismo\".format(nombre,tiempo.tm_year-data-1))\r\n\t\t\tprint(\"De hecho existes en este mundo desde hace {} segundos. \".format(((tiempo.tm_year-data-1)*8760*3600)+segundos_dia))\r\n\t\t\t\r\n\t\telse:\r\n\t\t\tprint(\"{}, tienes {} años ahora mismo\".format(nombre,tiempo.tm_year-data))\r\n\t\t\tprint(\"De hecho existes en este mundo desde hace {} segundos. \".format(((tiempo.tm_year-data)*8760*3600)+segundos_dia))\r\n\t\tprint(\"\\n---------------------------------------------------------------------------------------------\\n\")\t\r\n\r\naño_cumple()\r\n\r\n\r\n\r\n\"\"\"Ejercicio 7\r\nDefinir una tupla con 10 edades de personas. Imprimir la cantidad de personas con edades superiores a 20.\r\nPuedes variar el ejercicio para que sea el usuario quien ingrese las edades.\"\"\"\r\n\r\ndef tupla_edad(tupla):\r\n\tcount=0\r\n\tprint(\"\\n Las personas con una edad superior a 20 son: \\n\")\r\n\tfor i in range(len(tupla)):\r\n\t\tif tupla[i] > 20:\r\n\t\t\tprint(tupla[i])\r\n\t\t\tcount+=1\r\n\tprint(\"\\n El numero de personas con una edad superior a 20 son: \\n\")\r\n\treturn count\r\n\t\r\n\t\r\n\r\nedades=(10,30,54,32,12,76,98,45,20,65)\r\nprint(tupla_edad(edades))\r\n\r\n\t\t\r\n\"\"\" Ejercicio 8\r\nDefinir una lista con un conjunto de nombres, imprimir la cantidad de comienzan con la letra a.\r\nTambién se puede hacer elegir al usuario la letra a buscar. (Un poco mas emocionante) \"\"\"\r\n\r\n\r\n\r\ndef comienzo_nombre(nombres):\r\n\t\r\n\tfor i in range(len(nombres)):\r\n\t\tinicial(nombres[i])\r\n\r\n\t\r\ndef inicial(nom):\r\n\r\n\r\n\tfor j in range(1):\r\n\t\tif nom[0] == \"a\":\r\n\t\t\tprint(nom,\" empieza por a \\n\")\r\n\t\t\t\r\n\t\r\nnombre=['macario','lorenzo','anacardo','ana','laurentino','samael','andres']\r\ncomienzo_nombre(nombre)\r\n\r\n\r\n\"\"\" Ejercicio 10\r\nEscriba una función es_bisiesto() que determine si un año determinado es un año bisiesto.Un año bisiesto es divisible por 4, pero no por 100. 
También es divisible por 400\"\"\"\r\n\r\ndef bisiesto(año):\r\n\tif año %4 is 0:\r\n\t\tif año %100 is not 0:\r\n\t\t\tprint(\"es bisiesto \")\r\n\t\tif año %100 is 0 and año %400 is 0:\r\n\t\t\tprint(\"es bisiesto total, con el error gregoriano corregido \")\r\n\telse:\r\n\t\tprint(\"no es bisiesto\")\r\n\r\nano=int(input(\"Introduce un año: \"))\r\nbisiesto(ano)\t\r\n\r\n\r\n\r\n","sub_path":"ejericios_parte2.py","file_name":"ejericios_parte2.py","file_ext":"py","file_size_in_byte":4845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"242052054","text":"\"\"\"\n 给定一个字符串数组,将字母异位词组合在一起。字母异位词指字母相同,但排列不同的字符串。\n\n 示例:\n 输入: [\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"],\n 输出:\n [\n [\"ate\",\"eat\",\"tea\"],\n [\"nat\",\"tan\"],\n [\"bat\"]\n ]\n\n 说明:\n 所有输入均为小写字母。\n 不考虑答案输出的顺序。\n\"\"\"\nfrom collections import defaultdict\nfrom typing import List\n\n\nclass Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n res = defaultdict(list)\n for s in strs:\n count = [0] * 26\n for s_one in s:\n count[ord(s_one) - ord('a')] += 1\n key = tuple(count)\n res[key].append(s)\n return list(res.values())\n","sub_path":"algorithm/LeetCode_49_字母异位词分组.py","file_name":"LeetCode_49_字母异位词分组.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"326795305","text":"import random\n\n# #todo 1. Создать список, в котором каждый элемент – кортеж из двух чисел. Отсортировать данный список по\n# #todo убыванию вторых элементов кортежей.\n#\n#\n# def set_2():\n# sett_list = [(1, 0), (2, 4), (3, 2), (4, 9)]\n# print(sett_list)\n# # sett_list = [(i+1, i**2) for i in range(4)]\n#\n# s = sorted(sett_list, key=lambda i: i[1], reverse=True)\n# return s\n# print(set_2())\n#\n\n\n# # todo 2. Отсортируйте список слов по убыванию длины слова.\n#\n# d = ' и вот я дома дорогая марья ивановна'\n# dc = d.split(' ')\n# print(dc)\n# dss = sorted(dc, key=lambda i: len(i), reverse= True)\n# print(dss)\n#\n# # todo 3. Реализуйте пример замыкания (например, «инкрементатор»)\n# def ink_a(a):\n#\n# def ddf(x =6):\n#\n# return a + x\n# return ddf\n#\n# a = ink_a(5)\n#\n# a1 = ink_a(6)\n#\n# print(a(7))\n#\n\n\n# todo 4. Написать генератор, возвращающий по очереди все слова, входящие в предложение.\n# d = 'и вот я дома дорогая Раиса Владимировна'\n#\n# dc = d.split(' ')\n# # dc = d.strip('')\n# print(dc)\n#\n# def power(dc):\n# for i in dc:\n# yield i\n# d = power(dc)\n# df = iter(d)\n#\n# print(next(df))\n# print(next(df))\n# print(next(df))\n# print(next(df))\n# print(next(df))\n# print(next(df))\n# print(next(df))\n# если раскоментить сломается, превышена длинна строки\n# print(next(df))\n\n# todo . Написать генератор псевдо случайных чисел\n# a = [lambda x : x*random.randint() for n in range(5)]\n# b = iter(a)\n# print(next(b))\nimport time\n# s = int(time.time())\n#\n# def ran(x):\n# while True:\n# t = str((x *int(time.time()))**888)\n# t = t[-3::]\n# yield t\n\n\n\n\n# #todo . 
Генератор внутри задается какой-нибудь формулой, которая выдает «случайный» результат\n\n#\n# a = [lambda x : x*time.time() for _ in range(5)]\n# print(a)\n\n#todo 5.На вход генератору приходит seed – начальное значение, при одинаковых начальных значениях два генератора\n #todo будут выдавать одинаковые следующие значения\n\n#\n# def ran22(x):\n# s = 0\n# while True:\n# t = str((x *2*s*int(time.time()))**888)\n# t = str(int(t[-2::]) **228 - len('dfdff'))[-3::]\n# s+=1*int(time.time())\n# yield t\n# print(next(ran22(59)))\n# #\n# a = ran22(6)\n# print([next(a) for i in range(8)])\n\n#todo 6. Написать корутину, которая реализует бесконечную арифметическую прогрессию с возможностью перезапуска с\n# любого места (3, 4, 5, 6, send(30), 31, 32, 33, …)\ndef coroutine(func):\n def inner(*args, **kwargs):\n g = func(*args, **kwargs)\n g.send(None)\n return g\n return inner\n\n@coroutine\ndef sd():\n i = 0\n while True:\n i +=1\n y = 2 ** i\n\n x = yield y\n i = x\n\nd = sd()\n# d.send(None)\n\n\n'''\n\n7.Давайте вместе напишем декоратор, который после каждого вызова функции будет выводить в консоль информацию о том, \nсколько раз данная функция уже вызывалась.\nДля этого немного отвлечемся на PEP 232 – Function Attributes\n\n'''\n\n\ndef counter(fn):\n\tdef wrapper(*args, **kwargs):\n\t\tresult = fn(*args, **kwargs)\n\t\twrapper.count = wrapper.count + 1\n\t\tprint(\"Функция была вызвана {} раз\".format(wrapper.count))\n\t\treturn result\n\twrapper.count = 0\n\treturn wrapper\n\n\n@counter\ndef say_word(word):\n\tprint(word)\n\nsay_word(\"Hi\")\t# Hi! Функция была вызвана 1 раз\nsay_word(\"Hi\")\t# Hi! Функция была вызвана 2 раз\nsay_word(\"Hi\")\t# Hi! Функция была вызвана 3 раз\nsay_word(\"Hi\")\t# Hi! Функция была вызвана 4 раз\nsay_word(\"Hi\") \t# Hi! Функция была вызвана 5 раз\n\n\n\n\n\n#\n# def coroutine(func):\n# def inner(*args, **kwargs):\n# i = 0\n# func()\n# i+=1\n# print(i)\n#\n# return inner()\n#\n# @coroutine\n# def makebold():\n# d = 2+2\n#\n# return d\n#\n# makebold()\n# makebold()\n# makebold()\n\n\n'''\n8. Напишите декоратор, выводящий в консоль время, в которое функция была запущена.\nВам понадобится модуль datetime и функция datetime.now()\n'''\n\nimport time\nimport datetime\n# def coro(func):\n# def inner():\n# print(time.time())\n# print(func())\n# print(time.time())\n#\n# return inner()\n\n\ndef time_decor(func):\n def wrapper(*arg, **kwarg):\n print(datetime.datetime.now())\n\n a = func(*arg, **kwarg)\n\n return a\n\n return wrapper\n\n\n@time_decor\ndef funny(x):\n print(\"I am funny \", x)\n\n return 30\nprint(funny(4))\n\n'''\nНаписать следующие декораторы:\n9. декоратор, замеряющий время выполнения функции и выводящий информацию в консоль\n'''\n\n\ndef time_decor(func):\n def wrapper(*arg, **kwarg):\n t1 = datetime.datetime.now()\n\n print(t1)\n\n a = func(*arg, **kwarg)\n\n t2 = datetime.datetime.now()\n\n print(t2 - t1)\n\n return a\n\n return wrapper\n\n\n@time_decor\ndef funny(x):\n print(\"I am funny \", x)\n\n return 30\nprint(funny(4))\n\n\n'''\n10. декоратор, кэширующий результаты функции и при каждом вызове обращающийся сначала в кэш \nчтобы проверить, нет ли результата в уже посчитанных. 
Декоратор не должен зависеть от количества аргументов \nдекорируемой функции\n'''","sub_path":"DEV_PY_110/proba_2.py","file_name":"proba_2.py","file_ext":"py","file_size_in_byte":6332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"336372594","text":"objetivo = int(input('Escoge un numero: '))\n# Precision\nepsilon = 0.001\n# limite inferior\nbajo = 0.0\n# limite superior\nalto = max(1.0, objetivo)\n\nrespuesta = (alto + bajo) / 2\n\n# Derecha => alto\n# izquierda => bajo\n\n# MIsion disminuir a la mitad la busqueda\n\nwhile abs(respuesta ** 2 - objetivo) >= epsilon:\n print(f'bajo={bajo}, alto={alto}, respuesta={respuesta}')\n if respuesta ** 2 < objetivo:\n bajo = respuesta\n else:\n alto = respuesta\n # Divivir entre 2 el espacio de busqueda por ende solo la mitad\n respuesta = (alto + bajo) / 2\n\nprint(f'La raiz cuadrada de {objetivo} es {respuesta}')","sub_path":"enumeraciones/busqueda_binaria.py","file_name":"busqueda_binaria.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"305813389","text":"\"\"\"\nConfiguration of a brink project.\n\nThis is the basic public configuration.\nPrivate data is loaded from ~/.config/chevah-brink.ini\n\"\"\"\n\nSETUP = {\n 'product': {\n 'name': 'ChevahProduct',\n 'version': '0.0.1',\n 'version_major': '0',\n 'version_minor': '0',\n 'copyright_holder': 'Chevah Project',\n 'distributables': {}\n },\n 'python': {\n 'version': '2.5',\n },\n 'folders': {\n 'source': None,\n 'static': u'static',\n 'dist': u'dist',\n 'publish': u'publish',\n 'configuration': u'configuration',\n 'deps': u'deps',\n 'brink': u'brink',\n 'test_data': u'test_data',\n 'nsis': 'nsis'\n },\n 'repository': {\n 'name': None,\n 'github': 'NO GitHub URI defined',\n },\n 'buildbot': {\n 'vcs': 'git',\n 'builders_filter': None,\n },\n 'publish': {\n 'download_production_hostname': 'download.chevah.com',\n 'download_staging_hostname': 'staging.download.chevah.com',\n 'website_production_hostname': 'chevah.com',\n 'website_staging_hostname': 'staging.chevah.com'\n },\n 'pypi': {\n 'index_url': 'http://172.20.0.1:10042/simple',\n },\n 'scame': {\n 'scope': {\n 'include': [],\n 'exclude': [],\n },\n 'towncrier': {\n 'fragments_directory': '',\n 'excluded_fragments': ['readme', 'readme.rst'],\n },\n },\n 'website_package': 'chevah.website',\n 'test': {\n 'package': 'chevah.product.tests',\n # Module inside the test-package where elevated test are located.\n 'elevated': None,\n # List of nose arguments passed to all tests.\n 'nose_options': [],\n # URL for publishing the coverage reports to coverator.\n 'coverator_url': ''\n },\n }\n\nDIST_TYPE = {\n 'ZIP': 0,\n 'NSIS': 1,\n 'TAR_GZ': 2,\n 'NSIS_RENAMED': 3,\n 'TAR_GZ_LINK': 4,\n }\n\nDIST_EXTENSION = {\n DIST_TYPE['ZIP']: 'zip',\n DIST_TYPE['NSIS']: 'exe',\n DIST_TYPE['TAR_GZ']: 'tar.gz',\n DIST_TYPE['TAR_GZ_LINK']: 'tar.gz',\n DIST_TYPE['NSIS_RENAMED']: 'rename_to_exe'\n }\n","sub_path":"brink/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"128244063","text":"# Copyright (C) 2020 Zurich Instruments\n#\n# This software may be modified and distributed under the terms\n# of the MIT license. 
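The decorator exercise file embedded above (proba_2.py) ends with exercise 10 unimplemented: a caching decorator that must not depend on the decorated function's argument count. A minimal sketch under the assumption that all arguments are hashable (the `memoize` name is ours, not the exercise's):

```python
import functools

def memoize(fn):
    cache = {}

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        # Key on both positional and keyword arguments, so any signature works.
        key = (args, tuple(sorted(kwargs.items())))
        if key not in cache:
            cache[key] = fn(*args, **kwargs)
        return cache[key]

    return wrapper

@memoize
def slow_square(x):
    print("computing...")
    return x * x

slow_square(4)  # prints "computing...", returns 16
slow_square(4)  # served from the cache, no print
```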
See the LICENSE file for details.\n\nimport textwrap\nimport attr\nimport numpy as np\nfrom pathlib import Path\nimport deprecation\nimport logging\n\nfrom .sequence_commands import SequenceCommand\nfrom .utils import SequenceType, TriggerMode, Alignment\nfrom zhinst.toolkit.interface import DeviceTypes\nfrom zhinst.toolkit._version import version as __version__\n\n_logger = logging.getLogger(__name__)\n\n\ndef is_greater_equal(min_value):\n \"\"\"Check if the attribute value is greater than or equal to a minimum value.\n\n This validator can handle both lists and single element attributes. If it\n is a list, it checks if the element with the smallest value is greater than\n or equal to the specified minimum value.\n \"\"\"\n\n def compare(self, attribute, value):\n if type(value) is not list:\n value = [value]\n if np.min(value) < min_value:\n raise ValueError(f\"{attribute.name} cannot be smaller than {min_value}!\")\n\n return compare\n\n\ndef is_smaller_equal(max_value):\n \"\"\"Check if the attribute value is smaller than or equal to a maximum value.\n\n This validator can handle both lists and single element attributes. If it\n is a list, it checks if the element with the greatest value is smaller than\n or equal to the specified maximum value.\n \"\"\"\n\n def compare(self, attribute, value):\n if type(value) is not list:\n value = [value]\n if np.max(value) > max_value:\n raise ValueError(f\"{attribute.name} cannot be greater than {max_value}!\")\n\n return compare\n\n\ndef is_multiple(factor):\n \"\"\"Check if the attribute value is multiple of a certain factor.\n\n This validator is the most useful for checking if an attribute related\n to waveform length comply with the waveform granularity specification of\n an instrument.\n\n The validator can handle both lists and single element attributes. If it\n is a list, it checks if each element is multiple of the given factor.\n \"\"\"\n\n def compare(self, attribute, value):\n if type(value) is not list:\n value = [value]\n for i in value:\n if i % factor != 0:\n raise ValueError(f\"{attribute.name} must be multiple of {factor}!\")\n\n return compare\n\n\n@attr.s\nclass Sequence(object):\n \"\"\"Base class for an AWG sequence to be programmed on a :class:`AWGCore` .\n\n Attributes:\n period (double): Period in seconds at which the experiment is repeated.\n trigger_mode (str or :class:`TriggerMode` enum): The trigger mode of the\n sequence, i.e if the AWG Core is used to send out the triger signal\n (*'Send Triger'* or :class:`TriggerMode.SEND_TRIGGER`), to wait\n for an external trigger signal (*'Receive Triger'* or\n :class:`TriggerMode.RECEIVE_TRIGGER`) or to wait for an external\n signal to send out the triger signal (*'Send and Receive Triger'* or\n :class:`TriggerMode.SEND_AND_RECEIVE_TRIGGER`). (default:\n :class:`TriggerMode.NONE`)\n trigger_samples (int): The duration of the trigger signal sent out by\n the AWG Core. It is given in number of samples. (default: 32)\n repetitions (int): The number of repetitions of the experiment.\n alignment (str): The alignment of the played waveform with the trigger\n signal, i.e. if the waveform should start with the trigger (or the\n time origin `t=0` of the sequence). Waveforms can either *'Start\n with Trigger'* (:class:`Alignment.START_WITH_TRIGGER`) or *'End with\n Trigger'* (:class:`Alignment.END_WITH_TRIGGER`).\n dead_time (double): The `dead time` of a sequence is the time in seconds\n after the time origin of the sequence before the next trigger\n signal is sent / expected. 
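The validator factories above (`is_greater_equal`, `is_smaller_equal`, `is_multiple`) all follow the attrs validator protocol: a callable taking `(instance, attribute, value)` that raises on bad input. A self-contained illustration of the pattern (the `Pulse` class is hypothetical, not part of the toolkit):

```python
import attr

def is_greater_equal(min_value):
    # Factory: returns an attrs-style validator closed over min_value.
    def compare(self, attribute, value):
        if value < min_value:
            raise ValueError(f"{attribute.name} cannot be smaller than {min_value}!")
    return compare

@attr.s
class Pulse:
    length = attr.ib(default=16, validator=is_greater_equal(0))

Pulse(length=32)   # passes validation
try:
    Pulse(length=-1)
except ValueError as err:
    print(err)     # length cannot be smaller than 0!
```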
This time defines the maximum length of a\n waveform played after the time origin, otherwise triggers can be\n missed. (default: 5 us)\n trigger_delay (double): The `trigger delay` is an addittional delay in\n seconds that shifts the time origin `t=0` with respect to the\n trigger signal. (default: 0)\n latency (double): The `latency` is a time in seconds that compensated\n for different trigger latencies of different instruments. It works\n as a constant `trigger_delay`.\n latency_adjustment (int): In order to compensate for different trigger\n latencies of different instrument types, it is necessary for some\n instruments to wait for certain number of sequencer cycles after\n receiving the trigger. This way, it is possible to align the\n waveforms sent out from different instruments. The attribute\n `latency_adjustment` is an additional latency given as number of\n sequencer cycles that is used to increase the time an instrument\n waits after receiving the trigger signal. (default: 0)\n reset_phase (bool): A flag that specifies if the phase of the modulation\n oscillator should be reset to 0 for every repetition of the\n experiment before the waveform is played.\n\n \"\"\"\n\n target = attr.ib(\n default=DeviceTypes.HDAWG,\n validator=attr.validators.in_(\n [DeviceTypes.HDAWG, DeviceTypes.UHFQA, DeviceTypes.UHFLI]\n ),\n )\n clock_rate = attr.ib(default=2.4e9, validator=is_greater_equal(0))\n period = attr.ib(default=100e-6, validator=is_greater_equal(0))\n trigger_mode = attr.ib(\n default=TriggerMode.SEND_TRIGGER,\n converter=lambda m: TriggerMode.NONE if m == \"None\" else TriggerMode(m),\n )\n trigger_samples = attr.ib(\n default=32,\n validator=[is_greater_equal(32), is_multiple(16)],\n )\n repetitions = attr.ib(default=1)\n alignment = attr.ib(\n default=Alignment.END_WITH_TRIGGER, converter=lambda a: Alignment(a)\n )\n n_HW_loop = attr.ib(default=1, converter=int, validator=is_greater_equal(0))\n dead_time = attr.ib(default=5e-6, validator=is_greater_equal(0))\n trigger_delay = attr.ib(default=0)\n latency = attr.ib(default=160e-9, validator=is_greater_equal(0))\n latency_cycles = attr.ib(default=27, validator=is_greater_equal(0))\n latency_adjustment = attr.ib(default=0, validator=is_greater_equal(0))\n trigger_cmd_1 = attr.ib(default=\"//\")\n trigger_cmd_2 = attr.ib(default=\"//\")\n trigger_cmd_define = attr.ib(default=\"//\\n\")\n trigger_cmd_send = attr.ib(default=\"//\\n\")\n trigger_cmd_wait = attr.ib(default=\"//\\n\")\n trigger_cmd_latency = attr.ib(default=\"//\\n\")\n readout_cmd_trigger = attr.ib(default=\"//\\n\")\n osc_cmd_reset = attr.ib(default=\"//\\n\")\n wait_cycles = attr.ib(\n default=28500, validator=is_greater_equal(0)\n ) # 95 us by default\n dead_cycles = attr.ib(\n default=1500, validator=is_greater_equal(0)\n ) # 5 us by default\n wait_samples = attr.ib(\n default=228000, validator=is_greater_equal(0)\n ) # 95 us by default (Assuming HDAWG)\n dead_samples = attr.ib(\n default=12000, validator=is_greater_equal(0)\n ) # 5 us by default (Assuming HDAWG)\n reset_phase = attr.ib(default=False)\n\n def set(self, **settings):\n \"\"\"Sets attributes, updates related attributes and checks attributes.\"\"\"\n for key in settings:\n if hasattr(self, key):\n setattr(self, key, settings[key])\n self.update_params()\n self.check_attributes()\n\n def get(self):\n \"\"\"Updates and checks attributes, writes and returns the sequence program.\"\"\"\n self.update_params()\n self.check_attributes()\n self.write_sequence()\n return self.sequence\n\n def 
write_sequence(self):\n \"\"\"Create header for the sequencer program.\n\n The header displays the sequence type, trigger mode and alignment\n information of the program. Sequence type is temporarily selected as\n `None` here. It will be overwritten by the children classes depending\n on the actual sequence type.\n\n \"\"\"\n self.sequence = SequenceCommand.header_info(\n SequenceType.NONE, self.trigger_mode, self.alignment\n )\n\n def update_params(self):\n \"\"\"Update interrelated parameters.\"\"\"\n # Convert wait_time to number of samples\n self.wait_samples = self.time_to_samples(\n self.period - self.dead_time + self.trigger_delay\n )\n # Convert dead_time to number of samples\n self.dead_samples = self.time_to_samples(self.dead_time - self.trigger_delay)\n # Set the correct clock rate, trigger latency compensation\n # and QA trigger command depending on the device type\n if self.target in [DeviceTypes.HDAWG]:\n self.clock_rate = 2.4e9\n if self.trigger_mode in [TriggerMode.ZSYNC_TRIGGER]:\n # Default trigger latency for HDAWG with ZSync trigger\n # = 0 cycles\n self.latency_cycles = 0 + self.latency_adjustment\n else:\n # Default trigger latency for HDAWG with Master trigger\n # = 27 cycles\n self.latency_cycles = 27 + self.latency_adjustment\n # HDAWG has no quantum analyzer\n self.readout_cmd_trigger = SequenceCommand.comment_line()\n elif self.target in [DeviceTypes.UHFLI, DeviceTypes.UHFQA]:\n self.clock_rate = 1.8e9\n # Default trigger latency compensation for UHFQA = 0 cycles\n self.latency_cycles = 0 + self.latency_adjustment\n # UHFLI has no has quantum analyzer, only UHFQA has quantum analyzer\n if self.target in [DeviceTypes.UHFQA]:\n self.readout_cmd_trigger = SequenceCommand.readout_trigger()\n else:\n self.readout_cmd_trigger = SequenceCommand.comment_line()\n # Set the oscillator phase to 0 if the reset_phase option is on\n if self.reset_phase:\n self.osc_cmd_reset = SequenceCommand.reset_osc_phase()\n else:\n self.osc_cmd_reset = SequenceCommand.comment_line()\n # Set the trigger latency command depending on the `latency_cycles`\n if self.latency_cycles == 0:\n self.trigger_cmd_latency = SequenceCommand.comment_line()\n else:\n # strip '\\n' at the end and add an inline comment\n self.trigger_cmd_latency = (\n SequenceCommand.wait(self.latency_cycles).rstrip()\n + SequenceCommand.space()\n + SequenceCommand.inline_comment(\n f\"Wait to compensate for trigger latency\"\n )\n )\n # Set the trigger commands depending on the trigger mode\n if self.trigger_mode == TriggerMode.NONE:\n self.trigger_cmd_1 = SequenceCommand.comment_line()\n self.trigger_cmd_2 = SequenceCommand.comment_line()\n self.dead_cycles = self.time_to_cycles(self.dead_time)\n self.trigger_cmd_define = SequenceCommand.comment_line()\n self.trigger_cmd_send = SequenceCommand.comment_line()\n self.trigger_cmd_wait = SequenceCommand.comment_line()\n # No trigger latency compensation in TriggerMode.NONE\n self.trigger_cmd_latency = SequenceCommand.comment_line()\n elif self.trigger_mode == TriggerMode.SEND_AND_RECEIVE_TRIGGER:\n # Define a waveform to send out as trigger\n self.trigger_cmd_define = SequenceCommand.define_trigger(\n self.trigger_samples\n )\n # Wait for an external clock to send out the trigger signal\n # strip '\\n' at the end and add an inline comment\n self.trigger_cmd_send = (\n SequenceCommand.wait_dig_trigger(2, self.target).rstrip()\n + SequenceCommand.space()\n + SequenceCommand.inline_comment(\"Wait for external clock\")\n + SequenceCommand.play_trigger()\n )\n # Wait for self 
triggering\n # strip '\\n' at the end and add an inline comment\n self.trigger_cmd_wait = (\n SequenceCommand.wait_dig_trigger(1, self.target).rstrip()\n + SequenceCommand.space()\n + SequenceCommand.inline_comment(\"Wait for self trigger\")\n )\n elif self.trigger_mode == TriggerMode.SEND_TRIGGER:\n self.trigger_cmd_1 = SequenceCommand.trigger(1)\n self.trigger_cmd_2 = SequenceCommand.trigger(0)\n self.dead_cycles = self.time_to_cycles(self.dead_time)\n # Define a waveform to send out as trigger\n self.trigger_cmd_define = SequenceCommand.define_trigger(\n self.trigger_samples\n )\n # Send out the trigger signal\n self.trigger_cmd_send = (\n SequenceCommand.comment_line() + SequenceCommand.play_trigger()\n )\n # Wait for self triggering\n # strip '\\n' at the end and add an inline comment\n self.trigger_cmd_wait = (\n SequenceCommand.wait_dig_trigger(1, self.target).rstrip()\n + SequenceCommand.space()\n + SequenceCommand.inline_comment(\"Wait for self trigger\")\n )\n elif self.trigger_mode in [\n TriggerMode.EXTERNAL_TRIGGER,\n TriggerMode.RECEIVE_TRIGGER,\n ]:\n self.trigger_cmd_1 = SequenceCommand.wait_dig_trigger(1, self.target)\n self.trigger_cmd_2 = SequenceCommand.comment_line()\n self.dead_cycles = 0\n self.trigger_cmd_define = SequenceCommand.comment_line()\n self.trigger_cmd_send = (\n SequenceCommand.comment_line() + SequenceCommand.comment_line()\n )\n # Wait for external trigger\n self.trigger_cmd_wait = SequenceCommand.wait_dig_trigger(1, self.target)\n elif self.trigger_mode == TriggerMode.ZSYNC_TRIGGER:\n self.trigger_cmd_define = SequenceCommand.comment_line()\n self.trigger_cmd_send = SequenceCommand.comment_line()\n # Wait for ZSYNC trigger\n # strip '\\n' at the end and add an inline comment\n self.trigger_cmd_wait = (\n SequenceCommand.wait_zsync_trigger().rstrip()\n + SequenceCommand.space()\n + SequenceCommand.inline_comment(\"Wait for ZSYNC trigger\")\n )\n\n @deprecation.deprecated(\n deprecated_in=\"0.2.0\",\n current_version=__version__,\n details=\"Use the time_to_samples function instead\",\n )\n def time_to_cycles(self, time, wait_time=True):\n \"\"\"Helper method to convert time to FPGA clock cycles.\"\"\"\n if wait_time:\n return int(time * self.clock_rate / 8)\n else:\n return int(time * self.clock_rate)\n\n def time_to_samples(self, time):\n \"\"\"Helper method to convert time to number of samples.\"\"\"\n return round(time * self.clock_rate)\n\n def get_gauss_params(self, width, truncation):\n \"\"\"Calculates the attribute `gauss_params` from width and truncation.\n\n Arguments:\n width (double): width in seconds of the gaussian pulse\n truncation (double): the gaussian pulse shape will be truncated\n at `truncation * width`\n\n \"\"\"\n gauss_length = (\n self.time_to_cycles(2 * truncation * width, wait_time=False) // 16 * 16\n )\n gauss_pos = int(gauss_length / 2)\n gauss_width = self.time_to_cycles(width, wait_time=False)\n self.gauss_params = [gauss_length, gauss_pos, gauss_width]\n\n def check_attributes(self):\n \"\"\"Performs sanity checks on the sequence parameters.\"\"\"\n if (self.period - self.dead_time - self.latency + self.trigger_delay) < 0:\n raise ValueError(\"Wait time cannot be negative!\")\n\n def __setattr__(self, name, value) -> None:\n \"\"\"Call the validator when we set the field (by default it only runs on __init__)\"\"\"\n for attribute in [\n a for a in getattr(self.__class__, \"__attrs_attrs__\", []) if a.name == name\n ]:\n if attribute.type is not None:\n if isinstance(value, attribute.type) is False:\n raise TypeError(\n 
f\"{self.__class__.__name__}.{attribute.name} cannot set {value} because it is not a {attribute.type.__name__}\"\n )\n if attribute.converter is not None:\n value = attribute.converter(value)\n if attribute.validator is not None:\n attribute.validator(self, attribute, value)\n super().__setattr__(name, value)\n\n\n@attr.s\nclass PulseTrainSequence(Sequence):\n \"\"\"Sequence for playback of *pulse trains*.\n\n Initializes placeholders (`randomUniform(...)`) of the correct length for \n the waveforms in the queue of the AWG Core. The data of the waveform \n placeholders is then replaced in memory when uploading the waveform using \n `upload_waveforms()`. The waveforms are played sequentially within the main \n loop of the sequence program.\n\n As opposed to the \"Simple\" Sequence, the \"Pulse Train\" pays no attention to \n triggering, the defined period or waveform alignment. It just plays all \n queued waveforms directly after one another and repeats this *repetitions*\n times. \n\n >>> awg.set_sequence_params(sequence_type=\"Pulse Train\")\n >>> for amp in np.linspace(-1, 1, 20):\n >>> wave = amp * np.ones(800)\n >>> awg.queue_waveform(wave)\n >>> awg.compile_and_upload_waveforms()\n >>> ...\n \n Attributes:\n buffer_lengths (list): A list of integers with the required lengths of \n the waveform buffers. These values will be taken from the waveforms\n in the queue of the AWG Core.\n\n \"\"\"\n\n buffer_lengths = attr.ib(\n default=attr.Factory(list), validator=attr.validators.instance_of(list)\n )\n\n def write_sequence(self):\n self.sequence = SequenceCommand.header_comment(sequence_type=\"Pulse Train\")\n for i in range(self.n_HW_loop):\n self.sequence += SequenceCommand.init_buffer_indexed(\n self.buffer_lengths[i], i\n )\n self.sequence += SequenceCommand.trigger(0)\n self.sequence += SequenceCommand.repeat(self.repetitions)\n for i in range(self.n_HW_loop):\n self.sequence += SequenceCommand.count_waveform(i, self.n_HW_loop)\n self.sequence += SequenceCommand.play_wave_indexed(i)\n self.sequence += SequenceCommand.close_bracket()\n\n def update_params(self):\n super().update_params()\n if len(self.buffer_lengths) != self.n_HW_loop:\n self.n_HW_loop = len(self.buffer_lengths)\n\n\n@attr.s\nclass SimpleSequence(Sequence):\n \"\"\"Sequence for *simple* playback of waveform arrays.\n\n Initializes placeholders (`placeholder(...)`) of the correct length for\n the waveforms in the queue of the AWG Core. The data of the waveform\n placeholders is then replaced in memory when uploading the waveform using\n `upload_waveforms()`. The waveforms are played sequentially within the main\n loop of the sequence program.\n\n >>> awg.set_sequence_params(sequence_type=\"Simple\")\n >>> awg.queue_waveform(np.ones(800), np.ones(800))\n >>> awg.compile_and_upload_waveforms()\n >>> ...\n\n Attributes:\n buffer_lengths (list): A list of integers with the required lengths of\n the waveform buffers. These values will be taken from the waveforms\n in the queue of the AWG Core.\n delay_times (list): A list of delay times for each fo the individual\n waveform w.r.t. the time origin of the period. 
These values will be\n taken from the waveform queue of the AWG Core.\n\n \"\"\"\n\n buffer_lengths = attr.ib(default=[800], validator=attr.validators.instance_of(list))\n delay_times = attr.ib(default=[0])\n wait_samples_updated = attr.ib(default=[0])\n dead_samples_updated = attr.ib(default=[0])\n\n def write_sequence(self):\n # Call the method from parent class `Sequence` and then overwrite it\n super().write_sequence()\n # Update the sequence type information in the header\n self.sequence = SequenceCommand.replace_sequence_type(\n self.sequence, SequenceType.SIMPLE\n )\n # Loop over the waveforms and initialize placeholders\n self.sequence += SequenceCommand.inline_comment(\"Waveform definitions\")\n for i in range(self.n_HW_loop):\n self.sequence += SequenceCommand.init_buffer_indexed(\n self.buffer_lengths[i], i, self.target\n )\n # Define trigger waveform (depends on the trigger mode)\n self.sequence += self.trigger_cmd_define\n self.sequence += SequenceCommand.new_line()\n # Loop over the waveforms and assign indices\n for i in range(self.n_HW_loop):\n self.sequence += SequenceCommand.assign_wave_index(i)\n self.sequence += SequenceCommand.new_line()\n self.sequence += SequenceCommand.inline_comment(\"Trigger commands\")\n # Send trigger (depends on the trigger mode)\n self.sequence += self.trigger_cmd_send\n # Wait for external trigger (depends on the trigger mode)\n self.sequence += self.trigger_cmd_wait\n # Compensate for trigger latency differences (depends on the device type)\n self.sequence += self.trigger_cmd_latency\n self.sequence += SequenceCommand.new_line()\n self.sequence += SequenceCommand.inline_comment(\"Start main sequence\")\n # Start repeat loop in sequencer\n self.sequence += SequenceCommand.repeat(self.repetitions)\n # Loop over the waveforms\n for i in range(self.n_HW_loop):\n self.sequence += SequenceCommand.tab() + SequenceCommand.count_waveform(\n i, self.n_HW_loop\n )\n # Play zeros to wait before playing the waveform.\n # (depends on `period`, `dead_time` and alignment options)\n self.sequence += SequenceCommand.tab() + SequenceCommand.play_zero(\n self.wait_samples_updated[i], self.target\n )\n # Reset oscillator phase (depends on `reset_phase` option)\n self.sequence += SequenceCommand.tab() + self.osc_cmd_reset\n # Play the waveforms\n self.sequence += SequenceCommand.tab() + SequenceCommand.play_wave_indexed(\n i\n )\n # Trigger quantum analyzer (depends on the device type)\n self.sequence += SequenceCommand.tab() + self.readout_cmd_trigger\n # Play zeros to wait until end of period.\n # (depends on `dead_time` and alignment options)\n self.sequence += SequenceCommand.tab() + SequenceCommand.play_zero(\n self.dead_samples_updated[i], self.target\n )\n # Finish repeat loop\n self.sequence += SequenceCommand.close_bracket()\n\n def update_params(self):\n super().update_params()\n if len(self.buffer_lengths) != self.n_HW_loop:\n self.n_HW_loop = len(self.buffer_lengths)\n if len(self.buffer_lengths) < len(self.delay_times):\n self.delay_times = self.delay_times[: len(self.buffer_lengths)]\n if len(self.buffer_lengths) > len(self.delay_times):\n n = len(self.buffer_lengths) - len(self.delay_times)\n self.delay_times = np.append(self.delay_times, np.zeros(n))\n # Update the number of samples to wait before and after playing the waveform\n # according to the list of delay_times, buffer lengths and alignment option.\n self.wait_samples_updated = [self.wait_samples for i in range(self.n_HW_loop)]\n self.dead_samples_updated = [self.dead_samples for i in 
range(self.n_HW_loop)]\n for i in range(self.n_HW_loop):\n if self.alignment == Alignment.START_WITH_TRIGGER:\n self.wait_samples_updated[i] += self.delay_times[i]\n self.dead_samples_updated[i] -= (\n self.delay_times[i] + self.buffer_lengths[i]\n )\n elif self.alignment == Alignment.END_WITH_TRIGGER:\n self.wait_samples_updated[i] += (\n self.delay_times[i] - self.buffer_lengths[i]\n )\n self.dead_samples_updated[i] -= self.delay_times[i]\n\n def check_attributes(self):\n super().check_attributes()\n if len(self.buffer_lengths) > self.n_HW_loop:\n raise ValueError(\n \"Length of list buffer_lengths has to be equal to length of HW loop!\"\n )\n\n\n@attr.s\nclass TriggerSequence(Sequence):\n \"\"\"Predefined sequence for *Master Trigger*.\n\n This sequence does not play any waveforms but only sends out the\n trigger signal once at the start of the sequence program.\n The `trigger_mode` parameter must be chosen as *'Send Trigger'* or\n *'Send and Receive Trigger'*. Otherwise, it will automatically be\n overwritten to be *'Send Trigger'*. The trigger signal will be\n played on the AWG core output 1. However, this signal should still\n be manually assigned to the desired *Mark* output in DIO settings by\n selecting `Output 1 Marker 1`.\n\n >>> awg.set_sequence_params(\n >>> sequence_type=\"Trigger\",\n >>> period=50e-6,\n >>> repetitions=1e3,\n >>> )\n\n \"\"\"\n\n def write_sequence(self):\n # Call the method from parent class `Sequence` and then overwrite it\n super().write_sequence()\n # Update the sequence type information in the header\n self.sequence = SequenceCommand.replace_sequence_type(\n self.sequence, SequenceType.TRIGGER\n )\n # Define trigger waveform\n self.sequence += SequenceCommand.inline_comment(\"Trigger waveform definition\")\n self.sequence += self.trigger_cmd_define\n self.sequence += SequenceCommand.new_line()\n self.sequence += SequenceCommand.inline_comment(\"Trigger commands\")\n # Send trigger (depends on the trigger mode)\n self.sequence += self.trigger_cmd_send.rstrip() # strip '\\n' at the end\n\n def update_params(self):\n # Set the trigger mode to \"Send Trigger\" if the selected\n # trigger mode is not correct.\n if self.trigger_mode not in [\n TriggerMode.SEND_TRIGGER,\n TriggerMode.SEND_AND_RECEIVE_TRIGGER,\n ]:\n _logger.warning(\n f\"The selected trigger mode {self.trigger_mode.value} does not work \\n\"\n f\"with Master Trigger sequence. The trigger mode is set \\n\"\n f\"to {TriggerMode.SEND_TRIGGER.value}.\"\n )\n self.trigger_mode = TriggerMode.SEND_TRIGGER\n # Call the parent function to update all parameters that depend\n # on the trigger mode after overwiritng the trigger mode above.\n # Note that the parent class should not change the trigger mode.\n super().update_params()\n\n def check_attributes(self):\n super().check_attributes()\n\n\n@attr.s\nclass RabiSequence(Sequence):\n \"\"\"Predefined *Rabi Sequence*.\n\n This sequence plays a Gaussian pulse with width `pulse_width` and varies its\n amplitude. The values for the amplitude sweep are defined in the array\n parameter `pulse_amplitudes`. For each value in the array, one pulse of that\n amplitude is played in the main loop of the seuence program in the same\n order as in the array.\n\n >>> awg.set_sequence_params(\n >>> sequence_type=\"Rabi\",\n >>> pulse_width=50e-9,\n >>> pulse_amplitudes=np.linspace(0, 1.0, 101),\n >>> )\n\n Attributes:\n pulse_amplitudes (list): A list of pulse amplitudes for each point in\n the Rabi sequence. 
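`get_gauss_params` (defined in the base class above) stores the pulse as `[length, position, width]` in samples, with the length spanning `2 * truncation * width` rounded down to a multiple of 16. The envelope this describes, as a standalone numpy sketch (illustrative, not the toolkit's own waveform code):

```python
import numpy as np

def gauss_envelope(width, truncation, clock_rate=2.4e9):
    # Length in samples, rounded down to a multiple of 16 as in get_gauss_params.
    length = int(2 * truncation * width * clock_rate) // 16 * 16
    pos = length / 2            # centre of the pulse, in samples
    sigma = width * clock_rate  # width (sigma), in samples
    t = np.arange(length)
    return np.exp(-0.5 * ((t - pos) / sigma) ** 2)

envelope = gauss_envelope(width=50e-9, truncation=3)  # 720 samples at 2.4 GS/s
```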
The pulse amplitudes have to be within -1.0 and\n 1.0.\n pulse_width (double): The width of the gaussian pulse (sigma) in\n seconds.\n pulse_truncation (double): The truncation of the gaussian pulse as\n multiples of the width.\n\n \"\"\"\n\n pulse_amplitudes = attr.ib(\n default=[1.0],\n validator=[is_greater_equal(-1.0), is_smaller_equal(1.0)],\n )\n pulse_width = attr.ib(default=50e-9, validator=is_greater_equal(0))\n pulse_truncation = attr.ib(default=3, validator=is_greater_equal(0))\n\n def write_sequence(self):\n self.sequence = SequenceCommand.header_comment(sequence_type=\"Rabi\")\n self.sequence += SequenceCommand.init_gauss(self.gauss_params)\n self.sequence += self.trigger_cmd_2\n self.sequence += SequenceCommand.repeat(self.repetitions)\n for i, amp in enumerate(self.pulse_amplitudes):\n self.sequence += SequenceCommand.count_waveform(i, self.n_HW_loop)\n self.sequence += self.trigger_cmd_1\n if self.reset_phase:\n self.sequence += SequenceCommand.reset_osc_phase()\n self.sequence += SequenceCommand.wait(self.wait_cycles)\n self.sequence += self.trigger_cmd_2\n self.sequence += SequenceCommand.play_wave_scaled(amp, amp)\n self.sequence += SequenceCommand.wait_wave()\n self.sequence += SequenceCommand.wait(self.dead_cycles)\n self.sequence += SequenceCommand.close_bracket()\n\n def update_params(self):\n super().update_params()\n self.n_HW_loop = len(self.pulse_amplitudes)\n self.get_gauss_params(self.pulse_width, self.pulse_truncation)\n if self.trigger_mode == TriggerMode.NONE:\n self.wait_cycles = self.time_to_cycles(self.period - self.dead_time)\n self.dead_cycles = (\n self.time_to_cycles(self.dead_time) - self.gauss_params[0] / 8\n )\n elif self.trigger_mode == TriggerMode.SEND_TRIGGER:\n self.wait_cycles = self.time_to_cycles(self.period - self.dead_time)\n if self.alignment == Alignment.END_WITH_TRIGGER:\n self.wait_cycles -= self.gauss_params[0] / 8\n elif self.alignment == Alignment.START_WITH_TRIGGER:\n self.dead_cycles -= self.gauss_params[0] / 8\n elif self.trigger_mode in [\n TriggerMode.EXTERNAL_TRIGGER,\n TriggerMode.RECEIVE_TRIGGER,\n ]:\n self.wait_cycles = self.time_to_cycles(\n self.period - self.dead_time - self.latency + self.trigger_delay\n )\n if self.alignment == Alignment.END_WITH_TRIGGER:\n self.wait_cycles -= self.gauss_params[0] / 8\n elif self.alignment == Alignment.START_WITH_TRIGGER:\n self.dead_cycles = 0\n\n def check_attributes(self):\n super().check_attributes()\n if (\n self.period - self.dead_time - 2 * self.pulse_width * self.pulse_truncation\n ) < 0:\n raise ValueError(\"Wait time cannot be negative!\")\n if self.n_HW_loop < len(self.pulse_amplitudes):\n raise ValueError(\n \"Length of hardware loop too long for number of specified amplitudes!\"\n )\n\n\n@attr.s\nclass T1Sequence(Sequence):\n \"\"\"Predefined *T1 Sequence*.\n\n This sequence plays a Gaussian pulse with width `pulse_width` and amplitude\n `pulse_amplitude`. The shift of the waveform with respect to the period's\n time origin `t=0` is defined in the array parameter `time_delays`. For each\n value in the array, one pulse is shifted by the given value (in seconds)\n forward in time is played in the main loop of the seuence program.\n\n >>> awg.set_sequence_params(\n >>> sequence_type=\"T1\",\n >>> pulse_amplitude=0.876,\n >>> pulse_width=50e-9,\n >>> delay_times=np.linspace(0.1e-6, 10e-6, 100),\n >>> )\n\n Attributes:\n pulse_amplitude (double): The amplitude of the Gaussian pulse\n (pi-pulse). 
Must be between -1.0 and 1.0.\n pulse_width (double): The width of the gaussian pulse (sigma) in\n seconds.\n pulse_truncation (double): The truncation of the gaussian pulse as\n multiples of the width.\n delay_times (array): The time shifts in seconds of the waveforms forward\n in time with respect to the period's time origin `t=0`.\n\n \"\"\"\n\n pulse_amplitude = attr.ib(\n default=1,\n validator=[is_greater_equal(-1.0), is_smaller_equal(1.0)],\n )\n pulse_width = attr.ib(default=50e-9, validator=is_greater_equal(0))\n pulse_truncation = attr.ib(default=3, validator=is_greater_equal(0))\n delay_times = attr.ib(default=[1e-6])\n\n def write_sequence(self):\n self.sequence = SequenceCommand.header_comment(sequence_type=\"T1\")\n self.sequence += SequenceCommand.init_gauss_scaled(\n self.pulse_amplitude, self.gauss_params\n )\n self.sequence += self.trigger_cmd_2\n self.sequence += SequenceCommand.repeat(self.repetitions)\n for i, t in enumerate([self.time_to_cycles(t) for t in (self.delay_times)]):\n self.sequence += SequenceCommand.count_waveform(i, self.n_HW_loop)\n self.sequence += self.trigger_cmd_1\n if self.reset_phase:\n self.sequence += SequenceCommand.reset_osc_phase()\n self.sequence += SequenceCommand.wait(self.wait_cycles - t)\n self.sequence += self.trigger_cmd_2\n self.sequence += SequenceCommand.play_wave()\n self.sequence += SequenceCommand.wait_wave()\n self.sequence += SequenceCommand.wait(self.dead_cycles + t)\n self.sequence += SequenceCommand.close_bracket()\n\n def update_params(self):\n super().update_params()\n self.n_HW_loop = len(self.delay_times)\n self.get_gauss_params(self.pulse_width, self.pulse_truncation)\n if self.trigger_mode in [TriggerMode.NONE, TriggerMode.SEND_TRIGGER]:\n self.wait_cycles = self.time_to_cycles(self.period - self.dead_time)\n elif self.trigger_mode in [\n TriggerMode.EXTERNAL_TRIGGER,\n TriggerMode.RECEIVE_TRIGGER,\n ]:\n self.wait_cycles = self.time_to_cycles(\n self.period - self.dead_time - self.latency + self.trigger_delay\n )\n\n def check_attributes(self):\n super().check_attributes()\n if (self.period - self.dead_time - self.gauss_params[0] / self.clock_rate) < 0:\n raise ValueError(\"Wait time cannot be negative!\")\n if self.n_HW_loop > len(self.delay_times):\n raise ValueError(\n \"Length of hardware loop too long for number of specified delay times!\"\n )\n\n\n@attr.s\nclass T2Sequence(T1Sequence):\n \"\"\"Predefined *T2 Ramsey* sequence.\n\n This sequence plays *two* Gaussian pulses with width `pulse_width` and\n amplitude 1/2 * `pulse_amplitude`. The shift between the waveforms is defined\n in the array parameter `time_delays`. For each value in the array, the first\n pulse is shifted forward in time by the given value (in seconds) before the\n second pulse is played.\n\n >>> awg.set_sequence_params(\n >>> sequence_type=\"T1\",\n >>> pulse_amplitude=0.876,\n >>> pulse_width=50e-9,\n >>> delay_times=np.linspace(0.1e-6, 10e-6, 100),\n >>> )\n\n Attributes:\n pulse_amplitude (double): Twice the amplitude of the Gaussian pulse\n (pi-half pulse). 
Must be between -1.0 and 1.0.\n pulse_width (double): The width of the gaussian pulse (sigma) in\n seconds.\n pulse_truncation (double): The truncation of the gaussian pulse as\n multiples of the width.\n delay_times (array): The time shifts in seconds of the waveforms forward\n in time with respect to the period's time origin `t=0`.\n\n \"\"\"\n\n def write_sequence(self):\n self.sequence = SequenceCommand.header_comment(sequence_type=\"T2* (Ramsey)\")\n self.sequence += SequenceCommand.init_gauss_scaled(\n 0.5 * self.pulse_amplitude, self.gauss_params\n )\n self.sequence += self.trigger_cmd_2\n self.sequence += SequenceCommand.repeat(self.repetitions)\n for i, t in enumerate([self.time_to_cycles(t) for t in (self.delay_times)]):\n self.sequence += SequenceCommand.count_waveform(i, self.n_HW_loop)\n self.sequence += self.trigger_cmd_1\n if self.reset_phase:\n self.sequence += SequenceCommand.reset_osc_phase()\n self.sequence += SequenceCommand.wait(self.wait_cycles - t)\n self.sequence += self.trigger_cmd_2\n self.sequence += SequenceCommand.play_wave()\n if t > 3:\n self.sequence += SequenceCommand.wait(\n t - 3\n ) # -3 to subtract additional cycles of playWave()\n else:\n self.sequence += SequenceCommand.wait(t)\n self.sequence += SequenceCommand.play_wave()\n self.sequence += SequenceCommand.wait_wave()\n self.sequence += SequenceCommand.wait(self.dead_cycles)\n self.sequence += SequenceCommand.close_bracket()\n\n\n@attr.s\nclass ReadoutSequence(Sequence):\n \"\"\"Predefined sequence for *Multiplexed Qubit Readout*.\n\n The *Readout* sequence is designed for multiplexed qubit readout. It is made\n to work together with the `Readout Channels` of the *UHFQA*. The sequence\n generates a readout pulse as the sum of readout tones at different readout\n frequencies. The readout frequencies are given in the list parameter\n `readout_frequencies`, their amplitudes in `readout_amplitudes`. If the\n *Readout* sequence is configured, the *integration mode* is automatically\n set to *Standard* (weighted integration).\n\n For the *UHFQA* the values for *readout frequencies* and *readout\n amplitudes* can be taken from the respective *Readout Channel*. If the AWG\n Core is configured to use the *Readout* sequence, upon compilation the\n sequence takes the values from *all enabled channels*. This ensures that for\n all channels for which a readout tone is generated, the weighted integration\n is also a demodulation at that readout frequency. 
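As the attribute description below notes, each tone's amplitude is divided by the number of tones so the summed pulse stays in range. A standalone numpy sketch of such a multiplexed readout pulse (illustrative; the sequencer itself builds the waveform via `SequenceCommand.init_readout_pulse`):

```python
import numpy as np

def multiplexed_readout(freqs, amps, phases_deg, length, clock_rate=1.8e9):
    # Sample count rounded down to a multiple of 16, matching the sequence code.
    n = int(length * clock_rate) // 16 * 16
    t = np.arange(n) / clock_rate
    wave = np.zeros(n)
    for f, a, p in zip(freqs, amps, phases_deg):
        # Each tone is scaled by 1/len(freqs), mirroring the docstring's note.
        wave += (a / len(freqs)) * np.sin(2 * np.pi * f * t + np.deg2rad(p))
    return wave

pulse = multiplexed_readout([34e6, 56e6], [0.4, 0.5], [0, 0], length=2e-6)
```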
The transfer of the\n corresponding values from channels to the sequence program is done before\n compilation or manually using `update_readout_params()`.\n\n >>> frequencies = [34e6, 56e6, 78e6, 90e6]\n >>> amplitudes = [0.4, 0.5, 0.6, 0.7]\n >>> for i, ch in enumerate(uhfqa.channels[:4]):\n >>> ch.enable()\n >>> ch.readout_frequency(frequencies[i])\n >>> ch.readout_amplitude(amplitudes[i])\n >>>\n >>> uhfqa.awg.set_sequence_params(\n >>> sequence_type=\"Readout\",\n >>> readout_length=1e-6,\n >>> )\n >>> uhfqa.awg.update_readout_params()\n >>> uhfqa.awg\n qa: \n parent : \n index : 0\n sequence:\n type: Readout\n ('target', )\n ('clock_rate', 1800000000.0)\n ('period', 0.0001)\n ('trigger_mode', 'None')\n ('repetitions', 1)\n ('alignment', 'End with Trigger')\n ...\n ('readout_length', 2e-06)\n ('readout_amplitudes', [0.4, 0.5, 0.6, 0.7])\n ('readout_frequencies', [34000000.0, 56000000.0, 78000000.0, 90000000.0])\n ('phase_shifts', [0, 0, 0, 0])\n\n Attributes:\n readout_length (double): The duration in seconds of the readout pulse.\n Note that the maximum integration time for weighted integration is\n 4096 samples or roughly 2.3 us. (default: 2 us)\n readout_freqencies (list): A list of readout frequencies in Hz. These\n values are typically taken from the *Readout Channels* of the\n *UHFQA*.\n readout_amplitudes (list): A list of readout amplitudes (-1.0 to 1.0).\n These values are typically taken from the *Readout Channels* of the\n *UHFQA*. Note that the amplitude of each tone is always divided by\n the number of tones.\n phase_shifts (list): A list of additional phase shifts (in degrees)\n between the generated I and Q quadratures of each individual readout\n tone.\n\n \"\"\"\n\n readout_length = attr.ib(default=2e-6, validator=is_greater_equal(0))\n readout_amplitudes = attr.ib(default=[1])\n readout_frequencies = attr.ib(default=[100e6])\n phase_shifts = attr.ib(default=[0])\n\n def write_sequence(self):\n self.sequence = SequenceCommand.header_comment(sequence_type=\"Readout\")\n length = self.time_to_cycles(self.readout_length, wait_time=False) // 16 * 16\n self.sequence += SequenceCommand.init_readout_pulse(\n length,\n self.readout_amplitudes,\n self.readout_frequencies,\n self.phase_shifts,\n clk_rate=self.clock_rate,\n )\n self.sequence += SequenceCommand.trigger(0)\n self.sequence += SequenceCommand.repeat(self.repetitions)\n self.sequence += self.trigger_cmd_1\n self.sequence += SequenceCommand.wait(self.wait_cycles)\n self.sequence += self.trigger_cmd_2\n if self.target == DeviceTypes.UHFQA:\n self.sequence += SequenceCommand.readout_trigger()\n self.sequence += SequenceCommand.play_wave()\n self.sequence += SequenceCommand.wait_wave()\n self.sequence += SequenceCommand.wait(self.dead_cycles)\n self.sequence += SequenceCommand.close_bracket()\n\n def update_params(self):\n super().update_params()\n temp = self.period - self.dead_time\n if self.alignment == Alignment.END_WITH_TRIGGER:\n temp -= self.readout_length\n elif self.alignment == Alignment.START_WITH_TRIGGER:\n self.dead_cycles = self.time_to_cycles(self.dead_time - self.readout_length)\n if self.trigger_mode == TriggerMode.NONE:\n self.wait_cycles = self.time_to_cycles(temp)\n elif self.trigger_mode == TriggerMode.SEND_TRIGGER:\n self.wait_cycles = self.time_to_cycles(temp)\n elif self.trigger_mode in [\n TriggerMode.EXTERNAL_TRIGGER,\n TriggerMode.RECEIVE_TRIGGER,\n ]:\n self.wait_cycles = self.time_to_cycles(\n temp - self.latency + self.trigger_delay\n )\n len_f = len(self.readout_frequencies)\n len_a = 
len(self.readout_amplitudes)\n len_p = len(self.phase_shifts)\n if len_a < len_f:\n self.readout_amplitudes += [1] * (len_f - len_a)\n if len_a > len_f:\n self.readout_amplitudes = self.readout_amplitudes[:len_f]\n if len_p < len_f:\n self.phase_shifts += [0] * (len_f - len_p)\n if len_p > len_f:\n self.phase_shifts = self.phase_shifts[:len_f]\n\n\n@attr.s\nclass PulsedSpectroscopySequence(Sequence):\n \"\"\"Predefined Sequence for Pulsed Spectroscopy.\n\n This sequence plays a rectangular pulse of duration `pulse_length`\n (in seconds). When this sequence is configured, the AWG output\n modulation of the *UHFQA* is enabled and the two output channels are\n modulated with the *sine* and *cosine* of the internal oscillator.\n The oscillators frequency can be set with the *Parameter*\n `uhfqa.nodetree.osc.freq`.\n\n In this sequence the sine generators are enabled, so the oscillator\n runs continouosly. Therefore, it is required that the oscillator\n phase is set to zero with the `resetOscPhase` instruction before\n playing the pulse. Therefore, `reset_phase` parameter is set to\n `True`.\n\n Similarly, the *integration mode* of the *UHFQA* is set to\n *Spectroscopy* to demodulate the input signals with the *sine* and\n *cosine* of the same internal oscillator. Note that when modulating\n the AWG output, the value for *integration time* has to be set to at\n least as long as the *pulse duration* of the modulated pulse.\n\n\n >>> awg.set_sequence_params(\n >>> sequence_type=\"Pulsed Spectroscopy\",\n >>> trigger_mode=\"Receive Trigger\",\n >>> pulse_length=5e-6,\n >>> pulse_amplitude=0.567,\n >>> )\n\n Attributes:\n pulse_length (double): The duration of the spectroscopy pulse in\n seconds.\n pulse_amplitude (double): The amplitude of the generated\n rectangular pulse.\n\n \"\"\"\n\n pulse_length = attr.ib(default=2e-6, validator=is_greater_equal(0))\n pulse_amplitude = attr.ib(default=1)\n pulse_samples = attr.ib(default=3600, validator=is_greater_equal(0))\n wait_samples_updated = attr.ib(default=0, validator=is_greater_equal(0))\n dead_samples_updated = attr.ib(default=0, validator=is_greater_equal(0))\n\n def write_sequence(self):\n # Call the method from parent class `Sequence` and then\n # overwrite it\n super().write_sequence()\n # Update the sequence type information in the header\n self.sequence = SequenceCommand.replace_sequence_type(\n self.sequence, SequenceType.PULSED_SPEC\n )\n self.sequence += SequenceCommand.inline_comment(\"Waveform definitions\")\n # Define a square pulse of specified length\n self.sequence += SequenceCommand.init_ones(\n self.pulse_amplitude, self.pulse_samples\n )\n self.sequence += SequenceCommand.inline_comment(\"Trigger commands\")\n # Wait for external trigger (depends on the trigger mode)\n self.sequence += self.trigger_cmd_wait\n # Compensate for trigger latency differences\n self.sequence += self.trigger_cmd_latency\n self.sequence += SequenceCommand.new_line()\n self.sequence += SequenceCommand.inline_comment(\"Start main sequence\")\n # Start repeat loop in sequencer\n self.sequence += SequenceCommand.repeat(self.repetitions)\n # Play zeros to wait before playing the waveform (depends on\n # period, dead_time and alignment setting).\n self.sequence += SequenceCommand.tab() + SequenceCommand.play_zero(\n self.wait_samples_updated, self.target\n )\n # Reset oscillator phase\n self.sequence += SequenceCommand.tab() + self.osc_cmd_reset\n # Play the waveforms\n self.sequence += SequenceCommand.tab() + SequenceCommand.play_wave()\n # Trigger quantum 
analyzer\n self.sequence += SequenceCommand.tab() + self.readout_cmd_trigger\n # Play zeros to wait until end of period.\n self.sequence += SequenceCommand.tab() + SequenceCommand.play_zero(\n self.dead_samples_updated, self.target\n )\n # Finish repeat loop\n self.sequence += SequenceCommand.close_bracket()\n\n def update_params(self):\n # Phase of the modulation oscillator must be reset to 0 in this\n # sequence type, overwriting user preference\n self.reset_phase = True\n # Call the parent function to update all parameters including\n # the ones that depend on the `reset_phase` option. Note that\n # the parent class should not change `reset_phase` setting.\n super().update_params()\n # Convert pulse length to number of samples. Use floor division\n # operator `//` and round down to greatest multiple of 8.\n self.pulse_samples = self.time_to_samples(self.pulse_length) // 8 * 8\n # Update the number of samples to wait before and after playing\n # the pulse according to the alignment and pulse length.\n if self.alignment == Alignment.END_WITH_TRIGGER:\n self.wait_samples_updated = self.wait_samples - self.pulse_samples\n self.dead_samples_updated = self.dead_samples\n elif self.alignment == Alignment.START_WITH_TRIGGER:\n self.wait_samples_updated = self.wait_samples\n self.dead_samples_updated = self.dead_samples - self.pulse_samples\n\n def check_attributes(self):\n super().check_attributes()\n if self.alignment == Alignment.END_WITH_TRIGGER:\n if (\n self.period - self.dead_time + self.trigger_delay - self.pulse_length\n ) < 0:\n raise ValueError(\"Wait time cannot be shorter than pulse length!\")\n elif self.alignment == Alignment.START_WITH_TRIGGER:\n if (self.dead_time - self.trigger_delay - self.pulse_length) < 0:\n raise ValueError(\"Dead time cannot be shorter than pulse length!\")\n\n\n@attr.s\nclass CWSpectroscopySequence(Sequence):\n \"\"\"Predefined sequence for Continuous-Wave Spectroscopy.\n\n The sequence configures the direct output of the oscillator signal. There\n are no actual waveforms payed within the seuqence program, however, the data\n acquisition of the QA Results is triggered.\n\n \"\"\"\n\n def write_sequence(self):\n self.sequence = SequenceCommand.header_comment(sequence_type=\"CW Spectroscopy\")\n self.sequence += SequenceCommand.repeat(self.repetitions)\n self.sequence += self.trigger_cmd_1\n self.sequence += SequenceCommand.wait(self.wait_cycles)\n self.sequence += self.trigger_cmd_2\n self.sequence += SequenceCommand.readout_trigger()\n self.sequence += SequenceCommand.wait(self.dead_cycles)\n self.sequence += SequenceCommand.close_bracket()\n\n def update_params(self):\n super().update_params()\n if self.trigger_mode == TriggerMode.NONE:\n self.wait_cycles = self.time_to_cycles(self.period - self.dead_time)\n elif self.trigger_mode == TriggerMode.SEND_TRIGGER:\n self.wait_cycles = self.time_to_cycles(self.period - self.dead_time)\n elif self.trigger_mode in [\n TriggerMode.EXTERNAL_TRIGGER,\n TriggerMode.RECEIVE_TRIGGER,\n ]:\n self.wait_cycles = self.time_to_cycles(\n self.period - self.dead_time - self.latency + self.trigger_delay\n )\n\n\n@attr.s\nclass CustomSequence(Sequence):\n \"\"\"A *Custom Sequence* for compiling an existing `.seqC` program.\n\n The *Custom Sequence* allows the user to specify the file path to an\n existing `.seqC` program. 
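Placeholder substitution here is a plain string replace. Note a small inconsistency: `update_params` further down substitutes 1-based tokens (`f"$param{i+1}$"`), while the docstring example shows 0-based names like `$param0$`. A sketch of the 1-based behaviour actually implemented:

```python
from pathlib import Path

def fill_placeholders(path, custom_params):
    program = Path(path).read_text()
    for i, value in enumerate(custom_params):
        # custom_params[0] fills $param1$, custom_params[1] fills $param2$, ...
        program = program.replace(f"$param{i + 1}$", str(value))
    return program
```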
It needs to be located in the folder\n *\".../Zurich Instruments/LabOne/WebServer/awg/src\"*.\n\n Additionally, the *Custom Sequence* gives the user the ability to define\n variable placeholders in their `.seqC` program. So parameter `custom_params`\n expects a list of values that replace placeholders in the program. The\n placeholders are specified as special in the format `\"$param{i}$\"` where `i`\n is the index of the value in the *custom_params* list.\n\n >>> awg.set_sequence_params(\n >>> sequence_type=\"Custom\",\n >>> path=\"...\\Zurich Instruments\\LabOne\\WebServer\\awg\\src\\myProgram.seqC\",\n >>> custom_params=[1000, 99, 1],\n >>> )\n\n If the specified *'myProgram.seqC'* sequence program has placeholders\n `\"$param0$\"`, `\"$param1$\"`, `\"$param2$\"`, they will be replaced by `\"1000\"`,\n `\"99\"`, `\"1\"`.\n\n Attributes:\n path (str): The file path to a preexisting `.seqC` program.\n custom_params (list): A list of parameter values to replace placeholders\n in the program.\n\n \"\"\"\n\n path = attr.ib(default=\"\")\n program = attr.ib(default=\"\")\n custom_params = attr.ib(default=[])\n\n def write_sequence(self):\n self.sequence = SequenceCommand.header_comment(sequence_type=\"Custom\")\n self.sequence += f\"// from file: {self.path}\\n\\n\"\n self.sequence += self.program\n\n def update_params(self):\n super().update_params()\n if self.path:\n self.program = Path(self.path).read_text()\n for i, p in enumerate(self.custom_params):\n self.program = self.program.replace(f\"$param{i+1}$\", str(p))\n\n def check_attributes(self):\n if self.path:\n p = Path(self.path)\n if p.suffix != \".seqc\":\n raise ValueError(\"Specified file is not a .seqc file!\")\n","sub_path":"src/zhinst/toolkit/helpers/sequences.py","file_name":"sequences.py","file_ext":"py","file_size_in_byte":52365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"115376026","text":"import urllib.request\nimport urllib.parse\nfrom html.parser import HTMLParser\nimport os.path\nimport re\nimport sys\n\ndef printSafeEncoding(s):\n\tprint(str(s).encode(sys.stdout.encoding, \"replace\").decode(sys.stdout.encoding))\n\nclass MainPageParser(HTMLParser):\n\tinsideLetters = False\n\tletterLinks = []\n\n\tdef handle_starttag(self, tag, attr):\n\t\tif (tag == 'div' and ('id', 'letters') in attr): self.insideLetters = True\n\t\telif (self.insideLetters and tag == 'a'):\n\t\t\tattrDict = dict(attr)\n\t\t\tif ('href' in attrDict):\n\t\t\t\tself.letterLinks.append(attrDict['href'])\n\n\tdef handle_endtag(self, tag):\n\t\tif (tag == 'div'): self.insideLetters = False\n\nclass SingleLetterPageParser(HTMLParser):\n\tinsideRanges = False\n\trangesLinks = []\n\n\tdef handle_starttag(self, tag, attr):\n\t\tif (tag == 'div' and ('id', 'ranges') in attr): self.insideRanges = True\n\t\telif (self.insideRanges and tag == 'option'):\n\t\t\tattrDict = dict(attr)\n\t\t\tif ('value' in attrDict):\n\t\t\t\tself.rangesLinks.append(attrDict['value'])\n\n\tdef handle_endtag(self, tag):\n\t\tif (tag == 'div'): self.insideRanges = False\n\n\nclass WordListParser(HTMLParser):\n\tinsideDiv = False\n\twords = []\n\n\tdef handle_starttag(self, tag, attr):\n\t\t#printSafeEncoding(tag)\n\t\t#printSafeEncoding(attr)\n\n\t\t#if (tag == 'div'): print(attr)\n\n\t\tif (tag == 'div' and ('id', 'lexems') in attr):\n\t\t\tself.insideDiv = True\n\t\telif (self.insideDiv and tag == 'a'):\n\t\t\tattrDict = dict(attr)\n\t\t\tif ('name' in attrDict):\n\t\t\t\tself.words.append(attrDict['name'])\n\n\tdef 
handle_endtag(self, tag):\n\t\tif (tag == 'div'): self.insideDiv = False\n\n\nmainPageParser = MainPageParser()\nletterParser = SingleLetterPageParser()\nwordParser = WordListParser()\n\nif not os.path.isfile(\"recnik-makinfo.txt\"):\n\turlParts = urllib.parse.urlparse(\"http://makedonski.info/\")\n\turlParts = urlParts._replace(path=urllib.parse.quote(urlParts.path))\n\turl = urllib.parse.urlunparse(urlParts)\n\theaders = {\"User-Agent\":\"Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11\"}\n\trequest = urllib.request.Request(url, headers=headers)\n\tresponse = urllib.request.urlopen(request)\n\tdata = response.read()\n\thtmltext = data.decode('utf-8')\n\n\tmainPageParser.feed(htmltext)\n\n\tfor l in mainPageParser.letterLinks:\n\t\tprintSafeEncoding(l)\n\t\turlParts = urlParts._replace(path=urllib.parse.quote(l))\n\t\turl = urllib.parse.urlunparse(urlParts)\n\t\trequest = urllib.request.Request(url, headers=headers)\n\t\tresponse = urllib.request.urlopen(request)\n\t\tdata = response.read()\n\t\thtmltext = data.decode('utf-8')\n\t\tletterParser.feed(htmltext)\n\n\tfor l in letterParser.rangesLinks:\n\t\tprintSafeEncoding(l)\n\t\turlParts = urlParts._replace(path=urllib.parse.quote(l))\n\t\turl = urllib.parse.urlunparse(urlParts)\n\t\trequest = urllib.request.Request(url, headers=headers)\n\t\tresponse = urllib.request.urlopen(request)\n\t\tdata = response.read()\n\t\thtmltext = data.decode('utf-8')\n\t\twordParser.feed(htmltext)\n\n\toutfile = open(\"recnik-makinfo.txt\", \"w\", encoding=\"utf-8\")\n\toutfile.writelines([\"%s\\n\" % x for x in wordParser.words])\n\toutfile.close()\nelse:\n\tinfile = open(\"recnik-makinfo.txt\", \"r\", encoding=\"utf-8\")\n\twordParser.words = infile.read().splitlines()\n\tinfile.close()\n\n\n#from operator import itemgetter\n#from itertools import groupby\n#wordParser.words.sort()\n#words2 = list(map(itemgetter(0), groupby(wordParser.words)))\nwords = sorted(set(wordParser.words))\ndel wordParser.words[:]\nwordGroups = {re.search(r\"(?<=/).+$\", x).group() for x in words}\n\nwordGroupMap = {\n# https://mk.wikipedia.org/wiki/Морфологија_на_македонскиот_јазик\n# inflected words\n# nouns\n'м'\t\t\t: 'М',\n'ж'\t\t\t: 'Ж',\n'ср'\t\t: 'С',\n'само мн'\t: 'О',\n'име'\t\t: 'И', # uninflected\n\n# adjectives\n'прид'\t\t: 'П',\n\n# numerals\n'бр'\t\t: 'Б',\n\n# pronouns\n'зам'\t\t: 'З',\n\n# verbs\n'св. и несв': 'ВН',\n'св'\t\t: 'В',\n'несв'\t\t: 'Н',\n\n# uninflected words\n# adverbs\n'прил'\t\t: 'Л',\n\n# prepositions\n'предл'\t\t: 'Д',\n\n# conjunctions\n'сврз'\t\t: 'Р',\n\n# particles\n'чест'\t\t: 'Ч',\n\n# interjections\n'изв'\t\t: 'Ц',\n\n# modal words\n'мод'\t\t: 'А',\n\n# the rest, probably all uninflected\n# abbreviations, mostly of nouns and adjectives, but other things too, e.g. итн.\n'скр'\t\t: 'К',\n# prefixes\n'преф'\t\t: 'Ф',\n# suffixes\n'суф'\t\t: 'У',\n# compounds, also mostly nouns\n'сло'\t\t: 'Е'\n}\nprint(len(wordGroups), len(set(wordGroupMap.values())))\nprint(wordGroups)\n\nhist = {}\ndicList = []\nsporniList = []\n\nfor x in words:\n\txx = x.split('/')\n\n\tif (re.search(r\"\\W\", xx[0])): # a capital W in the pattern matches a non-letter character\n\t\t# some handling of punctuation marks would go here for the .dic as well.\n\t\t# for a start, no word containing a punctuation mark goes into mk.dic;\n\t\t# we will deal with those later\n\n\t\t# Replace the plain apostrophe with the left-leaning one, which is nicer\n\t\t# and follows the orthography. Initially there are 2-3 words with each\n\t\t# variant; make them all use the same one.\n\t\txx[0] = xx[0].replace(\"'\", \"’\")\n\n\t\t# add an apostrophe if the word starts with 'р' and has no vowels\n\t\t#if (re.match(r\"^р[^аеиоу]+\", xx[0])):\n\t\t#\txx[0] = \"’\"+ xx[0]\n\n\t\tsporniList.append(xx[0] + '/' + wordGroupMap[xx[1]] + '\\n')\n\telse:\n\t\tdicList.append(xx[0] + '/' + wordGroupMap[xx[1]] + '\\n')\n\t\tfor c in xx[0].lower():\n\t\t\tif (c in hist): hist[c] += 1\n\t\t\telse: hist[c] = 1\n\ndicfile = open(\"mk.dic\", \"w\", encoding=\"utf-8\", buffering=True)\ndicfile.write(str(len(dicList)) + '\\n')\ndicfile.writelines(dicList)\ndicfile.close()\n\nspornifile = open(\"mk-sporni.dic\", \"w\", encoding=\"utf-8\", buffering=True)\nspornifile.write(str(len(sporniList)) + '\\n')\nspornifile.writelines(sporniList)\nspornifile.close()\n\nhistSorted = sorted(hist.items(), key=lambda x: (x[1], x[0]), reverse=True)\nprintSafeEncoding(histSorted)\nletters = \"абвгдѓежзѕијклљмнњопрстќуфхцчџш\"\nlettersSorted = \"\"\npunctSorted = \"\"\nfor c in histSorted:\n\tc = c[0]\n\tif c in letters: lettersSorted += c\n\telse: punctSorted += c\n\nlettersSorted += lettersSorted.upper() + punctSorted\nprintSafeEncoding(lettersSorted)\n\nafffilename = \"mk.aff.tmp\"\nif not os.path.isfile(\"mk.aff\"):\n\tafffilename = \"mk.aff\"\nafffile = open(afffilename, \"w\", encoding=\"utf-8\", buffering=True)\nafffile.write(\"SET UTF-8\\n\")\nafffile.write(\"TRY \" + lettersSorted + '\\n')\nafffile.write(\"FLAG UTF-8\\n\")\nafffile.close()\n","sub_path":"src/makinfo.py","file_name":"makinfo.py","file_ext":"py","file_size_in_byte":6683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"165362811","text":"import numpy as np\nimport PIL.Image\nimport spacy\n\nfrom keras.applications.vgg16 import VGG16, preprocess_input\nfrom keras.layers.core import Dense, Dropout, Reshape\nfrom keras.layers.merge import concatenate\nfrom keras.layers.recurrent import LSTM\nfrom keras.models import Input, Model\nfrom keras.preprocessing import image\nfrom sklearn.externals import joblib\n\n# demo code and data came from https://github.com/iamaaditya/VQA_Demo\nlabel_encoder_file = \"./data/FULL_labelencoder_trainval.pkl\"\nVQA_weights_file = \"./data/VQA_MODEL_WEIGHTS.hdf5\"\nlength_max_questions = 30\nlength_vgg_features = 4096\nlength_spacy_features = 300\n\n\ndef get_image_features(img_path, vgg16_model_full):\n img = image.load_img(img_path, target_size=(224, 224))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n # Build a sub-model that exposes the 4096-dimensional fc2 activations.\n model_extract_features = Model(\n inputs=vgg16_model_full.input, outputs=vgg16_model_full.get_layer(\"fc2\").output\n )\n fc2_features = model_extract_features.predict(x)\n fc2_features = fc2_features.reshape((1, length_vgg_features))\n return fc2_features\n\n\ndef get_question_features(question):\n \"\"\"Given a question, returns the time series vector with each word token\n transformed into a 300-dimension representation calculated using GloVe.\"\"\"\n\n # Note: loading the spaCy model on every call is slow; in real use it\n # should be loaded once at module level.\n word_embeddings = spacy.load(\"en_core_web_md\")\n tokens = word_embeddings(question)\n n_tokens = min(len(tokens), length_max_questions)\n question_tensor = np.zeros((1, length_max_questions, 300))\n for j in range(n_tokens):\n question_tensor[0, j, :] = tokens[j].vector\n return question_tensor\n\n\nimg_file_name = 
\"./data/girl.jpg\"\nimg_0 = PIL.Image.open(img_file_name)\nmodel = VGG16(\n weights=\"./data/vgg16_weights_tf_dim_ordering_tf_kernels.h5\", include_top=True\n)\nimage_features = get_image_features(img_file_name, model)\nprint(image_features.shape)\n\nquestion = \"Who is in this picture?\"\nlanguage_features = get_question_features(question)\nprint(language_features.shape)\n\n\ndef build_combine_model(\n num_of_lstm=3,\n num_of_lstm_hidden_units=512,\n num_of_dense_layer=3,\n num_of_hidden_units=1024,\n activation_func=\"tanh\",\n dropout=0.5,\n):\n # input image\n input_image = Input(shape=(length_vgg_features,), name=\"input_image\")\n model_image = Reshape((length_vgg_features,), input_shape=(length_vgg_features,),)(\n input_image\n )\n\n # input language\n input_language = Input(\n shape=(length_max_questions, length_spacy_features), name=\"input_language\"\n )\n model_language = LSTM(\n num_of_lstm_hidden_units, return_sequences=True, name=\"lstm_1\",\n )(input_language)\n model_language = LSTM(\n num_of_lstm_hidden_units, return_sequences=True, name=\"lstm_2\",\n )(model_language)\n model_language = LSTM(\n num_of_lstm_hidden_units, return_sequences=False, name=\"lstm_3\",\n )(model_language)\n\n # concatenate, dense, dropout\n model = concatenate([model_image, model_language])\n for _ in range(num_of_dense_layer):\n model = Dense(num_of_hidden_units, kernel_initializer=\"uniform\")(model)\n model = Dropout(dropout)(model)\n\n model = Dense(1000, activation=\"softmax\")(model)\n model = Model(inputs=[input_image, input_language], outputs=model)\n return model\n\n\ncombined_model = build_combine_model()\nprint(combined_model.summary())\ncombined_model.load_weights(VQA_weights_file)\ncombined_model.compile(loss=\"categorical_crossentropy\", optimizer=\"rmsprop\")\n\ny_output = combined_model.predict([image_features, language_features])\nfor label in reversed(np.argsort(y_output)[0, -5:]):\n print(str(round(y_output[0, label] * 100, 2)).zfill(5))\n\n# print label name from label encoder will fail because of sklearn version issue\nlabel_encoder = joblib.load(label_encoder_file)\n","sub_path":"visual_qa.py","file_name":"visual_qa.py","file_ext":"py","file_size_in_byte":3961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"635909139","text":"import numpy as np\n\n\ndef n_squared_complexity(l1, l2) -> list:\n out = []\n big, small = (l1, l2) if len(l1) > len(l2) else (l2, l1)\n for e1 in small:\n for e2 in big:\n if (e1 == e2):\n out.append(e1)\n return out\n\n\ndef n_complexity(l1, l2) -> list:\n dict = {}\n big, small = (l1, l2) if len(l1) > len(l2) else (l2, l1)\n for e1 in big:\n dict[e1] = e1\n return [e for e in small if (dict.__contains__(e))]\n\n\nx = 1000\na = list(np.arange(x))\nb = list(np.random.randint(x, size=x))\nv = n_squared_complexity(a, b)\nv1 = n_complexity(a, b)\nprint(\"Common elements: {}: {}\".format(len(v), v))\nprint(\"Common elements: {}: {}\".format(len(v1), v1))\n","sub_path":"algo/common_lists.py","file_name":"common_lists.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"498035066","text":"from test_framework import generic_test\n\n\ndef matrix_in_spiral_order(square_matrix):\n SHIFT = ((0, 1), (1, 0), (0, -1), (-1, 0))\n direction = x = y = 0\n n = len(square_matrix)\n spiral_order = []\n\n for _ in range(n ** 2):\n spiral_order.append(square_matrix[x][y])\n square_matrix[x][y] = None\n next_x, next_y = x + 
SHIFT[direction][0], y + SHIFT[direction][1]\n\n if (next_x not in range(n)\n or next_y not in range(n)\n or square_matrix[next_x][next_y] is None):\n direction = (direction + 1) % 4\n next_x, next_y = x + SHIFT[direction][0], y + SHIFT[direction][1]\n\n x, y = next_x, next_y\n\n return spiral_order\n\n\nif __name__ == '__main__':\n exit(\n generic_test.generic_test_main(\"05-18-spiral_ordering_segments.py\",\n \"spiral_ordering_segments.tsv\",\n matrix_in_spiral_order))\n","sub_path":"Problems/EPI/epi_judge_python/05-18-spiral_ordering_segments.py","file_name":"05-18-spiral_ordering_segments.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"19134503","text":"__author__ = 'lmxiang'\r\n\r\nimport time\r\nimport unittest\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom GUI.v2.lib.common_elements import *\r\nimport GUI.v2.site2cloud.s2c_diag as pages\r\nfrom tests.utils.test_utils import avx_logger\r\n\r\nclass S2C_Diag_Tests(unittest.TestCase):\r\n \"\"\"\r\n Run diagnostics on an existing Site2Cloud connection including:\r\n - show logs\r\n - show security association details\r\n - show service status\r\n - show configuration\r\n - show security policy details\r\n - restart service\r\n \"\"\"\r\n\r\n @classmethod\r\n def setUpClass(cls):\r\n\r\n cls.logger = avx_logger()\r\n chrome_options = Options()\r\n chrome_options.add_argument(\"--disable-extensions\")\r\n cls.driver = webdriver.Chrome(chrome_options=chrome_options)\r\n cls.driver.maximize_window()\r\n\r\n def test_s2c_diag(self):\r\n action_list = [\"Show logs\", \"Show security association details\", \"Show service status\",\r\n \"Show configuration\", \"Show security policy details\", \"Restart service\"]\r\n\r\n s2c_view = pages.S2C_Diag(self.driver, login_required=True)\r\n time.sleep(10)\r\n\r\n self.logger.info(\"Navigating to Site2Cloud\")\r\n s2c_view.navigate_to_s2c()\r\n\r\n time.sleep(15)\r\n self.logger.info(\"Check if Site2Cloud is present in the current view area...\")\r\n assert s2c_view.match_view_title(), \"Site2Cloud view is not present\"\r\n\r\n time.sleep(5)\r\n self.logger.info(\"Select Site2Cloud Diagnostics\")\r\n assert s2c_view.select_tab(), \"Site2Cloud diagnostics is not present\"\r\n\r\n # Run all diagnostics functions for the following connection\r\n for action in action_list:\r\n s2c_action = {\"vpc_id\": \"vpc-54372631\",\r\n \"conn\": \"ProdConn\",\r\n \"gateway\": \"ProdGW\",\r\n \"action\": action\r\n }\r\n time.sleep(5)\r\n self.logger.info(\"Fill in fields for Site2Cloud diagnostics\")\r\n assert s2c_view.fill_conn_fields(**s2c_action), \"Fail to fill in Site2Cloud diagnostics fields\"\r\n s2c_view.ok_button = \"ok\"\r\n self.logger.info(\"Copy out the diagnostics results\")\r\n output = s2c_view.diag_result\r\n assert output, \"Fail to get diagnostics results\"\r\n self.logger.info(\"Diagnostics results: \" + output)\r\n\r\n @classmethod\r\n def tearDownClass(cls):\r\n cls.driver.close()\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()","sub_path":"autotest/frontend/site2cloud/s2c_conn_diag.py","file_name":"s2c_conn_diag.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"114774755","text":"# -*- coding:utf8\r\n\r\n# Python3\r\n# zhuny@kaist.ac.kr\r\n\r\n\r\nimport time\r\n\r\n\r\nclass Timer():\r\n def __init__(self,name):\r\n self.name = name\r\n self.start = 
0.0\r\n\r\n def __enter__(self):\r\n self.start = time.time()\r\n\r\n def __exit__(self,ty,va,tr):\r\n print (self.name,time.time()-self.start)\r\n\r\n\r\nclass CodeJam():\r\n def __init__(self,input_data):\r\n output_data = self.get_output(input_data)\r\n with open(input_data) as f_in:\r\n with open(output_data,\"w\") as f_out:\r\n self.f_in = f_in\r\n self.f_out = f_out\r\n n = int(f_in.readline())\r\n with Timer(\"CodeJam Time :\"):\r\n for i in range(1,n+1):\r\n self.print_it(\"Case #{0}: \",i)\r\n self.do()\r\n\r\n def print_it(self,string=\"\",*argv,**kwargs):\r\n string = string.format(*argv,**kwargs)\r\n print (string, end=\"\")\r\n self.f_out.write(string)\r\n\r\n def print_ln(self,string=\"\",*argv,**kwargs):\r\n self.print_it(string + \"\\n\", *argv, **kwargs)\r\n\r\n def get_output(self,name):\r\n name_s = name.rsplit(\".\")\r\n name_s[-1] = \"out\"\r\n return \".\".join(name_s)\r\n\r\n def get_line(self):\r\n return self.f_in.readline().strip()\r\n\r\n def get_line_func(self,func):\r\n return func(self.get_line())\r\n\r\n def get_number(self):\r\n return self.get_line_func(int)\r\n\r\n def get_numbers(self):\r\n return self.get_line_func(lambda l:list(map(int,l.split())))\r\n\r\n def do(self):\r\n self.print_it(\"\\n\")\r\n\r\n\r\n\r\n\r\nclass CodeJamTest(CodeJam):\r\n def norm(self,l,state):\r\n while l and l[-1] == state:\r\n l.pop()\r\n\r\n def do(self):\r\n l = self.get_line()\r\n l = [(c==\"+\") for c in l]\r\n count = 0\r\n\r\n self.norm(l,True)\r\n state = True\r\n\r\n while l:\r\n first_count = 1\r\n for i,val in enumerate(l):\r\n if val == state:\r\n l[i] = not state\r\n first_count += 1\r\n else:\r\n break\r\n\r\n count += min(first_count,2)\r\n\r\n l.reverse()\r\n state = not state\r\n self.norm(l,state)\r\n\r\n self.print_ln(\"{0}\",count)\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n import sys\r\n\r\n if len(sys.argv) < 2:\r\n print (__file__, \"{input file}\")\r\n\r\n else:\r\n CodeJamTest(sys.argv[1])\r\n\r\n","sub_path":"solutions_5634697451274240_0/Python/zhuny/skeleton.py","file_name":"skeleton.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"307656283","text":"import csv\n\ndef read_from_file(filename):\n data = []\n with open(filename, 'r') as f:\n reader = csv.reader(f, delimiter='\\t')\n headers = next(reader) # skip the header row; works on both Python 2 and 3\n for row in reader:\n data.append(row)\n return data\n","sub_path":"file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"29095859","text":"from scipy.sparse import csr_matrix\nfrom sklearn.linear_model import LogisticRegression\nfrom sdca_sparse import SDCA\nfrom math import log\nfrom csv import reader\nimport numpy as np\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport time\n\ntrain = 'train.txt'\n\nprev_time = time.time()\ny = []\ny_test = []\n\nrows = []\ncols = []\ndata = []\nrows_test = []\ncols_test = []\ndata_test = []\n\ntest_count = 0\ntrain_count = 0\n\n# log loss of the predicted probabilities, clipped away from 0 and 1\ndef logloss(predictions, y):\n total = 0.0\n for i, yi in enumerate(y):\n p = max(min(predictions[i], 1. - 10e-12), 10e-12)\n total -= log(p) if yi == 1. else log(1. - p)\n return total / len(y)\n\n# read in data and hash in sparse format\nfor idx, row in enumerate(reader(open(train), delimiter='\\t')):\n if idx % 100 == 1: # test point\n y_test.append(1. 
if row[0] == '1' else 0.)\n del row[0]\n\n # append bias term\n rows_test.append(test_count)\n cols_test.append(13+26)\n data_test.append(1)\n\n # read in as sparse format, integerizing\n for i in range(13 + 26):\n if row[i] == '':\n rows_test.append(test_count)\n cols_test.append(i)\n data_test.append(-1)\n elif i < 13:\n val = int(row[i])\n if val != 0:\n rows_test.append(test_count)\n cols_test.append(i)\n data_test.append(val)\n else:\n # categorical, so convert from hex to int\n val = int(row[i], 16)\n rows_test.append(test_count)\n cols_test.append(i)\n data_test.append(val)\n\n test_count += 1\n elif idx % 10 == 0: # normal training point\n y.append(1. if row[0] == '1' else 0.)\n del row[0]\n\n # append bias term\n rows.append(train_count)\n cols.append(13+26)\n data.append(1)\n\n # read in as sparse format, integerizing\n for i in range(13 + 26):\n if row[i] == '':\n rows.append(train_count)\n cols.append(i)\n data.append(-1)\n elif i < 13:\n val = int(row[i])\n if val != 0:\n rows.append(train_count)\n cols.append(i)\n data.append(val)\n else:\n # categorical, so convert from hex to int\n val = int(row[i], 16)\n rows.append(train_count)\n cols.append(i)\n data.append(val)\n\n train_count += 1\n\nprint('data finished reading')\ncurr_time = time.time() - prev_time\nprint('time for read data step = %s seconds' % curr_time)\nprev_time = time.time()\nprint()\n\ntrain_len = len(data)\ntest_len = len(data_test)\n\n# find counts of values for each categorical feature in test+train data (+1 for bias feature)\ncounts = [{} for i in range(13+26+1)]\nfor i in range(train_len):\n col = cols[i]\n if col >= 13:\n val = data[i]\n if val not in counts[col]:\n counts[col][val] = 0\n counts[col][val] += 1\n\nfor i in range(test_len):\n col = cols_test[i]\n if col >= 13:\n val = data_test[i]\n if val not in counts[col]:\n counts[col][val] = 0\n counts[col][val] += 1\n\nprint('data finished counting')\ncurr_time = time.time() - prev_time\nprint('time for counting = %s seconds' % curr_time)\nprint()\n\nbuckets = [\n 0.0, 0.49, 0.99, 1.74, 2.865, 4.5525, 7.08375, 10.880625, 16.5759375,\n 25.11890625, 37.933359375, 57.1550390625, 85.98755859375, 129.236337890625,\n 194.1095068359375, 291.41926025390626, 437.3838903808594, 656.3308355712891,\n 984.7512533569336, 1477.3818800354004, 2216.3278200531004, 3324.7467300796507,\n 4987.375095119476, 7481.317642679214, 11222.231464018821, 16833.602196028234,\n 25250.65829404235, 37876.24244106352, 56814.61866159528, 85222.18299239293,\n 127833.5294885894, 191750.54923288408, 287626.0788493261, 431439.3732739892,\n 647159.3149109838, 970739.2273664756, 1456109.0960497134, 2184163.8990745707,\n 3276246.103611856, 4914369.410417783, 7371554.370626675]\nincrement = len(buckets) + 1\ncategorical_base = increment * 13\n\n# remove cols from counts if count is < 15, translate values to IDs\ncurr_id = categorical_base\n\ndef increment_and_return():\n global curr_id\n curr_id += 1\n return curr_id - 1\n\ncounts = [{val: increment_and_return() for val, count in a.items() if count >= 15} for a in counts]\n\ndef find_bucket(num):\n i = 0\n while i < len(buckets) and num >= buckets[i]:\n i += 1\n return i\n\ndef preprocess(rows, cols, data):\n for i in range(len(data)):\n col = cols[i]\n val = data[i]\n\n # bucketize integer values\n if col < 13:\n bucket = find_bucket(val)\n cols[i] = bucket + increment*col\n data[i] = 1\n\n # ignore rare categorical features and convert to ID, including bias term\n else:\n # default 0, out of bound col index if destined to be removed 
due to low count\n entry = 0\n index = curr_id\n if val in counts[col]:\n index = counts[col][val]\n entry = 1\n\n cols[i] = index\n data[i] = entry\n\npreprocess(rows, cols, data)\npreprocess(rows_test, cols_test, data_test)\n\n# make sure all vals are now either 1 or 0. 0's will be removed after turning into csr_matrix\nfor i in range(train_len):\n assert data[i] == 1 or (data[i] == 0 and cols[i] == curr_id)\n\nX = csr_matrix((data, (rows, cols)))\nX.eliminate_zeros()\ndata, rows, cols = None, None, None\n\nX_test = csr_matrix((data_test, (rows_test, cols_test)))\nX_test.eliminate_zeros()\ndata_test, rows_test, cols_test = None, None, None\n\ny = np.array(y)\ny_test = np.array(y_test)\n\nprint('data finished preprocessing')\ncurr_time = time.time() - prev_time\nprint('time for preprocess data step = %s seconds' % curr_time)\nprev_time = time.time()\nprint('')\n\n# Get sklearn's value\nlr = LogisticRegression(solver='liblinear').fit(X, y)\n\nprint('sklearn log loss = ' + str(logloss(lr.predict_proba(X_test)[:, 1].T, y_test)))\n\nprint('data finished sklearn lr')\ncurr_time = time.time() - prev_time\nprint('time for sklearn = %s seconds' % curr_time)\nprev_time = time.time()\nprint('')\n\n# Start training step\n\na_0 = csr_matrix([0.0 for i in y])\nlamb = 0.00001\n\nepochs_per_data = 1\nsdca = SDCA('log')\nepochs = []\nloss = []\nloss_test = []\n\ntrain_start = time.time()\nprev_time = train_start\ntimes = []\n\n# run SDCA training for some number of epochs\nfor i in range(1, 21):\n w, a_0 = sdca.train(X, y, a_0, epochs_per_data, lamb=lamb)\n\n epochs.append(i * epochs_per_data)\n\n print('finished epoch ' + str(i * epochs_per_data))\n prev_time = time.time()\n since_start = prev_time - train_start\n print('time since start of training = %s seconds' % since_start)\n times.append(since_start)\n\n pvals = sdca.getpvals(X)\n loss.append(logloss(pvals, y))\n\n pvals_test = sdca.getpvals(X_test)\n loss_test.append(logloss(pvals_test, y_test))\n\n print('training log loss = ' + str(loss[-1]))\n print('test log loss = ' + str(loss_test[-1]))\n print('')\n\nfig = plt.figure(1)\nplt.plot(epochs, loss)\nplt.xlabel('num epochs')\nplt.ylabel('log loss')\nplt.title('training log loss on criteo vs epochs')\nplt.savefig('results/training_loss_epochs.png', dpi=300)\nplt.close(fig)\n\nfig = plt.figure(2)\nplt.plot(times, loss)\nplt.xlabel('time since training start')\nplt.ylabel('log loss')\nplt.title('training log loss on criteo vs time')\nplt.savefig('results/training_loss_time.png', dpi=300)\nplt.close(fig)\n\nfig = plt.figure(3)\nplt.plot(epochs, loss_test)\nplt.xlabel('num epochs')\nplt.ylabel('log loss')\nplt.title('test log loss on criteo vs epochs')\nplt.savefig('results/test_loss_epochs.png', dpi=300)\nplt.close(fig)\n\nfig = plt.figure(4)\nplt.plot(times, loss_test)\nplt.xlabel('time since training start')\nplt.ylabel('log loss')\nplt.title('test log loss on criteo vs time')\nplt.savefig('results/test_loss_time.png', dpi=300)\nplt.close(fig)\n","sub_path":"criteo/criteo_sdca_google_preprocessing.py","file_name":"criteo_sdca_google_preprocessing.py","file_ext":"py","file_size_in_byte":8149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
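The criteo record above maps each raw integer feature into exponentially growing buckets with a linear scan over `buckets`. Since the bucket edges are sorted ascending, the same mapping can be computed with a binary search; a minimal sketch using only the standard library (the truncated edge list and the asserts are ours, not part of the record):

import bisect

# First few bucket edges from the record above; the full list keeps
# growing roughly geometrically, so it stays sorted ascending.
buckets = [0.0, 0.49, 0.99, 1.74, 2.865, 4.5525, 7.08375]

def find_bucket(num):
    # bisect_right counts the edges <= num, which reproduces the record's
    # linear scan (`while i < len(buckets) and num >= buckets[i]: i += 1`).
    return bisect.bisect_right(buckets, num)

assert find_bucket(-5) == 0    # below the first edge
assert find_bucket(0.49) == 2  # ties advance past the matching edge, as in the scan
assert find_bucket(1.0) == 3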
worker[i] is the ability of \n# the ith worker, which means that this worker can only \n# complete a job with difficulty at most worker[i]. \n\n# Every worker can be assigned at most one job, but one \n# job can be completed multiple times.\n\n# For example, if 3 people attempt the same job that pays \n# $1, then the total profit will be $3. If a worker \n# cannot complete any job, his profit is $0.\n\n# What is the most profit we can make?\n\n# Input: difficulty = [2,4,6,8,10], \n# profit = [10,20,30,40,50], worker = [4,5,6,7]\n# Output: 100 \n# Explanation: Workers are assigned jobs of difficulty \n# [4,4,6,6] and they get profit of [20,20,30,30] separately.\n\nclass Solution(object):\n def maxProfitAssignment(self, difficulty, profit, worker):\n # zip() is lazy on Python 3, so materialize and sort via sorted()\n jobs = sorted(zip(difficulty, profit))\n i = 0\n prof = 0\n ans = 0\n for skills in sorted(worker):\n # advance through the jobs (not the workers) this worker can do\n while i < len(jobs) and skills >= jobs[i][0]:\n prof = max(prof, jobs[i][1])\n i += 1\n ans += prof\n return ans\n\n\ndifficulty = [2,4,6,8,10]\nprofit = [10,20,30,40,50]\nworker = [4,5,6,7]\nprint(Solution().maxProfitAssignment(difficulty, profit, worker)) # expected: 100\n","sub_path":"Most Profit Assigning Work/Most_Profit.py","file_name":"Most_Profit.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"476134453","text":"# Global Setting for the Database\n# PageSize, StartRID, etc.\n\n# Table values\nPAGE_SIZE = 4096\nPAGE_RECORD_SIZE = 8\nBASE_PAGE_COUNT = 16\nMETA_COLUMN_COUNT = 5\nENTRIES_PER_PAGE = int(PAGE_SIZE / PAGE_RECORD_SIZE)\nENTRIES_PER_PAGE_RANGE = ENTRIES_PER_PAGE * BASE_PAGE_COUNT\nSPECIAL_NULL_VALUE = pow(2, 64) - 1\n\n# Column Indices\nINDIRECTION = 0 # int\nRID_COLUMN = 1 # int\nBASE_RID_COLUMN = 2\nTIMESTAMP_COLUMN = 3 # datetime\nSCHEMA_ENCODING_COLUMN = 4 # string\nKEY_COLUMN = META_COLUMN_COUNT\n\n# Bufferpool\nBUFFERPOOL_FRAME_COUNT = 75 # 1 frame == 1 base page\n\n# Merge\nMERGE_COUNT_TRIGGER = 2048\n\n# QueCC\nBATCH_SIZE = 100\nPLANNER_THREAD_COUNT = 2\nQUEUES_PER_GROUP = 5\nPRIORITY_QUEUE_COUNT = PLANNER_THREAD_COUNT * QUEUES_PER_GROUP\n\n# Queries\nINSERT = 'insert'\nSELECT = 'select'\nUPDATE = 'update'\nDELETE = 'delete'\nSUM = 'sum'\n","sub_path":"lstore/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
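As a quick cross-check of the greedy two-pointer scan in the `maxProfitAssignment` record above, here is a brute-force reference that gives every worker the best-paying job they can complete; the helper name is ours, and the test values are the example from the record:

def max_profit_brute(difficulty, profit, worker):
    # For each worker, take the maximum profit over all jobs whose
    # difficulty is at most the worker's ability, or 0 if none qualify.
    return sum(
        max((p for d, p in zip(difficulty, profit) if d <= w), default=0)
        for w in worker
    )

# Workers [4, 5, 6, 7] earn 20 + 20 + 30 + 30 = 100, matching the record.
assert max_profit_brute([2, 4, 6, 8, 10], [10, 20, 30, 40, 50], [4, 5, 6, 7]) == 100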