| content (string, length 1 to 1.04M) | input_ids (token id list, length 1 to 774k) | ratio_char_token (float64, 0.38 to 22.9) | token_count (int64, 1 to 774k) |
|---|---|---|---|
from django.db import models
from jsonfield import JSONField
from taggit.managers import TaggableManager
from slugify import slugify
#import editarea
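# Hedged sketch (not in the original file): a minimal model showing how the
# imports above are typically combined; the model name and fields are assumptions.
class Snippet(models.Model):
    title = models.CharField(max_length=200)
    slug = models.SlugField(unique=True, blank=True)
    data = JSONField(default=dict)
    tags = TaggableManager(blank=True)

    def save(self, *args, **kwargs):
        # derive the slug from the title on first save
        if not self.slug:
            self.slug = slugify(self.title)
        super(Snippet, self).save(*args, **kwargs)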
| [... input_ids truncated ...] | 3.756098 | 41 |
# -*- coding: utf-8 -*-
import scrapy
import re
from locations.items import GeojsonPointItem
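# Hedged sketch (not in the original file): a minimal store-locator spider of the
# kind these imports are used for; the spider name, URL and JSON field names are
# placeholders.
import json


class ExampleSpider(scrapy.Spider):
    name = "example_locations"
    start_urls = ["https://example.com/stores.json"]  # placeholder URL

    def parse(self, response):
        for store in json.loads(response.text):
            yield GeojsonPointItem(
                ref=str(store["id"]),
                name=store["name"],
                lat=float(store["lat"]),
                lon=float(store["lng"]),
            )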
| [2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 15881, 88, 198, 11748, 302, 198, 198, 6738, 7064, 13, 23814, 1330, 2269, 13210, 1559, 12727, 7449, 628] | 2.878788 | 33 |
"""Implement an AST Traversal used by tests to introspect the output of the compiler"""
from supergsl.core.backend import BreadthFirstNodeFilteredPass
class TestOutputAstPass(BreadthFirstNodeFilteredPass):
"""AST Traversal used by Integration tests to introspect the output of the compiler."""
name = 'test'
def before_pass(self, ast):
"""Initialize the SBOL Document."""
pass
| [... input_ids truncated ...] | 3.195313 | 128 |
# The Leginon software is Copyright 2004
# The Scripps Research Institute, La Jolla, CA
# For terms of the license agreement
# see http://ami.scripps.edu/software/leginon-license
#
import threading
import wx
import wx.lib.filebrowsebutton as filebrowse
import leginon.gui.wx.Events
import leginon.gui.wx.TargetPanel
import leginon.gui.wx.ImagePanelTools
import leginon.gui.wx.Settings
import leginon.gui.wx.TargetFinder
import leginon.gui.wx.ToolBar
import os.path
try:
import mlabraw as pymat
except:
pymat = None
if __name__ == '__main__':
app = App(0)
app.MainLoop()
| [... input_ids truncated ...] | 2.806763 | 207 |
"""
PASSENGERS
"""
numPassengers = 4229
passenger_arriving = (
(4, 12, 12, 4, 5, 0, 7, 12, 10, 7, 3, 0), # 0
(8, 5, 6, 5, 2, 0, 5, 13, 7, 5, 3, 0), # 1
(3, 11, 7, 7, 1, 0, 7, 7, 12, 7, 0, 0), # 2
(4, 13, 11, 2, 1, 0, 8, 13, 10, 8, 3, 0), # 3
(8, 7, 13, 6, 5, 0, 11, 17, 9, 8, 2, 0), # 4
(4, 11, 8, 2, 2, 0, 7, 6, 5, 4, 1, 0), # 5
(5, 11, 6, 5, 2, 0, 11, 10, 7, 6, 0, 0), # 6
(2, 12, 7, 5, 5, 0, 10, 13, 7, 9, 2, 0), # 7
(5, 9, 20, 8, 2, 0, 5, 8, 8, 3, 2, 0), # 8
(6, 10, 7, 7, 2, 0, 8, 12, 7, 7, 2, 0), # 9
(7, 10, 16, 6, 4, 0, 13, 10, 5, 7, 2, 0), # 10
(5, 11, 6, 5, 3, 0, 8, 14, 7, 4, 4, 0), # 11
(4, 6, 13, 6, 1, 0, 7, 16, 15, 7, 4, 0), # 12
(3, 11, 10, 4, 2, 0, 14, 20, 8, 6, 2, 0), # 13
(5, 7, 9, 2, 3, 0, 9, 12, 6, 5, 4, 0), # 14
(4, 11, 5, 6, 1, 0, 8, 10, 11, 5, 3, 0), # 15
(9, 14, 12, 5, 3, 0, 10, 14, 12, 5, 4, 0), # 16
(6, 19, 13, 3, 3, 0, 6, 6, 6, 6, 6, 0), # 17
(6, 15, 8, 2, 8, 0, 9, 18, 8, 2, 2, 0), # 18
(8, 12, 9, 3, 2, 0, 1, 10, 6, 3, 1, 0), # 19
(5, 13, 10, 1, 3, 0, 12, 10, 9, 4, 2, 0), # 20
(6, 16, 12, 5, 3, 0, 9, 17, 2, 5, 5, 0), # 21
(11, 19, 10, 3, 2, 0, 14, 8, 7, 3, 6, 0), # 22
(4, 10, 10, 8, 2, 0, 14, 9, 6, 7, 4, 0), # 23
(7, 11, 9, 6, 2, 0, 12, 11, 6, 5, 1, 0), # 24
(3, 11, 5, 1, 2, 0, 7, 7, 7, 10, 1, 0), # 25
(2, 18, 9, 7, 4, 0, 10, 11, 4, 11, 3, 0), # 26
(5, 12, 9, 7, 2, 0, 8, 11, 8, 3, 4, 0), # 27
(2, 14, 5, 1, 5, 0, 8, 10, 15, 5, 0, 0), # 28
(7, 9, 9, 6, 3, 0, 9, 12, 5, 4, 4, 0), # 29
(6, 9, 4, 8, 1, 0, 7, 6, 5, 7, 5, 0), # 30
(6, 17, 7, 5, 3, 0, 15, 10, 5, 6, 3, 0), # 31
(5, 13, 10, 3, 4, 0, 6, 12, 14, 8, 3, 0), # 32
(6, 6, 5, 2, 3, 0, 5, 8, 10, 9, 4, 0), # 33
(9, 17, 17, 7, 2, 0, 5, 10, 9, 4, 2, 0), # 34
(4, 19, 9, 6, 1, 0, 6, 15, 13, 10, 2, 0), # 35
(6, 10, 11, 9, 0, 0, 12, 17, 8, 2, 3, 0), # 36
(12, 10, 9, 3, 4, 0, 12, 12, 13, 15, 0, 0), # 37
(5, 12, 8, 6, 5, 0, 9, 15, 10, 6, 3, 0), # 38
(8, 11, 12, 3, 1, 0, 11, 20, 6, 4, 3, 0), # 39
(11, 11, 13, 6, 4, 0, 6, 8, 6, 2, 2, 0), # 40
(6, 10, 12, 4, 5, 0, 5, 7, 9, 7, 3, 0), # 41
(3, 11, 8, 5, 2, 0, 10, 10, 10, 4, 6, 0), # 42
(10, 9, 5, 6, 2, 0, 5, 9, 8, 6, 3, 0), # 43
(6, 18, 6, 5, 1, 0, 8, 10, 8, 5, 2, 0), # 44
(4, 11, 13, 1, 4, 0, 9, 6, 8, 11, 1, 0), # 45
(6, 11, 11, 2, 2, 0, 6, 13, 8, 11, 1, 0), # 46
(10, 10, 9, 8, 4, 0, 5, 16, 9, 3, 4, 0), # 47
(9, 12, 8, 2, 6, 0, 7, 16, 8, 6, 6, 0), # 48
(5, 18, 7, 5, 2, 0, 6, 6, 6, 2, 2, 0), # 49
(6, 14, 12, 4, 4, 0, 6, 11, 10, 10, 5, 0), # 50
(8, 11, 7, 6, 4, 0, 6, 8, 8, 6, 0, 0), # 51
(4, 7, 5, 2, 3, 0, 11, 15, 7, 6, 4, 0), # 52
(3, 11, 5, 7, 1, 0, 10, 13, 7, 5, 4, 0), # 53
(8, 14, 8, 7, 3, 0, 10, 16, 11, 8, 2, 0), # 54
(3, 13, 11, 5, 2, 0, 11, 12, 2, 12, 2, 0), # 55
(4, 13, 12, 6, 3, 0, 7, 12, 9, 8, 1, 0), # 56
(6, 13, 16, 7, 1, 0, 8, 8, 10, 2, 6, 0), # 57
(5, 13, 12, 1, 3, 0, 10, 9, 4, 6, 3, 0), # 58
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # 59
)
station_arriving_intensity = (
(4.769372805092186, 12.233629261363635, 14.389624839331619, 11.405298913043477, 12.857451923076923, 8.562228260869567), # 0
(4.81413961808604, 12.369674877683082, 14.46734796754499, 11.46881589673913, 12.953819711538461, 8.559309850543478), # 1
(4.8583952589991215, 12.503702525252525, 14.54322622107969, 11.530934782608696, 13.048153846153847, 8.556302173913043), # 2
(4.902102161984196, 12.635567578125, 14.617204169344474, 11.591602581521737, 13.14036778846154, 8.553205638586958), # 3
(4.94522276119403, 12.765125410353535, 14.689226381748071, 11.650766304347826, 13.230375, 8.550020652173911), # 4
(4.987719490781387, 12.892231395991162, 14.759237427699228, 11.708372961956522, 13.318088942307691, 8.546747622282608), # 5
(5.029554784899035, 13.01674090909091, 14.827181876606687, 11.764369565217393, 13.403423076923078, 8.54338695652174), # 6
(5.0706910776997365, 13.138509323705808, 14.893004297879177, 11.818703125, 13.486290865384618, 8.5399390625), # 7
(5.1110908033362605, 13.257392013888888, 14.956649260925452, 11.871320652173912, 13.56660576923077, 8.536404347826087), # 8
(5.1507163959613695, 13.373244353693181, 15.018061335154243, 11.922169157608696, 13.644281249999999, 8.532783220108696), # 9
(5.1895302897278315, 13.485921717171717, 15.077185089974291, 11.971195652173915, 13.719230769230771, 8.529076086956522), # 10
(5.227494918788412, 13.595279478377526, 15.133965094794343, 12.018347146739131, 13.791367788461539, 8.525283355978262), # 11
(5.2645727172958745, 13.701173011363636, 15.188345919023137, 12.063570652173912, 13.860605769230768, 8.521405434782608), # 12
(5.3007261194029835, 13.803457690183082, 15.240272132069407, 12.106813179347826, 13.926858173076925, 8.51744273097826), # 13
(5.335917559262511, 13.90198888888889, 15.289688303341899, 12.148021739130433, 13.99003846153846, 8.513395652173912), # 14
(5.370109471027217, 13.996621981534089, 15.336539002249355, 12.187143342391304, 14.050060096153846, 8.509264605978261), # 15
(5.403264288849868, 14.087212342171718, 15.380768798200515, 12.224124999999999, 14.10683653846154, 8.50505), # 16
(5.4353444468832315, 14.173615344854797, 15.422322260604112, 12.258913722826087, 14.16028125, 8.500752241847827), # 17
(5.46631237928007, 14.255686363636363, 15.461143958868895, 12.291456521739132, 14.210307692307696, 8.496371739130435), # 18
(5.496130520193152, 14.333280772569443, 15.4971784624036, 12.321700407608695, 14.256829326923079, 8.491908899456522), # 19
(5.524761303775241, 14.40625394570707, 15.530370340616965, 12.349592391304348, 14.299759615384616, 8.487364130434782), # 20
(5.552167164179106, 14.47446125710227, 15.56066416291774, 12.375079483695652, 14.339012019230768, 8.482737839673913), # 21
(5.578310535557506, 14.537758080808082, 15.588004498714653, 12.398108695652175, 14.374499999999998, 8.47803043478261), # 22
(5.603153852063214, 14.595999790877526, 15.612335917416454, 12.418627038043478, 14.40613701923077, 8.473242323369567), # 23
(5.62665954784899, 14.649041761363636, 15.633602988431875, 12.43658152173913, 14.433836538461538, 8.468373913043479), # 24
(5.648790057067603, 14.696739366319445, 15.651750281169667, 12.451919157608696, 14.457512019230768, 8.463425611413044), # 25
(5.669507813871817, 14.738947979797977, 15.66672236503856, 12.464586956521739, 14.477076923076922, 8.458397826086957), # 26
(5.688775252414398, 14.77552297585227, 15.6784638094473, 12.474531929347828, 14.492444711538463, 8.453290964673915), # 27
(5.7065548068481124, 14.806319728535353, 15.68691918380463, 12.481701086956523, 14.503528846153845, 8.448105434782608), # 28
(5.722808911325724, 14.831193611900254, 15.69203305751928, 12.486041440217392, 14.510242788461538, 8.44284164402174), # 29
(5.7375, 14.85, 15.69375, 12.4875, 14.512500000000001, 8.4375), # 30
(5.751246651214834, 14.865621839488634, 15.692462907608693, 12.487236580882353, 14.511678590425532, 8.430077267616193), # 31
(5.7646965153452685, 14.881037215909092, 15.68863804347826, 12.486451470588234, 14.509231914893617, 8.418644565217393), # 32
(5.777855634590792, 14.896244211647728, 15.682330027173915, 12.485152389705883, 14.50518630319149, 8.403313830584706), # 33
(5.790730051150895, 14.91124090909091, 15.67359347826087, 12.483347058823531, 14.499568085106382, 8.38419700149925), # 34
(5.803325807225064, 14.926025390624996, 15.662483016304348, 12.481043198529411, 14.492403590425532, 8.361406015742128), # 35
(5.815648945012788, 14.940595738636366, 15.649053260869564, 12.478248529411767, 14.48371914893617, 8.335052811094453), # 36
(5.8277055067135555, 14.954950035511365, 15.63335883152174, 12.474970772058823, 14.47354109042553, 8.305249325337332), # 37
(5.839501534526853, 14.969086363636364, 15.615454347826088, 12.471217647058824, 14.461895744680852, 8.272107496251873), # 38
(5.851043070652174, 14.983002805397728, 15.595394429347825, 12.466996875000001, 14.44880944148936, 8.23573926161919), # 39
(5.862336157289003, 14.99669744318182, 15.573233695652176, 12.462316176470589, 14.434308510638296, 8.196256559220389), # 40
(5.873386836636828, 15.010168359374997, 15.549026766304348, 12.457183272058824, 14.418419281914893, 8.153771326836583), # 41
(5.88420115089514, 15.023413636363639, 15.522828260869566, 12.451605882352942, 14.401168085106384, 8.108395502248875), # 42
(5.894785142263428, 15.03643135653409, 15.494692798913043, 12.445591727941178, 14.38258125, 8.060241023238381), # 43
(5.905144852941176, 15.049219602272727, 15.464675, 12.439148529411764, 14.36268510638298, 8.009419827586207), # 44
(5.915286325127877, 15.061776455965909, 15.432829483695656, 12.43228400735294, 14.341505984042554, 7.956043853073464), # 45
(5.925215601023019, 15.074100000000003, 15.39921086956522, 12.425005882352941, 14.319070212765958, 7.90022503748126), # 46
(5.934938722826087, 15.086188316761364, 15.363873777173913, 12.417321874999999, 14.295404122340427, 7.842075318590705), # 47
(5.944461732736574, 15.098039488636365, 15.326872826086957, 12.409239705882353, 14.27053404255319, 7.7817066341829095), # 48
(5.953790672953963, 15.10965159801136, 15.288262635869566, 12.400767095588236, 14.24448630319149, 7.71923092203898), # 49
(5.96293158567775, 15.121022727272724, 15.248097826086958, 12.391911764705883, 14.217287234042553, 7.65476011994003), # 50
(5.971890513107417, 15.132150958806818, 15.206433016304347, 12.38268143382353, 14.188963164893616, 7.588406165667167), # 51
(5.980673497442456, 15.143034375, 15.163322826086954, 12.373083823529411, 14.159540425531915, 7.5202809970015), # 52
(5.989286580882353, 15.153671058238638, 15.118821875, 12.363126654411765, 14.129045345744682, 7.450496551724138), # 53
(5.9977358056266, 15.164059090909088, 15.072984782608694, 12.352817647058824, 14.09750425531915, 7.379164767616192), # 54
(6.00602721387468, 15.174196555397728, 15.02586616847826, 12.342164522058825, 14.064943484042553, 7.306397582458771), # 55
(6.014166847826087, 15.184081534090907, 14.977520652173913, 12.331175, 14.031389361702129, 7.232306934032984), # 56
(6.022160749680308, 15.193712109375003, 14.92800285326087, 12.319856801470587, 13.996868218085105, 7.15700476011994), # 57
(6.030014961636829, 15.203086363636363, 14.877367391304347, 12.308217647058825, 13.961406382978723, 7.0806029985007495), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_arriving_acc = (
(4, 12, 12, 4, 5, 0, 7, 12, 10, 7, 3, 0), # 0
(12, 17, 18, 9, 7, 0, 12, 25, 17, 12, 6, 0), # 1
(15, 28, 25, 16, 8, 0, 19, 32, 29, 19, 6, 0), # 2
(19, 41, 36, 18, 9, 0, 27, 45, 39, 27, 9, 0), # 3
(27, 48, 49, 24, 14, 0, 38, 62, 48, 35, 11, 0), # 4
(31, 59, 57, 26, 16, 0, 45, 68, 53, 39, 12, 0), # 5
(36, 70, 63, 31, 18, 0, 56, 78, 60, 45, 12, 0), # 6
(38, 82, 70, 36, 23, 0, 66, 91, 67, 54, 14, 0), # 7
(43, 91, 90, 44, 25, 0, 71, 99, 75, 57, 16, 0), # 8
(49, 101, 97, 51, 27, 0, 79, 111, 82, 64, 18, 0), # 9
(56, 111, 113, 57, 31, 0, 92, 121, 87, 71, 20, 0), # 10
(61, 122, 119, 62, 34, 0, 100, 135, 94, 75, 24, 0), # 11
(65, 128, 132, 68, 35, 0, 107, 151, 109, 82, 28, 0), # 12
(68, 139, 142, 72, 37, 0, 121, 171, 117, 88, 30, 0), # 13
(73, 146, 151, 74, 40, 0, 130, 183, 123, 93, 34, 0), # 14
(77, 157, 156, 80, 41, 0, 138, 193, 134, 98, 37, 0), # 15
(86, 171, 168, 85, 44, 0, 148, 207, 146, 103, 41, 0), # 16
(92, 190, 181, 88, 47, 0, 154, 213, 152, 109, 47, 0), # 17
(98, 205, 189, 90, 55, 0, 163, 231, 160, 111, 49, 0), # 18
(106, 217, 198, 93, 57, 0, 164, 241, 166, 114, 50, 0), # 19
(111, 230, 208, 94, 60, 0, 176, 251, 175, 118, 52, 0), # 20
(117, 246, 220, 99, 63, 0, 185, 268, 177, 123, 57, 0), # 21
(128, 265, 230, 102, 65, 0, 199, 276, 184, 126, 63, 0), # 22
(132, 275, 240, 110, 67, 0, 213, 285, 190, 133, 67, 0), # 23
(139, 286, 249, 116, 69, 0, 225, 296, 196, 138, 68, 0), # 24
(142, 297, 254, 117, 71, 0, 232, 303, 203, 148, 69, 0), # 25
(144, 315, 263, 124, 75, 0, 242, 314, 207, 159, 72, 0), # 26
(149, 327, 272, 131, 77, 0, 250, 325, 215, 162, 76, 0), # 27
(151, 341, 277, 132, 82, 0, 258, 335, 230, 167, 76, 0), # 28
(158, 350, 286, 138, 85, 0, 267, 347, 235, 171, 80, 0), # 29
(164, 359, 290, 146, 86, 0, 274, 353, 240, 178, 85, 0), # 30
(170, 376, 297, 151, 89, 0, 289, 363, 245, 184, 88, 0), # 31
(175, 389, 307, 154, 93, 0, 295, 375, 259, 192, 91, 0), # 32
(181, 395, 312, 156, 96, 0, 300, 383, 269, 201, 95, 0), # 33
(190, 412, 329, 163, 98, 0, 305, 393, 278, 205, 97, 0), # 34
(194, 431, 338, 169, 99, 0, 311, 408, 291, 215, 99, 0), # 35
(200, 441, 349, 178, 99, 0, 323, 425, 299, 217, 102, 0), # 36
(212, 451, 358, 181, 103, 0, 335, 437, 312, 232, 102, 0), # 37
(217, 463, 366, 187, 108, 0, 344, 452, 322, 238, 105, 0), # 38
(225, 474, 378, 190, 109, 0, 355, 472, 328, 242, 108, 0), # 39
(236, 485, 391, 196, 113, 0, 361, 480, 334, 244, 110, 0), # 40
(242, 495, 403, 200, 118, 0, 366, 487, 343, 251, 113, 0), # 41
(245, 506, 411, 205, 120, 0, 376, 497, 353, 255, 119, 0), # 42
(255, 515, 416, 211, 122, 0, 381, 506, 361, 261, 122, 0), # 43
(261, 533, 422, 216, 123, 0, 389, 516, 369, 266, 124, 0), # 44
(265, 544, 435, 217, 127, 0, 398, 522, 377, 277, 125, 0), # 45
(271, 555, 446, 219, 129, 0, 404, 535, 385, 288, 126, 0), # 46
(281, 565, 455, 227, 133, 0, 409, 551, 394, 291, 130, 0), # 47
(290, 577, 463, 229, 139, 0, 416, 567, 402, 297, 136, 0), # 48
(295, 595, 470, 234, 141, 0, 422, 573, 408, 299, 138, 0), # 49
(301, 609, 482, 238, 145, 0, 428, 584, 418, 309, 143, 0), # 50
(309, 620, 489, 244, 149, 0, 434, 592, 426, 315, 143, 0), # 51
(313, 627, 494, 246, 152, 0, 445, 607, 433, 321, 147, 0), # 52
(316, 638, 499, 253, 153, 0, 455, 620, 440, 326, 151, 0), # 53
(324, 652, 507, 260, 156, 0, 465, 636, 451, 334, 153, 0), # 54
(327, 665, 518, 265, 158, 0, 476, 648, 453, 346, 155, 0), # 55
(331, 678, 530, 271, 161, 0, 483, 660, 462, 354, 156, 0), # 56
(337, 691, 546, 278, 162, 0, 491, 668, 472, 356, 162, 0), # 57
(342, 704, 558, 279, 165, 0, 501, 677, 476, 362, 165, 0), # 58
(342, 704, 558, 279, 165, 0, 501, 677, 476, 362, 165, 0), # 59
)
passenger_arriving_rate = (
(4.769372805092186, 9.786903409090908, 8.63377490359897, 4.56211956521739, 2.5714903846153843, 0.0, 8.562228260869567, 10.285961538461537, 6.843179347826086, 5.755849935732647, 2.446725852272727, 0.0), # 0
(4.81413961808604, 9.895739902146465, 8.680408780526994, 4.587526358695651, 2.5907639423076922, 0.0, 8.559309850543478, 10.363055769230769, 6.881289538043478, 5.786939187017995, 2.4739349755366162, 0.0), # 1
(4.8583952589991215, 10.00296202020202, 8.725935732647814, 4.612373913043478, 2.609630769230769, 0.0, 8.556302173913043, 10.438523076923076, 6.918560869565217, 5.817290488431875, 2.500740505050505, 0.0), # 2
(4.902102161984196, 10.1084540625, 8.770322501606683, 4.636641032608694, 2.628073557692308, 0.0, 8.553205638586958, 10.512294230769232, 6.954961548913042, 5.846881667737789, 2.527113515625, 0.0), # 3
(4.94522276119403, 10.212100328282828, 8.813535829048842, 4.66030652173913, 2.6460749999999997, 0.0, 8.550020652173911, 10.584299999999999, 6.990459782608696, 5.875690552699228, 2.553025082070707, 0.0), # 4
(4.987719490781387, 10.313785116792928, 8.855542456619537, 4.6833491847826085, 2.663617788461538, 0.0, 8.546747622282608, 10.654471153846153, 7.025023777173913, 5.90369497107969, 2.578446279198232, 0.0), # 5
(5.029554784899035, 10.413392727272727, 8.896309125964011, 4.705747826086957, 2.680684615384615, 0.0, 8.54338695652174, 10.72273846153846, 7.058621739130436, 5.930872750642674, 2.603348181818182, 0.0), # 6
(5.0706910776997365, 10.510807458964646, 8.935802578727506, 4.72748125, 2.697258173076923, 0.0, 8.5399390625, 10.789032692307693, 7.0912218750000005, 5.95720171915167, 2.6277018647411614, 0.0), # 7
(5.1110908033362605, 10.60591361111111, 8.97398955655527, 4.7485282608695645, 2.7133211538461537, 0.0, 8.536404347826087, 10.853284615384615, 7.122792391304347, 5.982659704370181, 2.6514784027777774, 0.0), # 8
(5.1507163959613695, 10.698595482954543, 9.010836801092546, 4.768867663043478, 2.7288562499999993, 0.0, 8.532783220108696, 10.915424999999997, 7.153301494565217, 6.007224534061697, 2.6746488707386358, 0.0), # 9
(5.1895302897278315, 10.788737373737373, 9.046311053984574, 4.7884782608695655, 2.743846153846154, 0.0, 8.529076086956522, 10.975384615384616, 7.182717391304348, 6.030874035989716, 2.697184343434343, 0.0), # 10
(5.227494918788412, 10.87622358270202, 9.080379056876605, 4.807338858695652, 2.7582735576923074, 0.0, 8.525283355978262, 11.03309423076923, 7.2110082880434785, 6.053586037917737, 2.719055895675505, 0.0), # 11
(5.2645727172958745, 10.960938409090907, 9.113007551413881, 4.825428260869565, 2.7721211538461534, 0.0, 8.521405434782608, 11.088484615384614, 7.238142391304347, 6.0753383676092545, 2.740234602272727, 0.0), # 12
(5.3007261194029835, 11.042766152146465, 9.144163279241644, 4.8427252717391305, 2.7853716346153847, 0.0, 8.51744273097826, 11.141486538461539, 7.264087907608696, 6.096108852827762, 2.760691538036616, 0.0), # 13
(5.335917559262511, 11.121591111111112, 9.173812982005138, 4.859208695652173, 2.7980076923076918, 0.0, 8.513395652173912, 11.192030769230767, 7.288813043478259, 6.115875321336759, 2.780397777777778, 0.0), # 14
(5.370109471027217, 11.19729758522727, 9.201923401349612, 4.874857336956521, 2.810012019230769, 0.0, 8.509264605978261, 11.240048076923076, 7.312286005434782, 6.134615600899742, 2.7993243963068175, 0.0), # 15
(5.403264288849868, 11.269769873737372, 9.228461278920308, 4.88965, 2.8213673076923076, 0.0, 8.50505, 11.28546923076923, 7.334474999999999, 6.152307519280206, 2.817442468434343, 0.0), # 16
(5.4353444468832315, 11.338892275883836, 9.253393356362468, 4.903565489130434, 2.83205625, 0.0, 8.500752241847827, 11.328225, 7.3553482336956515, 6.168928904241644, 2.834723068970959, 0.0), # 17
(5.46631237928007, 11.40454909090909, 9.276686375321336, 4.916582608695652, 2.842061538461539, 0.0, 8.496371739130435, 11.368246153846156, 7.374873913043479, 6.184457583547558, 2.8511372727272724, 0.0), # 18
(5.496130520193152, 11.466624618055553, 9.298307077442159, 4.928680163043477, 2.8513658653846155, 0.0, 8.491908899456522, 11.405463461538462, 7.393020244565217, 6.198871384961439, 2.866656154513888, 0.0), # 19
(5.524761303775241, 11.525003156565655, 9.318222204370178, 4.939836956521739, 2.859951923076923, 0.0, 8.487364130434782, 11.439807692307692, 7.409755434782609, 6.212148136246785, 2.8812507891414136, 0.0), # 20
(5.552167164179106, 11.579569005681815, 9.336398497750643, 4.95003179347826, 2.8678024038461536, 0.0, 8.482737839673913, 11.471209615384614, 7.425047690217391, 6.224265665167096, 2.894892251420454, 0.0), # 21
(5.578310535557506, 11.630206464646465, 9.352802699228791, 4.95924347826087, 2.8748999999999993, 0.0, 8.47803043478261, 11.499599999999997, 7.438865217391305, 6.235201799485861, 2.907551616161616, 0.0), # 22
(5.603153852063214, 11.67679983270202, 9.367401550449872, 4.967450815217391, 2.8812274038461534, 0.0, 8.473242323369567, 11.524909615384614, 7.451176222826087, 6.244934366966581, 2.919199958175505, 0.0), # 23
(5.62665954784899, 11.719233409090908, 9.380161793059125, 4.974632608695652, 2.8867673076923075, 0.0, 8.468373913043479, 11.54706923076923, 7.461948913043478, 6.25344119537275, 2.929808352272727, 0.0), # 24
(5.648790057067603, 11.757391493055556, 9.391050168701799, 4.980767663043478, 2.8915024038461534, 0.0, 8.463425611413044, 11.566009615384614, 7.471151494565217, 6.260700112467866, 2.939347873263889, 0.0), # 25
(5.669507813871817, 11.79115838383838, 9.400033419023135, 4.985834782608695, 2.8954153846153843, 0.0, 8.458397826086957, 11.581661538461537, 7.478752173913043, 6.266688946015424, 2.947789595959595, 0.0), # 26
(5.688775252414398, 11.820418380681815, 9.40707828566838, 4.989812771739131, 2.8984889423076923, 0.0, 8.453290964673915, 11.593955769230769, 7.484719157608696, 6.271385523778919, 2.9551045951704538, 0.0), # 27
(5.7065548068481124, 11.84505578282828, 9.412151510282778, 4.992680434782609, 2.9007057692307687, 0.0, 8.448105434782608, 11.602823076923075, 7.489020652173913, 6.274767673521851, 2.96126394570707, 0.0), # 28
(5.722808911325724, 11.864954889520202, 9.415219834511568, 4.994416576086956, 2.902048557692307, 0.0, 8.44284164402174, 11.608194230769229, 7.491624864130435, 6.276813223007712, 2.9662387223800506, 0.0), # 29
(5.7375, 11.879999999999999, 9.41625, 4.995, 2.9025, 0.0, 8.4375, 11.61, 7.4925, 6.277499999999999, 2.9699999999999998, 0.0), # 30
(5.751246651214834, 11.892497471590906, 9.415477744565216, 4.994894632352941, 2.9023357180851064, 0.0, 8.430077267616193, 11.609342872340426, 7.492341948529411, 6.276985163043476, 2.9731243678977264, 0.0), # 31
(5.7646965153452685, 11.904829772727274, 9.413182826086956, 4.994580588235293, 2.901846382978723, 0.0, 8.418644565217393, 11.607385531914892, 7.49187088235294, 6.275455217391303, 2.9762074431818184, 0.0), # 32
(5.777855634590792, 11.916995369318181, 9.40939801630435, 4.994060955882353, 2.9010372606382977, 0.0, 8.403313830584706, 11.60414904255319, 7.491091433823529, 6.272932010869566, 2.9792488423295453, 0.0), # 33
(5.790730051150895, 11.928992727272727, 9.40415608695652, 4.993338823529412, 2.899913617021276, 0.0, 8.38419700149925, 11.599654468085104, 7.490008235294118, 6.269437391304347, 2.9822481818181816, 0.0), # 34
(5.803325807225064, 11.940820312499996, 9.39748980978261, 4.9924172794117645, 2.898480718085106, 0.0, 8.361406015742128, 11.593922872340425, 7.488625919117647, 6.264993206521739, 2.985205078124999, 0.0), # 35
(5.815648945012788, 11.952476590909091, 9.389431956521738, 4.9912994117647065, 2.896743829787234, 0.0, 8.335052811094453, 11.586975319148936, 7.486949117647059, 6.259621304347825, 2.988119147727273, 0.0), # 36
(5.8277055067135555, 11.96396002840909, 9.380015298913044, 4.989988308823529, 2.8947082180851056, 0.0, 8.305249325337332, 11.578832872340422, 7.484982463235293, 6.253343532608695, 2.9909900071022726, 0.0), # 37
(5.839501534526853, 11.97526909090909, 9.369272608695653, 4.988487058823529, 2.89237914893617, 0.0, 8.272107496251873, 11.56951659574468, 7.4827305882352935, 6.246181739130434, 2.9938172727272727, 0.0), # 38
(5.851043070652174, 11.986402244318182, 9.357236657608695, 4.98679875, 2.8897618882978717, 0.0, 8.23573926161919, 11.559047553191487, 7.480198125, 6.23815777173913, 2.9966005610795454, 0.0), # 39
(5.862336157289003, 11.997357954545455, 9.343940217391305, 4.984926470588235, 2.886861702127659, 0.0, 8.196256559220389, 11.547446808510635, 7.477389705882353, 6.22929347826087, 2.999339488636364, 0.0), # 40
(5.873386836636828, 12.008134687499997, 9.329416059782607, 4.982873308823529, 2.8836838563829783, 0.0, 8.153771326836583, 11.534735425531913, 7.474309963235294, 6.219610706521738, 3.002033671874999, 0.0), # 41
(5.88420115089514, 12.01873090909091, 9.31369695652174, 4.980642352941176, 2.880233617021277, 0.0, 8.108395502248875, 11.520934468085107, 7.4709635294117644, 6.209131304347826, 3.0046827272727277, 0.0), # 42
(5.894785142263428, 12.02914508522727, 9.296815679347825, 4.978236691176471, 2.8765162499999994, 0.0, 8.060241023238381, 11.506064999999998, 7.467355036764706, 6.1978771195652165, 3.0072862713068176, 0.0), # 43
(5.905144852941176, 12.03937568181818, 9.278805, 4.975659411764705, 2.8725370212765955, 0.0, 8.009419827586207, 11.490148085106382, 7.4634891176470575, 6.1858699999999995, 3.009843920454545, 0.0), # 44
(5.915286325127877, 12.049421164772726, 9.259697690217394, 4.972913602941176, 2.8683011968085106, 0.0, 7.956043853073464, 11.473204787234042, 7.459370404411764, 6.1731317934782615, 3.0123552911931815, 0.0), # 45
(5.925215601023019, 12.059280000000001, 9.239526521739132, 4.970002352941176, 2.8638140425531913, 0.0, 7.90022503748126, 11.455256170212765, 7.455003529411765, 6.159684347826087, 3.0148200000000003, 0.0), # 46
(5.934938722826087, 12.06895065340909, 9.218324266304347, 4.966928749999999, 2.859080824468085, 0.0, 7.842075318590705, 11.43632329787234, 7.450393124999999, 6.145549510869564, 3.0172376633522724, 0.0), # 47
(5.944461732736574, 12.07843159090909, 9.196123695652174, 4.9636958823529405, 2.854106808510638, 0.0, 7.7817066341829095, 11.416427234042551, 7.445543823529412, 6.130749130434782, 3.0196078977272727, 0.0), # 48
(5.953790672953963, 12.087721278409088, 9.17295758152174, 4.960306838235294, 2.8488972606382976, 0.0, 7.71923092203898, 11.39558904255319, 7.4404602573529415, 6.115305054347826, 3.021930319602272, 0.0), # 49
(5.96293158567775, 12.096818181818177, 9.148858695652175, 4.956764705882353, 2.8434574468085105, 0.0, 7.65476011994003, 11.373829787234042, 7.43514705882353, 6.099239130434783, 3.0242045454545443, 0.0), # 50
(5.971890513107417, 12.105720767045453, 9.123859809782608, 4.953072573529411, 2.837792632978723, 0.0, 7.588406165667167, 11.351170531914892, 7.429608860294118, 6.082573206521738, 3.026430191761363, 0.0), # 51
(5.980673497442456, 12.114427499999998, 9.097993695652173, 4.949233529411764, 2.8319080851063827, 0.0, 7.5202809970015, 11.32763234042553, 7.4238502941176465, 6.065329130434781, 3.0286068749999995, 0.0), # 52
(5.989286580882353, 12.122936846590909, 9.071293125, 4.945250661764706, 2.8258090691489364, 0.0, 7.450496551724138, 11.303236276595745, 7.417875992647058, 6.04752875, 3.030734211647727, 0.0), # 53
(5.9977358056266, 12.13124727272727, 9.043790869565216, 4.941127058823529, 2.8195008510638297, 0.0, 7.379164767616192, 11.278003404255319, 7.411690588235294, 6.0291939130434775, 3.0328118181818176, 0.0), # 54
(6.00602721387468, 12.139357244318182, 9.015519701086955, 4.93686580882353, 2.8129886968085103, 0.0, 7.306397582458771, 11.251954787234041, 7.405298713235295, 6.010346467391304, 3.0348393110795455, 0.0), # 55
(6.014166847826087, 12.147265227272724, 8.986512391304348, 4.9324699999999995, 2.8062778723404254, 0.0, 7.232306934032984, 11.225111489361701, 7.398705, 5.991008260869565, 3.036816306818181, 0.0), # 56
(6.022160749680308, 12.154969687500001, 8.95680171195652, 4.927942720588234, 2.7993736436170207, 0.0, 7.15700476011994, 11.197494574468083, 7.391914080882352, 5.9712011413043475, 3.0387424218750003, 0.0), # 57
(6.030014961636829, 12.16246909090909, 8.926420434782608, 4.923287058823529, 2.792281276595744, 0.0, 7.0806029985007495, 11.169125106382976, 7.384930588235295, 5.950946956521738, 3.0406172727272724, 0.0), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_allighting_rate = (
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 0
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 1
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 2
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 3
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 4
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 5
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 6
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 7
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 8
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 9
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 10
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 11
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 12
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 13
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 14
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 15
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 16
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 17
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 18
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 19
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 20
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 21
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 22
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 23
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 24
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 25
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 26
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 27
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 28
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 29
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 30
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 31
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 32
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 33
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 34
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 35
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 36
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 37
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 38
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 39
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 40
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 41
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 42
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 43
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 44
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 45
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 46
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 47
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 48
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 49
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 50
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 51
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 52
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 53
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 54
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 55
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 56
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 57
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 58
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 59
)
"""
parameters for reproducibility. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
#initial entropy
entropy = 258194110137029475889902652135037600173
#index for seed sequence child
child_seed_index = (
1, # 0
66, # 1
)
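# Hedged sketch (not part of the original row): how the entropy value and the
# child seed indices above would typically be used with numpy's SeedSequence to
# rebuild the per-child random generators.
import numpy as np

root_seed = np.random.SeedSequence(entropy)
children = root_seed.spawn(max(child_seed_index) + 1)
rngs = [np.random.default_rng(children[i]) for i in child_seed_index]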
| [... input_ids truncated ...] | 2.096154 | 18,148 |
from __future__ import unicode_literals
from prompt_toolkit.contrib.regular_languages.compiler import compile
from .commands import get_commands_taking_locations
#: The compiled grammar for the Vim command line.
COMMAND_GRAMMAR = compile(r"""
# Allow leading colons and whitespace. (They are ignored.)
:*
\s*
(
# Substitute command
((?P<range_start>\d+)(,(?P<range_end>\d+))?)? (?P<command>s|substitute) \s* / (?P<search>[^/]*) ( / (?P<replace>[^/]*) (?P<flags> /(g)? )? )? |
# Commands accepting a location.
(?P<command>%(commands_taking_locations)s)(?P<force>!?) \s+ (?P<location>[^\s]+) |
# Commands accepting a buffer.
(?P<command>b|buffer)(?P<force>!?) \s+ (?P<buffer_name>[^\s]+) |
# Jump to line numbers.
(?P<go_to_line>\d+) |
# Set operation
(?P<command>set) \s+ (?P<set_option>[^\s=]+)
(=(?P<set_value>[^\s]+))? |
# Colorscheme command
(?P<command>colorscheme) \s+ (?P<colorscheme>[^\s]+) |
# Shell command
!(?P<shell_command>.*) |
# Any other normal command.
(?P<command>[^\s!]+)(?P<force>!?) |
# Accept the empty input as well. (Ignores everything.)
#(?P<command>colorscheme.+) (?P<colorscheme>[^\s]+) |
)
# Allow trailing space.
\s*
""" % {
'commands_taking_locations': '|'.join(get_commands_taking_locations()),
})
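# Hedged usage sketch (not in the original file): matching a command line against
# the grammar above and reading out the captured variables.
match = COMMAND_GRAMMAR.match(':set number')
if match:
    variables = match.variables()
    print(variables.get('command'), variables.get('set_option'))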
| [... input_ids truncated ...] | 1.946716 | 807 |
# -*- coding: utf-8 -*-
# Wiggle Sort I
# If i is odd, nums[i] >= nums[i - 1]
# If i is even, nums[i] <= nums[i - 1]
# Whenever the condition above is violated, swapping the two neighbours fixes it
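# Hedged sketch (not in the original snippet): a minimal Solution class so the
# call below has something to run against; wiggleSort() swaps neighbours whenever
# the condition stated above is violated, and returns nums so it can be printed.
class Solution(object):
    def wiggleSort(self, nums):
        for i in range(1, len(nums)):
            if (i % 2 == 1 and nums[i] < nums[i - 1]) or \
               (i % 2 == 0 and nums[i] > nums[i - 1]):
                nums[i], nums[i - 1] = nums[i - 1], nums[i]
        return nums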
test = Solution()
print(test.wiggleSort([1, 5, 1, 1, 6, 4])) | [... input_ids truncated ...] | 1.335821 | 134 |
"""
:testcase_name module_call_2
:author Sriteja Kummita
:script_type Module
:description This program contains two functions 'add' and 'multiply'. There is a call to function 'add' from
the function 'multiply'.
"""
if __name__ == '__main__':
multiply(4, 4)
| [... input_ids truncated ...] | 3.022727 | 88 |
"""COMMAND : .cname"""
import asyncio
import time
from telethon.errors import FloodWaitError
from telethon.tl import functions
from userbot import ALIVE_NAME
from userbot.utils import lightning_cmd
DEL_TIME_OUT = 60
DEFAULTUSER = (
str(ALIVE_NAME) if ALIVE_NAME else "Set ALIVE_NAME in config vars in Heroku"
)
@borg.on(lightning_cmd(pattern="cname")) # pylint:disable=E0602
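# Hedged sketch (not in the original snippet): a minimal handler body for the
# decorator above; the exact behaviour of the real .cname plugin is an assumption,
# only the Telethon calls themselves are standard.
async def _(event):
    try:
        await borg(functions.account.UpdateProfileRequest(last_name=DEFAULTUSER))
        await event.edit("Profile name updated.")
        await asyncio.sleep(DEL_TIME_OUT)
        await event.delete()
    except FloodWaitError as e:
        await asyncio.sleep(e.seconds)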
| [... input_ids truncated ...] | 2.824818 | 137 |
import unittest
from unittest.mock import Mock, patch
import requests
from src.fuzzingtool.conn.requesters.requester import Requester
from src.fuzzingtool.objects.fuzz_word import FuzzWord
from src.fuzzingtool.utils.consts import (FUZZING_MARK, UNKNOWN_FUZZING, HTTP_METHOD_FUZZING,
PATH_FUZZING, DATA_FUZZING)
from src.fuzzingtool.exceptions.request_exceptions import RequestException
from src.fuzzingtool.utils.http_utils import get_host
from ...mock_utils.response_mock import ResponseMock
| [... input_ids truncated ...] | 2.637255 | 204 |
# -*- coding: utf-8 -*-
from model.group import Group
#def test_add_empty_group(app):
#app.session.login(username="admin",password="secret")
#app.open_home_page_group()
#old_groups = app.group.get_group_list()
#group = Group(name="", header="", footer="")
#app.group.init_creation()
#app.group.fill_form(group)
#app.group.submit_creation()
#app.group.returt_to_groups_page()
#new_groups = app.group.get_group_list()
#assert len(old_groups) + 1 == len(new_groups)
#old_groups.append(group)
#assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key =Group.id_or_max)
| [... input_ids truncated ...] | 2.218241 | 307 |
import os
from sklearn.preprocessing import LabelEncoder
from tensorflow_core.python.keras.layers import RepeatVector, BatchNormalization, Dropout
from tensorflow_core.python.keras.utils import to_categorical
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto(allow_soft_placement=False)
config.gpu_options.per_process_gpu_memory_fraction = 0.9
#config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
from random import random
from random import randint
from numpy import array
from numpy import zeros
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import TimeDistributed
from math import sin, cos, log10, sqrt, ceil,pow
from math import pi
from math import exp
from random import random
from random import randint
from random import uniform
from numpy import array
from matplotlib import pyplot
from numpy import zeros
from random import randint
from random import random
from matplotlib import pyplot
import numpy as np
tag= {0:'NailWashLeft',1:'NailWashRight',2:'ThumbFingureWash',3:'ForeFingureWash'}
inv_tag = {v: k for k, v in tag.items()}
# generate damped sine wave in [0,1]
# generate input and output pairs of damped sine waves
# X, y = generate_examples(20, 5, 5)
# for i in range(len(X)):
# pyplot.plot([x for x in X[i, :, 0]] + [x for x in y[i]],'-o')
# pyplot.show()
###########################################################################################
# generate the next frame in the sequence
# generate a sequence of frames of a dot moving across an image
# generate sequence of frames
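# Hedged sketch (not in the original script): build_frames() is called below but
# not defined in this excerpt. A minimal stand-in (assumption) that returns a flat
# list of 4 * size frames of a dot sweeping across a size x size image, one
# sub-sequence per gesture class, plus the sweep-direction flag.
def build_frames(size):
    right = 1 if random() < 0.5 else 0
    frames = []
    for seq in range(4):                    # one sub-sequence per class
        row = randint(0, size - 1)
        for i in range(size):
            frame = zeros((size, size))
            col = i if right else size - 1 - i
            frame[row, col] = 1             # the moving dot
            frames.append(frame)
    return frames, right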
size = 30
frames, right = build_frames(size)
# plot all frames
'''
f=pyplot.figure(figsize=(5,5))
for seq in range(4):
for i in range((size ) ):
# create a grayscale subplot for each frame
ax=f.add_subplot(1, (size +1) * 4 , (size +1) * seq +i +1)
ax.imshow(frames[(size ) * seq +i], cmap='Greys')
# turn off the scale to make it cleaner
#ax = pyplot.gca()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# show the plot
pyplot.show()
pyplot.savefig('fig.png')
'''
f, ax = pyplot.subplots(2, (size +1) * 4 ,figsize=((size +1) * 4 , 20), sharey=True)
# make a little extra space between the subplots
f.subplots_adjust(hspace=0.5)
#ax[0, 0].set_title("Image A", fontsize=15)
for i in range((size +1) * 4):
ax[1, i].set_axis_off()
for row in range(0, 1):
for seq in range(4):
for i in range((size)):
ax[row, (size +1) * seq +i ].imshow(frames[(size) * seq + i], cmap='Greys')
ax[row, (size +1) * seq +i ].set_axis_off()
ax[row, (size+1) * seq +i+1].set_axis_off()
#pyplot.show()
#pyplot.savefig('fig.png')
# generate multiple sequences of frames and reshape for network input
# configure problem
size = 50
# define the model
'''model = Sequential()
model.add(TimeDistributed(Conv2D(2, (2, 2), activation='relu'), input_shape=(None, size, size, 1)))
model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
model.add(TimeDistributed(Flatten()))
model.add(LSTM(50))
model.add(Dense(size*4, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])'''
# define LSTM
model = Sequential()
model.add(TimeDistributed(Conv2D(16, (2, 2), activation='relu'), input_shape=(None, size, size, 1)))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
model.add(Dropout(0.25))
model.add(TimeDistributed(Flatten()))
model.add(LSTM(75))
#model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(RepeatVector(4))
model.add(LSTM(50, return_sequences=True))
#model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(TimeDistributed(Dense(4, activation= 'softmax' )))
model.compile(loss= 'categorical_crossentropy' , optimizer= 'adam' , metrics=[ 'accuracy' ])
print(model.summary())
from tensorflow.keras.utils import plot_model
plot_model(model, show_shapes = True, to_file='modelWH.png')
from ilab.utils import plot_segm_history
from tensorflow.keras.callbacks import ModelCheckpoint
import os.path
from os import path
if path.exists('lstm_model_vsalad33.h5'):
os.remove("lstm_model_vsalad33.h5")
model_filename = 'lstm_model_vsalad33.h5'
callback_checkpoint = ModelCheckpoint(
model_filename,
verbose=1,
monitor='val_loss',
save_best_only=True,
)
# fit model
for i in range(100):
print('begin gen')
X, y = generate_examples(size, 2000)
print('begin fit{}/{}'.format(i,10))
if path.exists('lstm_model_vsalad33.h5'):
model.load_weights('lstm_model_vsalad33.h5')
history = model.fit(X, y, batch_size=32, epochs=25,validation_split=0.25,shuffle=False,callbacks=[callback_checkpoint])
plot_segm_history(history, metrics=['loss', 'val_loss'], fileName1='loss33.png', fileName2='acc33.png')
# evaluate model
X, y = generate_examples(size, 500)
loss, acc = model.evaluate(X, y, verbose=0)
print('loss: %f, acc: %f' % (loss, acc * 100))
for i in range(10):
# prediction on new data
X, Y = generate_examples(size, 1)
yhat = model.predict_classes(X, verbose=0)
expected = [np.argmax(y, axis=1, out=None) for y in Y]
predicted = yhat
print('Expected: %s, Predicted: %s ' % (expected, predicted))
| [... input_ids truncated ...] | 2.644465 | 2,132 |
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import sys
__version__ = "0.3b2"
VERSION = __version__.split(".")
| [... input_ids truncated ...] | 3.185185 | 54 |
from numpy.polynomial.polynomial import Polynomial
from functools import reduce
| [6738, 299, 32152, 13, 35428, 26601, 498, 13, 35428, 26601, 498, 1330, 12280, 26601, 498, 198, 6738, 1257, 310, 10141, 1330, 4646, 628] | 3.521739 | 23 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: PortEnumsProto.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='PortEnumsProto.proto',
package='net.device',
syntax='proto3',
serialized_pb=_b('\n\x14PortEnumsProto.proto\x12\nnet.device*b\n\rPortTypeProto\x12\n\n\x06\x43OPPER\x10\x00\x12\t\n\x05\x46IBER\x10\x01\x12\n\n\x06PACKET\x10\x02\x12\n\n\x06ODUCLT\x10\x03\x12\x07\n\x03OCH\x10\x04\x12\x07\n\x03OMS\x10\x05\x12\x10\n\x0cVIRTUAL_PORT\x10\x06\x42(\n&org.onosproject.grpc.net.device.modelsb\x06proto3')
)
_PORTTYPEPROTO = _descriptor.EnumDescriptor(
name='PortTypeProto',
full_name='net.device.PortTypeProto',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='COPPER', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FIBER', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PACKET', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ODUCLT', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OCH', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OMS', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VIRTUAL_PORT', index=6, number=6,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=36,
serialized_end=134,
)
_sym_db.RegisterEnumDescriptor(_PORTTYPEPROTO)
PortTypeProto = enum_type_wrapper.EnumTypeWrapper(_PORTTYPEPROTO)
COPPER = 0
FIBER = 1
PACKET = 2
ODUCLT = 3
OCH = 4
OMS = 5
VIRTUAL_PORT = 6
DESCRIPTOR.enum_types_by_name['PortTypeProto'] = _PORTTYPEPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n&org.onosproject.grpc.net.device.models'))
# @@protoc_insertion_point(module_scope)
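# Hedged usage sketch (not part of the generated file): the wrapper above exposes
# the usual protobuf helpers for converting between enum names and numbers.
print(PortTypeProto.Name(COPPER))    # -> 'COPPER'
print(PortTypeProto.Value('FIBER'))  # -> 1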
| [... input_ids truncated ...] | 2.281665 | 1,129 |
from .SocketCore import Socket
from .Packet import Packet
from .Packet import ACK, SYN, FIN, NUL, BEG, END
class RDTPSocket(Socket):
"""This class implements the Stop and Wait kind of protocol over the
sockets by overriding the send_stream and inbound_stream methods.
"""
# implement packet division, transfer, in-order and reliability
#################### Sender Methods ####################
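    # Hedged sketch (not in the original class): a minimal Stop-and-Wait sender.
    # The Packet constructor and the low-level helpers used here (send_packet,
    # receive_packet) are hypothetical names, since the real Socket/Packet APIs
    # are not shown in this excerpt.
    def send_stream(self, data, chunk_size=1024):
        seq = 0
        for start in range(0, len(data), chunk_size):
            packet = Packet(seq, 0, data[start:start + chunk_size])  # hypothetical constructor
            while True:
                self.send_packet(packet)                  # hypothetical helper
                reply = self.receive_packet(timeout=1)    # hypothetical helper, None on timeout
                if reply is not None and reply.flags & ACK and reply.seq == seq:
                    break                                 # ACKed: move on to the next chunk
            seq ^= 1                                      # alternating-bit sequence number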
################### Receiver Methods ################### | [... input_ids truncated ...] | 3.62406 | 133 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import os
from libmozdata import utils as lmdutils
from . import mail, utils
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Manage logs")
parser.add_argument(
"-c", "--clean", dest="clean", action="store_true", help="Remove the log files"
)
parser.add_argument(
"-s",
"--send",
dest="send",
action="store_true",
help="Send the log if not empty",
)
args = parser.parse_args()
if args.clean:
clean()
if args.send:
send()
| [... input_ids truncated ...] | 2.529801 | 302 |
# Copyright 2014 Hewlett-Packard
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
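# Hedged sketch (not in the original file): the decorated class is cut off in this
# excerpt; a minimal placeholder showing the six + abc abstract-base-class pattern.
class ExampleDriverBase(object):
    """Illustrative only; the real class name and abstract methods differ."""

    @abc.abstractmethod
    def do_something(self):
        """Subclasses must implement this."""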
| [... input_ids truncated ...] | 3.680233 | 172 |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 01 22:30:59 2018
@author: donny
"""
aStr = "Hello, World!"
bStr = aStr[:7] + "Python!"
count = 0
for ch in bStr[:]:
if ch in ',.!?':
count += 1
print('There are {0:d} punctuation marks.'.format(count)) | [... input_ids truncated ...] | 2.278261 | 115 |
import argparse
import os
import glob
import yaml
import itertools
import datetime
from collections import OrderedDict
import shutil
from joblib import Parallel, delayed
import subprocess
import numpy as np # used by config
from tqdm import tqdm
def set_config_value_unknown(config_dict, find_key, new_value, unknown):
"""for a given dictionary, it will recursively search for the key.
if a key is found it will assign the new value
if the key is not found, it will assign the key-value pair to the
unknown key in the dictionary
"""
does_item_exist_already = _finditem(config_dict, find_key)
if does_item_exist_already is None:
# if the parameter does not exist, add it to the unknown key
config_dict[unknown][find_key] = new_value
return(config_dict)
else:
# if it does exist, update the key
new_config = _set_config_value(config_dict, find_key, new_value)
return(new_config)
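

# _finditem and _set_config_value are not shown in this file; the versions below are
# minimal recursive implementations matching how set_config_value_unknown uses them
# (assumed behaviour: search a nested dict for a key and return/overwrite its value).
def _finditem(obj, key):
    """Return the value stored under key anywhere in a nested dict, else None."""
    if key in obj:
        return obj[key]
    for value in obj.values():
        if isinstance(value, dict):
            found = _finditem(value, key)
            if found is not None:
                return found
    return None


def _set_config_value(obj, key, new_value):
    """Overwrite key wherever it occurs in a nested dict and return the dict."""
    if key in obj:
        obj[key] = new_value
    else:
        for value in obj.values():
            if isinstance(value, dict):
                _set_config_value(value, key, new_value)
    return obj
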
HERE = os.path.abspath(os.path.dirname(__file__))
now = datetime.datetime.now()
current_time = now.strftime("%Y-%m-%d_%H:%M:%S")
parser = argparse.ArgumentParser(description="Run single MANN2 simulation.")
parser.add_argument('base_sim_folder')
args = parser.parse_args()
base_sim_folder = args.base_sim_folder
base_sim_output = os.path.join('..', '..', '..', 'mann2_output')
config_file = glob.glob('{}/config*.yaml'.format(base_sim_folder))[0]
config_file_basename = os.path.basename(config_file)
sim_script = glob.glob('{}/run_model_*.py'.format(base_sim_folder))[0]
sim_script_basename = os.path.basename(sim_script)
with open(config_file, 'r') as config_yaml:
    config = yaml.safe_load(config_yaml)
batch_config = config['batch_sim']
batch_config_eval = {x: eval(batch_config[x]) for x in batch_config}
batch_config_eval = OrderedDict(batch_config_eval)
# runs_str_fmt = 'r{{:0>{}d}}'.format(len(str(runs - 1)))
batch_sweeps = (itertools.product(*batch_config_eval.values()))
batch_sweep_keys = batch_config_eval.keys()
run_counter = 0
dest_base_path = os.path.join('{}'.format(base_sim_output),
'{}_batch_{}'.format(current_time,
base_sim_folder))
# setup folders in output directory
for bs in tqdm(batch_sweeps):
new_folder_name = ''
for bs_i, bs_key in enumerate(batch_sweep_keys):
update_value = bs[bs_i]
update_key = bs_key[:-1]
set_config_value_unknown(
config, update_key, update_value, 'sim_generated_configs')
new_folder_name = new_folder_name + \
str(update_key[0:1]) + str(update_value) + '_'
config['sim_generated_configs']['run_number'] = run_counter
new_folder_name = new_folder_name[0:-1]
src = base_sim_folder
dest = os.path.join(dest_base_path, new_folder_name)
shutil.copytree(src, dest)
new_config_path = os.path.join('{}'.format(dest), config_file_basename)
with open(new_config_path, 'w') as f:
f.write(yaml.dump(config, default_flow_style=False))
run_counter += 1
# find the simulation folders of interest and run the simulation
sim_folders = glob.glob('{}/*'.format(dest_base_path))
# run the simulations
# for sim in tqdm(sim_folders):
# run_simulation(sim)
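# run_simulation is not defined in this file; an assumed placeholder that simply runs
# the per-folder copy of the simulation script (the real implementation may differ).
def run_simulation(sim_folder):
    """Execute the copied run_model_*.py script inside its simulation folder."""
    script = glob.glob('{}/run_model_*.py'.format(sim_folder))[0]
    subprocess.check_call(['python', os.path.basename(script)], cwd=sim_folder)
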
Parallel(n_jobs=-2)(delayed(run_simulation)(sim) for sim in sim_folders)
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
15095,
198,
11748,
331,
43695,
198,
11748,
340,
861,
10141,
198,
11748,
4818,
8079,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
4423,
346,
198,
6738,
1693,
8019,
1330,
42945... | 2.472552 | 1,348 |
# -*- coding: utf-8 -*-
"""
[EN] --- MOIRE CALIPER
[FR] --- PIED A COULISSE A MOIRE
-----------------------------------------------------------------------------
Author: Mojoptix
Website: www.mojoptix.com
Email: julldozer@mojoptix.com
Date: 02 march 2018
License: MIT License
Copyright (c) 2018 Mojoptix
-----------------------------------------------------------------------------
[EN] This script can be used to generate Moire images to build a Moire Caliper.
An episode of Mojoptix describes the Moire Caliper in details:
http://www.mojoptix.com/?p=178
[FR] Ce script permet de generer des images Moires pour construire un Pied a
Coulisse a Moire.
Un episode de MakerLambda (la version francophone de Mojoptix) decrit le
Pied a Coulisse a Moire en details:
http://www.mojoptix.com/?p=182
"""
import numpy as np
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import matplotlib.pyplot as plt
# parameters
img_dpi = 2400
my_font_filename = "Carlito-Bold.ttf" # this is an open source font from Google that is equivalent to Calibri: https://www.ecsoft2.org/carlito-fonts
#my_font_filename = "calibrib.ttf" # uncomment this line if you'd prefer to use the Windows font
# create triangular moire image
def create_triangular_moire_img(img_size_mm=[120.0,20.0], pitch_mm=2.0, line_thickness_mm1=1.9, line_thickness_mm2=1.0, offset_mm=0.0):
'''Create a moire pattern made of black vertical lines with a line thickness that is changing from bottom to top.
Input:
img_size_mm: [mm] size of the resulting image,
pitch_mm: [mm] distance between the center of the lines,
line_thickness_mm1: [mm] line thickness at the bottom of the image,
line_thickness_mm2: [mm] line thickness at the top of the image,
offset_mm: [mm] translate the moire pattern by this amount.
return:
numpy array for the moire image.
'''
img_size_pixel = [int(img_size_mm[1]*img_dpi/25.4), int(img_size_mm[0]*img_dpi/25.4)] # swap x and y so that Ox is "horizontal" and Oy is "vertical"
img = 255*np.ones(img_size_pixel) # set the image to all transparent/white
line_thickness_mm = np.linspace(line_thickness_mm1, line_thickness_mm2, img.shape[0])
for xx in np.arange(img.shape[0]):
line_th = np.max([0.0, line_thickness_mm[xx]])
line_th = np.min([pitch_mm, line_thickness_mm[xx]])
Tyy = np.arange(img.shape[1])*25.4/img_dpi # pixel y-coordinates in [mm] across the image
tmp = np.mod( Tyy-offset_mm , pitch_mm) # pixel y-coordinates in [mm] across each line pair (eg: {dark line + transparent line})
tmp_min = 0.5*pitch_mm-0.5*line_th # pixel y-coordinates in [mm] of the left side of the dark line (inside a line pair)
tmp_max = 0.5*pitch_mm+0.5*line_th # pixel y-coordinates in [mm] of the right side of the dark line (inside a line pair)
tmp2 = np.logical_and( tmp>tmp_min, tmp<tmp_max ) # find all the pixels in between
img[xx,np.where(tmp2)] = 0 # and turn them to black
return img
# create moire scale
def create_moire_scale_img(img_size_mm=[60.0,5.0], scale_start_mm=5.0, scale_end_mm=55.0, font_size=0.15*img_dpi, tick_thickness_mm=0.1, text_height_mm=0.0):
    '''Create the scale for a moire (101 tick marks, labelled 0 to 10).
Input:
img_size_mm: [mm] size of the image,
scale_start_mm: [mm] distance from the left side of the image to the 1st tick,
scale_end_mm: [mm] distance from the right side of the image to the last tick,
font_size: [pixels] size of the font,
tick_thickness_mm: [mm] width of the tick marks,
    Output:
numpy array for the image.
'''
img_size_pixel = [int(img_size_mm[1]*img_dpi/25.4), int(img_size_mm[0]*img_dpi/25.4)] # swap x and y so that Ox is "horizontal" and Oy is "vertical"
img = 255*np.ones(img_size_pixel)
scale_start_pixel = int(scale_start_mm*img_dpi/25.4)
scale_end_pixel = int(scale_end_mm*img_dpi/25.4)
tick_center_pixel = np.linspace(scale_start_pixel, scale_end_pixel, 101).astype(int)
# ADD THE TEXT
text_height_pixel = int(text_height_mm*img_dpi/25.4)
font_filename = my_font_filename
font_size=int(font_size)
## Create a temporary PIL image to work on the text
im = Image.fromarray(img.astype(np.uint8), mode="L") # array filled with 255
font = ImageFont.truetype(font_filename, font_size)
draw = ImageDraw.Draw(im)
## write the numbers
    for num in range(11):
        label = str(num)
        draw.text((tick_center_pixel[10*num]-font.getsize(label)[0]/2, int(0.25*img.shape[0]-text_height_pixel)), label, 0, font=font)
## apply the text to the numpy array
img[np.where(np.asarray(im)==0)] = 0
# ADD THE TICK MARKS
tick_height_pixel = 0.15*img.shape[0]*np.ones(101) # the mini ticks
tick_height_pixel[5*np.arange(21)] = 0.2*img.shape[0] # every 5 ticks
tick_height_pixel[10*np.arange(11)] = 0.25*img.shape[0] # every 10 ticks
tick_thickness_pixel = tick_thickness_mm*img_dpi/25.4 *np.ones_like(tick_height_pixel) # the mini ticks
tick_thickness_pixel[5*np.arange(21)] = 1.5* tick_thickness_mm*img_dpi/25.4 # every 5 ticks
for ii in np.arange(len(tick_center_pixel)):
img[0:int(tick_height_pixel[ii]),
int(tick_center_pixel[ii]-0.5*tick_thickness_pixel[ii]):int(tick_center_pixel[ii]+0.5*tick_thickness_pixel[ii]+1)] = 0
return img
# create ruler index image
def create_ruler_index_img(img_size_mm=[60.0,2.0], index_xx_mm=5.0, index_depth_mm=2.0, triangle_tip_angle_degrees=45):
'''Create a triangular index for the scale ruler.
Input:
img_size_mm: [mm] size of the image,
index_xx_mm: [mm] position of the tip of the index.
index_depth_mm: [mm] yy dimension of the triangular shape.
triangle_tip_angle_degrees: [degrees] angle of the tip of the triangular shape.
Output:
the numpy array of the resulting image.
'''
img_size_pixel = [int(img_size_mm[1]*img_dpi/25.4), int(img_size_mm[0]*img_dpi/25.4)] # swap x and y so that Ox is "horizontal" and Oy is "vertical"
img = 255*np.ones(img_size_pixel)
index_xx_pixel = int(index_xx_mm*img_dpi/25.4)
yy_flat_surface_triangle_pixel = int((img_size_mm[1]-index_depth_mm)*img_dpi/25.4)
Tyy = np.arange(img.shape[0]-1, yy_flat_surface_triangle_pixel, -1)
for ii in np.arange(len(Tyy)):
triangle_half_width = int( ii*np.tan(np.radians(triangle_tip_angle_degrees/2.0)) )
img[ Tyy[ii], index_xx_pixel-triangle_half_width:index_xx_pixel+triangle_half_width+1 ] = 0
return img
# create ruler image
def create_ruler_img(img_size_mm=[200.0,10.0], scale_start_mm=10.0, scale_end_mm=190.00, font_size=0.25*img_dpi, tick_thickness_mm=0.3, text_offset_mm=0.0, number_spacing_mm=10.0, tick_marks_tip_angle_degrees=45):
'''Create a ruler scale (10 ticks/number).
Input:
img_size_mm: [mm] size of the image,
scale_start_mm: [mm] distance from the left side of the image to the 1st tick,
scale_end_mm: [mm] distance from the right side of the image to the last tick,
font_size: [pixels] size of the font,
tick_thickness_mm: [mm] width of the tick marks,
text_offset_mm: [mm] add a vertical offset to the text,
number_spacing_mm: [mm] spacing between the numbers (eg: use 10.0 for cm, and 25.4 for inches)
tick_marks_tip_angle_degrees: [degrees] angle of the tip of the tick marks.
    Output:
numpy array for the image.
'''
img_size_pixel = [int(img_size_mm[1]*img_dpi/25.4), int(img_size_mm[0]*img_dpi/25.4)] # swap x and y so that Ox is "horizontal" and Oy is "vertical"
img = 255*np.ones(img_size_pixel)
nb_numbers = 1+int(np.floor((scale_end_mm-scale_start_mm)/number_spacing_mm))
tick_spacing_mm = number_spacing_mm/10.0
    nb_ticks = 1 + int(np.floor((scale_end_mm-scale_start_mm)/tick_spacing_mm))
scale_start_pixel = int(scale_start_mm*img_dpi/25.4)
scale_end_pixel = int(scale_end_mm*img_dpi/25.4)
tick_center_pixel = np.linspace(scale_start_pixel, scale_end_pixel, nb_ticks).astype(int)
# ADD THE TEXT
text_offset_pixel = int(text_offset_mm*img_dpi/25.4)
font_filename = my_font_filename
font_size=int(font_size)
## Create a temporary PIL image to work on the text
im = Image.fromarray(img.astype(np.uint8), mode="L") # array filled with 255
font = ImageFont.truetype(font_filename, font_size)
draw = ImageDraw.Draw(im)
## write the numbers
for ii in np.arange(nb_numbers):
draw.text((tick_center_pixel[10*ii]-font.getsize("%d"%ii)[0]/2, int(0.4*img.shape[0]-text_offset_pixel)),"%d"%ii,0,font=font)
## apply the text to the numpy array
img[np.where(np.asarray(im)==0)] = 0
# ADD THE TICK MARKS (with pointy heads)
tick_height_pixel = 0.2*img.shape[0]*np.ones_like(tick_center_pixel) # the mini ticks
tick_height_pixel[(5*np.arange(nb_ticks/5.0)).astype(int)] = 0.35*img.shape[0] # every 5 ticks
tick_height_pixel[(10*np.arange(nb_ticks/10.0)).astype(int)] = 0.45*img.shape[0] # every 10 ticks
tick_thickness_pixel = tick_thickness_mm*img_dpi/25.4 *np.ones_like(tick_height_pixel) # the mini ticks
tick_thickness_pixel[(5*np.arange(nb_ticks/5.0)).astype(int)] = 1.5* tick_thickness_mm*img_dpi/25.4 # every 5 ticks
for ii in np.arange(len(tick_center_pixel)):
for jj in np.arange(int(tick_height_pixel[ii])):
tick_width = min([ int( jj*np.tan(np.radians(tick_marks_tip_angle_degrees/2.0)) ), tick_thickness_pixel[ii]])
img[jj, int(tick_center_pixel[ii]-0.5*tick_width):int(tick_center_pixel[ii]+0.5*tick_width+1)] = 0
return img
# save image
def save_img(img, filename='test.png'):
'''Save an image to disk.
Input:
img: the numpy array for the image,
filename: the filename.
'''
im = Image.fromarray(img.astype(np.uint8), mode="L")
im.save(filename, dpi=(img_dpi, img_dpi))
# stitch images
def stitch_img(img1, img2):
'''Stitch 2 images, the 1st image on top of the 2nd.
Input:
img1: the numpy array for the image on top,
img2: the numpy array for the image below (must be of the same width).
Output:
the numpy array for the resulting image.
'''
img = np.zeros((img1.shape[0]+img2.shape[0], img1.shape[1]))
img[:img1.shape[0],:] = np.copy(img1)
img[img1.shape[0]:,:] = np.copy(img2)
return img
# create an image with some text
def create_text_img(img_size_mm=[60.0,10.0], ttext="Hello World!", font_size=0.1*img_dpi, text_xx_center_mm=30.0, text_yy_center_mm=5.0, text_graylevel=0):
    '''Create an image with some text on it.
Input:
img_size_mm: [mm] size of the image,
ttext: a string of text,
font_size: [pixels] size of the font,
text_xx_center_mm: [mm] the center of the text in the xx direction,
text_yy_center_mm: [mm] the center of the text in the yy direction,
text_graylevel: [0-255] graylevel of the text.
    Output:
numpy array for the image.
'''
img_size_pixel = [int(img_size_mm[1]*img_dpi/25.4), int(img_size_mm[0]*img_dpi/25.4)] # swap x and y so that Ox is "horizontal" and Oy is "vertical"
img = 255*np.ones(img_size_pixel)
# ADD THE TEXT
text_xx_center_pixel = int(text_xx_center_mm*img_dpi/25.4)
text_yy_center_pixel = int(text_yy_center_mm*img_dpi/25.4)
font_filename = my_font_filename
font_size=int(font_size)
## Create a temporary PIL image to work on the text
im = Image.fromarray(img.astype(np.uint8), mode="L") # array filled with 255
font = ImageFont.truetype(font_filename, font_size)
draw = ImageDraw.Draw(im)
## write the text
draw.text((text_xx_center_pixel-font.getsize(ttext)[0]/2, text_yy_center_pixel-font.getsize(ttext)[1]/2),ttext,0,font=font)
## apply the text to the numpy array
img[np.where(np.asarray(im)==0)] = text_graylevel
return img
# create alignement line
def create_alignement_line_img(img_size_mm=[200.0,1.5], line_thickness_mm=0.5, dash_length_mm=2.5, offset_mm=0.0, graylevel=0):
'''Create a image with a dashed line that can be used as an alignement target.
Input:
img_size_mm: [mm] size of the image,
line_thickness_mm: [mm] thickness of the line,
dash_length_mm: [mm] length of a dash,
offset_mm: [mm] offset of the dash,
graylevel:[0-255] gray level of the line.
    Output:
numpy array for the image.
'''
img_size_pixel = [int(img_size_mm[1]*img_dpi/25.4), int(img_size_mm[0]*img_dpi/25.4)] # swap x and y so that Ox is "horizontal" and Oy is "vertical"
img = 255*np.ones(img_size_pixel)
line_thickness_pixel = line_thickness_mm*img_dpi/25.4
dash_length_pixel = dash_length_mm*img_dpi/25.4
offset_pixel = offset_mm*img_dpi/25.4
line_yy = np.arange( int(img_size_pixel[0]*0.5-line_thickness_pixel*0.5), int(img_size_pixel[0]*0.5+line_thickness_pixel*0.5))
for xx in np.arange(img_size_pixel[1]):
if np.mod(xx-offset_pixel, dash_length_pixel) < (0.5*dash_length_pixel):
img[line_yy,xx] = graylevel
return img
# add images
def add_images(img01, img02):
'''Add two images of the same size. The images are assumed to be using only two colors: Black(0) and Transparent(255).
Input:
img01: the numpy array for the 1st image,
img02: the numpy array for the 2nd image,
Output:
the numpy array for the resulting image.
'''
img = 255*np.ones_like(img01)
img[np.where(np.logical_or((img01==0),(img02==0)))] = 0
return img
# display image
def display_img(img, figureNb=1001):
'''Display an image.
Input:
img: the numpy array for the image,
figureNb: an ID number the displayed image.
'''
plt.figure(figureNb)
plt.clf()
plt.imshow(img, cmap='gray')
# add a frame to an image
def add_frame(img_original, frame_thickness_mm = [10,10,10,10], line_thickness_mm=0.5, line_graylevel=0):
'''Adds a white frame to an image, with a line on the outside of the frame.
Note: the line is drawn inside the frame thickness (eg: line_thickness does not change the size of the final image).
Input:
img_original: the original image,
frame_thickness_mm: [mm] the thickness of the frame on the [left, right, top, bottom] sides,
line_thickness_mm: [mm] thickness of the line,
line_gray_level: [0-255] gray level of the line.
'''
img_width_pixel = img_original.shape[1] + int((frame_thickness_mm[0]+frame_thickness_mm[1])*img_dpi/25.4)
img_height_pixel= img_original.shape[0] + int((frame_thickness_mm[2]+frame_thickness_mm[3])*img_dpi/25.4)
img_size_pixel = [img_height_pixel, img_width_pixel] # swap x and y so that Ox is "horizontal" and Oy is "vertical"
img = 255*np.ones(img_size_pixel)
# draw the lines
line_thickness_pixel = int(line_thickness_mm*img_dpi/25.4)
img[0:line_thickness_pixel, :] = line_graylevel
img[-line_thickness_pixel:, :] = line_graylevel
img[:, 0:line_thickness_pixel] = line_graylevel
img[:, -line_thickness_pixel:] = line_graylevel
# place the original image in the frame
img_00_pixel = [ int(frame_thickness_mm[0]*img_dpi/25.4), int(frame_thickness_mm[2]*img_dpi/25.4) ] # (remember that we swapped Ox and Oy !!!)
img[ img_00_pixel[1]:(img_00_pixel[1]+img_original.shape[0]), img_00_pixel[0]:(img_00_pixel[0]+img_original.shape[1]) ] = img_original
return img
# Build metric caliper
def build_metric_caliper(filename_base="metric_", measuring_length_mm=100.0):
'''Build the bottom and top images for a metric caliper.
Note: the width of the clear aperture for the top frame should be 24.5mm (-ish).
input:
filename_base: 1st part of the filename for the images
measuring_length_mm: [mm] measuring length at full resolution (note: an additional 50mm will be available at coarse resolution)
return:
img_bottom: the numpy array for the saved image for the bottom moire,
img_top: the numpy array for the saved image for the top moire.
output:
saves the two PNG images to disk (bottom and top moires)
'''
## BUILD THE BOTTOM IMAGE
img_b_niet_1_5mm = 255*np.ones( [int(1.5*img_dpi/25.4),int(70*img_dpi/25.4)] ) # a transparent band
img_b_dashed_line = create_alignement_line_img([70,1.5], line_thickness_mm=0.25, dash_length_mm=2.5, offset_mm=1.25, graylevel=0)
img_b_moire_pattern = create_triangular_moire_img(img_size_mm=[70.0,5.0], pitch_mm=1.0*50.0/51.0, line_thickness_mm1=0.6*50.0/51.0, line_thickness_mm2=0.93*50.0/51.0, offset_mm=10.0)
img_b_moire_scale = create_moire_scale_img(img_size_mm=[70.0,5.0], scale_start_mm=10.0, scale_end_mm=60.0, font_size=0.15*img_dpi, tick_thickness_mm=0.1, text_height_mm=0.0)
img_b_ruler_index = create_ruler_index_img(img_size_mm=[70.0,2.0], index_xx_mm=10.0, index_depth_mm=2.0, triangle_tip_angle_degrees=45)
img_b_text = create_text_img(img_size_mm=[70.0,2.0], ttext=u"Moiré Caliper", font_size=0.075*img_dpi, text_xx_center_mm=30.0, text_yy_center_mm=1.0, text_graylevel=0)
img_b_text2 = create_text_img(img_size_mm=[70.0,2.0], ttext=u"by Mojoptix", font_size=0.075*img_dpi, text_xx_center_mm=55.0, text_yy_center_mm=1.0, text_graylevel=0)
img_b_ruler_text = add_images(img_b_ruler_index, img_b_text)
img_b_ruler_text = add_images(img_b_ruler_text, img_b_text2)
img_b_niet_11_5mm = 255*np.ones( [int(11.5*img_dpi/25.4),int(70*img_dpi/25.4)] ) # a transparent band
img_bottom = stitch_img(img_b_niet_1_5mm, img_b_dashed_line)
img_bottom = stitch_img(img_bottom, img_b_moire_pattern)
img_bottom = stitch_img(img_bottom, img_b_moire_scale)
img_bottom = stitch_img(img_bottom, img_b_ruler_text)
img_bottom = stitch_img(img_bottom, img_b_niet_11_5mm)
# Add the cut-here lines
img_bottom_final = add_frame(img_bottom, frame_thickness_mm = [15.0,15.0,6.5,6.5], line_thickness_mm=0.25, line_graylevel=50)
display_img(img_bottom_final, 101)
save_img(img_bottom_final, "%sbottom.png"%filename_base)
## BUILD THE TOP IMAGE
full_length_mm = measuring_length_mm+50.0 # add 50.0mm for the bottom image moire
full_length_mm = full_length_mm +10.0 # add 5.0mm white space on each side, to have some more room for the moire
img_t_outside_line01 = create_alignement_line_img([full_length_mm,1.15], line_thickness_mm=1.15,dash_length_mm=1.0, graylevel=0)
img_t_outside_line02 = create_alignement_line_img([full_length_mm,0.35], line_thickness_mm=0.35,dash_length_mm=10000.0, graylevel=0)
img_t_dashed_line = create_alignement_line_img([full_length_mm,1.5], line_thickness_mm=0.25,dash_length_mm=2.5,graylevel=100)
img_t_moire_pattern = create_triangular_moire_img(img_size_mm=[full_length_mm,5.0], pitch_mm=1.0, line_thickness_mm1=0.6, line_thickness_mm2=0.93, offset_mm=0.0)
img_t_niet_7mm = 255*np.ones( [int(7.0*img_dpi/25.4),int(full_length_mm*img_dpi/25.4)] ) # a transparent band
img_t_ruler = create_ruler_img(img_size_mm=[full_length_mm,10.0], scale_start_mm=5.0, scale_end_mm=full_length_mm-5.0, font_size=0.25*img_dpi, tick_thickness_mm=0.3, text_offset_mm=0.0, number_spacing_mm=10.0)
img_top = stitch_img(img_t_outside_line01, img_t_outside_line02) # alignement target for top image Vs top frame
img_top = stitch_img(img_top, img_t_dashed_line) # alignement target for bottom image Vs top image
img_top = stitch_img(img_top, img_t_moire_pattern)
img_top = stitch_img(img_top, img_t_niet_7mm)
img_top = stitch_img(img_top, img_t_ruler)
img_top = stitch_img(img_top, img_t_outside_line02) # alignement target for top image Vs top frame
img_top = stitch_img(img_top, img_t_outside_line01) # alignement target for top image Vs top frame
# Flip the top image: it will be printed on a transparency that will be used facing down
img_top_fliplr = np.fliplr(img_top)
# Add the cut-here lines
img_top_final = add_frame(img_top_fliplr, frame_thickness_mm = [7.0,7.0,6,6], line_thickness_mm=0.25, line_graylevel=50)
display_img(img_top_final, 102)
save_img(img_top_final, "%stop.png"%filename_base)
# Return
return (img_bottom_final, img_top_final)
# Build imperial caliper
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Build the images for a Metric Caliper and display the moires obtained at 0.0 mm and at 0.5mm
# -----------------------------------------------------------------------------
(img_bottom, img_top) = build_metric_caliper();
## Display the Caliper at 0.000mm
img_test_top = np.fliplr(img_top)
yy_offset_pixel = int(0.5*img_dpi/25.4)
xx_offset_pixel = int(5.0*img_dpi/25.4)
img_test = add_images(img_bottom[yy_offset_pixel:(yy_offset_pixel+img_top.shape[0]),xx_offset_pixel:], img_test_top[:,0:(img_bottom.shape[1]-xx_offset_pixel)])
display_img(img_test, 103)
## Display the Caliper at 0.500mm
xx_offset_pixel = int(10.5*img_dpi/25.4)
img_test = add_images(img_bottom[yy_offset_pixel:(yy_offset_pixel+img_top.shape[0]),xx_offset_pixel:], img_test_top[:,0:(img_bottom.shape[1]-xx_offset_pixel)])
display_img(img_test, 104)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
58,
1677,
60,
220,
11420,
220,
220,
13070,
41736,
33290,
4061,
1137,
201,
198,
58,
10913,
60,
220,
11420,
220,
220,
350,
19767,
317,
327,
2606,
4... | 2.280182 | 10,097 |
data = (
'Song ', # 0x00
'Wei ', # 0x01
'Hong ', # 0x02
'Wa ', # 0x03
'Lou ', # 0x04
'Ya ', # 0x05
'Rao ', # 0x06
'Jiao ', # 0x07
'Luan ', # 0x08
'Ping ', # 0x09
'Xian ', # 0x0a
'Shao ', # 0x0b
'Li ', # 0x0c
'Cheng ', # 0x0d
'Xiao ', # 0x0e
'Mang ', # 0x0f
'Fu ', # 0x10
'Suo ', # 0x11
'Wu ', # 0x12
'Wei ', # 0x13
'Ke ', # 0x14
'Lai ', # 0x15
'Chuo ', # 0x16
'Ding ', # 0x17
'Niang ', # 0x18
'Xing ', # 0x19
'Nan ', # 0x1a
'Yu ', # 0x1b
'Nuo ', # 0x1c
'Pei ', # 0x1d
'Nei ', # 0x1e
'Juan ', # 0x1f
'Shen ', # 0x20
'Zhi ', # 0x21
'Han ', # 0x22
'Di ', # 0x23
'Zhuang ', # 0x24
'E ', # 0x25
'Pin ', # 0x26
'Tui ', # 0x27
'Han ', # 0x28
'Mian ', # 0x29
'Wu ', # 0x2a
'Yan ', # 0x2b
'Wu ', # 0x2c
'Xi ', # 0x2d
'Yan ', # 0x2e
'Yu ', # 0x2f
'Si ', # 0x30
'Yu ', # 0x31
'Wa ', # 0x32
'[?] ', # 0x33
'Xian ', # 0x34
'Ju ', # 0x35
'Qu ', # 0x36
'Shui ', # 0x37
'Qi ', # 0x38
'Xian ', # 0x39
'Zhui ', # 0x3a
'Dong ', # 0x3b
'Chang ', # 0x3c
'Lu ', # 0x3d
'Ai ', # 0x3e
'E ', # 0x3f
'E ', # 0x40
'Lou ', # 0x41
'Mian ', # 0x42
'Cong ', # 0x43
'Pou ', # 0x44
'Ju ', # 0x45
'Po ', # 0x46
'Cai ', # 0x47
'Ding ', # 0x48
'Wan ', # 0x49
'Biao ', # 0x4a
'Xiao ', # 0x4b
'Shu ', # 0x4c
'Qi ', # 0x4d
'Hui ', # 0x4e
'Fu ', # 0x4f
'E ', # 0x50
'Wo ', # 0x51
'Tan ', # 0x52
'Fei ', # 0x53
'Wei ', # 0x54
'Jie ', # 0x55
'Tian ', # 0x56
'Ni ', # 0x57
'Quan ', # 0x58
'Jing ', # 0x59
'Hun ', # 0x5a
'Jing ', # 0x5b
'Qian ', # 0x5c
'Dian ', # 0x5d
'Xing ', # 0x5e
'Hu ', # 0x5f
'Wa ', # 0x60
'Lai ', # 0x61
'Bi ', # 0x62
'Yin ', # 0x63
'Chou ', # 0x64
'Chuo ', # 0x65
'Fu ', # 0x66
'Jing ', # 0x67
'Lun ', # 0x68
'Yan ', # 0x69
'Lan ', # 0x6a
'Kun ', # 0x6b
'Yin ', # 0x6c
'Ya ', # 0x6d
'Ju ', # 0x6e
'Li ', # 0x6f
'Dian ', # 0x70
'Xian ', # 0x71
'Hwa ', # 0x72
'Hua ', # 0x73
'Ying ', # 0x74
'Chan ', # 0x75
'Shen ', # 0x76
'Ting ', # 0x77
'Dang ', # 0x78
'Yao ', # 0x79
'Wu ', # 0x7a
'Nan ', # 0x7b
'Ruo ', # 0x7c
'Jia ', # 0x7d
'Tou ', # 0x7e
'Xu ', # 0x7f
'Yu ', # 0x80
'Wei ', # 0x81
'Ti ', # 0x82
'Rou ', # 0x83
'Mei ', # 0x84
'Dan ', # 0x85
'Ruan ', # 0x86
'Qin ', # 0x87
'Hui ', # 0x88
'Wu ', # 0x89
'Qian ', # 0x8a
'Chun ', # 0x8b
'Mao ', # 0x8c
'Fu ', # 0x8d
'Jie ', # 0x8e
'Duan ', # 0x8f
'Xi ', # 0x90
'Zhong ', # 0x91
'Mei ', # 0x92
'Huang ', # 0x93
'Mian ', # 0x94
'An ', # 0x95
'Ying ', # 0x96
'Xuan ', # 0x97
'Jie ', # 0x98
'Wei ', # 0x99
'Mei ', # 0x9a
'Yuan ', # 0x9b
'Zhen ', # 0x9c
'Qiu ', # 0x9d
'Ti ', # 0x9e
'Xie ', # 0x9f
'Tuo ', # 0xa0
'Lian ', # 0xa1
'Mao ', # 0xa2
'Ran ', # 0xa3
'Si ', # 0xa4
'Pian ', # 0xa5
'Wei ', # 0xa6
'Wa ', # 0xa7
'Jiu ', # 0xa8
'Hu ', # 0xa9
'Ao ', # 0xaa
'[?] ', # 0xab
'Bou ', # 0xac
'Xu ', # 0xad
'Tou ', # 0xae
'Gui ', # 0xaf
'Zou ', # 0xb0
'Yao ', # 0xb1
'Pi ', # 0xb2
'Xi ', # 0xb3
'Yuan ', # 0xb4
'Ying ', # 0xb5
'Rong ', # 0xb6
'Ru ', # 0xb7
'Chi ', # 0xb8
'Liu ', # 0xb9
'Mei ', # 0xba
'Pan ', # 0xbb
'Ao ', # 0xbc
'Ma ', # 0xbd
'Gou ', # 0xbe
'Kui ', # 0xbf
'Qin ', # 0xc0
'Jia ', # 0xc1
'Sao ', # 0xc2
'Zhen ', # 0xc3
'Yuan ', # 0xc4
'Cha ', # 0xc5
'Yong ', # 0xc6
'Ming ', # 0xc7
'Ying ', # 0xc8
'Ji ', # 0xc9
'Su ', # 0xca
'Niao ', # 0xcb
'Xian ', # 0xcc
'Tao ', # 0xcd
'Pang ', # 0xce
'Lang ', # 0xcf
'Nao ', # 0xd0
'Bao ', # 0xd1
'Ai ', # 0xd2
'Pi ', # 0xd3
'Pin ', # 0xd4
'Yi ', # 0xd5
'Piao ', # 0xd6
'Yu ', # 0xd7
'Lei ', # 0xd8
'Xuan ', # 0xd9
'Man ', # 0xda
'Yi ', # 0xdb
'Zhang ', # 0xdc
'Kang ', # 0xdd
'Yong ', # 0xde
'Ni ', # 0xdf
'Li ', # 0xe0
'Di ', # 0xe1
'Gui ', # 0xe2
'Yan ', # 0xe3
'Jin ', # 0xe4
'Zhuan ', # 0xe5
'Chang ', # 0xe6
'Ce ', # 0xe7
'Han ', # 0xe8
'Nen ', # 0xe9
'Lao ', # 0xea
'Mo ', # 0xeb
'Zhe ', # 0xec
'Hu ', # 0xed
'Hu ', # 0xee
'Ao ', # 0xef
'Nen ', # 0xf0
'Qiang ', # 0xf1
'Ma ', # 0xf2
'Pie ', # 0xf3
'Gu ', # 0xf4
'Wu ', # 0xf5
'Jiao ', # 0xf6
'Tuo ', # 0xf7
'Zhan ', # 0xf8
'Mao ', # 0xf9
'Xian ', # 0xfa
'Xian ', # 0xfb
'Mo ', # 0xfc
'Liao ', # 0xfd
'Lian ', # 0xfe
'Hua ', # 0xff
)
| [
7890,
796,
357,
198,
6,
44241,
46083,
220,
220,
220,
1303,
657,
87,
405,
198,
6,
1135,
72,
46083,
220,
220,
220,
1303,
657,
87,
486,
198,
6,
48559,
46083,
220,
220,
220,
1303,
657,
87,
2999,
198,
6,
33484,
46083,
220,
220,
220,
... | 1.513055 | 3,064 |
import discord
from discord import Embed
from discord.ext import commands
import asyncpraw
import random
| [
11748,
36446,
198,
6738,
36446,
1330,
13302,
276,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
11748,
355,
2047,
13155,
1831,
198,
11748,
4738,
628
] | 4.24 | 25 |
import numpy as np
from numpy import exp, sin, pi
from numpy.random import uniform
def dosc(a, d, f, p, t):
    """
    a -- amplitude.
    d -- damping parameter. Typically 0 < d < 1.
    f -- frequency.
    p -- phase, 0 <= p <= 2 * pi.
    t -- time.
    """
return a * exp(-d*t) * sin(f * t + p)
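

# main() is referenced in the __main__ block below but not defined in this snippet; a
# minimal assumed placeholder that just prints n samples of the damped oscillator with
# a randomly drawn damping factor and phase.
def main(n):
    a, d, f = 1.0, uniform(0.0, 1.0), 2.0
    p = uniform(0.0, 2 * pi)
    for t in np.linspace(0.0, 2 * pi, n):
        print(t, dosc(a, d, f, p, t))
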
if __name__ == '__main__':
main(10)
# def soln0(L0, L1, p0y, p1x):
# pmag2 = p0y**2 + p1x**2
# L2diff = L0**2 - L1**2
# Ldiff2 = (L0 - L1)**2
# Lsum2 = (L0 + L1)**2
# zx = (L2diff/2 - p0y**2/2 + p0y*(p0y*(pmag2 - L2diff) -
# sqrt(p1x**2*(pmag2 - Ldiff2)*(Lsum2 - pmag2)))/(2*pmag2) + p1x**2/2)/p1x
# zy = (p0y*(pmag2 - L2diff)/2 - sqrt(p1x**2*(pmag2 - Ldiff2)*(Lsum2 - pmag2))/2)/pmag2
# return zx, zy
# def soln1(L0, L1, p0y, p1x):
# pmag2 = p0y**2 + p1x**2
# L2diff = L0**2 - L1**2
# Ldiff2 = (L0 - L1)**2
# Lsum2 = (L0 + L1)**2
# zx = (L2diff/2 - p0y**2/2 + p0y*(p0y*(pmag2 - L2diff) +
# sqrt(p1x**2*(pmag2 - Ldiff2)*(Lsum2 - pmag2)))/(2*pmag2) + p1x**2/2)/p1x
# zy = (p0y*(pmag2 - L2diff)/2 + sqrt(p1x**2*(pmag2 - Ldiff2)*(Lsum2 - pmag2))/2)/pmag2
# return zx, zy
# soln = [
# {
# zx: (L0**2/2 - L1**2/2 - p0y**2/2 + p0y*(p0y*(-L0**2 + L1**2 + p0y**2 + p1x**2) - sqrt(p1x**2*(-L0**2 + 2*L0*L1 - L1**2 + p0y**2 + p1x**2)*(L0**2 + 2*L0*L1 + L1**2 - p0y**2 - p1x**2)))/(2*(p0y**2 + p1x**2)) + p1x**2/2)/p1x,
# zy: (p0y*(-L0**2 + L1**2 + p0y**2 + p1x**2)/2 - sqrt(p1x**2*(-L0**2 + 2*L0*L1 - L1**2 + p0y**2 + p1x**2)*(L0**2 + 2*L0*L1 + L1**2 - p0y**2 - p1x**2))/2)/(p0y**2 + p1x**2)},
# {
# zx: (L0**2/2 - L1**2/2 - p0y**2/2 + p0y*(p0y*(-L0**2 + L1**2 + p0y**2 + p1x**2) + sqrt(p1x**2*(-L0**2 + 2*L0*L1 - L1**2 + p0y**2 + p1x**2)*(L0**2 + 2*L0*L1 + L1**2 - p0y**2 - p1x**2)))/(2*(p0y**2 + p1x**2)) + p1x**2/2)/p1x,
# zy: (p0y*(-L0**2 + L1**2 + p0y**2 + p1x**2)/2 + sqrt(p1x**2*(-L0**2 + 2*L0*L1 - L1**2 + p0y**2 + p1x**2)*(L0**2 + 2*L0*L1 + L1**2 - p0y**2 - p1x**2))/2)/(p0y**2 + p1x**2)}]
| [
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
1330,
1033,
11,
7813,
11,
31028,
198,
6738,
299,
32152,
13,
25120,
1330,
8187,
198,
198,
4299,
288,
17500,
7,
64,
11,
288,
11,
277,
11,
279,
11,
256,
2599,
198,
220,
220,
220,
... | 1.53609 | 1,330 |
# -*- coding: utf-8 -*-
#███╗ ███╗ █████╗ ███╗ ██╗██╗ ██████╗ ██████╗ ███╗ ███╗██╗ ██████╗
#████╗ ████║██╔══██╗████╗ ██║██║██╔════╝██╔═══██╗████╗ ████║██║██╔═══██╗
#██╔████╔██║███████║██╔██╗ ██║██║██║ ██║ ██║██╔████╔██║██║██║ ██║
#██║╚██╔╝██║██╔══██║██║╚██╗██║██║██║ ██║ ██║██║╚██╔╝██║██║██║ ██║
#██║ ╚═╝ ██║██║ ██║██║ ╚████║██║╚██████╗╚██████╔╝██║ ╚═╝ ██║██║╚██████╔╝
#╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═════╝
# [+] @GorpoOrko 2020 - Telegram Bot and Personal Assistant [+]
# | TCXS Project Hacker Team - https://tcxsproject.com.br |
# | Telegram: @GorpoOrko Mail:gorpoorko@protonmail.com |
# [+] Github Gorpo Dev: https://github.com/gorpo [+]
import os
from PIL import Image, ImageDraw, ImageFont
from config import bot
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
9968,
8115,
22880,
245,
220,
220,
23287,
9968,
22880,
245,
23287,
20503,
22880,
245,
23287,
9968,
22880,
245,
220,
220,
23287,
8115,
22880,
245,
9968,
22880,
245,
23287... | 1.408935 | 582 |
#!/usr/bin/python
#
# (c) Copyright 2015 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
The topology is expressed via a group variable that looks like this:
topology:
control_planes:
- name: ccp
services:
- name: keystone
components:
- name: keystone-api
hosts:
- host1
- host2
- host3
- name: foundation
components:
- name: mysql
hosts:
- host1
- host2
- host3
- name: rcp01
services:
- name: nova
components:
- name: nova-api
hosts:
- host4
- host5
- host6
- name: nova-scheduler
hosts:
- host4
- host5
- host6
The following filters are provided for correct navigation of that structure:
topology_filter_control_planes: yields a set of (control-plane) named tuples
topology_filter_services: yields a set of (control-plane, service) named tuples
topology_filter_components: yields a set of (control-plane, service, service-element) named tuples
topology_filter_hosts: yields a set of (control-plane, service, service-element, host) named tuples
"""
import collections
control_plane = ['control_plane']
service = control_plane + ['service']
component = service + ['component']
host = component + ['host']
def descend(dictionary, path, remaining, tuple):
""" Descend one level into a dictionary. """
if not remaining:
return [tuple(*path)]
accessor, collect = remaining[0]
if callable(collect):
function = collect
else:
function = lambda item: item[collect]
results = []
for item in dictionary[accessor]:
results.extend(descend(item, path + [function(item)], remaining[1:], tuple))
return results
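

# The topology_filter_* functions promised in the module docstring are not shown in this
# snippet; the sketches below are assumptions that reuse descend() and the field lists
# defined above, and they satisfy the asserts in the __main__ block that follows.
def _dict_factory(fields):
    """Build a factory that zips collected path values with the given field names."""
    return lambda *values: dict(zip(fields, values))


def topology_filter_control_planes(topology):
    return descend(topology, [], [('control_planes', 'name')], _dict_factory(control_plane))


def topology_filter_services(topology):
    return descend(topology, [], [('control_planes', 'name'), ('services', 'name')],
                   _dict_factory(service))


def topology_filter_components(topology):
    return descend(topology, [], [('control_planes', 'name'), ('services', 'name'),
                                  ('components', 'name')], _dict_factory(component))


def topology_filter_hosts(topology):
    return descend(topology, [], [('control_planes', 'name'), ('services', 'name'),
                                  ('components', 'name'), ('hosts', lambda item: item)],
                   _dict_factory(host))
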
if __name__ == '__main__':
import yaml
test = """
---
topology:
control_planes:
- name: ccp
services:
- name: keystone
components:
- name: keystone-api
hosts:
- host1
- host2
- host3
- name: foundation
components:
- name: mysql
hosts:
- host1
- host2
- host3
- name: rcp01
services:
- name: nova
components:
- name: nova-api
hosts:
- host4
- host5
- host6
- name: nova-scheduler
hosts:
- host4
- host5
- host6
"""
topology = yaml.safe_load(test)['topology']
assert topology_filter_control_planes(topology) == [
{'control_plane':'ccp'},
{'control_plane': 'rcp01'}]
assert topology_filter_services(topology) == [
{'control_plane': 'ccp', 'service': 'keystone'},
{'control_plane': 'ccp', 'service': 'foundation'},
{'control_plane': 'rcp01', 'service': 'nova'}]
assert topology_filter_components(topology) == [
{'control_plane': 'ccp', 'service': 'keystone', 'component': 'keystone-api'},
{'control_plane': 'ccp', 'service': 'foundation', 'component': 'mysql'},
{'control_plane': 'rcp01', 'service': 'nova', 'component': 'nova-api'},
{'control_plane': 'rcp01', 'service': 'nova', 'component': 'nova-scheduler'}]
assert topology_filter_hosts(topology) == [
{'control_plane': 'ccp', 'service': 'keystone', 'component': 'keystone-api', 'host': 'host1'},
{'control_plane': 'ccp', 'service': 'keystone', 'component': 'keystone-api', 'host': 'host2'},
{'control_plane': 'ccp', 'service': 'keystone', 'component': 'keystone-api', 'host': 'host3'},
{'control_plane': 'ccp', 'service': 'foundation', 'component': 'mysql', 'host': 'host1'},
{'control_plane': 'ccp', 'service': 'foundation', 'component': 'mysql', 'host': 'host2'},
{'control_plane': 'ccp', 'service': 'foundation', 'component': 'mysql', 'host': 'host3'},
{'control_plane': 'rcp01', 'service': 'nova', 'component': 'nova-api', 'host': 'host4'},
{'control_plane': 'rcp01', 'service': 'nova', 'component': 'nova-api', 'host': 'host5'},
{'control_plane': 'rcp01', 'service': 'nova', 'component': 'nova-api', 'host': 'host6'},
{'control_plane': 'rcp01', 'service': 'nova', 'component': 'nova-scheduler', 'host': 'host4'},
{'control_plane': 'rcp01', 'service': 'nova', 'component': 'nova-scheduler', 'host': 'host5'},
{'control_plane': 'rcp01', 'service': 'nova', 'component': 'nova-scheduler', 'host': 'host6'}]
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
357,
66,
8,
15069,
1853,
30446,
15503,
6400,
446,
14973,
7712,
18470,
198,
2,
357,
66,
8,
15069,
2177,
311,
19108,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
106... | 2.19992 | 2,491 |
from .utils import *
from .checkpoint import *
| [
6738,
764,
26791,
1330,
1635,
198,
6738,
764,
9122,
4122,
1330,
1635,
198
] | 3.615385 | 13 |
import math
import random
class Polygon:
"""
Class used for testing only
"""
| [
11748,
10688,
198,
11748,
4738,
628,
198,
4871,
12280,
14520,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
220,
220,
220,
220,
5016,
973,
329,
4856,
691,
198,
220,
220,
220,
37227,
198
] | 2.714286 | 35 |
# Exercise_1
#1
# Print the first 5 rows of the text column
print(speech_df['text'].head())
#2
# Replace all non letter characters with a whitespace
speech_df['text_clean'] = speech_df['text'].str.replace('[^a-zA-Z]', ' ')
# Change to lower case
speech_df['text_clean'] = speech_df['text_clean'].str.lower()
# Print the first 5 rows of the text_clean column
print(speech_df['text_clean'].head())
--------------------------------------------------
# Exercise_2
# Find the length of each text
speech_df['char_cnt'] = speech_df['text_clean'].str.len()
# Count the number of words in each text
speech_df['word_cnt'] = speech_df['text_clean'].str.split().str.len()
# Find the average length of word
speech_df['avg_word_length'] = speech_df['char_cnt'] / speech_df['word_cnt']
# Print the first 5 rows of these columns
print(speech_df[['text_clean', 'char_cnt', 'word_cnt', 'avg_word_length']])
--------------------------------------------------
# Exercise_3
# Import CountVectorizer
from sklearn.feature_extraction.text import CountVectorizer
# Instantiate CountVectorizer
cv = CountVectorizer()
# Fit the vectorizer
cv.fit(speech_df['text_clean'])
# Print feature names
print(cv.get_feature_names())
--------------------------------------------------
# Exercise_4
#1
# Apply the vectorizer
cv_transformed = cv.transform(speech_df['text_clean'])
# Print the full array
cv_array = cv_transformed.toarray()
print(cv_array)
#2
# Apply the vectorizer
cv_transformed = cv.transform(speech_df['text_clean'])
# Print the full array
cv_array = cv_transformed.toarray()
# Print the shape of cv_array
print(cv_array.shape)
--------------------------------------------------
# Exercise_5
# Import CountVectorizer
from sklearn.feature_extraction.text import CountVectorizer
# Specify arguements to limit the number of features generated
cv = CountVectorizer(min_df= 0.2, max_df= 0.8)
# Fit, transform, and convert into array
cv_transformed = cv.fit_transform(speech_df['text_clean'])
cv_array = cv_transformed.toarray()
# Print the array shape
print(cv_array.shape)
--------------------------------------------------
# Exercise_6
# Create a DataFrame with these features
cv_df = pd.DataFrame(cv_array,
columns=cv.get_feature_names()).add_prefix('Counts_')
# Add the new columns to the original DataFrame
speech_df_new = pd.concat([speech_df, cv_df], axis=1, sort=False)
print(speech_df_new.head())
--------------------------------------------------
# Exercise_7
# Import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
# Instantiate TfidfVectorizer
tv = TfidfVectorizer(max_features=100, stop_words='english')
# Fit the vectroizer and transform the data
tv_transformed = tv.fit_transform(speech_df['text_clean'])
# Create a DataFrame with these features
tv_df = pd.DataFrame(tv_transformed.toarray(),
columns=tv.get_feature_names()).add_prefix('TFIDF_')
print(tv_df.head())
--------------------------------------------------
# Exercise_8
# Isolate the row to be examined
sample_row = tv_df.iloc[0]
# Print the top 5 words of the sorted output
print(sample_row.sort_values(ascending=False).head())
--------------------------------------------------
# Exercise_9
# Instantiate TfidfVectorizer
tv = TfidfVectorizer(max_features=100, stop_words='english')
# Fit the vectroizer and transform the data
tv_transformed = tv.fit_transform(train_speech_df['text_clean'])
# Transform test data
test_tv_transformed = tv.transform(test_speech_df['text_clean'])
# Create new features for the test set
test_tv_df = pd.DataFrame(test_tv_transformed.toarray(),
columns=tv.get_feature_names()).add_prefix('TFIDF_')
print(test_tv_df.head())
--------------------------------------------------
# Exercise_10
# Import CountVectorizer
from sklearn.feature_extraction.text import CountVectorizer
# Instantiate a trigram vectorizer
cv_trigram_vec = CountVectorizer(max_features=100,
stop_words='english',ngram_range=(3,3))
# Fit and apply trigram vectorizer
cv_trigram = cv_trigram_vec.fit_transform(speech_df['text_clean'])
# Print the trigram features
print(cv_trigram_vec.get_feature_names())
--------------------------------------------------
# Exercise_11
# Create a DataFrame of the features
cv_tri_df = pd.DataFrame(cv_trigram.toarray(),
columns=cv_trigram_vec.get_feature_names()).add_prefix('Counts_')
# Print the top 5 words in the sorted output
print(cv_tri_df.sum().sort_values(ascending=False).head())
--------------------------------------------------
| [
2,
32900,
62,
16,
220,
198,
2,
16,
198,
2,
12578,
262,
717,
642,
15274,
286,
262,
2420,
5721,
198,
4798,
7,
45862,
62,
7568,
17816,
5239,
6,
4083,
2256,
28955,
198,
2,
17,
198,
2,
40177,
477,
1729,
3850,
3435,
351,
257,
13216,
1... | 3.003232 | 1,547 |
# Generated by Django 3.2.12 on 2022-02-10 05:32
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
1065,
319,
33160,
12,
2999,
12,
940,
8870,
25,
2624,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.840909 | 44 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal
from django.utils.translation import ugettext_lazy as _
from shop import messages
from shop.exceptions import ProductNotAvailable
from shop.money import AbstractMoney, Money
from shop.modifiers.base import BaseCartModifier
class DefaultCartModifier(BaseCartModifier):
"""
This modifier is required for almost every shopping cart. It handles the most basic
calculations, ie. multiplying the items unit prices with the chosen quantity.
Since this modifier sets the cart items line total, it must be listed as the first
entry in `SHOP_CART_MODIFIERS`.
"""
identifier = 'default'
def pre_process_cart_item(self, cart, cart_item, request, raise_exception=False):
"""
Limit the ordered quantity in the cart to the availability in the inventory.
"""
kwargs = {'product_code': cart_item.product_code}
kwargs.update(cart_item.extra)
availability = cart_item.product.get_availability(request, **kwargs)
if cart_item.quantity > availability.quantity:
if raise_exception:
raise ProductNotAvailable(cart_item.product)
cart_item.quantity = availability.quantity
cart_item.save(update_fields=['quantity'])
message = _("The ordered quantity for item '{product_name}' has been adjusted to "\
"{quantity} which is the maximum, currently available in stock.").\
format(product_name=cart_item.product.product_name, quantity=availability.quantity)
messages.info(request, message, title=_("Verify Quantity"), delay=5)
return super(DefaultCartModifier, self).pre_process_cart_item(cart, cart_item, request, raise_exception)
class WeightedCartModifier(BaseCartModifier):
"""
This modifier is required for all shopping cart where we are interested into its weight.
It sums up the weight of all articles, ie. multiplying the items weight with the chosen
quantity.
If this modifier is used, the classes implementing the product shall override their
method ``get_weight()``, which must return the weight in kg as Decimal type.
"""
identifier = 'weights'
initial_weight = Decimal(0.01) # in kg
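
    # The hook implementations are not shown above; a minimal sketch, assuming the same
    # pre-processing hooks used by DefaultCartModifier and the get_weight() contract
    # described in the docstring (these signatures are an assumption, not upstream code).
    def pre_process_cart(self, cart, request, raise_exception=False):
        cart.weight = self.initial_weight  # start from the packaging weight
        return super(WeightedCartModifier, self).pre_process_cart(cart, request, raise_exception)

    def pre_process_cart_item(self, cart, cart_item, request, raise_exception=False):
        # multiply each item's weight by the ordered quantity and accumulate it on the cart
        cart.weight += Decimal(cart_item.product.get_weight() * cart_item.quantity)
        return super(WeightedCartModifier, self).pre_process_cart_item(cart, cart_item, request, raise_exception)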
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
32465,
1330,
4280,
4402,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
... | 2.951654 | 786 |
import os
import json
from argparse import ArgumentParser
from ml_tracking_ops.ml_tracking_ops import app
from ml_tracking_ops.experiment.experiment_tracking import HyperparameterSweep
from ml_tracking_ops.experiment.utils import get_hyperparameter_samplers
| [
11748,
28686,
198,
11748,
33918,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
198,
6738,
25962,
62,
36280,
62,
2840,
13,
4029,
62,
36280,
62,
2840,
1330,
598,
198,
6738,
25962,
62,
36280,
62,
2840,
13,
23100,
3681,
13,
23100,
3681,
... | 3.589041 | 73 |
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'includes': [
'../../build/common.gypi',
],
'conditions': [
[ 'OS=="win"', {
'targets': [
{
'target_name': 'activex_shim_dll',
'type': 'loadable_module',
'dependencies': [
'../../base/base.gyp:base',
'../../third_party/npapi/npapi.gyp:npapi',
'../activex_shim/activex_shim.gyp:activex_shim',
],
'product_name': 'npaxshim',
'msvs_guid': '494E414B-1655-48CE-996D-6413ECFB7829',
'msvs_settings': {
'VCLinkerTool': {
'RegisterOutput': 'false',
},
},
'sources': [
'activex_shim_dll.cc',
'activex_shim_dll.def',
'activex_shim_dll.rc',
'resource.h',
],
'link_settings': {
'libraries': [
'-lurlmon.lib',
],
},
},
],
}],
],
}
| [
2,
15069,
357,
66,
8,
3717,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
1... | 1.781874 | 651 |
from __future__ import absolute_import
from populus.utils.module_loading import (
import_string,
)
from populus.utils.types import (
is_string,
)
from populus.config.helpers import (
ClassImportPath,
)
from .base import Config
UNSUPPORTED_BACKEND_IDENTIFIER_MSG = (
"Unsupported type. Must be either a backend class, a dot "
"separated python path to a backend class, or one of {0}"
)
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
16595,
385,
13,
26791,
13,
21412,
62,
25138,
1330,
357,
198,
220,
220,
220,
1330,
62,
8841,
11,
198,
8,
198,
6738,
16595,
385,
13,
26791,
13,
19199,
1330,
357,
198,
220... | 2.935714 | 140 |
# This is where the connection to the database is established
from sqlalchemy import create_engine  # create_engine is what opens the connection to the database
from sqlalchemy.orm import sessionmaker  # the ORM maps objects onto the relational model; sessionmaker creates
# ORM-style sessions
from sqlalchemy.ext.declarative import declarative_base  # we create a base class from this
# declarative_base and derive our model bases from it
from app.core.config import settings  # import our settings
engine = create_engine(settings.SQLALCHEMY_DATABASE_URI, pool_pre_ping=True)
# create_engine takes the database connection string as its first parameter
# pool_pre_ping pings the database before establishing the connection
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# A SessionLocal object created with the sessionmaker function; engine is the variable we just created to
# open the connection
Base = declarative_base()
| [
2,
32,
421,
72,
1658,
288,
14378,
384,
1556,
540,
344,
8591,
27763,
35764,
18840,
257,
8591,
2779,
390,
4818,
418,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
1303,
9527,
2251,
3113,
1658,
1288,
8358,
289,
558,
257,
27763,
53... | 3.158065 | 310 |
"""Custom Jinja2 filters and tags.
"""
__all__ = (
"convert_py_to_cpp_namespace_code",
"convert_py_namespace_to_cpp_header_def",
"convert_py_to_cpp_namespace",
"convert_py_namespace_to_includes_dir",
"convert_py_namespace_to_header_filename",
"escape_yaml_doublequoted",
)
import os
import jinja2
from jinja2.ext import Extension
class TemplatekitExtension(Extension):
"""Custom Jinja2 extensions for use in LSST cookiecutter templates.
Parameters
----------
environment : `jinja2.Environment`
Jinja2 environment.
Notes
-----
**Using these extensions in cookiecutter**
Use these extensions in Cookiecutter by adding the name of this class
to the ``_extensions`` field array in the ``cookiecutter.json`` file.
For example:
.. code-block:: json
        {
            "_extensions": ["templatekit.TemplatekitExtension"]
        }
**Included filters**
- ``convert_py_to_cpp_namespace_code`` (`convert_py_to_cpp_namespace`)
- ``convert_py_namespace_to_cpp_header_def``
(`convert_py_namespace_to_cpp_header_def`)
- ``convert_py_to_cpp_namespace`` (`convert_py_to_cpp_namespace`)
- ``convert_py_namespace_to_includes_dir``
(`convert_py_namespace_to_includes_dir`)
- ``convert_py_namespace_to_header_filename``
(`convert_py_namespace_to_header_filename`)
- ``escape_yaml_doublequoted``
(`escape_yaml_doublequoted`)
"""
def convert_py_to_cpp_namespace_code(python_namespace: str) -> str:
"""Convert a Python namespace to C++ namespace code.
Parameters
----------
python_namespace : `str`
A string describing a Python namespace. For example,
``'lsst.example'``.
Returns
-------
cpp_namespace_code : `str`
C++ namespace code block. For example, ``'lsst.example'`` becomes:
.. code-block:: cpp
namespace lsst { example {
}} // lsst::example
Notes
-----
Use this filter in a Cookiecutter template like this::
{{ 'lsst.example' | convert_py_to_cpp_namespace_code }}
"""
name = python_namespace.replace(".", "::")
namespace_parts = python_namespace.split(".")
opening = "namespace " + " { ".join(namespace_parts) + " {\n"
closing = "}" * len(namespace_parts) + " // {}".format(name)
return "\n".join((opening, closing))
def convert_py_namespace_to_cpp_header_def(python_namespace: str) -> str:
"""Convert a Python namespace into a C++ header def token.
Parameters
----------
python_namespace : `str`
A string describing a Python namespace. For example,
``'lsst.example'``.
Returns
-------
cpp_header_def : `str`
        C++ header def, such as ``'LSST_EXAMPLE_H'``.
"""
return python_namespace.upper().replace(".", "_") + "_H"
def convert_py_to_cpp_namespace(python_namespace: str) -> str:
"""Convert a Python namespace name to a C++ namespace.
Parameters
----------
python_namespace : `str`
A string describing a Python namespace. For example,
``'lsst.example'``.
Returns
-------
cpp_namespace : `str`
A C++ namespace. For example: ``'lsst::example'``.
"""
return python_namespace.replace(".", "::")
def convert_py_namespace_to_includes_dir(python_namespace: str) -> str:
"""Convert a Python namespace into a C++ header def token.
Parameters
----------
python_namespace : `str`
A string describing a Python namespace. For example,
``'lsst.example'``.
Returns
-------
includes_dir : `str`
The includes directory.
"""
parts = python_namespace.split(".")
return os.path.join(*parts[:-1])
def convert_py_namespace_to_header_filename(python_namespace: str) -> str:
"""Convert a Python namespace to the name of the root C++ header file.
Parameters
----------
python_namespace : `str`
A string describing a Python namespace. For example,
``'lsst.example'``.
Returns
-------
header_filename : `str`
Filename of the root header file.
"""
parts = python_namespace.split(".")
return parts[-1] + ".h"
def escape_yaml_doublequoted(string: str) -> str:
r"""Escape the content of a double-quoted YAML string.
Parameters
----------
string : `str`
A string.
Returns
-------
escaped_string : `str`
A string escaped so it can be safely inserted into a double-quoted
YAML string.
Notes
-----
To escape a double-quoted YAML string:
- Replace ``\`` with ``\\``.
    - Replace ``"`` with ``\"``.
"""
return string.replace("\\", "\\\\").replace('"', '\\"')
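

# Example usage (not part of the original module): the filters above are plain functions,
# so they can also be registered directly on a stand-alone Jinja2 environment; the wiring
# below is an illustration only, not how cookiecutter loads the extension.
if __name__ == "__main__":
    env = jinja2.Environment()
    env.filters["convert_py_to_cpp_namespace_code"] = convert_py_to_cpp_namespace_code
    template = env.from_string(
        "{{ 'lsst.example' | convert_py_to_cpp_namespace_code }}"
    )
    print(template.render())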
| [
37811,
15022,
17297,
6592,
17,
16628,
290,
15940,
13,
198,
37811,
198,
198,
834,
439,
834,
796,
357,
198,
220,
220,
220,
366,
1102,
1851,
62,
9078,
62,
1462,
62,
20322,
62,
14933,
10223,
62,
8189,
1600,
198,
220,
220,
220,
366,
1102... | 2.553157 | 1,853 |
print(squareRoot(50)) | [
4798,
7,
23415,
30016,
7,
1120,
4008
] | 3 | 7 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx_bsl.tfxio.record_based_tfxio."""
import os
import tempfile
from absl import flags
import apache_beam as beam
from apache_beam.testing import util as beam_test_util
import pyarrow as pa
import tensorflow as tf
from tfx_bsl.tfxio import record_based_tfxio
from absl.testing import absltest
from absl.testing import parameterized
FLAGS = flags.FLAGS
if __name__ == "__main__":
absltest.main()
| [
2,
15069,
12131,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 3.37415 | 294 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Dependencies Solver """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import six
import qisys.sort
from qisys.qixml import etree
class DepsSolver(object):
""" Solve dependencies across projects in a build worktree and packages in a toolchain. """
def __init__(self, build_worktree):
""" DepsSolver Init """
self.build_worktree = build_worktree
def get_dep_projects(self, projects, dep_types, reverse=False):
"""
Solve the dependencies of the list of projects
:param: dep_types A list of dependencies types
(``["build"]``, ``["runtime", "test"]``, etc.)
:return: a list of projects in the build worktree
"""
sorted_names = self._get_sorted_names(projects, dep_types,
reverse=reverse)
dep_projects = list()
for name in sorted_names:
dep_project = self.build_worktree.get_build_project(name,
raises=False)
if dep_project:
dep_projects.append(dep_project)
return dep_projects
def get_dep_packages(self, projects, dep_types):
"""
Solve the dependencies of the list of projects
:param: dep_types A list of dependencies types
(``["build"]``, ``["runtime", "test"]``, etc.)
:return: a list of packages in the build worktree's toolchain
"""
sorted_names = self._get_sorted_names(projects, dep_types)
toolchain = self.build_worktree.toolchain
if not toolchain:
return list()
build_project_names = [x.name for x in
self.build_worktree.build_projects]
dep_packages = list()
for name in sorted_names:
dep_package = toolchain.get_package(name, raises=False)
if dep_package:
dep_packages.append(dep_package)
res = toolchain.solve_deps(dep_packages, dep_types=dep_types)
res = [p for p in res if p.name not in build_project_names]
return res
def get_sdk_dirs(self, project, dep_types):
"""
Get the list of build/sdk dirs on which the project depends
Those will then be written in build/dependencies.cmake and added
to CMAKE_PREFIX_PATH by qibuild-config.cmake
"""
res = list()
dep_projects = self.get_dep_projects([project], dep_types)
for dep_project in dep_projects:
if dep_project.name == project.name:
continue
res.append(dep_project.sdk_directory)
return res
def get_host_projects(self, projects):
""" Get a sorted list of all the projects listed as host dependencies. """
host_deps = set()
dep_projects = self.get_dep_projects(projects, ["build", "runtime", "test"])
for project in dep_projects:
host_deps = host_deps.union(project.host_depends)
host_projects = [self.build_worktree.get_build_project(x, raises=False)
for x in host_deps]
if six.PY3:
host_projects = list(filter(None, host_projects))
else:
host_projects = filter(None, host_projects)
return host_projects
def _get_sorted_names(self, projects, dep_types, reverse=False):
""" Helper for get_dep_* functions. """
if reverse:
reverse_deps = set()
for project in self.build_worktree.build_projects:
if "build" in dep_types:
if any(x.name in project.build_depends for x in projects):
reverse_deps.add(project.name)
if "runtime" in dep_types:
if any(x.name in project.run_depends for x in projects):
reverse_deps.add(project.name)
if "test" in dep_types:
if any(x.name in project.test_depends for x in projects):
reverse_deps.add(project.name)
return sorted(list(reverse_deps))
to_sort = dict()
# first, fill up dict with packages dependencies ...
toolchain = self.build_worktree.toolchain
if toolchain:
for package in toolchain.packages:
package.load_deps()
package_deps = gen_deps(toolchain.packages, dep_types)
to_sort.update(package_deps)
# then with project dependencies
project_deps = gen_deps(self.build_worktree.build_projects, dep_types)
to_sort.update(project_deps)
return qisys.sort.topological_sort(to_sort, [x.name for x in projects])
def read_deps_from_xml(target, xml_elem):
"""
Read all the ``<depends />`` tags in the xml element and set
``target.build_depends``, ``target.run_depends``, ``target.test_depends``.
"""
depends_trees = xml_elem.findall("depends")
for depends_tree in depends_trees:
buildtime = qisys.qixml.parse_bool_attr(depends_tree, "buildtime")
runtime = qisys.qixml.parse_bool_attr(depends_tree, "runtime")
testtime = qisys.qixml.parse_bool_attr(depends_tree, "testtime")
host = qisys.qixml.parse_bool_attr(depends_tree, "host")
dep_names = qisys.qixml.parse_list_attr(depends_tree, "names")
for dep_name in dep_names:
if buildtime:
target.build_depends.add(dep_name)
if runtime:
target.run_depends.add(dep_name)
if testtime:
target.test_depends.add(dep_name)
if host:
target.host_depends.add(dep_name)
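# Illustrative example (not from the qibuild sources) of the kind of XML element this parses:
#
#     <project name="foo">
#       <depends buildtime="true" runtime="true" names="bar baz" />
#       <depends testtime="true" names="gtest" />
#       <depends host="true" names="protobuf-compiler" />
#     </project>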
def dump_deps_to_xml(subject, xml_elem):
""" Dump Dependencies To XML """
if subject.build_depends:
build_dep_elem = etree.SubElement(xml_elem, "depends")
build_dep_elem.set("buildtime", "true")
build_dep_elem.set("names", " ".join(subject.build_depends))
if subject.run_depends:
runtime_dep_elem = etree.SubElement(xml_elem, "depends")
runtime_dep_elem.set("runtime", "true")
runtime_dep_elem.set("names", " ".join(subject.run_depends))
if subject.test_depends:
test_dep_elem = etree.SubElement(xml_elem, "depends")
test_dep_elem.set("testtime", "true")
test_dep_elem.set("names", " ".join(subject.test_depends))
def gen_deps(objects_with_dependencies, dep_types):
"""
Generate a dictionary name -> dependencies for the objects
passed as parameters (projects or packages).
"""
res = dict()
for object_with_dependencies in objects_with_dependencies:
deps = set()
if "build" in dep_types:
deps.update(object_with_dependencies.build_depends)
if "runtime" in dep_types:
deps.update(object_with_dependencies.run_depends)
if "test" in dep_types:
deps.update(object_with_dependencies.test_depends)
res[object_with_dependencies.name] = deps
return res
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
2321,
12,
1238,
2481,
8297,
28650,
47061,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
... | 2.212591 | 3,288 |
import unittest
from ..autoforecast_bitcoin import * # noqa: F401
| [
11748,
555,
715,
395,
198,
198,
6738,
11485,
23736,
754,
2701,
62,
35395,
1330,
1635,
220,
1303,
645,
20402,
25,
376,
21844,
628
] | 3 | 23 |
from django import forms
from django.db.models import QuerySet
from airmozilla.base.forms import BaseForm
from airmozilla.main.models import Event, Channel, Tag
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
43301,
7248,
198,
198,
6738,
1633,
5908,
16496,
13,
8692,
13,
23914,
1330,
7308,
8479,
198,
6738,
1633,
5908,
16496,
13,
12417,
13,
27530,
1330,
8558,
1... | 3.565217 | 46 |
import time
import gym
import pybullet as p
import peg_in_hole_gym
from peg_in_hole_gym.envs.base_env import TASK_LIST
from env import ArtForce
from test_env import TestForce
from tqdm import tqdm
from icecream import install
install()
TASK_LIST['art-force'] = ArtForce
TASK_LIST['test-force'] = TestForce
object_list = ["microwave", "toaster", "drawer", "cabinet", "cabinet2", "refrigerator"]
if __name__ == '__main__':
env = gym.make('peg-in-hole-v0', client=p.GUI, task="art-force", task_num=1, offset = [2.,3.,0.],args=[object_list[0], True, True], is_test=True)
# env = gym.make('peg-in-hole-v0', client=p.GUI, task="test-force", task_num=1, offset = [2.,3.,0.],args=[object_list[0], False, True], is_test=True)
env.reset()
# env.step([[1]])
# while True:
# env.step([[1]])
# time.sleep(0.01)
cnt = 8000
for i in tqdm(range(cnt)):
env.step([[1]])
time.sleep(0.01)
env.render()
| [
11748,
640,
198,
11748,
11550,
198,
11748,
12972,
15065,
1616,
355,
279,
198,
11748,
41350,
62,
259,
62,
13207,
62,
1360,
76,
198,
6738,
41350,
62,
259,
62,
13207,
62,
1360,
76,
13,
268,
14259,
13,
8692,
62,
24330,
1330,
309,
1921,
... | 2.294686 | 414 |
def add(a, b):  # assumed helper: the original snippet calls add() without defining it
    return a + b

x1 = 1
y1 = 2
print(f"This is the sum: {x1}, {y1}, {add(x1,y1)}")
| [
198,
198,
87,
16,
796,
352,
198,
88,
16,
796,
362,
198,
4798,
7,
69,
1,
1212,
318,
262,
2160,
25,
1391,
87,
16,
5512,
1391,
88,
16,
5512,
1391,
2860,
7,
87,
16,
11,
88,
16,
38165,
4943,
198
] | 1.7 | 40 |
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build vocab and cache it so we don't have to keep running."""
import collections
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
flags.DEFINE_string('vocab_file_path', '/tmp/lra_data/aan',
'Path for vocab file output.')
FLAGS = flags.FLAGS
DATASET_PATHS = '/tmp/dataset'
def whitespace_tokenize(text):
"""Splits an input into tokens by whitespace."""
return text.strip().split()
def build_vocab(datasets,
special_tokens=(b'<pad>', b'<unk>', b'<s>', b'</s>'),
min_freq=10,
text_keys=None):
"""Returns a vocabulary of tokens with optional minimum frequency."""
# Count the tokens in the datasets.
logging.info('Building Vocab...')
counter = collections.Counter()
num_processed = 0
for dataset in datasets:
for example in tfds.as_numpy(dataset):
# logging.info(example)
for k in text_keys[:1]:
# logging.info(example[k])
counter.update(whitespace_tokenize(example[k][:100]))
num_processed += 1
if num_processed % 100 == 0:
logging.info('Processed %d', num_processed)
# Add special tokens to the start of vocab.
vocab = collections.OrderedDict()
for token in special_tokens:
vocab[token] = len(vocab)
# Add all other tokens to the vocab if their frequency is >= min_freq.
for token in sorted(list(counter.keys())):
if counter[token] >= min_freq:
vocab[token] = len(vocab)
logging.info('Number of unfiltered tokens: %d', len(counter))
logging.info('Vocabulary size: %d', len(vocab))
return vocab
def get_tsv_dataset(file_path, batch_size):
"""Preprocess dataset."""
tf.logging.info(file_path)
# sel_cols = ['label', 'id1', 'id2']
col_defaults = [tf.string, tf.string, tf.string, tf.string, tf.string]
col_names = ['label', 'id1', 'id2', 'text1', 'text2']
ds = tf.data.experimental.make_csv_dataset([file_path],
batch_size,
column_names=col_names,
column_defaults=col_defaults,
use_quote_delim=False,
field_delim='\t',
shuffle=False,
header=False,
num_epochs=1)
ds = ds.unbatch()
return ds
def get_dataset(batch_size):
"""Get dataset from matching datasets converts into src/tgt pairs."""
train_fps = DATASET_PATHS + '.train.tsv'
train = get_tsv_dataset(train_fps, batch_size)
train = train.map(adapt_example)
train = train.prefetch(tf.data.experimental.AUTOTUNE)
return train
if __name__ == '__main__':
app.run(main)
| [
2,
15069,
12131,
3012,
11419,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
... | 2.317907 | 1,491 |
from itertools import product
nucleotides = "ACGT" # this order of nucleotides is important for reversing
mutation_contexts = [a + b for a in nucleotides for b in nucleotides]
amino_acids = "ACDEFGHIKLMNPQRSTVWY"
amino_acids_with_stop = amino_acids + "*"
# complementary_nucleotides = reversed(nucleotides)
complementary_nucleotide = dict(zip(nucleotides, reversed(nucleotides)))
complementary_nucleotide['N'] = 'N'
complementary_context = {x: complementary_nucleotide[x[1]] + complementary_nucleotide[x[0]] for x in mutation_contexts}
complementary_trinucleotide = dict(zip(
["".join(x) for x in product(nucleotides, repeat=3)],
["".join(reversed(x)) for x in product(reversed(nucleotides), repeat=3)]))
bases_dict = {
"A": "A", "G": "G", "T": "T", "C": "C",
"W": "AT", "S": "CG", "M": "AC", "K": "GT", "R": "AG", "Y": "CT",
"B": "TCG", "D": "AGT", "H": "ACT", "V": "ACG", "N": "ATGC"}
extended_nucleotides = "ACTGWSMKRYBDHVN"
complementary_extended_nucleotide = dict(zip(extended_nucleotides, "TGACWSKMYRVHDBN"))
comp_dict = {
"A": "T", "T": "A", "C": "G", "G": "C",
"W": "AT", "S": "CG", "K": "AC", "M": "GT", "Y": "AG", "R": "CT",
"V": "TCG", "H": "AGT", "D": "ACT", "B": "ACG", "N": "ATGC"}
codon_table = {
"GCT": "A", "GCC": "A", "GCA": "A", "GCG": "A",
"TTA": "L", "TTG": "L", "CTT": "L", "CTC": "L", "CTA": "L", "CTG": "L",
"CGT": "R", "CGC": "R", "CGA": "R", "CGG": "R", "AGA": "R", "AGG": "R",
"AAA": "K", "AAG": "K",
"AAT": "N", "AAC": "N",
"ATG": "M",
"GAT": "D", "GAC": "D",
"TTT": "F", "TTC": "F",
"TGT": "C", "TGC": "C",
"CCT": "P", "CCC": "P", "CCA": "P", "CCG": "P",
"CAA": "Q", "CAG": "Q",
"TCT": "S", "TCC": "S", "TCA": "S", "TCG": "S", "AGT": "S", "AGC": "S",
"GAA": "E", "GAG": "E",
"ACT": "T", "ACC": "T", "ACA": "T", "ACG": "T",
"GGT": "G", "GGC": "G", "GGA": "G", "GGG": "G",
"TGG": "W",
"CAT": "H", "CAC": "H",
"TAT": "Y", "TAC": "Y",
"ATT": "I", "ATC": "I", "ATA": "I",
"GTT": "V", "GTC": "V", "GTA": "V", "GTG": "V",
"TAG": "*", "TGA": "*", "TAA": "*"
}
exome_trinucleotides = {
"GCA": 1870205, "ACT": 1317607, "GCC": 2018826, "CCT": 2045943, "GTC": 1272257, "ATT": 1226179, "CTC": 1989259,
"ACA": 1614930, "ATC": 1343451, "ACG": 602918, "TTC": 1901940, "GTT": 1149725, "GCG": 857006, "GTG": 1685105,
"ACC": 1369960, "CCA": 2359526, "TTG": 1588902, "ATA": 841828, "TCA": 1853413, "CCG": 1009679, "TTA": 774505,
"TCG": 640487, "ATG": 1654761, "GTA": 798348, "CTT": 1881403, "GCT": 1983552, "CTA": 713831, "TTT": 1756413,
"CCC": 1827705, "TCC": 2026380, "TCT": 2000322, "CTG": 2769315}
aa_short = amino_acids_with_stop
aa_long = ["Ala", "Leu", "Pro", "Gly", "Met", "Ser", "Thr", "Trp", "Ile", "Val", "Cys", "Asp", "Glu", "Phe", "His", "Lys", "Asn", "Gln", "Arg", "Tyr", "STOP"]
aa_dict = dict(zip(aa_short, aa_long))
bases_dict = {"A": "A", "G": "G", "T": "T", "C": "C", "W": "AT", "S": "CG", "M": "AC", "K": "GT", "R": "AG", "Y": "CT",
"B": "TCG", "D": "AGT", "H": "ACT", "V": "ACG", "N": "ATGC"}
comp_dict = {"A": "T", "T": "A", "C": "G", "G": "C", "W": "AT", "S": "CG", "K": "AC", "M": "GT", "Y": "AG",
"R": "CT", "V": "TCG", "H": "AGT", "D": "ACT", "B": "ACG", "N": "ATGC"}
chromosome_name_mapping = {
"chr23": "chrX",
"chr24": "chrY",
"chr25": "chrXY",
"chr26": "chrM",
}
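
# Illustrative helper (not part of the original module): reverse-complement a DNA sequence
# using the complementary_nucleotide mapping defined above, e.g. reverse_complement("ACGTN") -> "NACGT".
def reverse_complement(sequence):
    return "".join(complementary_nucleotide[base] for base in reversed(sequence.upper()))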
| [
6738,
340,
861,
10141,
1330,
1720,
198,
198,
77,
14913,
313,
1460,
796,
366,
2246,
19555,
1,
220,
1303,
428,
1502,
286,
17751,
313,
1460,
318,
1593,
329,
37556,
198,
76,
7094,
62,
22866,
82,
796,
685,
64,
1343,
275,
329,
257,
287,
... | 1.918033 | 1,769 |
from airflow.operators.dummy_operator import DummyOperator
from dagger.dag_creator.airflow.operator_creator import OperatorCreator
from dagger.dag_creator.airflow.operator_creators import (
airflow_op_creator,
athena_transform_creator,
batch_creator,
dummy_creator,
python_creator,
redshift_load_creator,
redshift_transform_creator,
redshift_unload_creator,
spark_creator,
sqoop_creator,
)
from dagger.dag_creator.airflow.utils.operator_factories import make_control_flow
| [
6738,
45771,
13,
3575,
2024,
13,
67,
13513,
62,
46616,
1330,
360,
13513,
18843,
1352,
198,
6738,
31322,
13,
67,
363,
62,
45382,
13,
958,
11125,
13,
46616,
62,
45382,
1330,
35946,
16719,
273,
198,
6738,
31322,
13,
67,
363,
62,
45382,
... | 2.903955 | 177 |
#!/usr/bin/env python
"""Job Run Report"""
### usage: ./jobRunReport.py -v mycluster -u admin [-d domain]
### import pyhesity wrapper module
from pyhesity import *
### command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, required=True)
parser.add_argument('-u', '--username', type=str, required=True)
parser.add_argument('-d', '--domain', type=str, default='local')
args = parser.parse_args()
vip = args.vip
username = args.username
domain = args.domain
### authenticate
apiauth(vip, username, domain)
### find protectionRuns for last 24 hours
runs = api('get', 'protectionRuns?startTimeUsecs=%s&numRuns=100000' % timeAgo('24', 'hours'))
seen = {}
print("{:>20} {:>10} {:25}".format('JobName', 'Status ', 'StartTime'))
print("{:>20} {:>10} {:25}".format('-------', '--------', '---------'))
for run in runs:
jobName = run['jobName']
status = run['backupRun']['status']
startTime = usecsToDate(run['backupRun']['stats']['startTimeUsecs'])
if jobName not in seen:
seen[jobName] = True
print("{:>20} {:>10} {:25}".format(jobName, status, startTime))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
33308,
5660,
6358,
37811,
198,
198,
21017,
8748,
25,
24457,
21858,
10987,
19100,
13,
9078,
532,
85,
616,
565,
5819,
532,
84,
13169,
25915,
67,
7386,
60,
198,
198,
21017,
1330,
1... | 2.703271 | 428 |
# Copyright (c) 2016 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tables for L3out clones in AIM LIB
Revision ID: 07113feba145
Revises: 8e313fbeb93b
Create Date: 2016-08-08 16:23:26.119724
"""
# revision identifiers, used by Alembic.
revision = '07113feba145'
down_revision = '8e313fbeb93b'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
| [
2,
15069,
357,
66,
8,
1584,
28289,
11998,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
7... | 3.025397 | 315 |
from qgis._core import QgsVectorLayer, QgsFeature, QgsGeometry
from qgis.core import (QgsTask,
QgsMessageLog,
Qgis,
QgsSpatialIndex,
QgsPointXY,
QgsProject, QgsApplication)
class LancamentoRamal(QgsTask):
"""
    Adds 'ramais' (service branch connections) as links between the network pipes and water meters
"""
| [
6738,
10662,
70,
271,
13557,
7295,
1330,
1195,
14542,
38469,
49925,
11,
1195,
14542,
38816,
11,
1195,
14542,
10082,
15748,
198,
6738,
10662,
70,
271,
13,
7295,
1330,
357,
48,
14542,
25714,
11,
198,
220,
220,
220,
220,
220,
220,
220,
2... | 1.780269 | 223 |
import datetime as dt
from django.http import HttpResponse, JsonResponse
from django.db import connections
from django.shortcuts import render
from data.metadata import team_abbrevs, team_colors
import os
from collections import defaultdict
import boto3
from boto3.dynamodb.conditions import Key
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
table_name = os.getenv('DYNAMODB_TABLE_NAME')
table = dynamodb.Table(table_name)
| [
11748,
4818,
8079,
355,
288,
83,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
11,
449,
1559,
31077,
198,
6738,
42625,
14208,
13,
9945,
1330,
8787,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
198,
6738,
136... | 3.041096 | 146 |
from tool.runners.python import SubmissionPy
from statistics import median
def test_jon():
"""
Run `python -m pytest ./day-07/part-2/jon.py` to test the submission.
"""
assert (
JonSubmission().run(
"""
16,1,2,0,4,2,7,1,2,14
""".strip()
)
== 168
)
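

# NOTE: the JonSubmission class itself is not part of this listing. A minimal sketch of a
# part-2 solver (brute force over candidate positions with triangular fuel cost) that would
# satisfy the test above could look like this (names and approach are assumptions):
class JonSubmission(SubmissionPy):
    def run(self, s):
        positions = [int(x) for x in s.split(",")]

        def cost(target):
            return sum(abs(p - target) * (abs(p - target) + 1) // 2 for p in positions)

        return min(cost(t) for t in range(min(positions), max(positions) + 1))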
| [
6738,
2891,
13,
36740,
13,
29412,
1330,
42641,
20519,
198,
6738,
7869,
1330,
14288,
628,
628,
198,
4299,
1332,
62,
46286,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
5660,
4600,
29412,
532,
76,
12972,
9288,
24457,
820,
12,
2... | 2.138889 | 144 |
import colorama
'''
Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
Style: DIM, NORMAL, BRIGHT, RESET_ALL
'''
switcher = {
'r':colorama.Fore.RED,
'bk':colorama.Fore.BLACK,
'b':colorama.Fore.BLUE,
'g':colorama.Fore.GREEN,
'y':colorama.Fore.YELLOW,
'm':colorama.Fore.MAGENTA,
'c':colorama.Fore.CYAN,
'w':colorama.Fore.WHITE
}
def paint(str,color='r'):
    '''Utility function for printing colorful logs in the console.
    @args:
    --
        str : string to be modified.
        color : color code used to format the string. Default is 'r' (RED).
    @returns:
    --
        str : final modified string with the foreground color applied.
'''
if color in switcher:
str = switcher[color]+str+colorama.Style.RESET_ALL
return str | [
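

# Example (illustrative): paint("Download complete", 'g') returns the message wrapped in green,
# e.g. print(paint("Something went wrong", 'r')) prints it in red.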
11748,
3124,
1689,
198,
198,
7061,
6,
198,
16351,
25,
31963,
11,
23848,
11,
47606,
11,
575,
23304,
3913,
11,
9878,
8924,
11,
28263,
3525,
32,
11,
30440,
1565,
11,
44925,
11,
15731,
2767,
13,
198,
7282,
25,
31963,
11,
23848,
11,
4760... | 2.398892 | 361 |
"""
This module implements helper functions to calculate interpolated states
between successive napari views states
"""
# Author: Guillaume Witz, Science IT Support, Bern University, 2019
# License: BSD3 License
import copy, re
import numpy as np
from pyquaternion import Quaternion as pyQuaternion
from vispy.util.quaternion import Quaternion
def interpolate(states_dict):
"""Calculate interpolations for all states
Returns
-------
interpolated: dict
dictionary defining interpolated states. Each element is a list of length N
frames. Keys are:
'rotate': list of pyquaternions
'translate': list of tuple defining camera center
'zoom': list of floats defining camera zoom
'vis': list of boolean lists defining layer visibility
'time': list of int defining time-point
"""
interpolated = {}
interpolated['rotate'] = interpolate_rotation(states_dict)
interpolated['translate'] = interpolate_translation(states_dict)
interpolated['zoom'] = interpolate_scales(states_dict)
interpolated['vis'] = interpolate_visibility(states_dict)
interpolated['time'] = interpolate_time(states_dict)
return interpolated
def interpolate_rotation(states_dict):
"""Interpolate rotation states as quaternions
Parameters
----------
states_dict: list of dicts
list of states dictionaries generated by scriptcommands.create_frame_commandlist()
and naparimovie.create_state_dict()
Returns
-------
all_states: list of pyquaternions
list of rotation states of length N frames
"""
frames_rot = [[x['frame'], x['rotate']] for x in states_dict if x['rotate']]
all_states = {x: [] for x in range(frames_rot[0][0],frames_rot[-1][0]+1)}
for i in range(len(frames_rot)-1):
q0 = pyQuaternion(frames_rot[i][1].w, frames_rot[i][1].x,
frames_rot[i][1].y,frames_rot[i][1].z)
q1 = pyQuaternion(frames_rot[i+1][1].w, frames_rot[i+1][1].x,
frames_rot[i+1][1].y,frames_rot[i+1][1].z)
num_frames = frames_rot[i+1][0]-frames_rot[i][0]-1
for ind, q in enumerate(pyQuaternion.intermediates(q0, q1,num_frames, include_endpoints=True)):
all_states[frames_rot[i][0]+ind] = q
all_states = [all_states[x] for x in all_states.keys()]
return all_states
def interpolate_translation(states_dict):
"""Interpolate camera center views
Parameters
----------
states_dict: list of dicts
list of states dictionaries generated by scriptcommands.create_frame_commandlist()
and naparimovie.create_state_dict()
Returns
-------
center_interp: list of tuples
list of tuples defining camera center view of length N frames
"""
frames_trans = np.array([np.concatenate(([x['frame']], np.array(x['translate'])))
for x in states_dict if x['translate']])
all_frames = np.array([x['frame'] for x in states_dict])
center_interp = [np.interp(x=all_frames,xp = frames_trans[:,0],
fp = frames_trans[:,c+1]) for c in range(3)]
center_interp = np.stack(center_interp,axis = 1)
center_interp = [tuple(x) for x in center_interp]
return center_interp
def interpolate_scales(states_dict):
"""Interpolate camera zoom states
Parameters
----------
states_dict: list of dicts
list of states dictionaries generated by scriptcommands.create_frame_commandlist()
and naparimovie.create_state_dict()
Returns
-------
scales_interp: list of floats
list of floats defining camera zoom of length N frames
"""
frames = [x['frame'] for x in states_dict]
all_scales = np.array([[x['frame'], x['zoom']] for x in states_dict if x['zoom']])
scales_interp = np.interp(x=frames,xp = all_scales[:,0], fp = all_scales[:,1])
return scales_interp
def interpolate_visibility(states_dict):
"""Interpolate visibility states of layers
Parameters
----------
states_dict: list of dicts
list of states dictionaries generated by scriptcommands.create_frame_commandlist()
and naparimovie.create_state_dict()
Returns
-------
frame_make: list of lists
list of lists defining layer visibility of length N frames. e.g. [[True, False],[True, False]....]
for 2 layers
"""
frame_make = np.array([np.concatenate(([x['frame']], x['vis'])) for x in states_dict if x['vis']])
frame_make = np.concatenate([[frame_make[x,1::] for i in range(frame_make[x,0],frame_make[x+1,0])]
for x in range(len(frame_make)-1)]+[[frame_make[-1,1::]]]).astype(bool)
return frame_make
def interpolate_time(states_dict):
"""Interpolate time frames for 4D data
Parameters
----------
states_dict: list of dicts
list of states dictionaries generated by scriptcommands.create_frame_commandlist()
and naparimovie.create_state_dict()
Returns
-------
time_interp: list of ints
list of time points of length N frames.
"""
frames = [x['frame'] for x in states_dict]
all_scales = np.array([[x['frame'], x['time']] for x in states_dict if type(x['time']) is not list])
time_interp = None
if len(all_scales)>0:
time_interp = np.interp(x=frames,xp = all_scales[:,0], fp = all_scales[:,1]).astype(int)
return time_interp
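

# Usage sketch (illustrative, not part of naparimovie itself): ``states_dict`` is expected to
# hold one dictionary per output frame, with falsy values for fields that are not key-framed
# at that frame, e.g. {'frame': 3, 'rotate': None, 'translate': None, 'zoom': None,
# 'vis': None, 'time': None}. Calling
#
#     states = interpolate(states_dict)
#
# then returns per-frame lists under the keys 'rotate', 'translate', 'zoom', 'vis' and 'time',
# which can be used to drive the napari viewer frame by frame.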
| [
37811,
198,
1212,
8265,
23986,
31904,
5499,
284,
15284,
39555,
515,
2585,
198,
23395,
25175,
25422,
2743,
5009,
2585,
198,
37811,
198,
2,
6434,
25,
1962,
5049,
2454,
370,
4224,
11,
5800,
7283,
7929,
11,
6206,
2059,
11,
13130,
198,
2,
... | 2.41478 | 2,341 |
from django.urls import path
from . import views
app_name = 'predict'
urlpatterns = [
path('predict/', views.PredictView.as_view()),
] | [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
1330,
5009,
198,
198,
1324,
62,
3672,
796,
705,
79,
17407,
6,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
79,
17407,
14,
3256,
5009,
13,
47,
1... | 2.641509 | 53 |
""" Download Synthetic Dataset from Unity Simulation Platform
[Unity Simulation](https://unity.com/products/simulation) provides a powerful
platform for running simulations at large scale. This script provides basic
functionality that allow users to download generated synthetic dataset.
"""
import argparse
import logging
import os
from pathlib import Path
import datasetinsights.constants as const
from datasetinsights.data.simulation.download import (
Downloader,
download_manifest,
)
logging.basicConfig(
level=logging.INFO,
format=(
"%(levelname)s | %(asctime)s | %(name)s | %(threadName)s | "
"%(message)s"
),
datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
args = parse_args()
run(args)
| [
37811,
10472,
26375,
6587,
16092,
292,
316,
422,
18714,
41798,
19193,
198,
198,
58,
35955,
41798,
16151,
5450,
1378,
9531,
13,
785,
14,
29498,
14,
14323,
1741,
8,
3769,
257,
3665,
198,
24254,
329,
2491,
27785,
379,
1588,
5046,
13,
770,
... | 2.937269 | 271 |
#!/usr/bin/python | [
2,
48443,
14629,
14,
8800,
14,
29412
] | 2.428571 | 7 |
# Copyright 2019 California Institute of Technology
# ------------------------------------------------------------------
import os
import os.path as _osp
lib_dir = _osp.abspath(_osp.dirname(__file__))
__version__ = '1.7'
# from .wfirst_phaseb import wfirst_phaseb
# from .wfirst_phaseb_compact import wfirst_phaseb_compact
from .trim import trim
from .polmap import polmap
from .ffts import ffts
from .mft2 import mft2
from .copy_here import copy_here
from .copy_examples_here import copy_examples_here
from .set_data_dir import set_data_dir
map_dir = '/maps/'
polfile = '/pol/new_toma'
data_dir ="/Users/ajriggs/Repos/proper-models/wfirst_cgi/data_phaseb"
| [
2,
220,
220,
15069,
13130,
3442,
5136,
286,
8987,
198,
2,
16529,
438,
198,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
355,
4808,
2117,
198,
198,
8019,
62,
15908,
796,
4808,
2117,
13,
397,
2777,
776,
28264,
2117,
13,
15908,
3672,
... | 2.96 | 225 |
import morepath
import pytest
from hashlib import sha256
from onegov.core import Framework
from onegov.core.security import Public, Private, Secret
from onegov.core.utils import scan_morepath_modules, module_path
from onegov.user import Auth, UserApp
from tests.shared.glauth import GLAuth
from tests.shared.client import Client
from unittest.mock import MagicMock
@pytest.fixture(scope='function')
@pytest.fixture(scope='function')
| [
11748,
517,
6978,
198,
11748,
12972,
9288,
198,
198,
6738,
12234,
8019,
1330,
427,
64,
11645,
198,
6738,
530,
9567,
13,
7295,
1330,
25161,
198,
6738,
530,
9567,
13,
7295,
13,
12961,
1330,
5094,
11,
15348,
11,
3943,
198,
6738,
530,
956... | 3.456693 | 127 |
"""
Django settings for pydis_site project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import secrets
import sys
import typing
import environ
import sentry_sdk
from django.contrib.messages import constants as messages
from sentry_sdk.integrations.django import DjangoIntegration
from pydis_site.constants import GIT_SHA
if typing.TYPE_CHECKING:
from django.contrib.auth.models import User
from wiki.models import Article
env = environ.Env(
DEBUG=(bool, False),
SITE_DSN=(str, "")
)
sentry_sdk.init(
dsn=env('SITE_DSN'),
integrations=[DjangoIntegration()],
send_default_pii=True,
release=f"site@{GIT_SHA}"
)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = env('DEBUG')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
if DEBUG:
ALLOWED_HOSTS = env.list('ALLOWED_HOSTS', default=['*'])
SECRET_KEY = "yellow polkadot bikini" # noqa: S105
elif 'CI' in os.environ:
ALLOWED_HOSTS = ['*']
SECRET_KEY = secrets.token_urlsafe(32)
else:
ALLOWED_HOSTS = env.list(
'ALLOWED_HOSTS',
default=[
'pythondiscord.com',
'admin.pythondiscord.com',
'api.pythondiscord.com',
'staff.pythondiscord.com',
'pydis.com',
'api.pydis.com',
'admin.pydis.com',
'staff.pydis.com',
'api.site',
]
)
SECRET_KEY = env('SECRET_KEY')
# Application definition
INSTALLED_APPS = [
'pydis_site.apps.api',
'pydis_site.apps.home',
'pydis_site.apps.staff',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize.apps.HumanizeConfig',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites.apps.SitesConfig',
'django.contrib.staticfiles',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.discord',
'allauth.socialaccount.providers.github',
'django_hosts',
'django_filters',
'django_nyt.apps.DjangoNytConfig',
'django_simple_bulma',
'mptt',
'rest_framework',
'rest_framework.authtoken',
'sekizai',
'sorl.thumbnail',
'wiki.apps.WikiConfig',
'wiki.plugins.images.apps.ImagesConfig',
'wiki.plugins.links.apps.LinksConfig',
'wiki.plugins.redlinks.apps.RedlinksConfig',
'wiki.plugins.notifications.apps.NotificationsConfig', # Required for migrations
]
MIDDLEWARE = [
'django_hosts.middleware.HostsRequestMiddleware',
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_hosts.middleware.HostsResponseMiddleware',
]
ROOT_URLCONF = 'pydis_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'pydis_site', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'builtins': [
'django_hosts.templatetags.hosts_override',
],
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
"sekizai.context_processors.sekizai",
"pydis_site.context_processors.git_sha_processor"
],
},
},
]
WSGI_APPLICATION = 'pydis_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': env.db(),
'metricity': env.db('METRICITY_DB_URL'),
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'pydis_site', 'static')]
STATIC_ROOT = env('STATIC_ROOT', default='/app/staticfiles')
MEDIA_URL = '/media/'
MEDIA_ROOT = env('MEDIA_ROOT', default='/site/media')
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django_simple_bulma.finders.SimpleBulmaFinder',
]
# django-hosts
# https://django-hosts.readthedocs.io/en/latest/
ROOT_HOSTCONF = 'pydis_site.hosts'
DEFAULT_HOST = 'home'
if DEBUG:
PARENT_HOST = env('PARENT_HOST', default='pythondiscord.local:8000')
if ":" in PARENT_HOST:
ALLOWED_HOSTS.append(PARENT_HOST.split(":", 1)[0])
else:
ALLOWED_HOSTS.append(PARENT_HOST)
else:
PARENT_HOST = env('PARENT_HOST', default='pythondiscord.com')
# Django REST framework
# http://www.django-rest-framework.org
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.DjangoModelPermissions',
),
'TEST_REQUEST_DEFAULT_FORMAT': 'json'
}
# Logging
# https://docs.djangoproject.com/en/2.1/topics/logging/
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': (
'%(asctime)s | %(process)d:%(thread)d | %(module)s | %(levelname)-8s | %(message)s'
)
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler'
}
    },
'loggers': {
'django': {
'handlers': ['console'],
'propagate': True,
'level': env(
'LOG_LEVEL',
default=(
# If there is no explicit `LOG_LEVEL` set,
# use `DEBUG` if we're running in debug mode but not
# testing. Use `ERROR` if we're running tests, else
# default to using `WARN`.
'INFO'
if DEBUG and 'test' not in sys.argv
else (
'ERROR'
if 'test' in sys.argv
else 'WARN'
)
)
)
}
}
}
# Django Messages framework config
MESSAGE_TAGS = {
messages.DEBUG: 'primary',
messages.INFO: 'info',
messages.SUCCESS: 'success',
messages.WARNING: 'warning',
messages.ERROR: 'danger',
}
# Custom settings for django-simple-bulma
BULMA_SETTINGS = {
"variables": { # If you update these colours, please update the notification.css file
"primary": "#7289DA", # Discord blurple
# "orange": "", # Apparently unused, but the default is fine
# "yellow": "", # The default yellow looks pretty good
"green": "#32ac66", # Colour picked after Discord discussion
"turquoise": "#7289DA", # Blurple, because Bulma uses this regardless of `primary` above
"blue": "#2482c1", # Colour picked after Discord discussion
"cyan": "#2482c1", # Colour picked after Discord discussion (matches the blue)
"purple": "#aa55e4", # Apparently unused, but changed for consistency
"red": "#d63852", # Colour picked after Discord discussion
"link": "$primary",
"dimensions": "16 24 32 48 64 96 128 256 512", # Possible image dimensions
"navbar-height": "4.75rem",
"footer-padding": "1rem 1.5rem 1rem",
}
}
# Required for the wiki
LOGIN_URL = "/admin/login" # Update this when the real login system is in place
SITE_ID = 1
WIKI_ACCOUNT_HANDLING = False
WIKI_ACCOUNT_SIGNUP_ALLOWED = False
WIKI_ANONYMOUS = True
WIKI_ANONYMOUS_WRITE = False
WIKI_MARKDOWN_KWARGS = {
"extension_configs": {
"wiki.plugins.macros.mdx.toc": {
"anchorlink": True,
"baselevel": 2
}
}, "extensions": [
"markdown.extensions.abbr",
"markdown.extensions.attr_list",
"markdown.extensions.extra",
"markdown.extensions.footnotes",
"markdown.extensions.nl2br",
"markdown.extensions.sane_lists",
"wiki.core.markdown.mdx.codehilite",
"wiki.core.markdown.mdx.previewlinks",
"wiki.core.markdown.mdx.responsivetable",
"wiki.plugins.macros.mdx.toc",
"wiki.plugins.macros.mdx.wikilinks",
]
}
WIKI_MESSAGE_TAG_CSS_CLASS = {
messages.DEBUG: "", # is-info isn't distinctive enough from blurple
messages.ERROR: "is-danger",
messages.INFO: "is-primary",
messages.SUCCESS: "is-success",
messages.WARNING: "is-warning",
}
WIKI_MARKDOWN_SANITIZE_HTML = False
# Wiki permissions
def WIKI_CAN_DELETE(article: "Article", user: "User") -> bool: # noqa: N802
"""Check whether a user may delete an article."""
return user.has_perm('wiki.delete_article')
def WIKI_CAN_MODERATE(article: "Article", user: "User") -> bool: # noqa: N802
"""Check whether a user may moderate an article."""
return user.has_perm('wiki.moderate')
def WIKI_CAN_WRITE(article: "Article", user: "User") -> bool: # noqa: N802
"""Check whether a user may create or edit an article."""
return user.has_perm('wiki.change_article')
# Django Allauth stuff
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
ACCOUNT_ADAPTER = "pydis_site.utils.account.AccountAdapter"
ACCOUNT_EMAIL_REQUIRED = False # Undocumented allauth setting; don't require emails
ACCOUNT_EMAIL_VERIFICATION = "none" # No verification required; we don't use emails for anything
ACCOUNT_DEFAULT_HTTP_PROTOCOL = "https"
# We use this validator because Allauth won't let us actually supply a list with no validators
# in it, and we can't just give it a lambda - that'd be too easy, I suppose.
ACCOUNT_USERNAME_VALIDATORS = "pydis_site.VALIDATORS"
LOGIN_REDIRECT_URL = "home"
SOCIALACCOUNT_ADAPTER = "pydis_site.utils.account.SocialAccountAdapter"
SOCIALACCOUNT_PROVIDERS = {
"discord": {
"SCOPE": [
"identify",
],
"AUTH_PARAMS": {"prompt": "none"}
}
}
| [
37811,
198,
35,
73,
14208,
6460,
329,
279,
5173,
271,
62,
15654,
1628,
13,
198,
198,
8645,
515,
416,
705,
28241,
14208,
12,
28482,
923,
16302,
6,
1262,
37770,
362,
13,
16,
13,
198,
198,
1890,
517,
1321,
319,
428,
2393,
11,
766,
19... | 2.265368 | 5,238 |
import dvision
model = "run_0923_1"
iteration = "630000"
net_path = "/groups/turaga/home/grisaitisw/experiments/{}/net_test_big.prototxt".format(model)
caffemodel_path = "/groups/turaga/home/grisaitisw/experiments/{}/net_iter_{}.caffemodel".format(model, iteration)
net_output_shape = (116,) * 3
dname = "fib25-e402c09"
image = dvision.DVIDDataInstance(
"slowpoke3",
32773,
"e402c09ddd0f45e980d9be6e9fcb9bd0",
"grayscale"
)
| [
11748,
288,
10178,
628,
198,
19849,
796,
366,
5143,
62,
2931,
1954,
62,
16,
1,
198,
2676,
341,
796,
366,
5066,
2388,
1,
198,
198,
3262,
62,
6978,
796,
12813,
24432,
14,
36590,
8126,
14,
11195,
14,
2164,
271,
4548,
271,
86,
14,
231... | 2.187192 | 203 |
from typing import Optional
from pydantic import BaseModel
from src.schema.call import Call
from src.schema.tile import Tile
| [
6738,
19720,
1330,
32233,
198,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
198,
198,
6738,
12351,
13,
15952,
2611,
13,
13345,
1330,
4889,
198,
6738,
12351,
13,
15952,
2611,
13,
40927,
1330,
47870,
628
] | 3.657143 | 35 |
import argparse
import os
import time
import torch
import numpy as np
from torch.utils.data import DataLoader
import torch.optim
from torch.nn.utils import clip_grad_norm_
from data import TrainStation
from motsynth import MOTSynth, MOTSynthBlackBG
from log_utils import log_summary
from utils import save_ckpt, load_ckpt, print_scalor
from utils import spatial_transform, visualize
from common import *
import parse
import pickle
import json
import skimage.transform as st
from pycocotools import mask as coco_mask
from tensorboardX import SummaryWriter
from skimage import img_as_bool
from skimage.transform import resize
from scalor import SCALOR
def calculate_IoU(prediction, target):
    '''
    Calculates the Intersection over Union (IoU) between a prediction and a target mask.
    '''
    intersection = np.logical_and(target, prediction)
    union = np.logical_or(target, prediction)
    iou_score = np.sum(intersection) / np.sum(union)
    return iou_score
parser = argparse.ArgumentParser(description='SCALOR')
# args = parser.parse_args()
parser.add_argument('-f')  # common.cfg overrides
args = parse.parse(parser)
args.batch_size = 1
device = torch.device("cuda" if not args.nocuda and torch.cuda.is_available() else "cpu")
# data_dir = "images_bw_5"
data_dir = "images_heavily_blurred_bw"
train_data = MOTSynthBlackBG(data_dir, train=False)
train_loader = DataLoader(
train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=True)
num_train = len(train_data)
model = SCALOR(args)
model = model.to(device)
model.eval()
# args.last_ckpt = './model_gradient_2/ckpt_epoch_11200.pth'
args.last_ckpt = './model_perceptual_gan_v2_6x6/ckpt_epoch_8000.pth'
optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr)
# global_step = 0
print(f"Last checkpoint: {args.last_ckpt}")
if args.last_ckpt:
global_step, args.start_epoch = load_ckpt(model, optimizer, args.last_ckpt, device)
args.global_step = global_step
log_tau_gamma = np.log(args.tau_end) / args.tau_ep
annotation_indices = [28, 33, 42, 59]
video_indice = -1
seq_id = 0
predictions_list = []
for i in range(len(annotation_indices) * 36):
sample, counting_gt = train_loader.dataset.__getitem__(i+36)
sample = sample.unsqueeze(0)
if i % 36 == 0:
video_indice += 1
video_id = annotation_indices[video_indice]
seq_id = 0
model.eval()
tau = np.exp(global_step * log_tau_gamma)
tau = max(tau, args.tau_end)
args.tau = tau
global_step += 1
log_phase = True
args.global_step = global_step
args.log_phase = log_phase
imgs = sample.to(device)
print(f"imgs shape: {imgs.shape}", flush=True)
preds = model(imgs)
y_seq, log_like, kl_z_what, kl_z_where, kl_z_depth, \
kl_z_pres, kl_z_bg, log_imp, counting, \
log_disc_list, log_prop_list, scalor_log_list = preds
id_dict = predict(log_disc_list, log_prop_list, video_id, seq_id)
preds = list(id_dict.values())
for pr_id, pr in enumerate(preds):
preds[pr_id]["score"] = sum(preds[pr_id]["score"]) / len(preds[pr_id]["score"])
predictions_list.extend(list(id_dict.values()))
seq_id += 1
with open('predictions_model_perceptual_gan_6x6_003.json', 'w') as handle:
json.dump(predictions_list, handle)
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
198,
11748,
28034,
13,
40085,
198,
6738,
28034,
13,
20471,
13,
26791,... | 2.521105 | 1,303 |
from setuptools import setup
import os
README = os.path.join(os.path.dirname(__file__), 'README.md')
REQUIREMENTS = os.path.join(os.path.dirname(__file__), 'requirements.txt')
setup(
name='soap-as-rest-server',
version='0.0.2',
description='Soap Proxy Module to get data from SOAP Services',
long_description=open(README).read(),
long_description_content_type='text/markdown',
author='Frank Mendonca', author_email='frankmed57@gmail.com',
license='MIT',
keywords=['soap', 'xml', 'json', 'rest'],
install_requires=open(REQUIREMENTS).readlines(),
packages=['soap_as_rest_server'],
zip_safe=False,
platforms='any',
include_package_data=True,
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries'
],
url='https://github.com/frankmendonca/soap-as-rest-server',
python_requires='>=3.6',
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
11748,
28686,
198,
198,
15675,
11682,
796,
28686,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
705,
15675,
11682,
13,
9132,
11537,
198,
2200,
49128,
28957,
7... | 2.623501 | 417 |
#!/usr/bin/env python
import sys
import yaml
import operator
import rospy
sys.path.append('/home/z420/ros_ws/src/jp_baxtertry1/scripts')
import tablero
import Baxtermovimiento
from jp_baxtertry1.srv import *
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25064,
198,
11748,
331,
43695,
198,
11748,
10088,
198,
11748,
686,
2777,
88,
198,
17597,
13,
6978,
13,
33295,
10786,
14,
11195,
14,
89,
27211,
14,
4951,
62,
18504,
14,
1067... | 2.592593 | 81 |
# Copyright 2019 Gabriele Valvano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import telegram_notifier
import logging
import argparse
import socket
hostname = socket.gethostname()
IPAddr = socket.gethostbyname(hostname)
# telegram bot ---
TELEGRAM_TOKEN_ID = '' # token-id
TELEGRAM_CHAT_ID = '' # chat-id
# ----------------
parser = argparse.ArgumentParser(description='Notifier.')
parser.add_argument("--message", type=str, help='Message for the notifier.', default='Process terminated.')
parser.add_argument("--token_id", type=str, help='Token ID for the chat bot.', default=TELEGRAM_TOKEN_ID)
parser.add_argument("--chat_id", type=str, help='Chat ID for the chat bot.', default=TELEGRAM_CHAT_ID)
parser.add_argument("--hostname", type=str, help='Name of the server running the task.', default=hostname)
parser.add_argument("--ip", type=str, help='IP address of the server running the task.', default=IPAddr)
# just calls the `main` function above
if __name__ == '__main__':
main()
| [
2,
220,
15069,
13130,
12300,
5034,
293,
3254,
85,
5733,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
2... | 3.275488 | 461 |
import psycopg2
conn = psycopg2.connect(database="news")
cursor = conn.cursor()
cursor.execute('''select articles.slug, count(log.path) as a from
articles join log on SUBSTRING( log.path, 10)=articles.slug
group by articles.slug order by a desc limit 3;''')
results = cursor.fetchall()
print('1. What are the most popular three articles of all time?')
for result in results:
print(result[0], result[1])
cursor.execute('''select authors.name, articalsslug.a from authors, articles,
articalsslug where authors.id = articles.author and
articles.slug = articalsslug.slug;''')
results = cursor.fetchall()
print('2. Who are the most popular article authors of all time?')
for result in results:
print(result[0], result[1])
cursor.execute('''select * from (select (nfstatus/ sum( okstatus + nfstatus)
* 100) as Error, date from days group by date, nfstatus) as Error
where Error > 1;''')
results = cursor.fetchall()
print('3. On which days did more than 1% of requests lead to errors?')
for result in results:
print(result[0], result[1])
conn.close()
| [
198,
11748,
17331,
22163,
70,
17,
198,
198,
37043,
796,
17331,
22163,
70,
17,
13,
8443,
7,
48806,
2625,
10827,
4943,
198,
66,
21471,
796,
48260,
13,
66,
21471,
3419,
198,
66,
21471,
13,
41049,
7,
7061,
6,
19738,
6685,
13,
6649,
1018... | 3.066282 | 347 |
# imports
from scipy import optimize, math
import numpy
import ROOT
from array import array
ROOT.gStyle.SetOptStat(1100)
ROOT.gStyle.SetOptTitle(0)
###############
# DEFINITIONS #
###############
# Poisson prob
# Poisson LH
# exp prob
# exp LH
# binned chi2, expected uncertainties
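# NOTE: the function bodies for the definitions above are not included in this listing.
# The two that are actually used below (exp LH and binned chi2) are sketched here under
# stated assumptions; they are reconstructions, not the original implementations.
def lh_exp(tau, data):
    # Negative log-likelihood for an exponential pdf f(x; tau) = (1/tau) * exp(-x/tau).
    tau = float(numpy.atleast_1d(tau)[0])
    if tau <= 0:
        return 1e10
    data = numpy.asarray(data)
    return len(data) * numpy.log(tau) + numpy.sum(data) / tau

def chi2_exp(tau, hist):
    # Binned chi2 between the toy histogram and the expected exponential counts,
    # using the expected (Poisson) uncertainty sqrt(expected) in each bin.
    tau = float(numpy.atleast_1d(tau)[0])
    if tau <= 0:
        return 1e10
    ntot = hist.GetEntries()
    chi2 = 0.0
    for ib in range(1, hist.GetNbinsX() + 1):
        lo = hist.GetXaxis().GetBinLowEdge(ib)
        hi = hist.GetXaxis().GetBinUpEdge(ib)
        expected = ntot * (numpy.exp(-lo / tau) - numpy.exp(-hi / tau))
        if expected > 0:
            chi2 += (hist.GetBinContent(ib) - expected) ** 2 / expected
    return chi2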
##############
# PARAMETERS #
##############
mu0=3.5
tau0=0.5
ntrials=100
################
# MAIN PROGRAM #
################
# derived constants
xmin=0
xmax=5*tau0
nbins=10
# lists to be used to plot the graphs
ntoys = []
dntoys = []
# list of methods
means = {}
variances = {}
methods = ["moments","MLE","chi2"]
for meth in methods:
means[meth] = 0
variances[meth] = 0
rnd = ROOT.TRandom3()
nt = 100
ntoys.append(nt)
dntoys.append(0)
# trials
estimates = {}
for meth in methods:
estimates[meth] = []
for i in range(0,ntrials):
# toy generation
toys = []
h = ROOT.TH1F("h","",10,0,10)
for it in range(0,nt):
toy = rnd.Exp(tau0)
toys.append(toy)
h.Fill(toy)
for meth in methods:
result=0
if meth=="MLE":
# maximum likelihood estimate
result = optimize.fmin(lh_exp,tau0,args=(toys,),disp=False)
elif meth=="moments":
result = numpy.mean(toys)
elif meth=="chi2":
result = optimize.fmin(chi2_exp,tau0,args=(h,),disp=False)
# print(meth,result)
estimates[meth].append(result)
del h
# check the mean and variance of the estimator for the different methods
for meth in methods:
means[meth] = numpy.mean(estimates[meth])
variances[meth] = numpy.var(estimates[meth])
print ("mean for method %s: %f" % (meth , means[meth]))
print ("variance for method %s: %f" % (meth, variances[meth]))
# draw the distribition of estimator values
c = ROOT.TCanvas("c0","estimators")
c.Divide(len(methods))
i=0
h = []
for meth in methods:
i=i+1
c.cd(i)
h.append(ROOT.TH1F("h%d" % i, "",nbins,xmin,xmax))
for j in range(0,len(estimates[meth])):
h[-1].Fill(estimates[meth][j])
h[-1].Draw()
c.Draw()
# raw_input("Press Enter to continue ...")
| [
2,
17944,
198,
6738,
629,
541,
88,
1330,
27183,
11,
10688,
198,
11748,
299,
32152,
198,
11748,
15107,
2394,
198,
6738,
7177,
1330,
7177,
628,
198,
13252,
2394,
13,
70,
21466,
13,
7248,
27871,
17126,
7,
42060,
8,
198,
13252,
2394,
13,
... | 2.170974 | 1,006 |
from storage.kv_store import KeyValueStorage
class AttributeStore:
"""
    Stores attributes as key-value pairs, where the key is the hash of the
    attribute as stored in the ledger and the value is the actual value of the attribute.
"""
| [
6738,
6143,
13,
74,
85,
62,
8095,
1330,
7383,
11395,
31425,
628,
198,
4871,
3460,
4163,
22658,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
41835,
12608,
355,
1994,
1988,
5166,
810,
262,
1994,
318,
12234,
286,
262,
198,
220,
2... | 3.671875 | 64 |
import pytest
from spacy.language import Language
from spacy.lang.en import English
from spacy.training import Example
from thinc.api import ConfigValidationError
from pydantic import StrictBool
| [
11748,
12972,
9288,
198,
6738,
599,
1590,
13,
16129,
1330,
15417,
198,
6738,
599,
1590,
13,
17204,
13,
268,
1330,
3594,
198,
6738,
599,
1590,
13,
34409,
1330,
17934,
198,
6738,
294,
1939,
13,
15042,
1330,
17056,
7762,
24765,
12331,
198,... | 3.843137 | 51 |
"""
Using dictionaries
A dictionary can store objects of any type. Unlike lists and sets, each element of a dictionary is a key-value pair consisting of a key and a value.
"""
if __name__ == '__main__':
main()
| [
37811,
201,
198,
45635,
18796,
101,
27764,
245,
17739,
116,
201,
198,
20998,
107,
20015,
98,
27764,
246,
43636,
101,
20015,
119,
35707,
237,
163,
109,
119,
161,
252,
233,
43380,
117,
164,
109,
94,
171,
120,
234,
10310,
236,
26344,
245... | 0.801471 | 136 |
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'dayOfProgrammer' function below.
#
# The function is expected to return a STRING.
# The function accepts INTEGER year as parameter.
#
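# NOTE: the implementation of 'dayOfProgrammer' is not part of this listing. A minimal sketch
# under the usual problem rules (256th day of the year; Julian calendar through 1917,
# Gregorian from 1919, and the 1918 transition year in Russia) could be:
def dayOfProgrammer(year):
    if year == 1918:
        return '26.09.1918'
    if year < 1918:
        leap = year % 4 == 0
    else:
        leap = year % 400 == 0 or (year % 4 == 0 and year % 100 != 0)
    return ('12.09.%d' if leap else '13.09.%d') % year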
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
year = int(input().strip())
result = dayOfProgrammer(year)
fptr.write(result + '\n')
fptr.close()
| [
2,
48443,
8800,
14,
29412,
18,
198,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
302,
198,
11748,
25064,
198,
198,
2,
198,
2,
13248,
262,
705,
820,
5189,
15167,
647,
6,
2163,
2174,
13,
198,
2,
198,
2,
383,
2... | 2.763158 | 152 |
from __future__ import absolute_import
from django.conf.urls import url
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.views.decorators.http import require_POST
from ..cart.models import Cart, user_is_authenticated
from ..order import handler
from ..order.exceptions import EmptyCart
from ..order.models import Order
from ..order.signals import order_pre_confirm
from ..payment import PaymentFailure
from ..core.app import SatchlessApp
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
18941,
198,
6738,
42625,
14208,
13,
28243,
13,
26209,
1330,
37350,
31077,
198,... | 3.798611 | 144 |
"""Tests for the Oocsi for Homeassistant integration."""
| [
37811,
51,
3558,
329,
262,
440,
420,
13396,
329,
5995,
562,
10167,
11812,
526,
15931,
198
] | 3.5625 | 16 |
import setuptools
setuptools.setup(
name="ptranslator",
version="1.5.1",
author="Bogdan Caleta Ivkovic",
author_email="bogdan.caleta@gmail.com",
description="Simple yet effective module for translating that uses Google Translate",
url="https://github.com/Raptr3x/python-translator",
packages=["ptranslator"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={
'console_scripts': [
'ptranslator = ptranslator.ptranslator:main'
]
},
) | [
11748,
900,
37623,
10141,
201,
198,
201,
198,
2617,
37623,
10141,
13,
40406,
7,
201,
198,
220,
220,
220,
1438,
2625,
20692,
504,
41880,
1600,
201,
198,
220,
220,
220,
2196,
2625,
16,
13,
20,
13,
16,
1600,
201,
198,
220,
220,
220,
... | 2.371841 | 277 |
from compas.geometry import Vector, dot_vectors
from compas_slicer.utilities import remap, remap_unbound
import logging
logger = logging.getLogger('logger')
__all__ = ['set_linear_velocity_constant',
'set_linear_velocity_per_layer',
'set_linear_velocity_by_range',
'set_linear_velocity_by_overhang']
def set_linear_velocity_constant(print_organizer, v=25.0):
"""Sets the linear velocity parameter of the printpoints depending on the selected type.
Parameters
----------
print_organizer: :class:`compas_slicer.print_organization.BasePrintOrganizer`
v: float. Velocity value (in mm/s) to set for printpoints. Defaults to 25 mm/s.
"""
logger.info("Setting constant linear velocity")
for printpoint in print_organizer.printpoints_iterator():
printpoint.velocity = v
def set_linear_velocity_per_layer(print_organizer, per_layer_velocities):
"""Sets the linear velocity parameter of the printpoints depending on the selected type.
Parameters
----------
print_organizer: :class:`compas_slicer.print_organization.BasePrintOrganizer`
per_layer_velocities: list
A list of velocities (floats) with equal length to the number of layers.
"""
logger.info("Setting per-layer linear velocity")
assert len(per_layer_velocities) == print_organizer.number_of_layers, 'Wrong number of velocity values. You need \
to provide one velocity value per layer, on the "per_layer_velocities" list.'
for printpoint, i, j, k in print_organizer.printpoints_indices_iterator():
printpoint.velocity = per_layer_velocities[i]
def set_linear_velocity_by_range(print_organizer, param_func, parameter_range, velocity_range,
bound_remapping=True):
"""Sets the linear velocity parameter of the printpoints depending on the selected type.
Parameters
----------
print_organizer: :class:`compas_slicer.print_organization.BasePrintOrganizer`
param_func: function that takes as argument a :class: 'compas_slicer.geometry.Printpoint': get_param_func(pp)
and returns the parameter value that will be used for the remapping
parameter_range: tuple
An example of a parameter that can be used is the overhang angle, or the layer height.
velocity_range: tuple
The range of velocities where the parameter will be remapped
bound_remapping: bool
If True, the remapping is bound in the domain velocity_range, else it is unbound.
"""
logger.info("Setting linear velocity based on parameter range")
for printpoint in print_organizer.printpoints_iterator():
param = param_func(printpoint)
assert param, 'The param_func does not return any value for calculating the velocity range.'
if bound_remapping:
v = remap(param, parameter_range[0], parameter_range[1], velocity_range[0], velocity_range[1])
else:
v = remap_unbound(param, parameter_range[0], parameter_range[1], velocity_range[0], velocity_range[1])
printpoint.velocity = v
def set_linear_velocity_by_overhang(print_organizer, overhang_range, velocity_range, bound_remapping=True):
"""Set velocity by overhang by using set_linear_velocity_by_range.
An example function for how to use the 'set_linear_velocity_by_range'. In this case the parameter that controls the
velocity is the overhang, measured as a dot product with the horizontal direction.
Parameters
----------
print_organizer: :class:`compas_slicer.print_organization.BasePrintOrganizer`
overhang_range: tuple:
should be within [0.0, 1.0]. For example a reasonable value would be [0.0, 0.5], that would
be remapping overhangs up to 45 degrees
velocity_range: tuple
bound_remapping: bool
"""
    # returns values from 0.0 (no overhang) to 1.0 (horizontal overhang)
    # NOTE: param_func was missing from this listing; the definition below is an assumed
    # reconstruction (the PrintPoint attribute name and axis convention are assumptions).
    def param_func(printpoint):
        return abs(dot_vectors(printpoint.mesh_normal, Vector(0.0, 0.0, 1.0)))
    set_linear_velocity_by_range(print_organizer, param_func, overhang_range, velocity_range, bound_remapping)
if __name__ == "__main__":
pass
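    # Example (illustrative only): once a BasePrintOrganizer exists, overhanging regions could
    # be slowed down with something like:
    #     set_linear_velocity_by_overhang(print_organizer,
    #                                     overhang_range=(0.0, 0.5),
    #                                     velocity_range=(25.0, 10.0))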
| [
6738,
552,
292,
13,
469,
15748,
1330,
20650,
11,
16605,
62,
303,
5217,
198,
6738,
552,
292,
62,
82,
677,
263,
13,
315,
2410,
1330,
816,
499,
11,
816,
499,
62,
403,
7784,
198,
11748,
18931,
198,
198,
6404,
1362,
796,
18931,
13,
113... | 2.877841 | 1,408 |
import torch, timeit, functools
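
# NOTE: the benchmarked function is not included in this listing; a plain matrix multiply is
# assumed here. For accurate GPU numbers one would normally also call torch.cuda.synchronize()
# around the timed region, since CUDA kernels launch asynchronously.
def test(a, b):
    return a @ b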
A = torch.randn(15000,15000)
B = torch.randn(15000,15000)
Ag = A.cuda()
Bg = B.cuda()
cpu_timer = timeit.Timer(functools.partial(test,A,B))
cpu_time = cpu_timer.timeit(1)
gpu_timer = timeit.Timer(functools.partial(test,Ag,Bg))
gpu_time = gpu_timer.timeit(1)
print(cpu_time)
# 73.34245827499944
print(gpu_time)
# 0.3741293080001924
| [
11748,
28034,
11,
640,
270,
11,
1257,
310,
10141,
198,
198,
32,
796,
28034,
13,
25192,
77,
7,
1314,
830,
11,
1314,
830,
8,
198,
33,
796,
28034,
13,
25192,
77,
7,
1314,
830,
11,
1314,
830,
8,
198,
10262,
796,
317,
13,
66,
15339,
... | 2.224242 | 165 |
"""Standard names for input dataloader modes.
The following standard keys are defined:
* `TRAIN`: training mode.
* `EVAL`: evaluation mode.
* `PREDICT`: prediction mode.
* `PREDICT_WITH_GT`: prediction mode with groundtruths in returned variables.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
TRAIN = 'train'
EVAL = 'eval'
PREDICT = 'predict'
PREDICT_WITH_GT = 'predict_with_gt'
| [
37811,
23615,
3891,
329,
5128,
4818,
282,
1170,
263,
12881,
13,
198,
198,
464,
1708,
3210,
8251,
389,
5447,
25,
198,
198,
9,
4600,
51,
3861,
1268,
63,
25,
3047,
4235,
13,
198,
9,
4600,
20114,
1847,
63,
25,
12660,
4235,
13,
198,
9,... | 3.054054 | 148 |
#!/usr/bin/env python
#
# Password manager: handles hashing and comparing for passwords used in LikedSavedDownloaderServer
# I'm no expert so use and trust at your own risk!
#
from passlib.context import CryptContext
# It's not strictly necessary to import these, but I do it here for PyInstaller
# (see https://github.com/pyinstaller/pyinstaller/issues/649)
import argon2
import cffi
import configparser
import passlib.handlers
import passlib.handlers.argon2
import passlib.handlers.sha2_crypt
import passlib.handlers.bcrypt
import sys
import os
# Even if this file gets compromised, it'll still be hard to use for anything
passwordsFilename = "passwords.txt"
password_context = CryptContext(
# Replace this list with the hash(es) you wish to support.
# this example sets pbkdf2_sha256 as the default,
# with additional support for reading legacy des_crypt hashes.
schemes=["argon2", "sha512_crypt", "bcrypt"],
# Automatically mark all but first hasher in list as deprecated.
# (this will be the default in Passlib 2.0)
deprecated="auto",
# Optionally, set the number of rounds that should be used.
# Appropriate values may vary for different schemes,
# and the amount of time you wish it to take.
# Leaving this alone is usually safe, and will use passlib's defaults.
## pbkdf2_sha256__rounds = 29000,
)
passwords = []
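# Neither createPassword() (used below) nor the code that fills 'passwords' is shown in this
# snippet. As an illustration only, this is how passlib's CryptContext configured above is
# typically used for hashing and verification (the function names here are hypothetical):
def hash_password(plain_password):
    # Salted hash using the default scheme from the context above (argon2)
    return password_context.hash(plain_password)

def verify_password(plain_password, stored_hash):
    # verify_and_update() also reports a re-hash when the stored hash uses a deprecated scheme
    valid, _new_hash = password_context.verify_and_update(plain_password, stored_hash)
    return valid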
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Wrong number of arguments!\n"
"PasswordManager: Adds a password to the passwords file.\n"
"Usage:\n python PasswordManager.py \"your password\"")
else:
createPassword(sys.argv[1])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
30275,
4706,
25,
17105,
49544,
290,
14176,
329,
21442,
973,
287,
406,
17951,
50,
9586,
10002,
263,
10697,
198,
2,
314,
1101,
645,
5887,
523,
779,
290,
3774,
379,
534,
898,... | 3.066055 | 545 |
from ...objects import dp, MySignalEvent
from ...utils import edit_message, new_message, delete_message, sticker_message
from datetime import datetime, date
import time
@dp.my_signal_event_handle('алло')
@dp.my_signal_event_handle('auth')
@dp.my_signal_event_handle('мессага')
@dp.my_signal_event_handle('свалить')
@dp.my_signal_event_handle('луна')
@dp.my_signal_event_handle('повтори')
@dp.my_signal_event_handle('статус')
@dp.my_signal_event_handle('бот')
@dp.my_signal_event_handle('ирисразбан')  # not finished yet
@dp.my_signal_event_handle('гп') | [
6738,
2644,
48205,
1330,
288,
79,
11,
2011,
11712,
282,
9237,
198,
6738,
2644,
26791,
1330,
4370,
62,
20500,
11,
649,
62,
20500,
11,
12233,
62,
20500,
11,
27735,
62,
20500,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
3128,
198,
11748... | 2.106464 | 263 |
#!/usr/bin/env python
"""
LFU.py: A cache which implements the
least frequently used algorithm.
"""
__author__ = "Yves Weissig"
__email__ = "weissig@uni-mainz.de"
__status__ = "Development"
import random
from AbstractCache import AbstractCache
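# LFUFrequency and LFUItem are used by LFUCache below but are not defined in this snippet.
# Judging from how they are used they are plain containers; a minimal sketch (assumed,
# not the original definitions):
class LFUFrequency(object):
    """One frequency bucket: holds the items that have been accessed 'frequency' times."""
    def __init__(self, frequency):
        self.frequency = frequency
        self.items = {}


class LFUItem(object):
    """A cached object reference: its id, last access time and size in bytes."""
    def __init__(self, obj_id, xtime, size):
        self.obj_id = obj_id
        self.xtime = xtime
        self.size = size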
class LFUCache(AbstractCache):
"""
Represents a cache which uses the least frequently used algorithm.
"""
# A dict which maps obj_id to its frequency
lut = {}
# A dict full of SimpleLFUFrequency's
freq = {}
# Size of this cache, in bytes
cache_size = 10000000000000000000
# The number of used bytes in this cache
used_size = 0
# A dict which contains all stats
stats = {}
def __init__(self, param_cache_size, min_obj_size, max_obj_size):
"""
Just some boring boilerplate code to init the cache.
"""
self.freq[0] = LFUFrequency(0)
self.freq[1] = LFUFrequency(1)
self.cache_size = param_cache_size
self.stats["cache_size"] = param_cache_size
self.stats["cache_size_bytes"] = param_cache_size
self.stats["cache_size_kilobytes"] = param_cache_size / 1024
self.stats["cache_size_megabytes"] = param_cache_size / 1024 / 1024
self.stats["cache_size_gigabytes"] = param_cache_size / 1024 / 1024 / 1024
self.stats["cache_type"] = "SimpleLFU"
self.stats["evicted_objects"] = 0
self.stats["cached_objects"] = 0
self.stats["cached_bytes_written"] = 0
# Needed for "backwards" compatibility with SimpleBuckets
self._max_size = param_cache_size
def get_stats(self):
"""
Returns the statistical information.
"""
return self.stats
def get_num_cached_objects(self):
"""
Returns the number of cached objects.
"""
return len(self.lut)
def is_cached(self, obj_id):
"""
Returns if the object with the
passed obj_id is cached or not.
"""
return obj_id in self.lut
def get_free_cache_bytes(self, size):
"""
Returns the number of free bytes in this cache.
"""
return self.cache_size - self.used_size
def update_obj_size(self, obj_id, size, delta):
"""
Updates the size of an object in the cache.
"""
# Sanity checks
# assert(obj_id in self.lut)
# assert(obj_id in self.freq[self.lut[obj_id]].items)
if obj_id not in self.lut:
# Makes no sense here, but SimpleLRU behaves the same
#raise Exception("Unable to update size of object ('%s') which "
#"is not cached!" % obj_id)
return
if obj_id not in self.freq[self.lut[obj_id]].items:
raise Exception("Internal error during updating size of object "
"('%s'), the lut points to a wrong frequency bucket!" % obj_id)
# Update size
self.freq[self.lut[obj_id]].items[obj_id].size = size
self.used_size += delta
#self.sanity_check("update_obj_size")
def remove_cached(self, obj_id):
"""
Removes an object from the cache, returns the frequency it was
used and the amount of freed bytes in the cache.
"""
# Sanity checks
# assert(obj_id in self.lut)
# assert(obj_id in self.freq[self.lut[obj_id]].items)
if obj_id not in self.lut:
# raise Exception("Unable to remove an object ('%s') which "
# "is not cached!" % obj_id)
# This shouldn't raise an exception... because if we evict
# the object and a second put is issued through the storage system
# this would lead to an error here, although everything is fine.
return 0
if obj_id not in self.freq[self.lut[obj_id]].items:
raise Exception("Internal error during removing the object ('%s'),"
" the lut points to a wrong frequency bucket!" % obj_id)
# Free bytes and delete object in lut as well as in frequency bucket
_freq = self.lut[obj_id]
_size = self.freq[self.lut[obj_id]].items[obj_id].size
self.used_size -= _size
del self.freq[self.lut[obj_id]].items[obj_id]
del self.lut[obj_id]
#self.sanity_check("remove_cached")
return _freq, _size
def cache_object(self, obj_id, size, xtime, force=True):
"""
Caches an object.
"""
# Don't cache objects which are too big
if size > self.cache_size:
raise Exception("Object '%s' is too big for this cache!" % obj_id)
# Evict objects if needed
current_freq = 0
i = 0
#an_obj_id = None
while self.used_size + size > self.cache_size:
if (current_freq not in self.freq or
self.freq[current_freq] is None or
self.freq[current_freq].items is None or
len(self.freq[current_freq].items) == 0):
current_freq += 1
else:
an_obj_id = random.choice(list(self.freq[current_freq].items.keys()))
if i % 1000 == 0:
print ("Warning, evicted %d objects (us: %d, s: %d, cs: %d, freq: %d, an_obj_id: %s, in lut: %s)" %
(i, self.used_size, size, self.cache_size, current_freq, an_obj_id, an_obj_id in self.lut))
self.remove_cached(an_obj_id)
self.stats["evicted_objects"] += 1
i += 1
# Dirty, dirty fix... sometimes the object is already present
if obj_id in self.lut:
self.remove_cached(obj_id)
# Put the object into the frequency bucket
self.freq[1].items[obj_id] = LFUItem(obj_id, xtime, size)
# Create a reference to the frequency bucket in the lut
self.lut[obj_id] = 1
# Set the used size of the cache
self.used_size += size
# Write statistics
self.stats["cached_objects"] += 1
self.stats["cached_bytes_written"] += size
#self.sanity_check("cache_object")
def get_cached(self, obj_id, xtime):
"""
Retrieves an object from the cache.
"""
if obj_id not in self.lut:
return False
if obj_id not in self.freq[self.lut[obj_id]].items:
raise Exception("Internal error during retrieving the cached object"
" '%s', the lut points to a wrong frequency bucket!" % obj_id)
(_freq, _size) = self.remove_cached(obj_id)
_freq += 1
if _freq not in self.freq or self.freq[_freq] is None:
self.freq[_freq] = LFUFrequency(_freq)
self.freq[_freq].items[obj_id] = LFUItem(obj_id, xtime, _size)
self.lut[obj_id] = _freq
self.used_size += _size
#self.sanity_check("get_cached")
return True
def debug_print(self):
"""
A debug function used to print the contents of the cache.
"""
print ("---------")
print ("num_cached_objects: %s" % self.get_num_cached_objects())
print ("get_free_cache_bytes: %s" % self.get_free_cache_bytes(None))
for key, value in self.freq.items():
print ("Frequency: %s" % key)
print (value.items)
# AbstractCache.register(LFUCache)
if __name__ == "__main__":
# Replay of a small protocol
# __init__ also requires min/max object sizes (unused there); arbitrary values are passed here.
tmp = LFUCache(2 * 1024, 0, 1024)
tmp.debug_print()
tmp.cache_object("a", 1024, 0)
tmp.cache_object("b", 512, 0)
tmp.cache_object("c", 256, 0)
tmp.debug_print()
tmp.get_cached("a", 1)
tmp.get_cached("a", 2)
tmp.get_cached("b", 3)
tmp.get_cached("a", 4)
tmp.get_cached("a", 5)
tmp.get_cached("a", 6)
tmp.get_cached("b", 7)
tmp.debug_print()
tmp.cache_object("d", 512, 0)
tmp.debug_print()
tmp.get_cached("d", 7)
tmp.get_cached("d", 8)
tmp.get_cached("d", 9)
tmp.get_cached("d", 10)
tmp.debug_print()
tmp.cache_object("e", 1024, 0)
tmp.debug_print() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
43,
38989,
13,
9078,
25,
317,
12940,
543,
23986,
262,
198,
293,
459,
6777,
973,
11862,
13,
198,
37811,
198,
198,
834,
9800,
834,
796,
366,
56,
1158,
16152,
328,
1,
198,
... | 2.104204 | 3,925 |
from datetime import datetime
from pathlib import Path
from typing import List, Dict
from autoleagueplay.match_result import MatchResult
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
198,
198,
6738,
1960,
2305,
2064,
1759,
13,
15699,
62,
20274,
1330,
13225,
23004,
628
] | 3.971429 | 35 |
import unittest
from algorithms.root_finding import Bisection
if __name__ == "__main__":
unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
16113,
13,
15763,
62,
41070,
1330,
38045,
3213,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.897436 | 39 |
from flask import Blueprint
argumentation = Blueprint('argumentation', __name__, template_folder='templates', static_folder='static')
from . import argumentation_controller
| [
6738,
42903,
1330,
39932,
198,
198,
49140,
341,
796,
39932,
10786,
49140,
341,
3256,
11593,
3672,
834,
11,
11055,
62,
43551,
11639,
11498,
17041,
3256,
9037,
62,
43551,
11639,
12708,
11537,
198,
198,
6738,
764,
1330,
4578,
341,
62,
36500,... | 4.268293 | 41 |
# # # # # WORKING ON PYTHON 3.6.5 # # # # #
# # # # # MADE BY: @rtunazzz | rtuna#4321 # # # # #
# TODO:
# ADD BIO CHANGE SUPPORT
# Add threading support
# Add OCR
print(r'''
____ ____ _ _ ______ __ __ _
| _ \ / __ \ | \ | ||___ / /\ \ \ / /(_)
| |_) || | | || \| | / / / \ \ \_/ / _ ___
| _ < | | | || . ` | / / / /\ \ \ / | | / _ \
| |_) || |__| || |\ | / /__ / ____ \ | | _ | || (_) |
|____/ \____/ |_| \_|/_____|/_/ \_\|_|(_)|_| \___/
''')
print(" • made by: rtuna#4321 | @rtunazzz")
print(" • personal use only")
# # # # # # # # # # # # # # # # # # # # IMPORTING LIBRARIES # # # # # # # # # # # # # # # # # # # #
#External
import tweepy
from dhooks import Webhook, Embed
import datetime
import json
import time
import re
import random
# import threading
# # # # # # # # # # # # # # # # # # # # DEFINING STATIC VARIABLES # # # # # # # # # # # # # # # # # # # #
# TODO: Paste your Discord webhooks here
TWITTER_FILTERED = ""
TWITTER_UNFILTERED = ""
#TODO List here all IDs of accounts you want to monitor
USER_IDS = [
"929793229725110272", #me
"718857559403270144", #Cyber
"886027671993536512", #Offline - Lucas
"863006606124085248", #SOleSorcerer
"1044054193365934081", #Kodai
"990276109383225344", #F3ather
"1092061875318210560", #GaneshBot
"1008988284989534208", #TKS
"1354387910", #NSB
"831219416453042176", #EVE_Robotics
"914897340280053763", #EVEAIO
"936472526472933376", #Backdoor
"940121522269691904", #GhostAIO
"838811219452649473", #SneakerCopter
"887790349699227650", #DestroyerBots
"1094899148845920262", #RuggAIO
"1053046389704409089", #PrismAIO
"1056226100513329152", #DreamAIO
"929817052709134336", #WhatBot
"997644265156116480", #Balkobot
"1032215346189672448", #BlackOutIO
"1126199639135354885", #Sky_AIO
"1035491698254733313", #SoleTerminator
"1001896176428441601", #AdeptBots
"968299339117363200", #WrathBots
]
#Loads crednetials
with open("credentials.json", "r") as f:
credentials = json.loads(f.read())
CONSUMER_KEY = credentials['CONSUMER_KEY']
CONSUMER_SECRET = credentials['CONSUMER_SECRET']
ACCESS_TOKEN = credentials['ACCESS_TOKEN']
ACCESS_TOKEN_SECRET = credentials['ACCESS_TOKEN_SECRET']
#Hex list for webhook colors
HEX_LIST = [
16725342,
16604024,
16736311,
16750950,
16750899,
16763955,
16777062,
13434624,
6750054,
11202769,
5292006,
16740095,
15611090,
16711884,
]
#TODO Setup
keywords = [
'restock',
'password',
'live',
]
# # # # # # # # # # # # # # # # # # # # DEFINING FUNCTIONS # # # # # # # # # # # # # # # # # # # #
##### WEBHOOK FUNCTIONS #####
def notify_twitter(webhook_url, tweet_content, user,tweet_url, profile_pic, screen_name, url=None):
'''Sends Embed to the TwitterMonitor'''
hook = Webhook(url=webhook_url, username=user, avatar_url=profile_pic)
color=random.choice(HEX_LIST)
embed = Embed(
# title = f"New tweet from {user}",
url = tweet_url,
color=color,
timestamp = 'now',
description = tweet_content,
)
embed.set_author(name=screen_name,icon_url=profile_pic,url=f'https://twitter.com/{screen_name}')
# embed.set_footer(text=f'BONZAY Twitter • {datetime.datetime.now().strftime("%Y-%m-%d %H:%M")}',icon_url='https://cdn.discordapp.com/emojis/636516489322561536.png?v=1')
embed.set_footer(text=f'BONZAY Twitter',icon_url='https://cdn.discordapp.com/emojis/636516489322561536.png?v=1')
twitter_url_builder=f'https://twitter.com/{screen_name}'
if url:
embed.add_field('LINK FOUND', value=url, inline=False)
embed.add_field('Links', value=f'[Profile](https://twitter.com/{screen_name}) — [Likes]({twitter_url_builder}/likes) — [Replies]({twitter_url_builder}/with_replies) — [Media]({twitter_url_builder}/media) — [TweetLink]({tweet_url})', inline=False)
hook.send(embed=embed)
##### JSON EXTRACTING FUNCTIONS #####
def get_url(j):
'''Takes in json file and returns a URL if it's in the passed tweet data'''
tweet_url = j["entities"]["urls"][0]["expanded_url"]
if 'twitter.com' not in tweet_url:
return tweet_url
def get_tweet_url(j):
'''Takes in json file with tweet data and returns a tweet url'''
return f"https://twitter.com/{j['user']['screen_name']}/status/{j['id']}"
def get_profile_pic(j):
'''Takes in json file and returns an URL of users profile picture'''
return j["user"]["profile_image_url"]
def get_tweet_content(j):
'''Takes in json file and returns tweet contents'''
tweet_text = j['text']
tweet_url_list = j['entities']['urls']
for val in tweet_url_list:
short_url = val['url']
expanded_url = val['expanded_url']
tweet_text = tweet_text.replace(short_url, expanded_url)
return tweet_text
def get_screen_name(j):
'''Takes in json file and returns users screen name'''
return j['user']['screen_name']
def get_user_url(j):
'''Takes in json of tweet data and returns URL provided in users BIO'''
return j['user']['url']
##### STRING EDITING FUNCTIONS #####
def tweet_description_into_lines(j):
'''Takes json file and parses it into lines of text description'''
tweet_description = j['text']
return tweet_description.split('\n')
def remove_spaces(string):
'''Takes in a string and returns the same string with spaces removed.
Example: "Hi how are you" -> "Hihowareyou"'''
return string.strip().replace(" ", "")
def compile_final_url(user_url, passw):
'''Takes in user_url and password and constructs the final URL with the password in it.'''
match = re.search(r"^(https?:\/\/)?[^\/]*", user_url)
stripped_url = match.group(0)
if 'http' in stripped_url:
return stripped_url + f'/?password={passw}'
else:
return f"https://{stripped_url}/?password={passw}"
##### PASSWORD EXTRACTING FUNCTIONS #####
def password_with_colon(line_without_spaces):
'''CASE for "password:" (e.g password:109458101) if found, returns the password'''
if "password:" in line_without_spaces:
match = re.search(r"(?<=:).*",line_without_spaces)
return match.group(0)
def password_with_is(line):
'''CASE for "password is" (e.g. password is 1029124) if found, returns the password'''
if "password is" in line:
match = re.search(r"(?<=is).*",line)
return match.group(0).replace(' ', '')
def password_with_space(line):
'''CASE for password followed by space (e.g. password 09034865924) if found, returns the password'''
if "password" in line:
# match = re.search(r"(?<=password).*",line) # this matches the whole line
match = re.search(r"(?<=password) ?[^ ]*",line) # this matches only to the first space
return match.group(0).replace(" ", '')
##### PASSWORD EXTRACTING FUNCTIONS #####
def build_regex_search(keywords):
'''Builds a regex string to match either of the passed keywords and returns it.'''
re_string = r''
num = 1
for kw in keywords:
if num < len(keywords):
re_base = r'\b(\w*' + kw + r'\w*)\b|'
re_string += re_base
num += 1
else:
re_base = r'\b(\w*' + kw + r'\w*)\b'
re_string += re_base
return re_string
regex_string = build_regex_search(keywords)
# # # # # # # # # # # # # # # # # # # # RUNNING THE CODE # # # # # # # # # # # # # # # # # # # #
if __name__ == "__main__":
listener = StreamListener()
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
stream = tweepy.Stream(auth, listener)
stream.filter(follow=USER_IDS, is_async=True)
print("Started monitoring...")
# # # # # # # # # # # # # # # # # # # # UNUSED # # # # # # # # # # # # # # # # # # # #
'''
def test():
with open("test_tweet.json", "r") as f:
j = json.loads(f.read())
try:
#if there is a url in the tweet, include it
url = get_url(j)
user = j["user"]["name"]
content = get_tweet_content(j)
tweet_url = get_tweet_url(j)
profile_pic = get_profile_pic(j)
screen_name = get_screen_name(j)
if url:
notify_twitter(
tweet_content=content,
user=user,
tweet_url=tweet_url,
profile_pic=profile_pic,
screen_name=screen_name,
url=url
)
else:
raise Exception
except:
pass
lines = tweet_description_into_lines(j)
for line in lines:
line_without_spaces = remove_spaces(line)
try:
passw = password_with_colon(line_without_spaces)
if passw:
user_url = get_user_url(j)
final_url = compile_final_url(user_url, passw)
#TODO Edit variables below so they only load once (probs will be using scheme:
# new tweet -> noitify -> search for password -> notify password)
profile_pic = get_profile_pic(j)
screen_name = get_screen_name(j)
notify_password_url(final_url, screen_name, profile_pic)
else:
raise Exception
except:
try:
passw = password_with_is(line)
print(passw)
if passw:
user_url = get_user_url(j)
final_url = compile_final_url(user_url, passw)
profile_pic = get_profile_pic(j)
screen_name = get_screen_name(j)
notify_password_url(final_url, screen_name, profile_pic)
else:
raise Exception
except:
print("Trying 3")
try:
passw = password_with_space(line)
print(passw)
if passw:
user_url = get_user_url(j)
final_url = compile_final_url(user_url, passw)
profile_pic = get_profile_pic(j)
screen_name = get_screen_name(j)
notify_password_url(final_url, screen_name, profile_pic)
except:
pass
test()
'''
'''
def on_image(j):
pass
# MORE TESTING NEEDED
img = j["entities"]["media"]
#CASE if password with spaces is written in text (ex. "p a s s w o r d")
# elif "password" in line_without_spaces:
# if ":" in line_without_spaces:
# match = re.search(r"(?<=:).*",line_without_spaces)
# passw = match.group(0)
# else:
# passw =
#CASE if pass is written in line
# elif "pass" in line:
# match = re.search(r'passw?o?r?d?:?[^ ]*', line_without_spaces)
# str_match = match.group(0)
'''
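# Quick illustration (not part of the original file) of what the helper functions above produce:
#   build_regex_search(['restock', 'password', 'live'])
#       -> r'\b(\w*restock\w*)\b|\b(\w*password\w*)\b|\b(\w*live\w*)\b'
#   password_with_colon(remove_spaces('password: 109458101'))   -> '109458101'
#   password_with_is('the password is 1029124')                 -> '1029124'
#   compile_final_url('https://example.com/launch', '1029124')  -> 'https://example.com/?password=1029124'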
| [
2,
1303,
1303,
1303,
1303,
220,
220,
30936,
2751,
6177,
350,
56,
4221,
1340,
513,
13,
21,
13,
20,
220,
220,
220,
220,
1303,
1303,
1303,
1303,
1303,
198,
2,
1303,
1303,
1303,
1303,
220,
220,
337,
19266,
11050,
25,
220,
220,
220,
24... | 2.095669 | 5,310 |
import logging
from django import template
from apps.property.models import GenericProperty
register = template.Library()
logger = logging.getLogger(__name__)
@register.filter
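# The filter function that this decorator applies to is not included in the snippet.
# A minimal, purely hypothetical filter to complete the registration:
def property_or_default(value, default="-"):
    """Hypothetical example filter: return the value, or a fallback when it is empty."""
    return value if value not in (None, "") else default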
| [
11748,
18931,
198,
198,
6738,
42625,
14208,
1330,
11055,
198,
198,
6738,
6725,
13,
26745,
13,
27530,
1330,
42044,
21746,
198,
198,
30238,
796,
11055,
13,
23377,
3419,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834... | 3.714286 | 49 |
import kfp
from kfp import components
chicago_taxi_dataset_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/e3337b8bdcd63636934954e592d4b32c95b49129/components/datasets/Chicago%20Taxi/component.yaml')
pandas_transform_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/e69a6694/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml')
catboost_train_classifier_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/f97ad2/components/CatBoost/Train_classifier/from_CSV/component.yaml')
catboost_train_regression_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/f97ad2/components/CatBoost/Train_regression/from_CSV/component.yaml')
catboost_predict_classes_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/f97ad2/components/CatBoost/Predict_classes/from_CSV/component.yaml')
catboost_predict_values_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/f97ad2/components/CatBoost/Predict_values/from_CSV/component.yaml')
catboost_predict_class_probabilities_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/f97ad2/components/CatBoost/Predict_class_probabilities/from_CSV/component.yaml')
catboost_to_apple_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/f97ad2/components/CatBoost/convert_CatBoostModel_to_AppleCoreMLModel/component.yaml')
catboost_to_onnx_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/f97ad2/components/CatBoost/convert_CatBoostModel_to_ONNX/component.yaml')
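# 'catboost_pipeline' (used in the __main__ block below) is not defined in this snippet.
# A rough sketch of how the loaded components could be wired together; every parameter
# and output name below is an assumption for illustration, not taken from the component YAMLs.
def catboost_pipeline():
    training_csv = chicago_taxi_dataset_op().output                       # assumed single CSV output
    model = catboost_train_regression_op(training_data=training_csv)      # assumed input name
    catboost_predict_values_op(data=training_csv,                         # assumed input names
                               model=model.outputs['model'])              # assumed output name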
if __name__ == '__main__':
kfp_endpoint=None
kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(catboost_pipeline, arguments={})
| [
11748,
479,
46428,
198,
6738,
479,
46428,
1330,
6805,
628,
198,
354,
4549,
62,
19290,
72,
62,
19608,
292,
316,
62,
404,
796,
6805,
13,
2220,
62,
42895,
62,
6738,
62,
6371,
10786,
5450,
1378,
1831,
13,
12567,
43667,
13,
785,
14,
74,
... | 2.783357 | 697 |
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent, ItemEnterEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.RunScriptAction import RunScriptAction
from utils import SessionAction
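# 'ElementarySessionExtension' (run in the __main__ block below) and its listener are not
# defined in this snippet. A minimal sketch following the usual ulauncher extension pattern;
# the keyword handling and the logout command are illustrative, not from the original file:
class KeywordQueryListener(EventListener):
    def on_event(self, event, extension):
        items = [
            ExtensionResultItem(icon='images/icon.png',
                                name='Log out',
                                description='End the current session',
                                on_enter=RunScriptAction('gnome-session-quit --logout', None))
        ]
        return RenderResultListAction(items)


class ElementarySessionExtension(Extension):
    def __init__(self):
        super(ElementarySessionExtension, self).__init__()
        self.subscribe(KeywordQueryEvent, KeywordQueryListener())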
if __name__ == '__main__':
ElementarySessionExtension().run()
| [
6738,
14856,
1942,
2044,
13,
15042,
13,
16366,
13,
11627,
3004,
1330,
27995,
198,
6738,
14856,
1942,
2044,
13,
15042,
13,
16366,
13,
9237,
33252,
1330,
8558,
33252,
198,
6738,
14856,
1942,
2044,
13,
15042,
13,
28710,
13,
15596,
1330,
73... | 3.694444 | 144 |
# selected = 50
selected = 289326
vals_dict = {"0,0": 1}
if __name__ == "__main__":
main()
| [
198,
198,
2,
6163,
796,
2026,
198,
34213,
796,
2579,
6052,
2075,
198,
198,
12786,
62,
11600,
796,
19779,
15,
11,
15,
1298,
352,
92,
628,
628,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
2... | 2.25 | 48 |
# -*- coding: utf-8 -*-
###
# (C) Copyright [2021] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import mock
import pytest
from ansible_collections.hpe.oneview.tests.unit.utils.hpe_test_utils import OneViewBaseTest
from ansible_collections.hpe.oneview.tests.unit.utils.oneview_module_loader import IdPoolsModule, OneViewModuleValueError
FAKE_MSG_ERROR = 'Fake message error'
URI = '/rest/id-pools/ipv4'
DEFAULT_ID_POOLS = dict(host='127.0.0.1',
example_uri='/rest/id-pools',
uri='/rest/id-pools/ipv4')
PARAMS_WITH_CHANGES = dict(
config='config.json',
state='present',
data=dict(uri=DEFAULT_ID_POOLS['uri'],
idList=['10.1.0.1', '10.1.0.5'])
)
UPDATE_TEMPLATE = dict(uri='/rest/id-pools/vwwn',
enabled=True)
PARAMS_FOR_UPDATE = dict(
config='config.json',
state='update_pool_type',
data=dict(uri=DEFAULT_ID_POOLS['uri'],
enabled=True)
)
VALIDATE_TEMPLATE = dict(poolType='ipv4',
uri='/rest/id-pools',
idList=['VCGYOAA023', 'VCGYOAA024'])
PARAMS_WITH_VALIDATE = dict(
config='config.json',
state='validate',
data=dict(uri=DEFAULT_ID_POOLS['example_uri'],
poolType='vwwn',
idList=["10:00:2c:6c:28:80:00:00",
"10:00:2c:6c:28:80:00:01"])
)
ALLOCATE_TEMPLATE = dict(host='127.0.0.1',
uri='/rest/id-pools',
poolType='vwwn',
count=2)
PARAMS_WITH_ALLOCATE = dict(
config='config.json',
state='allocate',
data=dict(uri=DEFAULT_ID_POOLS['uri'],
poolType='vwwn',
count=2)
)
COLLECTOR_TEMPLATE = dict(host='127.0.0.1',
uri='/rest/id-pools',
poolType='vwwn',
idList=["10:00:2c:6c:28:80:00:00",
"10:00:2c:6c:28:80:00:01"])
PARAMS_WITH_COLLECTOR = dict(
config='config.json',
state='collect',
data=dict(uri=DEFAULT_ID_POOLS['uri'],
poolType='vwwn',
idList=["10:00:2c:6c:28:80:00:00",
"10:00:2c:6c:28:80:00:01"])
)
@pytest.mark.resource(TestIdPoolsModule='id_pools')
class TestIdPoolsModule(OneViewBaseTest):
"""
OneViewBaseTestCase provides the mocks used in this test case
"""
if __name__ == '__main__':
pytest.main([__file__])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
21017,
198,
2,
357,
34,
8,
15069,
685,
1238,
2481,
60,
30446,
15503,
6400,
446,
14973,
7712,
18470,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
... | 2.020833 | 1,536 |
from redis.connection import PythonParser, HiredisParser
from base import Benchmark
if __name__ == '__main__':
SocketReadBenchmark().run_benchmark()
| [
6738,
2266,
271,
13,
38659,
1330,
11361,
46677,
11,
367,
1202,
271,
46677,
198,
6738,
2779,
1330,
25187,
4102,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
47068,
5569,
44199,
4102,
22446,... | 3.319149 | 47 |
import math
import numpy as np
'''a = [
[14,6,-2,3,12],
[3,15,2,-5,32],
[-7,4,-23,2,-24],
[1,-3,-2,16,14]
]'''
'''a = [
[1,1/2,1/3,1],
[1/2,1/3,1/4,0],
[1/3,1/4,1/5,0],
]'''
a = [
[-7,2,-3,4,-12],
[5,-1,14,-1,13],
[1,9,-7,13,31],
[-12,13,-8,-4,-32]
]
n = len(a)
marcas = [i for i in range(0,n)]
#elimination()
#for i in a:
# print(i)
#intercambioDeFilas(0,2)
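# The 'elimination' and 'intercambioDeFilas' (row-swap) routines referenced in the comments
# above are not part of this snippet. A minimal sketch of Gaussian elimination with partial
# pivoting on the augmented matrix 'a' (n rows, n+1 columns), using the numpy already imported:
def elimination_sketch(aug):
    m = np.array(aug, dtype=float)
    rows = len(m)
    for k in range(rows - 1):
        # partial pivoting: bring the largest remaining pivot candidate onto the diagonal
        pivot = k + np.argmax(np.abs(m[k:, k]))
        m[[k, pivot]] = m[[pivot, k]]
        for i in range(k + 1, rows):
            factor = m[i, k] / m[k, k]
            m[i, k:] -= factor * m[k, k:]
    # back substitution
    x = np.zeros(rows)
    for i in range(rows - 1, -1, -1):
        x[i] = (m[i, -1] - np.dot(m[i, i + 1:rows], x[i + 1:])) / m[i, i]
    return x

# Example: elimination_sketch(a) solves the 4x4 system defined above.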
| [
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
628,
198,
198,
7061,
6,
64,
796,
685,
198,
220,
220,
220,
220,
220,
220,
220,
685,
1415,
11,
21,
12095,
17,
11,
18,
11,
1065,
4357,
198,
220,
220,
220,
220,
220,
220,
220,
685,
1... | 1.383033 | 389 |
import logging
from astropy.io import fits
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from tidalclassifier.utils.custom_image_utils import augment, apply_corrections
from tidalclassifier.utils.helper_funcs import ThreadsafeIter, shuffle_df, to_json, remove_file
def construct_image(row, instruct, augmentations=True, debug=False):
"""given a meta-table row, read in the premade image and apply desired transforms"""
im = read_image(row, instruct) # simple read of fits file
if im.shape != (1, 512, 512) and im.shape != (1, 256, 256) and im.shape != (3,256,256) and im.shape != (3,512,512): # TODO: make automatic. But beware: uncropped!
print('shape error with', row)
print(im.shape)
exit(0)
if augmentations: im = augment(im, instruct)
if debug:
print('augmented')
im = apply_corrections(im, instruct)
if debug:
print('corrected')
if instruct['save_pics']:
hdulist = fits.open(instruct['directory'] + row['threshold_filename']) # read original image for base file
hdu = hdulist[0] # open main compartment
hdu.data = im # set main compartment data component to be the final image
hdu.writeto(instruct['directory']+'debug_'+str(row['ID'])+'_'+str(row['FEAT']) + '_' + str(row['CONF'])+'_'+str(np.random.randint(0,1000))+'.fits', overwrite=True)
# random number to avoid augmented overwrites
if debug:
print('saved')
# scaled_plot(np.squeeze(im), plt)
# plt.show()
return im
"""Generators"""
def custom_flow_from_directory(
table,
instruct,
gen_name='default_gen',
write=False,
even_split=True,
p=False,
class_mode='both',
debug=False):
"""Yield (subjects, labels) batch tuples from subjects saved in directory and catalog
Args:
table ([type]): [description]
instruct ([type]): [description]
gen_name (str, optional): Defaults to 'no_write'. [description]
even_split (bool, optional): Defaults to True. [description]
p (bool, optional): Defaults to False. [description]
class_mode (str, optional): Defaults to 'both'. [description]
"""
batch_size = instruct['batch_size']
channels = instruct['channels']
img_width = instruct['img_width']
img_height = instruct['img_height']
name = 'no_write'
if instruct['save_gen_output']:
name = gen_name
# table is a pandas table with only desired files included
# index = 0 # current table index
table_full = table.copy() # check not by reference
if write:
label_fname = instruct['directory'] + name + '_' + str(instruct['run']) + '_label.txt'
remove_file(label_fname) # necessary to keep cleaning files each time, or will append forever!
pic_fname = instruct['directory'] + name + '_' + str(instruct['run']) + '_pic.txt'
remove_file(pic_fname) # necessary to keep cleaning files each time, or will append forever!
while True:
data = np.zeros((batch_size, channels, img_width, img_height))
labels = np.ones(batch_size) * -1
iteration = 0
while iteration < batch_size: # iterate until have completed batch
logging.debug(iteration)
if len(table[table.FEAT != 'N']) == 0:
table = table_full # reset the table if all entries have been used
if len(table[table.FEAT == 'N']) == 0:
table = table_full # reset the table if all entries have been used
if even_split:
feat_switch = np.random.randint(0,2) # high limit is exclusive
if feat_switch == 1:
table_in = table[table.FEAT != 'N']
else:
table_in = table[table.FEAT == 'N']
else:
table_in = table
# table_in contains all images that may possibly be selected in this single image loop (by class, usually)
rand_index = np.random.randint(0,len(table_in))
# print('rand index: ', rand_index)
picture_id = table_in.iloc[rand_index]['picture_id'] # pick a random picture id
table = table[table.picture_id != picture_id] # remove that pic from the OUTER table, don't redraw (yet)
rows = table_in[table_in.picture_id == picture_id] # pick metatable rows with that pic_id
if len(rows) > 1:
exit(1) # if pic id duplicates, exit!
row = rows.squeeze()
im = construct_image(row, instruct) # read image contained in that metatable row
data[iteration, :, :, :] = im # save for X output
feature = rows.iloc[0]['FEAT'] # find feature of current image
labels[iteration] = 1 # assume Y = tidal
if feature == 'N':
labels[iteration] = 0 # if feature is N, change to Y = not tidal
if write: # append record of labels to 'name'
with open(label_fname, "a") as label_file:
label_file.write(str(int(labels[iteration]))+'\n')
with open(pic_fname, "a") as label_file:
label_file.write(str(int(picture_id))+'\n')
iteration += 1
if debug:
logging.info(data.shape)
final_batch_im = data[-1, 0, :, :] # batch, channel, height, width
final_batch_label = labels[-1]
name = gen_name + '_' + row['ID'] + '_' + str(final_batch_label) + '_' + str(np.random.rand())
logging.info(name)
logging.info(final_batch_im.shape)
plt.clf()
plt.imshow(final_batch_im, cmap='gray')
plt.savefig(name + '.png')
logging.info('Mean batch label: {}'.format(labels.mean()))
logging.debug('batch shape: {}'.format(data.shape))
if class_mode is None:
yield data
else: yield (data, labels)
def fold_tables(meta, instruct):
"""
Separate catalog into instruct['folds'] cross-validation folds.
Check that no galaxy appears in both the train table and val table for each single permutation
Shuffles catalog, hence resulting folds are unique for each call
instruct['folds'] controls how many folds to create (e.g. 5 for 5-fold cross-validation)
instruct['tidal_conf'] controls how expert labels are binned into binary classes.
Args:
meta (pd.DataFrame): catalog
instruct (dict): configuration instructions
Raises:
ValueError: if instruct['tidal_conf'] is not a defined option (below)
Returns:
list: where nth item is train table for nth permutation
list: where nth item is validation table for nth permutation
"""
folds = instruct['folds']
# all combinations need these two
conf_4 = np.array(meta.CONF == 4, dtype=bool)
conf_0 = np.array(meta.CONF == 0, dtype=bool)
if instruct['tidal_conf'] == 34:
conf_3 = np.array(meta.CONF == 3, dtype=bool)
tidal_table = meta[conf_3 + conf_4]
nontidal_table = meta[meta.CONF == 0]
elif instruct['tidal_conf'] == 4:
tidal_table = meta[conf_4]
nontidal_table = meta[meta.CONF == 0]
elif instruct['tidal_conf'] == 134:
conf_3 = np.array(meta.CONF == 3, dtype=bool)
conf_1 = np.array(meta.CONF == 1, dtype=bool)
tidal_table = meta[conf_3 + conf_4]
nontidal_table = meta[conf_1+conf_0]
else:
failure_str = 'fatal fold error: instruct tidal_conf not recognised'
raise ValueError(failure_str)
tidal_val_size = int(len(tidal_table)/folds)
nontidal_val_size = int(len(nontidal_table)/folds)
train_tables = ['error' for v in range(folds)]
val_tables = ['error' for v in range(folds)]
for fold in range(folds):
# choose the boundaries of moving window to select as val data
tidal_window_low_edge = fold * tidal_val_size
tidal_window_high_edge = (fold+1) * tidal_val_size
nontidal_window_low_edge = fold * nontidal_val_size
nontidal_window_high_edge = (fold+1) * nontidal_val_size
# validation set is the rows within fold's selected window
# val window should include low edge and exclude high edge
val_tidal_table = tidal_table[tidal_window_low_edge:tidal_window_high_edge]
val_nontidal_table = nontidal_table[nontidal_window_low_edge:nontidal_window_high_edge]
val_table = pd.concat((val_tidal_table, val_nontidal_table))
# train set is all the other rows
# train_below should exclude low_edge
train_tidal_table_below = tidal_table[:tidal_window_low_edge]
train_nontidal_table_below = nontidal_table[:nontidal_window_low_edge]
if fold == (folds - 1): # final row, don't try access above limit!
train_tidal_table_above = pd.DataFrame()
train_nontidal_table_above = pd.DataFrame()
else:
# train_above should include high edge as val window excludes it
train_tidal_table_above = tidal_table[tidal_window_high_edge:]
train_nontidal_table_above = nontidal_table[nontidal_window_high_edge:]
train_table = pd.concat((train_tidal_table_below, train_nontidal_table_below,
train_tidal_table_above, train_nontidal_table_above))
val_table = shuffle_df(val_table)
train_table = shuffle_df(train_table)
val_tables[fold] = val_table
train_tables[fold] = train_table
for fold in range(folds): # verify that no pictures appear twice in any train/test pair
val_pics = val_tables[fold]['picture_id'].unique()
train_pics = train_tables[fold]['picture_id'].unique()
for val_v in val_pics:
for train_v in train_pics:
if val_v == train_v:
print('fold error: duplicate pic detected!')
print(val_v, train_v)
exit(1)
return train_tables, val_tables
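# Illustrative wiring of the two pieces above (not in the original file): split the catalog
# into folds and build a (train, validation) generator pair per fold. 'meta' and 'instruct'
# are assumed to come from the surrounding project.
def make_fold_generators(meta, instruct):
    train_tables, val_tables = fold_tables(meta, instruct)
    for fold, (train_table, val_table) in enumerate(zip(train_tables, val_tables)):
        train_gen = custom_flow_from_directory(train_table, instruct, gen_name='train_fold_%d' % fold)
        val_gen = custom_flow_from_directory(val_table, instruct, gen_name='val_fold_%d' % fold)
        yield fold, train_gen, val_gen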
| [
11748,
18931,
198,
198,
6738,
6468,
28338,
13,
952,
1330,
11414,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
46384,
11537,
198,
11748,
... | 2.348805 | 4,309 |
print("LIKHITHA");
print("AM.EN.U4CSE18130");
print("CSE");
| [
4798,
7203,
43,
18694,
39,
2043,
7801,
15341,
198,
4798,
7203,
2390,
13,
1677,
13,
52,
19,
34,
5188,
1507,
12952,
15341,
198,
4798,
7203,
34,
5188,
15341,
198
] | 2.068966 | 29 |
F = open("16_Stikordsregister.table", "r")
lines = F.readlines()
F.close()
a1 = r'<div align="left">'
a2 = r'{| class="wikitable" style="text-align: left;"'
a3 = lines[0].strip()
b = "|-"
c1 = r'|}'
c2 = r'</div>'
for i, line in enumerate(lines[1:]):
line = line.strip()
if line == "#N/A":
continue
if line[:10] == "| Bogstav:":
if i != 0:
print(c1)
print(c2)
print("== " + line[2:12] + " ==")
print(a1)
print(a2)
print(a3)
print(b)
print(line)
elif i == len(lines)-2:
print(c1)
print(c2)
else:
print(b)
print(line)
| [
198,
198,
37,
796,
1280,
7203,
1433,
62,
1273,
1134,
3669,
30238,
13,
11487,
1600,
366,
81,
4943,
198,
6615,
796,
376,
13,
961,
6615,
3419,
198,
37,
13,
19836,
3419,
198,
198,
64,
16,
796,
374,
6,
27,
7146,
10548,
2625,
9464,
5320... | 1.759791 | 383 |
"""
This module contains the Jump Search algorithm.
"""
import math
def jump_search(arr: list, x: int, n: int) -> int:
"""
This function implements the Jump Search algorithm.
"""
step = int(math.sqrt(n))
prev = 0
while arr[min(step, n) - 1] < x:
prev = step
step += int(math.sqrt(n))
if prev >= n:
return -1
while arr[prev] < x:
prev += 1
if prev == min(step, n):
return -1
if arr[prev] == x:
return prev
return -1 | [
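# Example usage (illustrative, not part of the original module):
if __name__ == "__main__":
    data = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
    print(jump_search(data, 55, len(data)))  # prints 10, the index of 55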
37811,
198,
1212,
8265,
4909,
262,
15903,
11140,
11862,
13,
198,
37811,
198,
11748,
10688,
198,
198,
4299,
4391,
62,
12947,
7,
3258,
25,
1351,
11,
2124,
25,
493,
11,
299,
25,
493,
8,
4613,
493,
25,
198,
220,
220,
220,
37227,
198,
... | 2.161157 | 242 |
# Copyright 2020 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import errno
import os
import base64
import time
import requests
import socket
from pathlib import Path
from os import path
from rmaker_lib import serverconfig
from rmaker_lib.exceptions import NetworkError,\
InvalidConfigError,\
InvalidUserError,\
InvalidApiVersionError,\
ExpiredSessionError,\
SSLError,\
RequestTimeoutError
from rmaker_lib.logger import log
CONFIG_DIRECTORY = '.espressif/rainmaker'
CONFIG_FILE = CONFIG_DIRECTORY + '/rainmaker_config.json'
HOME_DIRECTORY = '~/'
CURR_DIR = os.path.dirname(__file__)
CERT_FILE = CURR_DIR + '/../server_cert/server_cert.pem'
class Config:
"""
Config class used to instantiate instances of config to
perform various get/set configuration operations
"""
def set_config(self, data, config_file=CONFIG_FILE):
"""
Set the configuration file.
:params data: Config Data to write to file
:type data: dict
:params config_file: Config filename to write config data to
:type data: str
:raises OSError: If there is an OS issue while creating new directory
for config file
:raises Exception: If there is a File Handling error while saving
config to file
:return: None on Success and Failure
:rtype: None
"""
log.info("Configuring config file.")
file_dir = Path(path.expanduser(HOME_DIRECTORY + CONFIG_DIRECTORY))
file = Path(path.expanduser(HOME_DIRECTORY) + config_file)
if not file.exists():
try:
if not file_dir.exists():
log.debug('Config directory does not exist, '
'creating new directory.')
os.makedirs(path.expanduser(HOME_DIRECTORY) +
CONFIG_DIRECTORY)
except OSError as set_config_err:
log.error(set_config_err)
if set_config_err.errno != errno.EEXIST:
raise set_config_err
try:
with open(path.join(path.expanduser(HOME_DIRECTORY),
config_file), 'w') as config_file:
json.dump(data, config_file)
except Exception as set_config_err:
raise set_config_err
log.info("Configured config file successfully.")
def get_config(self, config_file=CONFIG_FILE):
"""
Get the configuration details from config file.
:params config_file: Config filename to read config data from
:type data: str
:raises Exception: If there is a File Handling error while reading
from config file
:return:
idtoken - Id Token from config saved\n
refreshtoken - Refresh Token from config saved\n
accesstoken - Access Token from config saved\n
:rtype: str
"""
file = Path(path.expanduser(HOME_DIRECTORY) + config_file)
if not file.exists():
raise InvalidUserError
try:
with open(path.join(path.expanduser(HOME_DIRECTORY),
config_file), 'r') as config_file:
data = json.load(config_file)
idtoken = data['idtoken']
refresh_token = data['refreshtoken']
access_token = data['accesstoken']
except Exception as get_config_err:
raise get_config_err
return idtoken, refresh_token, access_token
def get_binary_config(self, config_file=CONFIG_FILE):
"""
Get the configuration details from binary config file.
:params config_file: Config filename to read config data from
:type data: str
:raises Exception: If there is a File Handling error while reading
from config file
:return: Config data read from file on Success, None on Failure
:rtype: str | None
"""
file = Path(path.expanduser(HOME_DIRECTORY) + config_file)
if not file.exists():
return None
try:
with open(file, 'rb') as cfg_file:
data = cfg_file.read()
return data
except Exception as get_config_err:
raise get_config_err
return
def update_config(self, access_token, id_token):
"""
Update the configuration file.
:params access_token: Access Token to update in config file
:type access_token: str
:params id_token: Id Token to update in config file
:type id_token: str
:raises OSError: If there is an OS issue while creating new directory
for config file
:raises Exception: If there is a FILE Handling error while reading
from/writing config to file
:return: None on Success and Failure
:rtype: None
"""
file = Path(path.expanduser(HOME_DIRECTORY) + CONFIG_FILE)
if not file.exists():
try:
os.makedirs(path.expanduser(HOME_DIRECTORY) + CONFIG_DIRECTORY)
except OSError as set_config_err:
if set_config_err.errno != errno.EEXIST:
raise set_config_err
try:
with open(path.join(path.expanduser(HOME_DIRECTORY),
CONFIG_FILE), 'r') as config_file:
config_data = json.load(config_file)
config_data['accesstoken'] = access_token
config_data['idtoken'] = id_token
with open(path.join(path.expanduser(HOME_DIRECTORY),
CONFIG_FILE), 'w') as config_file:
json.dump(config_data, config_file)
except Exception as set_config_err:
raise set_config_err
def get_token_attribute(self, attribute_name, is_access_token=False):
"""
Get access token attributes.
:params attribute_name: Attribute Name
:type attribute_name: str
:params is_access_token: Is Access Token
:type is_access_token: bool
:raises InvalidConfigError: If there is an error in the config
:raises Exception: If there is a File Handling error while reading
from/writing config to file
:return: Attribute Value on Success, None on Failure
:rtype: int | str | None
"""
if is_access_token:
log.debug('Getting access token for attribute ' + attribute_name)
_, _, token = self.get_config()
else:
log.debug('Getting idtoken for attribute ' + attribute_name)
token, _, _ = self.get_config()
token_payload = token.split('.')[1]
if len(token_payload) % 4:
token_payload += '=' * (4 - len(token_payload) % 4)
try:
str_token_payload = base64.b64decode(token_payload).decode("utf-8")
attribute_value = json.loads(str_token_payload)[attribute_name]
except Exception:
raise InvalidConfigError
if attribute_value is None:
raise InvalidConfigError
return attribute_value
def get_access_token(self):
"""
Get Access Token for User
:raises InvalidConfigError: If there is an issue in getting config
from file
:return: Access Token on Success
:rtype: str
"""
_, _, access_token = self.get_config()
if access_token is None:
raise InvalidConfigError
if self.__is_valid_token() is False:
print('Previous Session expired. Initialising new session...')
log.info('Previous Session expired. Initialising new session...')
refresh_token = self.get_refresh_token()
access_token, id_token = self.__get_new_token(refresh_token)
self.update_config(access_token, id_token)
print('Previous Session expired. Initialising new session...'
'Success')
log.info('Previous Session expired. Initialising new session...'
'Success')
return access_token
def get_user_id(self):
"""
Get User Id
:return: Attribute value for attribute name passed
:rtype: str
"""
return self.get_token_attribute('custom:user_id')
def get_refresh_token(self):
"""
Get Refresh Token
:raises InvalidApiVersionError: If current API version is not supported
:return: Refresh Token
:rtype: str
"""
if self.__is_valid_version() is False:
raise InvalidApiVersionError
_, refresh_token, _ = self.get_config()
return refresh_token
def __is_valid_token(self):
"""
Check if access token is valid i.e. login session is still active
or session is expired
:return True on Success and False on Failure
:rtype: bool
"""
log.info("Checking for session timeout.")
exp_timestamp = self.get_token_attribute('exp', is_access_token=True)
current_timestamp = int(time.time())
if exp_timestamp > current_timestamp:
return True
return False
def __is_valid_version(self):
"""
Check if API Version is valid
:raises NetworkError: If there is a network connection issue during
HTTP request for getting version
:raises Exception: If there is an HTTP issue or JSON format issue in
HTTP response
:return: True on Success, False on Failure
:rtype: bool
"""
socket.setdefaulttimeout(10)
log.info("Checking for supported version.")
path = 'apiversions'
request_url = serverconfig.HOST.split(serverconfig.VERSION)[0] + path
try:
log.debug("Version check request url : " + request_url)
response = requests.get(url=request_url, verify=CERT_FILE,
timeout=(5.0, 5.0))
log.debug("Version check response : " + response.text)
response.raise_for_status()
except requests.exceptions.SSLError:
raise SSLError
except requests.exceptions.Timeout:
raise RequestTimeoutError
except requests.exceptions.ConnectionError:
raise NetworkError
except Exception as ver_err:
raise ver_err
try:
response = json.loads(response.text)
except Exception as json_decode_err:
raise json_decode_err
if 'supported_versions' in response:
supported_versions = response['supported_versions']
if serverconfig.VERSION in supported_versions:
supported_versions.sort()
latest_version = supported_versions[len(supported_versions)
- 1]
if serverconfig.VERSION < latest_version:
print('Please check the updates on GitHub for newer '
'functionality enabled by ' + latest_version +
' APIs.')
return True
return False
def __get_new_token(self, refresh_token):
"""
Get new token for User Login Session
:raises NetworkError: If there is a network connection issue during
HTTP request for getting token
:raises Exception: If there is an HTTP issue or JSON format issue in
HTTP response
:return: accesstoken and idtoken on Success, None on Failure
:rtype: str | None
"""
socket.setdefaulttimeout(10)
log.info("Extending user login session.")
path = 'login'
request_payload = {
'refreshtoken': refresh_token
}
request_url = serverconfig.HOST + path
try:
log.debug("Extend session url : " + request_url)
response = requests.post(url=request_url,
data=json.dumps(request_payload),
verify=CERT_FILE,
timeout=(5.0, 5.0))
response.raise_for_status()
log.debug("Extend session response : " + response.text)
except requests.exceptions.SSLError:
raise SSLError
except requests.exceptions.ConnectionError:
raise NetworkError
except requests.exceptions.Timeout:
raise RequestTimeoutError
except Exception:
raise ExpiredSessionError
try:
response = json.loads(response.text)
except Exception:
raise ExpiredSessionError
if 'accesstoken' in response and 'idtoken' in response:
log.info("User session extended successfully.")
return response['accesstoken'], response['idtoken']
return None
def check_user_creds_exists(self):
'''
Check if user creds exist
'''
curr_login_creds_file = os.path.expanduser(HOME_DIRECTORY + CONFIG_FILE)
if os.path.exists(curr_login_creds_file):
return curr_login_creds_file
else:
return False
def get_input_to_end_session(self, email_id):
'''
Get input(y/n) from user to end current session
'''
while True:
user_input = input("This will end your current session for {}. Do you want to continue (Y/N)? :".format(email_id))
if user_input not in ["Y", "y", "N", "n"]:
print("Please provide Y/N only")
continue
elif user_input in ["N", "n"]:
return False
else:
break
return True
def remove_curr_login_creds(self, curr_creds_file=None):
'''
Remove current login creds
'''
log.info("Removing current login creds")
if not curr_creds_file:
curr_creds_file = os.path.expanduser(HOME_DIRECTORY + CONFIG_FILE)
try:
os.remove(curr_creds_file)
log.info("Previous login session ended. Removing current login creds...Success...")
return True
except Exception as e:
log.debug("Removing current login creds from path {}. Failed: {}".format(curr_creds_file, e))
return None | [
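# Illustrative use of the Config class (not part of the original module). The token values
# below are placeholders, so anything that decodes them (get_user_id, get_access_token)
# would need real tokens from an actual login:
if __name__ == '__main__':
    cfg = Config()
    cfg.set_config({'idtoken': '<id>', 'refreshtoken': '<refresh>', 'accesstoken': '<access>'})
    id_token, refresh_token, access_token = cfg.get_config()
    print(access_token)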
2,
15069,
12131,
20386,
601,
361,
11998,
357,
2484,
272,
20380,
8,
350,
9328,
42513,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 2.184725 | 7,005 |
from .dataset import load_dataset
from .location import TBikeLocation
from .truck import TBikeTruck
NUMBER_OF_GRID = [5, 60]
NUMBER_OF_TRUCKS = [5, 10]
NUMBER_OF_BIKES_IN_EACH_LOCATION = [4, 3]
TRUCK_CAPACITY = 20
TOTAL_TIMES = 12 * 60 # 12 hours
| [
6738,
764,
19608,
292,
316,
1330,
3440,
62,
19608,
292,
316,
198,
6738,
764,
24886,
1330,
23799,
522,
14749,
198,
6738,
764,
83,
30915,
1330,
23799,
522,
51,
30915,
198,
198,
41359,
13246,
62,
19238,
62,
10761,
2389,
796,
685,
20,
11,... | 2.336449 | 107 |
import os
import pickle
def get_queue(config_dir):
    """Get the queue from the queue backup file."""
    queue_path = os.path.join(config_dir, 'queue')
    if os.path.exists(queue_path):
        # Use a context manager so the file is closed even on failure
        # (the original close() call was unreachable after the returns).
        with open(queue_path, 'rb') as queue_file:
            try:
                return pickle.load(queue_file)
            except Exception:
                print('Queue log file seems to be corrupted. Aborting.')
                return None
    print('There is no queue log file. Aborting.')
    return None
| [
11748,
28686,
198,
11748,
2298,
293,
628,
198,
4299,
651,
62,
36560,
7,
11250,
62,
15908,
2599,
198,
220,
220,
220,
37227,
3855,
262,
16834,
422,
262,
16834,
11559,
2393,
526,
15931,
198,
220,
220,
220,
16834,
62,
6978,
796,
28686,
13... | 2.349558 | 226 |
from chains.core.metrics import accuracy
from chains.core.optimizers import AdamOptimizer
from chains.core.preprocessing import one_hot
from chains.front.model import Model
from chains.front.network import BatchNorm
from chains.front.network import Dense, Sequence, SoftmaxClassifier, ReLu
from chains.front.training import MiniBatchTraining
from chains.tools.backup import restore_network
from coursera.course2.w3.c2w3 import CostListener
from coursera.course2.w3.tf_utils import load_dataset
if __name__ == "__main__":
train_x_orig, train_y_orig, test_x_orig, test_y_orig, classes = \
load_dataset()
# Pre-processing
train_x_flat = train_x_orig.reshape(train_x_orig.shape[0], -1).T
test_x_flat = test_x_orig.reshape(test_x_orig.shape[0], -1).T
train_x = train_x_flat / 255.
test_x = test_x_flat / 255.
train_y = one_hot(train_y_orig, 6)
test_y = one_hot(test_y_orig, 6)
n = train_x.shape[0]
# Build the model; the 'model(n)' factory function is not shown in this snippet.
model = model(n)
# Restore
restore_network(model.network, "datasets/c2w3_trained_weights.hdf5")
# Check accuracy
train_predictions = model.predict(train_x)
test_predictions = model.predict(test_x)
print(f"Train accuracy = {accuracy(train_y_orig, train_predictions)}%")
print(f"Test accuracy = {accuracy(test_y_orig, test_predictions)}%")
| [
6738,
14659,
13,
7295,
13,
4164,
10466,
1330,
9922,
198,
6738,
14659,
13,
7295,
13,
40085,
11341,
1330,
7244,
27871,
320,
7509,
198,
6738,
14659,
13,
7295,
13,
3866,
36948,
1330,
530,
62,
8940,
198,
6738,
14659,
13,
8534,
13,
19849,
1... | 2.610236 | 508 |
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import random
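# The 'add' helper called further down is not shown in this snippet. Judging from the call
# site it appends one new node plus a list of edge weights to every existing node. A minimal
# sketch under that assumption (symmetric weights, zero self-weight):
def add(nodes, w, new_edges):
    new_node = len(nodes)
    for row, weight in zip(w, new_edges):
        row.append(weight)            # weight from each existing node to the new node
    w.append(list(new_edges) + [0])   # the new node's own row
    nodes.append(new_node)
    return nodes, w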
#Function to Delete Node
node_list = [0,1,2]
weights = [[0,2,3],[2,0,6],[3,6,0]]
node_list, weights = add(node_list, weights, [1,2,3])
print(node_list)
print(weights)
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
198,
2,
22203,
284,
23520,
19081,
628,
220,
220,
220,
220,
220,
220,
220,
220,
1... | 2.310924 | 119 |